"""
Multi-store sales forecasting system - data processing utilities.

Supports loading, filtering, and processing of multi-store sales data.
"""

import os
from datetime import datetime, timedelta
from typing import Optional, List, Tuple, Dict, Any

import numpy as np
import pandas as pd

from core.config import DEFAULT_DATA_PATH


def load_multi_store_data(file_path: Optional[str] = None,
                          store_id: Optional[str] = None,
                          product_id: Optional[str] = None,
                          start_date: Optional[str] = None,
                          end_date: Optional[str] = None) -> pd.DataFrame:
    """
    Load multi-store sales data, optionally filtered by store, product, and date range.

    Args:
        file_path: Path to the data file (.csv, .xlsx, or .parquet supported).
            When None, the default path defined in core.config is used.
        store_id: Store ID; when None, data for all stores is returned.
        product_id: Product ID; when None, data for all products is returned.
        start_date: Start date (YYYY-MM-DD).
        end_date: End date (YYYY-MM-DD).

    Returns:
        DataFrame: The filtered sales data.
    """
    # Fall back to the default path from the config module.
    if file_path is None:
        file_path = DEFAULT_DATA_PATH

    if not os.path.exists(file_path):
        raise FileNotFoundError(f"Data file not found: {file_path}")

    try:
        if file_path.endswith('.csv'):
            df = pd.read_csv(file_path)
        elif file_path.endswith('.xlsx'):
            df = pd.read_excel(file_path)
        elif file_path.endswith('.parquet'):
            df = pd.read_parquet(file_path)
        else:
            raise ValueError(f"Unsupported file format: {file_path}")

        print(f"Loaded data file: {file_path}")
    except Exception as e:
        print(f"Failed to load file {file_path}: {e}")
        raise

    # Filter by store.
    if store_id:
        df = df[df['store_id'] == store_id].copy()
        print(f"Filtered by store {store_id}; {len(df)} records remain")

    # Filter by product.
    if product_id:
        df = df[df['product_id'] == product_id].copy()
        print(f"Filtered by product {product_id}; {len(df)} records remain")

    # Standardize column names and dtypes.
    df = standardize_column_names(df)

    # Filter by date range after standardization, so 'date' is datetime.
    if start_date:
        try:
            start_date_dt = pd.to_datetime(start_date)
            # Ensure the comparison is between datetime objects.
            if 'date' in df.columns:
                df = df[df['date'] >= start_date_dt].copy()
                print(f"Filtered from {start_date_dt}; {len(df)} records remain")
        except (ValueError, TypeError):
            print(f"Warning: invalid start date format '{start_date}', ignored.")

    if end_date:
        try:
            end_date_dt = pd.to_datetime(end_date)
            # Ensure the comparison is between datetime objects.
            if 'date' in df.columns:
                df = df[df['date'] <= end_date_dt].copy()
                print(f"Filtered up to {end_date_dt}; {len(df)} records remain")
        except (ValueError, TypeError):
            print(f"Warning: invalid end date format '{end_date}', ignored.")

    if len(df) == 0:
        print("Warning: no data left after filtering")

    return df
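

# Hedged usage sketch of the loader above. The file path, store ID, and
# product ID below are made-up illustration values, not anything this
# module defines.
def _demo_load_filtered_data() -> pd.DataFrame:
    """Illustrative sketch only; never called by this module."""
    return load_multi_store_data(
        file_path="data/sales.parquet",  # hypothetical file
        store_id="S001",                 # hypothetical store ID
        product_id="P001",               # hypothetical product ID
        start_date="2024-01-01",
        end_date="2024-06-30",
    )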


def standardize_column_names(df: pd.DataFrame) -> pd.DataFrame:
    """
    Standardize column names to the format expected by the training code and the API.

    Args:
        df: The raw DataFrame.

    Returns:
        DataFrame: The DataFrame with standardized column names.
    """
    df = df.copy()

    # Define the column-name mapping and rename in place.
    rename_map = {
        'sales_quantity': 'sales',              # fix: match the raw column name
        'temperature_2m_mean': 'temperature',   # new: handle the temperature column
        'dayofweek': 'weekday'                  # fix: match the raw column name
    }
    df.rename(columns={k: v for k, v in rename_map.items() if k in df.columns}, inplace=True)

    # Ensure the 'date' column is datetime.
    if 'date' in df.columns:
        df['date'] = pd.to_datetime(df['date'], errors='coerce')
        df.dropna(subset=['date'], inplace=True)  # drop rows whose date cannot be parsed
    else:
        # Without a 'date' column we cannot proceed; return an empty DataFrame.
        return pd.DataFrame()

    # sales_amount: the raw data has no 'price' column and already contains
    # 'sales_amount', so the derived computation is disabled here.
    # if 'sales_amount' not in df.columns and 'sales' in df.columns and 'price' in df.columns:
    #     # Make sure sales and price are numeric first.
    #     df['sales'] = pd.to_numeric(df['sales'], errors='coerce')
    #     df['price'] = pd.to_numeric(df['price'], errors='coerce')
    #     df['sales_amount'] = df['sales'] * df['price']

    # Derive missing feature columns.
    if 'weekday' not in df.columns:
        df['weekday'] = df['date'].dt.dayofweek

    if 'month' not in df.columns:
        df['month'] = df['date'].dt.month

    # Fill in missing metadata columns.
    meta_columns = {
        'store_name': 'Unknown Store',
        'store_location': 'Unknown Location',
        'store_type': 'Unknown',
        'product_name': 'Unknown Product',
        'product_category': 'Unknown Category'
    }
    for col, default in meta_columns.items():
        if col not in df.columns:
            df[col] = default

    # Fill in missing boolean/numeric feature columns.
    default_features = {
        'is_holiday': False,
        'is_weekend': None,   # derived from 'weekday' below
        'is_promotion': False,
        'temperature': 20.0
    }

    for feature, default_value in default_features.items():
        if feature not in df.columns:
            if feature == 'is_weekend':
                df['is_weekend'] = df['weekday'].isin([5, 6])
            else:
                df[feature] = default_value

    # Enforce numeric dtypes.
    numeric_columns = ['sales', 'sales_amount', 'weekday', 'month', 'temperature']
    for col in numeric_columns:
        if col in df.columns:
            df[col] = pd.to_numeric(df[col], errors='coerce')

    # Enforce boolean dtypes.
    boolean_columns = ['is_holiday', 'is_weekend', 'is_promotion']
    for col in boolean_columns:
        if col in df.columns:
            df[col] = df[col].astype(bool)

    print(f"Standardization complete; available feature columns: "
          f"{[col for col in ['sales', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature'] if col in df.columns]}")

    return df
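

# Hedged sketch of standardize_column_names on a minimal synthetic frame:
# 'sales_quantity' is renamed to 'sales', 'temperature_2m_mean' to
# 'temperature', weekday/month are derived from 'date', and is_weekend
# comes out True for both rows (a Saturday and a Sunday). The sample
# values are made up for illustration.
def _demo_standardize() -> pd.DataFrame:
    """Illustrative sketch only; never called by this module."""
    raw = pd.DataFrame({
        'date': ['2024-01-06', '2024-01-07'],
        'sales_quantity': [10, 12],
        'temperature_2m_mean': [18.5, 19.0],
    })
    return standardize_column_names(raw)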


def get_available_stores(file_path: Optional[str] = None) -> List[Dict[str, Any]]:
    """
    List the stores available in the data file.

    Args:
        file_path: Path to the data file.

    Returns:
        List[Dict]: One info dict per store.
    """
    try:
        df = load_multi_store_data(file_path)

        if 'store_id' not in df.columns:
            print("The data file has no 'store_id' column")
            return []

        # Collect store info defensively, tolerating missing columns.
        store_info = []

        # Use drop_duplicates to get one row per unique store.
        stores_df = df.drop_duplicates(subset=['store_id'])

        for _, row in stores_df.iterrows():
            store_info.append({
                'store_id': row['store_id'],
                'store_name': row.get('store_name', f"Store {row['store_id']}"),
                'location': row.get('store_location', 'Unknown location'),
                'type': row.get('store_type', 'Standard'),
                'opening_date': row.get('opening_date', 'Unknown'),
            })

        return store_info
    except Exception as e:
        print(f"Failed to list stores: {e}")
        return []
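

# Hedged sketch: printing a one-line summary per store from the default
# data file.
def _demo_list_stores() -> None:
    """Illustrative sketch only; never called by this module."""
    for store in get_available_stores():
        print(f"{store['store_id']}: {store['store_name']} ({store['location']})")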


def get_available_products(file_path: Optional[str] = None,
                           store_id: Optional[str] = None) -> List[Dict[str, Any]]:
    """
    List the products available in the data file.

    Args:
        file_path: Path to the data file.
        store_id: Store ID; when None, products from all stores are returned.

    Returns:
        List[Dict]: One info dict per product.
    """
    try:
        df = load_multi_store_data(file_path, store_id=store_id)

        # Collect the unique product rows.
        product_columns = ['product_id', 'product_name']
        if 'product_category' in df.columns:
            product_columns.append('product_category')
        if 'unit_price' in df.columns:
            product_columns.append('unit_price')

        products = df[product_columns].drop_duplicates()

        return products.to_dict('records')
    except Exception as e:
        print(f"Failed to list products: {e}")
        return []
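

# Hedged sketch: restricting the product list to one store. "S001" is a
# made-up example ID.
def _demo_list_products() -> None:
    """Illustrative sketch only; never called by this module."""
    for product in get_available_products(store_id="S001"):
        print(f"{product['product_id']}: {product.get('product_name', 'N/A')}")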


def get_store_product_sales_data(store_id: str,
                                 product_id: str,
                                 file_path: Optional[str] = None) -> pd.DataFrame:
    """
    Get the sales data for one (store, product) pair, ready for model training.

    Args:
        store_id: Store ID.
        product_id: Product ID.
        file_path: Path to the data file.

    Returns:
        DataFrame: The processed sales data with the features the model needs.
    """
    # Load the data.
    df = load_multi_store_data(file_path, store_id=store_id, product_id=product_id)

    if len(df) == 0:
        raise ValueError(f"No sales data found for store {store_id}, product {product_id}")

    # Make sure the data is sorted by date.
    df = df.sort_values('date').copy()

    # Standardization already happened inside load_multi_store_data;
    # verify that the required columns exist.
    required_columns = ['sales', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']
    missing_columns = [col for col in required_columns if col not in df.columns]

    if missing_columns:
        print(f"Warning: columns {missing_columns} still missing after standardization")
        raise ValueError(f"Cannot build a complete feature set; missing columns: {missing_columns}")

    # All columns the model training needs (features plus target).
    final_columns = [
        'date', 'sales', 'product_id', 'product_name', 'store_id', 'store_name',
        'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature'
    ]

    # Keep only the columns that actually exist in the DataFrame.
    existing_columns = [col for col in final_columns if col in df.columns]

    # Return a DataFrame restricted to those required columns.
    return df[existing_columns]
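

# Hedged sketch: building a per-(store, product) training frame, with the
# error paths handled. The IDs are made-up example values.
def _demo_training_frame() -> Optional[pd.DataFrame]:
    """Illustrative sketch only; never called by this module."""
    try:
        return get_store_product_sales_data(store_id="S001", product_id="P001")
    except (FileNotFoundError, ValueError) as exc:
        print(f"No training data available: {exc}")
        return None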


def aggregate_multi_store_data(product_id: Optional[Any] = None,
                               store_id: Optional[Any] = None,
                               aggregation_method: str = 'sum',
                               file_path: Optional[str] = None) -> Optional[pd.DataFrame]:
    """
    Aggregate sales data (fixed to also accept lists of IDs).

    - When product_id(s) are given, aggregate the data for those products.
    - When store_id(s) are given, aggregate the data for those stores.
    """
    if file_path is None:
        file_path = DEFAULT_DATA_PATH

    try:
        # Load everything first, then filter.
        df = load_multi_store_data(file_path)
        if df.empty:
            raise ValueError("Data file is empty or failed to load")

        # Filter by store_id and product_id (single IDs or lists).
        if store_id:
            if isinstance(store_id, list):
                df = df[df['store_id'].isin(store_id)]
            else:
                df = df[df['store_id'] == store_id]

        if product_id:
            if isinstance(product_id, list):
                df = df[df['product_id'].isin(product_id)]
            else:
                df = df[df['product_id'] == product_id]

        if df.empty:
            raise ValueError("No data left after the store/product filter")

        # Name the aggregated entity.
        if store_id and not product_id:
            grouping_entity_name = df['store_name'].iloc[0] if len(df['store_id'].unique()) == 1 else "Aggregate of multiple stores"
        elif product_id and not store_id:
            grouping_entity_name = df['product_name'].iloc[0] if len(df['product_id'].unique()) == 1 else "Aggregate of multiple products"
        elif store_id and product_id:
            grouping_entity_name = f"{df['store_name'].iloc[0]} - {df['product_name'].iloc[0]}" if len(df['store_id'].unique()) == 1 and len(df['product_id'].unique()) == 1 else "Custom aggregate"
        else:
            grouping_entity_name = "Global aggregate model"

        # Aggregate by date.
        agg_df = df.groupby('date').agg({
            'sales': aggregation_method,
            'temperature': 'mean',
            'is_holiday': 'max',
            'is_weekend': 'max',
            'is_promotion': 'max',
            'weekday': 'first',
            'month': 'first'
        }).reset_index()

        agg_df['product_name'] = grouping_entity_name

        for col in ['is_holiday', 'is_weekend', 'is_promotion']:
            if col not in agg_df:
                agg_df[col] = 0

        return agg_df

    except Exception as e:
        print(f"Failed to aggregate data: {e}")
        return None
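

# Hedged sketch of the three aggregation modes supported above; all IDs
# are made-up example values.
def _demo_aggregation() -> None:
    """Illustrative sketch only; never called by this module."""
    # Global model: total daily sales across every store and product.
    global_df = aggregate_multi_store_data(aggregation_method='sum')
    # One product summed over a list of stores (list IDs are supported).
    multi_store_df = aggregate_multi_store_data(product_id='P001',
                                                store_id=['S001', 'S002'])
    # Mean (rather than total) daily sales for a single store.
    single_store_df = aggregate_multi_store_data(store_id='S001',
                                                 aggregation_method='mean')
    for name, frame in [('global', global_df),
                        ('multi-store', multi_store_df),
                        ('single-store', single_store_df)]:
        print(name, None if frame is None else frame.shape)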


def get_sales_statistics(file_path: Optional[str] = None,
                         store_id: Optional[str] = None,
                         product_id: Optional[str] = None) -> Dict[str, Any]:
    """
    Get summary statistics for the sales data.

    Args:
        file_path: Path to the data file.
        store_id: Store ID.
        product_id: Product ID.

    Returns:
        Dict: The statistics.
    """
    try:
        df = load_multi_store_data(file_path, store_id=store_id, product_id=product_id)

        if len(df) == 0:
            return {'error': 'No data'}

        # Note: after standardization the quantity column is named 'sales'
        # (renamed from 'sales_quantity'), so the quantity stats read from it.
        stats = {
            'total_records': len(df),
            'date_range': {
                'start': df['date'].min().strftime('%Y-%m-%d'),
                'end': df['date'].max().strftime('%Y-%m-%d')
            },
            'stores': df['store_id'].nunique(),
            'products': df['product_id'].nunique(),
            'total_sales_amount': float(df['sales_amount'].sum()) if 'sales_amount' in df.columns else 0,
            'total_quantity': int(df['sales'].sum()) if 'sales' in df.columns else 0,
            'avg_daily_sales': float(df.groupby('date')['sales'].sum().mean()) if 'sales' in df.columns else 0
        }

        return stats

    except Exception as e:
        return {'error': str(e)}


# Backward-compatible helpers.
def load_data(file_path=None, store_id=None):
    """
    Backward-compatible data loading wrapper.
    """
    return load_multi_store_data(file_path, store_id=store_id)
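

# Minimal smoke test, assuming DEFAULT_DATA_PATH points at a readable file
# with the expected columns; guarded so importing this module has no side
# effects.
if __name__ == "__main__":
    try:
        data = load_data()
        print(f"Loaded {len(data)} rows")
        print(get_sales_statistics())
    except FileNotFoundError as exc:
        print(f"Smoke test skipped: {exc}")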