Compare commits: e1980b3755 ... 87df49f764 (6 commits)

Commits:
- 87df49f764
- 28bae35783
- 341d8d179c
- 5b2cdfa74a
- 9bd824c389
- 7a4bfedcaa
@@ -237,11 +237,13 @@ body {
 }
 
 .logo-container {
-  padding: 20px;
+  padding: 10px 20px;
   display: flex;
   align-items: center;
   gap: 12px;
   border-bottom: 1px solid var(--card-border);
+  height: 60px;
+  box-sizing: border-box;
 }
 
 .logo-icon {
@@ -351,4 +353,4 @@ body {
 ::-webkit-scrollbar-thumb:hover {
   background: rgba(93, 156, 255, 0.5);
 }
-</style>
+</style>
@@ -242,7 +242,7 @@ watch(() => props.storeId, () => {
 
 .product-name {
   font-weight: 500;
-  color: #303133;
+  color: var(--el-text-color-primary);
   margin-bottom: 2px;
 }
 
@@ -254,7 +254,7 @@ watch(() => props.storeId, () => {
 }
 
 .product-id {
-  color: #909399;
+  color: var(--el-text-color-secondary);
   margin-right: 8px;
 }
@@ -242,7 +242,7 @@ watch(() => props.filterByStatus, () => {
 
 .store-name {
   font-weight: 500;
-  color: #303133;
+  color: var(--el-text-color-primary);
   margin-bottom: 2px;
 }
 
@@ -254,7 +254,7 @@ watch(() => props.filterByStatus, () => {
 }
 
 .store-location {
-  color: #909399;
+  color: var(--el-text-color-secondary);
   margin-right: 8px;
 }
@@ -5,6 +5,7 @@ import ElementPlus from 'element-plus'
 import 'element-plus/dist/index.css'
 import * as ElementPlusIconsVue from '@element-plus/icons-vue'
 import axios from 'axios'
+import zhCn from 'element-plus/dist/locale/zh-cn.mjs'
 
 // Import the Google Roboto font
 import '@/assets/fonts.css'
@@ -22,6 +23,6 @@ for (const [key, component] of Object.entries(ElementPlusIconsVue)) {
 }
 
 app.use(router)
-app.use(ElementPlus)
+app.use(ElementPlus, { locale: zhCn })
 
 app.mount('#app')
@@ -90,7 +90,7 @@
 
 <script setup>
 import { ref, computed, onMounted } from 'vue'
-import { ArrowRight, DataAnalysis, TrendCharts, CircleCheckFilled, WarningFilled, CircleCloseFilled } from '@element-plus/icons-vue'
+import { ArrowRight, DataAnalysis, TrendCharts, CircleCheckFilled, WarningFilled, CircleCloseFilled, Shop } from '@element-plus/icons-vue'
 
 // Mock data
 const data = ref({
@@ -102,10 +102,10 @@ const data = ref({
 // Feature card data
 const featureCards = [
   {
-    title: '数据管理',
-    description: '管理产品和销售数据',
-    icon: 'FolderOpened',
-    path: '/data',
+    title: '店铺管理',
+    description: '管理店铺信息和库存',
+    icon: 'Shop',
+    path: '/store-management',
     type: 'data'
   },
   {
@@ -1,6 +1,6 @@
 <template>
   <div class="store-management-container">
-    <el-card>
+    <el-card class="full-height-card">
       <template #header>
         <div class="card-header">
           <span>店铺管理</span>
@@ -18,67 +18,70 @@
       </template>
 
-      <!-- Search and filter -->
-      <div class="filter-section">
-        <el-row :gutter="20">
-          <el-col :span="6">
-            <el-input
-              v-model="searchQuery"
-              placeholder="搜索店铺名称或ID"
-              clearable
-              @input="handleSearch"
-            >
-              <template #prefix>
-                <el-icon><Search /></el-icon>
-              </template>
-            </el-input>
-          </el-col>
-          <el-col :span="4">
-            <el-select v-model="statusFilter" placeholder="状态筛选" clearable @change="handleFilter">
-              <el-option label="全部状态" value="" />
-              <el-option label="营业中" value="active" />
-              <el-option label="暂停营业" value="inactive" />
-            </el-select>
-          </el-col>
-          <el-col :span="4">
-            <el-select v-model="typeFilter" placeholder="类型筛选" clearable @change="handleFilter">
-              <el-option label="全部类型" value="" />
-              <el-option label="旗舰店" value="旗舰店" />
-              <el-option label="标准店" value="标准店" />
-              <el-option label="便民店" value="便民店" />
-              <el-option label="社区店" value="社区店" />
-            </el-select>
-          </el-col>
-        </el-row>
-      </div>
+      <div class="table-container" ref="tableContainerRef">
+        <div class="filter-section" ref="filterSectionRef">
+          <el-row :gutter="20" align="middle">
+            <el-col :span="6">
+              <el-input
+                v-model="searchQuery"
+                placeholder="搜索店铺名称或ID"
+                clearable
+                @input="handleSearch"
+              >
+                <template #prefix>
+                  <el-icon><Search /></el-icon>
+                </template>
+              </el-input>
+            </el-col>
+            <el-col :span="4">
+              <el-select v-model="statusFilter" placeholder="状态筛选" clearable @change="handleFilter" style="width: 100%;">
+                <el-option label="全部状态" value="" />
+                <el-option label="营业中" value="active" />
+                <el-option label="暂停营业" value="inactive" />
+              </el-select>
+            </el-col>
+            <el-col :span="4">
+              <el-select v-model="typeFilter" placeholder="类型筛选" clearable @change="handleFilter" style="width: 100%;">
+                <el-option label="全部类型" value="" />
+                <el-option label="旗舰店" value="旗舰店" />
+                <el-option label="标准店" value="标准店" />
+                <el-option label="便民店" value="便民店" />
+                <el-option label="社区店" value="社区店" />
+              </el-select>
+            </el-col>
+          </el-row>
+        </div>
 
-      <!-- Store list -->
-      <el-table
-        :data="filteredStores"
-        v-loading="loading"
-        stripe
-        @selection-change="handleSelectionChange"
-      >
+        <!-- Store list -->
+        <el-table
+          :data="pagedStores"
+          v-loading="loading"
+          stripe
+          @selection-change="handleSelectionChange"
+          class="store-table"
+          :height="tableHeight"
+        >
         <el-table-column type="selection" width="55" />
-        <el-table-column prop="store_id" label="店铺ID" width="100" />
-        <el-table-column prop="store_name" label="店铺名称" width="150" />
-        <el-table-column prop="location" label="位置" width="200" />
-        <el-table-column prop="type" label="类型" width="100">
+        <el-table-column prop="store_id" label="店铺ID" width="100" align="center" />
+        <el-table-column prop="store_name" label="店铺名称" width="250" align="center" show-overflow-tooltip />
+        <el-table-column prop="location" label="位置" width="250" align="center" show-overflow-tooltip/>
+        <el-table-column prop="type" label="类型" width="120" align="center">
          <template #default="{ row }">
            <el-tag :type="getStoreTypeTag(row.type)">
              {{ row.type }}
            </el-tag>
          </template>
        </el-table-column>
-       <el-table-column prop="size" label="面积(㎡)" width="100" align="right" />
-       <el-table-column prop="opening_date" label="开业日期" width="120" />
-       <el-table-column prop="status" label="状态" width="100">
+       <el-table-column prop="size" label="面积(㎡)" width="150" align="center"/>
+       <el-table-column prop="opening_date" label="开业日期" width="150" align="center"/>
+       <el-table-column prop="status" label="状态" width="150" align="center">
         <template #default="{ row }">
           <el-tag :type="row.status === 'active' ? 'success' : 'danger'">
             {{ row.status === 'active' ? '营业中' : '暂停营业' }}
           </el-tag>
         </template>
       </el-table-column>
-      <el-table-column label="操作" width="200" fixed="right">
+      <el-table-column label="操作" width="200" fixed="right" align="center">
        <template #default="{ row }">
          <el-button link type="primary" @click="viewStoreDetails(row)">
            详情
@@ -99,14 +102,16 @@
       <!-- Pagination -->
       <el-pagination
-        v-if="total > pageSize"
-        layout="total, sizes, prev, pager, next, jumper"
+        layout="total, prev, pager, next, jumper"
         :total="total"
         :page-size="pageSize"
-        :page-sizes="[10, 20, 50, 100]"
         :current-page="currentPage"
         @current-change="handlePageChange"
-        @size-change="handleSizeChange"
+        class="pagination"
+        ref="paginationRef"
       />
+      </div>
     </el-card>
 
     <!-- New/edit store dialog -->
@@ -115,6 +120,7 @@
       :title="isEditing ? '编辑店铺' : '新增店铺'"
       width="600px"
       @close="resetForm"
+      class="form-dialog"
     >
       <el-form
         ref="formRef"
@@ -229,6 +235,7 @@
       </div>
     </el-dialog>
 
+
     <!-- Store products dialog -->
     <el-dialog
       v-model="productsDialogVisible"
@@ -255,7 +262,7 @@
 </template>
 
 <script setup>
-import { ref, onMounted, computed } from 'vue'
+import { ref, onMounted, onUnmounted, computed, nextTick } from 'vue'
 import axios from 'axios'
 import { ElMessage, ElMessageBox } from 'element-plus'
 import { Plus, Refresh, Search } from '@element-plus/icons-vue'
@@ -272,9 +279,15 @@ const typeFilter = ref('')
 
 // Pagination
 const currentPage = ref(1)
-const pageSize = ref(20)
+const pageSize = ref(12)
 const total = ref(0)
 
+// Layout and heights
+const tableContainerRef = ref(null);
+const filterSectionRef = ref(null);
+const paginationRef = ref(null);
+const tableHeight = ref(400); // default height
+
 // Dialogs
 const dialogVisible = ref(false)
 const detailDialogVisible = ref(false)
@@ -319,34 +332,34 @@ const rules = {
 
 // Computed properties
 const filteredStores = computed(() => {
-  let result = stores.value
+  let result = stores.value;
 
   // Search filter
   if (searchQuery.value) {
-    const query = searchQuery.value.toLowerCase()
-    result = result.filter(store =>
-      store.store_name.toLowerCase().includes(query) ||
-      store.store_id.toLowerCase().includes(query)
-    )
+    const query = searchQuery.value.toLowerCase();
+    result = result.filter(
+      (store) =>
+        store.store_name.toLowerCase().includes(query) ||
+        store.store_id.toLowerCase().includes(query)
+    );
   }
 
   // Status filter
   if (statusFilter.value) {
-    result = result.filter(store => store.status === statusFilter.value)
+    result = result.filter((store) => store.status === statusFilter.value);
   }
 
   // Type filter
   if (typeFilter.value) {
-    result = result.filter(store => store.type === typeFilter.value)
+    result = result.filter((store) => store.type === typeFilter.value);
   }
 
-  total.value = result.length
-
-  // Pagination
-  const start = (currentPage.value - 1) * pageSize.value
-  const end = start + pageSize.value
-  return result.slice(start, end)
-})
+  return result;
+});
+
+const pagedStores = computed(() => {
+  const start = (currentPage.value - 1) * pageSize.value;
+  const end = start + pageSize.value;
+  total.value = filteredStores.value.length;
+  return filteredStores.value.slice(start, end);
+});
 
 // Methods
 const fetchStores = async () => {
@@ -516,14 +529,49 @@ const viewStoreProducts = async (store) => {
 }
 
 // Lifecycle
+const updateTableHeight = () => {
+  nextTick(() => {
+    if (tableContainerRef.value) {
+      const containerHeight = tableContainerRef.value.clientHeight;
+      const filterHeight = filterSectionRef.value?.offsetHeight || 0;
+      const paginationHeight = paginationRef.value?.$el.offsetHeight || 0;
+
+      // Subtract the filter area, the pagination area, and some spacing
+      const calculatedHeight = containerHeight - filterHeight - paginationHeight - 20;
+      tableHeight.value = calculatedHeight > 200 ? calculatedHeight : 200; // minimum height
+    }
+  });
+};
+
 onMounted(() => {
-  fetchStores()
-})
+  fetchStores();
+  updateTableHeight();
+  window.addEventListener('resize', updateTableHeight);
+});
+
+onUnmounted(() => {
+  window.removeEventListener('resize', updateTableHeight);
+});
 </script>
 
 <style scoped>
 .store-management-container {
   height: 97%;
   padding: 6px 10px 15px 15px;
 }
 
+.full-height-card {
+  height: 100%;
+  display: flex;
+  flex-direction: column;
+}
+
+:deep(.el-card__body) {
+  flex-grow: 1;
+  display: flex;
+  flex-direction: column;
+  padding: 20px;
+  overflow: hidden;
+}
+
 .card-header {
@@ -537,21 +585,35 @@ onMounted(() => {
   gap: 10px;
 }
 
+.table-container {
+  flex-grow: 1;
+  display: flex;
+  flex-direction: column;
+  overflow: hidden; /* make sure the container itself does not scroll */
+}
+
 .filter-section {
-  margin-bottom: 20px;
-  padding: 20px;
-  background-color: #f8f9fa;
-  border-radius: 8px;
+  padding-bottom: 20px;
 }
 
+
+.store-table {
+  width: 100%;
+}
+
+:deep(.store-table .el-table__cell) {
+  padding: 12px 2px;
+}
+
 .pagination {
   margin-top: 20px;
   display: flex;
   justify-content: center;
+  align-items: center;
+  padding: 14px 0;
 }
 
 .store-detail {
-  padding: 10px 0;
+  padding: 5px 0;
 }
 
 .store-stats {
@@ -579,4 +641,9 @@ onMounted(() => {
     gap: 5px;
   }
 }
-</style>
+
+.form-dialog :deep(.el-dialog) {
+  background: transparent;
+  box-shadow: none;
+}
+</style>
@@ -191,7 +191,7 @@ const startPrediction = async () => {
       start_date: form.start_date,
       analyze_result: form.analyze_result
     }
-    const response = await axios.post('/api/predict', payload)
+    const response = await axios.post('/api/prediction', payload)
     if (response.data.status === 'success') {
       predictionResult.value = response.data.data
       ElMessage.success('预测完成!')
@@ -213,7 +213,7 @@ const renderChart = () => {
     chart.destroy()
   }
   const predictions = predictionResult.value.predictions
-  const labels = predictions.map(p => p.date)
+  const labels = predictions.map(p => new Date(p.date).toLocaleDateString('zh-CN', { weekday: 'short', year: 'numeric', month: 'long', day: 'numeric' }))
   const data = predictions.map(p => p.sales)
   chart = new Chart(chartCanvas.value, {
     type: 'line',
@@ -200,6 +200,15 @@ const handleModelTypeChange = () => {
 }
 
 const startPrediction = async () => {
+  if (!form.product_id) {
+    ElMessage.error('请选择目标药品')
+    return
+  }
+  if (!form.model_type) {
+    ElMessage.error('请选择算法类型')
+    return
+  }
+
   try {
     predicting.value = true
     const payload = {
@@ -210,7 +219,7 @@ const startPrediction = async () => {
       analyze_result: form.analyze_result,
       product_id: form.product_id
     }
-    const response = await axios.post('/api/predict', payload)
+    const response = await axios.post('/api/prediction', payload)
     if (response.data.status === 'success') {
       predictionResult.value = response.data.data
       ElMessage.success('预测完成!')
@@ -232,7 +241,7 @@ const renderChart = () => {
     chart.destroy()
   }
   const predictions = predictionResult.value.predictions
-  const labels = predictions.map(p => p.date)
+  const labels = predictions.map(p => new Date(p.date).toLocaleDateString('zh-CN', { weekday: 'short', year: 'numeric', month: 'long', day: 'numeric' }))
   const data = predictions.map(p => p.sales)
   chart = new Chart(chartCanvas.value, {
     type: 'line',
@@ -200,6 +200,15 @@ const handleModelTypeChange = () => {
 }
 
 const startPrediction = async () => {
+  if (!form.store_id) {
+    ElMessage.error('请选择目标店铺')
+    return
+  }
+  if (!form.model_type) {
+    ElMessage.error('请选择算法类型')
+    return
+  }
+
   try {
     predicting.value = true
     const payload = {
@@ -208,9 +217,10 @@ const startPrediction = async () => {
       future_days: form.future_days,
       start_date: form.start_date,
       analyze_result: form.analyze_result,
-      store_id: form.store_id
+      store_id: form.store_id,
+      training_mode: form.training_mode
     }
-    const response = await axios.post('/api/predict', payload)
+    const response = await axios.post('/api/prediction', payload)
     if (response.data.status === 'success') {
       predictionResult.value = response.data.data
       ElMessage.success('预测完成!')
@@ -232,7 +242,7 @@ const renderChart = () => {
     chart.destroy()
   }
   const predictions = predictionResult.value.predictions
-  const labels = predictions.map(p => p.date)
+  const labels = predictions.map(p => new Date(p.date).toLocaleDateString('zh-CN', { weekday: 'short', year: 'numeric', month: 'long', day: 'numeric' }))
   const data = predictions.map(p => p.sales)
   chart = new Chart(chartCanvas.value, {
     type: 'line',
@@ -244,7 +244,12 @@
         prop="version"
         label="版本"
         width="80"
-      />
+      >
+        <template #default="{ row }">
+          <el-tag v-if="row.version" type="primary" size="small">v{{ row.version }}</el-tag>
+          <span v-else>-</span>
+        </template>
+      </el-table-column>
       <el-table-column prop="status" label="状态" width="100">
         <template #default="{ row }">
           <el-tag :type="statusTag(row.status)">
@@ -266,11 +271,11 @@
             <div v-if="row.status === 'completed'">
               <h4>评估指标</h4>
               <pre>{{ JSON.stringify(row.metrics, null, 2) }}</pre>
-              <div v-if="row.version">
+              <!-- <div v-if="row.version">
                 <h4>版本信息</h4>
                 <p><strong>版本:</strong> {{ row.version }}</p>
                 <p><strong>模型路径:</strong> {{ row.model_path }}</p>
-              </div>
+              </div> -->
             </div>
             <div v-if="row.status === 'failed'">
               <h4>错误信息</h4>
@@ -213,7 +213,12 @@
         prop="version"
         label="版本"
         width="80"
-      />
+      >
+        <template #default="{ row }">
+          <el-tag v-if="row.version" type="primary" size="small">v{{ row.version }}</el-tag>
+          <span v-else>-</span>
+        </template>
+      </el-table-column>
       <el-table-column prop="status" label="状态" width="100">
         <template #default="{ row }">
           <el-tag :type="statusTag(row.status)">
@@ -235,11 +240,11 @@
             <div v-if="row.status === 'completed'">
               <h4>评估指标</h4>
               <pre>{{ JSON.stringify(row.metrics, null, 2) }}</pre>
-              <div v-if="row.version">
+              <!-- <div v-if="row.version">
                 <h4>版本信息</h4>
                 <p><strong>版本:</strong> {{ row.version }}</p>
                 <p><strong>模型路径:</strong> {{ row.model_path }}</p>
-              </div>
+              </div> -->
             </div>
             <div v-if="row.status === 'failed'">
               <h4>错误信息</h4>
@@ -428,8 +433,8 @@ const initWebSocket = () => {
     };
   }
 
-  // Refresh the task list
-  fetchTrainingTasks();
+  // Refresh the task list (commented out: the WebSocket already delivers the latest data)
+  // fetchTrainingTasks();
 });
 
 socket.on("disconnect", () => {
@@ -563,7 +568,7 @@ const startTraining = async () => {
       product_id: form.product_id,
       store_id: form.data_scope === 'global' ? null : form.store_id,
       model_type: form.model_type,
-      version: response.data.new_version || "v1",
+      version: response.data.path_info?.version || response.data.new_version || "v1",
       status: "starting",
       progress: 0,
       message: "正在启动药品训练...",
@@ -228,7 +228,12 @@
         prop="version"
         label="版本"
         width="80"
-      />
+      >
+        <template #default="{ row }">
+          <el-tag v-if="row.version" type="primary" size="small">v{{ row.version }}</el-tag>
+          <span v-else>-</span>
+        </template>
+      </el-table-column>
       <el-table-column prop="status" label="状态" width="100">
         <template #default="{ row }">
           <el-tag :type="statusTag(row.status)">
@@ -250,11 +255,11 @@
             <div v-if="row.status === 'completed'">
               <h4>评估指标</h4>
               <pre>{{ JSON.stringify(row.metrics, null, 2) }}</pre>
-              <div v-if="row.version">
+              <!-- <div v-if="row.version">
                 <h4>版本信息</h4>
                 <p><strong>版本:</strong> {{ row.version }}</p>
                 <p><strong>模型路径:</strong> {{ row.model_path }}</p>
-              </div>
+              </div> -->
             </div>
             <div v-if="row.status === 'failed'">
               <h4>错误信息</h4>
Binary file not shown.
@@ -56,3 +56,4 @@ tzdata==2025.2
 werkzeug==3.1.3
 win32-setctime==1.2.0
 wsproto==1.2.0
+xgboost
server/api.py (429 changed lines)
@@ -11,12 +11,14 @@ sys.path.append(current_dir)
 # Use the new modernized logging system
 from utils.logging_config import setup_api_logging, get_logger
 from utils.training_process_manager import get_training_manager
+from utils.file_save import ModelPathManager
 
 # Initialize the modernized logging system
 logger = setup_api_logging(log_dir=".", log_level="INFO")
 
 # Get the training process manager
 training_manager = get_training_manager()
+path_manager = ModelPathManager()
 
 import json
 import pandas as pd
@@ -33,7 +35,6 @@ from flask_socketio import SocketIO, emit, join_room, leave_room
 from flasgger import Swagger
 from werkzeug.utils import secure_filename
 import sqlite3
-import traceback
 import time
 import threading
 
@@ -45,6 +46,7 @@ from trainers.mlstm_trainer import train_product_model_with_mlstm
 from trainers.kan_trainer import train_product_model_with_kan
 from trainers.tcn_trainer import train_product_model_with_tcn
 from trainers.transformer_trainer import train_product_model_with_transformer
+from trainers.xgboost_trainer import train_product_model_with_xgboost
 
 # Import prediction functions
 from predictors.model_predictor import load_model_and_predict
@@ -55,9 +57,7 @@ from analysis.metrics import evaluate_model, compare_models
 
 # Import configuration and version management
 from core.config import (
-    DEFAULT_MODEL_DIR, WEBSOCKET_NAMESPACE,
-    get_model_versions, get_latest_model_version, get_next_model_version,
-    get_model_file_path, save_model_version_info
+    DEFAULT_MODEL_DIR, WEBSOCKET_NAMESPACE
 )
 
 # Import multi-store data utilities
@@ -943,7 +943,7 @@ def get_all_training_tasks():
             'type': 'object',
             'properties': {
                 'product_id': {'type': 'string', 'description': '例如 P001'},
-                'model_type': {'type': 'string', 'enum': ['mlstm', 'transformer', 'kan', 'optimized_kan', 'tcn'], 'description': '要训练的模型类型'},
+                'model_type': {'type': 'string', 'enum': ['mlstm', 'transformer', 'kan', 'optimized_kan', 'tcn', 'xgboost'], 'description': '要训练的模型类型'},
                 'store_id': {'type': 'string', 'description': '店铺ID,如 S001。为空时使用全局聚合数据'},
                 'epochs': {'type': 'integer', 'default': 50, 'description': '训练轮次'}
             },
@@ -1007,33 +1007,43 @@ def start_training():
             pass
 
         # Validate the model type
-        valid_model_types = ['mlstm', 'kan', 'optimized_kan', 'transformer', 'tcn']
+        valid_model_types = ['mlstm', 'kan', 'optimized_kan', 'transformer', 'tcn', 'xgboost']
         if model_type not in valid_model_types:
             return jsonify({'error': '无效的模型类型'}), 400
 
-        # Submit the task through the new training process manager
+        # Use the new path and version manager
         try:
-            task_id = training_manager.submit_task(
-                product_id=product_id or "unknown",
-                model_type=model_type,
-                training_mode=training_mode,
-                store_id=store_id,
-                epochs=epochs
-            )
+            # Fetch all path and version info from the new module.
+            # Remove model_type and training_mode to avoid duplicate keyword arguments.
+            data_for_path = data.copy()
+            data_for_path.pop('model_type', None)
+            data_for_path.pop('training_mode', None)
+            path_info = path_manager.get_model_paths(
+                training_mode=training_mode,
+                store_id=store_id,
+                model_type=model_type,
+                **data_for_path  # pass the rest of the payload through
+            )
+
+            # Submit the task through the new training process manager.
+            # Note: both the full request data and the path info are handed to the background task.
+            task_id = training_manager.submit_task(
+                training_params=data,
+                path_info=path_info
+            )
 
             logger.info(f"🚀 训练任务已提交到进程管理器: {task_id[:8]}")
+            logger.info(f"🗂️ 路径信息: {path_info}")
 
             return jsonify({
                 'message': '模型训练已开始(使用独立进程)',
                 'task_id': task_id,
                 'training_mode': training_mode,
                 'model_type': model_type,
                 'product_id': product_id,
-                'epochs': epochs
+                'epochs': epochs,
+                'path_info': path_info
             })
 
         except Exception as e:
+            import traceback  # imported here to fix an UnboundLocalError
             logger.error(f"❌ 提交训练任务失败: {str(e)}")
+            traceback.print_exc()  # print the full error
            return jsonify({'error': f'启动训练任务失败: {str(e)}'}), 500
 
 # The old training logic has been replaced by the modernized process manager
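A note on the kwargs handling in the hunk above: model_type and training_mode are passed to get_model_paths explicitly, so if the copied payload still contained those keys the **-expansion would raise "TypeError: got multiple values for keyword argument" — that is exactly why the diff pops them first. A minimal standalone sketch of the pattern (the real signature of ModelPathManager.get_model_paths is not shown in this diff, so the function below is a hypothetical stand-in):

    # Sketch: why duplicated keys must be popped before **-expanding a payload
    def get_model_paths(training_mode, store_id, model_type, **extra):
        # The real ModelPathManager would derive version, identifier and
        # model_path here; this stand-in just echoes its inputs.
        return {"training_mode": training_mode, "model_type": model_type, **extra}

    data = {"model_type": "tcn", "training_mode": "store", "epochs": 50}
    data_for_path = data.copy()
    data_for_path.pop("model_type", None)     # would otherwise collide with the explicit kwarg
    data_for_path.pop("training_mode", None)
    path_info = get_model_paths(training_mode="store", store_id="S001",
                                model_type="tcn", **data_for_path)
    # path_info == {"training_mode": "store", "model_type": "tcn", "store_id": "S001", "epochs": 50}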
@@ -1089,16 +1099,9 @@ def start_training():
         thread_safe_print(f"⚙️ 配置参数: 共 {epochs} 个轮次", "[CONFIG]")
         logger.info(f"📋 任务详情: 训练 {model_type} 模型 - {scope_msg}, 轮次: {epochs}")
 
-        # Generate the version number and model identifier from the training mode
-        if training_mode == 'product':
-            model_identifier = product_id
-            version = get_next_model_version(product_id, model_type) if version is None else version
-        elif training_mode == 'store':
-            model_identifier = f"store_{store_id}"
-            version = get_next_model_version(f"store_{store_id}", model_type) if version is None else version
-        elif training_mode == 'global':
-            model_identifier = "global"
-            version = get_next_model_version("global", model_type) if version is None else version
+        # The version number and model identifier are now provided by path_info
+        version = path_info.get('version', 'v_unknown')
+        model_identifier = path_info.get('identifier', 'unknown_identifier')
 
         thread_safe_print(f"🏷️ 版本信息: 版本号 {version}, 模型标识: {model_identifier}", "[VERSION]")
         logger.info(f"🏷️ 版本信息: 版本号 {version}, 模型标识: {model_identifier}")
@@ -1195,8 +1198,8 @@ def start_training():
             thread_safe_print("⚠️ 训练指标为空", "[WARNING]")
         logger.info(f"📈 训练完成 - 结果类型: {type(metrics)}, 内容: {metrics}")
 
-        # Update the model path through version management
-        model_path = get_model_file_path(model_identifier, model_type, version)
+        # The model path is now taken directly from path_info
+        model_path = path_info.get('model_path', 'unknown_path')
         thread_safe_print(f"💾 模型保存路径: {model_path}", "[SAVE]")
         logger.info(f"💾 模型保存路径: {model_path}")
 
@@ -1209,8 +1212,8 @@ def start_training():
         print(f"✔️ 任务状态更新: 已完成, 版本: {version}", flush=True)
         logger.info(f"✔️ 任务状态更新: 已完成, 版本: {version}, 任务ID: {task_id}")
 
-        # Save model version info to the database
-        save_model_version_info(product_id, model_type, version, model_path, metrics)
+        # Saving model version info should now be handled in one central place; disabled here for now
+        # save_model_version_info(product_id, model_type, version, model_path, metrics)
 
         # Finish the training progress manager
         progress_manager.finish_training(success=True)
@@ -1423,9 +1426,10 @@ def get_training_status(task_id):
             'type': 'object',
             'properties': {
                 'product_id': {'type': 'string'},
-                'model_type': {'type': 'string', 'enum': ['mlstm', 'transformer', 'kan', 'optimized_kan', 'tcn']},
+                'model_type': {'type': 'string', 'enum': ['mlstm', 'transformer', 'kan', 'optimized_kan', 'tcn', 'xgboost']},
                 'store_id': {'type': 'string', 'description': '店铺ID,如 S001。为空时使用全局模型'},
                 'version': {'type': 'string'},
+                'training_mode': {'type': 'string', 'enum': ['product', 'store', 'global'], 'default': 'product'},
                 'future_days': {'type': 'integer'},
                 'include_visualization': {'type': 'boolean'},
                 'start_date': {'type': 'string', 'description': '预测起始日期,格式为YYYY-MM-DD'}
@@ -1468,127 +1472,113 @@ def get_training_status(task_id):
 def predict():
     """
     使用指定的模型进行预测
     ---
     tags:
       - 模型预测
     parameters:
       - in: body
         name: body
         schema:
           type: object
           required:
             - product_id
             - model_type
           properties:
             product_id:
               type: string
               description: 产品ID
             model_type:
               type: string
               description: "模型类型 (mlstm, kan, transformer)"
             version:
               type: string
               description: "模型版本 (v1, v2, v3 等),如果不指定则使用最新版本"
             future_days:
               type: integer
               description: 预测未来天数
               default: 7
             start_date:
               type: string
               description: 预测起始日期,格式为YYYY-MM-DD
               default: ''
     responses:
       200:
         description: 预测成功
       400:
         description: 请求参数错误
       404:
         description: 模型文件未找到
     """
     try:
         data = request.json
-        product_id = data.get('product_id')
+
+        # Read all parameters from the request
+        training_mode = data.get('training_mode', 'product')
         model_type = data.get('model_type')
-        store_id = data.get('store_id')  # new store ID parameter
-        version = data.get('version')  # new version parameter
         future_days = int(data.get('future_days', 7))
         start_date = data.get('start_date', '')
-        include_visualization = data.get('include_visualization', False)
+        include_visualization = data.get('include_visualization', True)
 
-        scope_msg = f", store_id={store_id}" if store_id else ", 全局模型"
-        print(f"API接收到预测请求: product_id={product_id}, model_type={model_type}, version={version}{scope_msg}, future_days={future_days}, start_date={start_date}")
-
-        if not product_id or not model_type:
-            return jsonify({"status": "error", "error": "product_id 和 model_type 是必需的"}), 400
-
-        # Get the product name
-        product_name = get_product_name(product_id)
-        if not product_name:
-            product_name = product_id
-
-        # Resolve the model ID from the requested version
-        if version:
-            # A version was specified; build the versioned model ID
-            model_id = f"{product_id}_{model_type}_{version}"
-            # Check that the specified model version exists
-            model_file_path = get_model_file_path(product_id, model_type, version)
-            if not os.path.exists(model_file_path):
-                return jsonify({"status": "error", "error": f"未找到产品 {product_id} 的 {model_type} 类型模型版本 {version}"}), 404
-        else:
-            # No version specified; use the latest one
-            latest_version = get_latest_model_version(product_id, model_type)
-            if latest_version:
-                model_id = f"{product_id}_{model_type}_{latest_version}"
-                version = latest_version
-            else:
-                # Fall back to legacy, unversioned models
-                model_id = get_latest_model_id(model_type, product_id)
-                if not model_id:
-                    return jsonify({"status": "error", "error": f"未找到产品 {product_id} 的 {model_type} 类型模型"}), 404
+        if training_mode == 'product':
+            product_id = data.get('product_id')
+            version_prefix = data.get('version')
+            store_id = data.get('store_id')  # may be None
+            if not all([model_type, version_prefix, product_id]):
+                return jsonify({"status": "error", "message": "按药品预测缺少必需参数: model_type, version, product_id"}), 400
+
+        elif training_mode == 'store':
+            store_id = data.get('store_id')
+            if not all([store_id, model_type]):
+                return jsonify({"status": "error", "message": "按店铺预测缺少必需参数: store_id, model_type"}), 400
+
+            # Use a regex to find the latest store model version
+            import re
+            pattern = re.compile(rf"store_{store_id}_.*_{model_type}_v\d+_model\.pth")
+            model_files = os.listdir(DEFAULT_MODEL_DIR)
+
+            found_versions = [f.replace("_model.pth", "") for f in model_files if pattern.match(f)]
+
+            if not found_versions:
+                return jsonify({"status": "error", "message": f"找不到店铺 {store_id} 的 {model_type} 模型"}), 404
+
+            # Sort by version number, descending, to find the latest one
+            sorted_versions = sorted(found_versions, key=lambda s: int(s.split('_v')[-1]), reverse=True)
+            version_prefix = sorted_versions[0]
+
+            # Extract the product_id from the version prefix
+            try:
+                # Format: store_{store_id}_{product_id}_{model_type}_v{version}
+                parts = version_prefix.split('_')
+                product_id = parts[2]
+                logger.info(f"自动选择最新版本 '{version_prefix}',并提取到药品ID: {product_id}")
+            except IndexError:
+                return jsonify({"status": "error", "message": f"无法从模型版本 '{version_prefix}' 中解析药品ID"}), 500
+
+        else:  # other or unknown modes
+            product_id = data.get('product_id')
+            version_prefix = data.get('version')
+            store_id = data.get('store_id')
+            if not all([model_type, version_prefix, product_id]):
+                return jsonify({"status": "error", "message": "缺少必需参数: model_type, version, product_id"}), 400
+
+        # Build the model filename directly from version_prefix
+        model_filename = f"{version_prefix}_model.pth"
+        model_file_path = os.path.join(DEFAULT_MODEL_DIR, model_filename)
+
+        logger.info(f"正在查找模型文件: {model_file_path}")
+
+        if not os.path.exists(model_file_path):
+            logger.error(f"模型文件未找到: {model_file_path}")
+            return jsonify({"status": "error", "error": f"模型文件未找到: {model_filename}"}), 404
+
+        # Extract the version number for record keeping
+        try:
+            version_str = version_prefix.split('_v')[-1]
+        except:
+            version_str = 'unknown'
 
         # Run the prediction
-        prediction_result = run_prediction(model_type, product_id, model_id, future_days, start_date, version, store_id)
+        prediction_result = run_prediction(
+            model_type=model_type,
+            product_id=product_id,
+            model_id=version_prefix,  # use the full prefix as the model_id
+            future_days=future_days,
+            start_date=start_date,
+            version=version_str,
+            store_id=store_id,
+            model_path=model_file_path
+        )
 
-        if prediction_result is None:
-            return jsonify({"status": "error", "error": "预测失败,预测器返回None"}), 500
+        if prediction_result is None or prediction_result.get("status") == "error":
+            return jsonify({"status": "error", "error": prediction_result.get("error", "预测失败,预测器返回None")}), 500
 
         # Add version info to the prediction result
         prediction_result['version'] = version
+        product_name = get_product_name(product_id) or product_id
 
-        # If visualization was requested, add chart data
+        # If visualization and analysis were requested, add the corresponding data
         if include_visualization:
             try:
                 # Add chart data
                 chart_data = prepare_chart_data(prediction_result)
                 prediction_result['chart_data'] = chart_data
 
                 # Add analysis results
-                if 'analysis' not in prediction_result or prediction_result['analysis'] is None:
-                    analysis_result = analyze_prediction(prediction_result)
-                    prediction_result['analysis'] = analysis_result
+                analysis_result = analyze_prediction(prediction_result)
+                prediction_result['analysis'] = analysis_result
             except Exception as e:
-                print(f"生成可视化或分析数据失败: {str(e)}")
-                # Visualization failures must not break the main flow; keep going
+                logger.warning(f"生成可视化或分析数据失败: {str(e)}")
 
-        # Save the prediction result to file and database
+        # Save the prediction result
         try:
             prediction_id, file_path = save_prediction_result(
-                prediction_result,
-                product_id,
-                product_name,
-                model_type,
-                model_id,
-                start_date,
-                future_days
+                prediction_result, product_id, product_name, model_type,
+                version_prefix, start_date, future_days
             )
 
             # Add the prediction ID to the result
             prediction_result['prediction_id'] = prediction_id
         except Exception as e:
-            print(f"保存预测结果失败: {str(e)}")
-            # A save failure must not affect the returned result; keep going
-
-        # Make sure all data is JSON-serializable before calling jsonify
+            logger.error(f"保存预测结果失败: {str(e)}")
+
+        # Make sure all data is JSON-serializable
         def convert_numpy_types(obj):
             if isinstance(obj, dict):
                 return {k: convert_numpy_types(v) for k, v in obj.items()}
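The store branch above recovers the product ID positionally from the flat filename convention store_{store_id}_{product_id}_{model_type}_v{N}_model.pth. A small self-contained sketch of that parsing, with an illustrative filename; note the assumption baked into parts[2]: it only holds while store and product IDs themselves contain no underscores:

    import re

    filename = "store_S001_P001_tcn_v3_model.pth"        # example following the convention
    version_prefix = filename.replace("_model.pth", "")  # "store_S001_P001_tcn_v3"

    parts = version_prefix.split("_")
    store_id, product_id = parts[1], parts[2]            # "S001", "P001"
    version_num = int(version_prefix.split("_v")[-1])    # 3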
@@ -1597,48 +1587,26 @@ def predict():
             elif isinstance(obj, pd.DataFrame):
                 return obj.to_dict(orient='records')
             elif isinstance(obj, pd.Series):
-                return obj.to_dict()
-            elif isinstance(obj, np.generic):
-                return obj.item()  # convert NumPy scalars to native Python types
+                return obj.tolist()
+            elif isinstance(obj, (np.generic, np.integer, np.floating)):
+                return obj.item()
             elif isinstance(obj, np.ndarray):
                 return obj.tolist()
-            elif pd.isna(obj):
-                return None
-            else:
-                return obj
-
-        # Recursively process the whole prediction result so every NumPy type gets converted
+
+            if not isinstance(obj, (pd.DataFrame, pd.Series)):
+                try:
+                    if pd.isna(obj):
+                        return None
+                except (TypeError, ValueError):
+                    pass
+            return obj
 
         processed_result = convert_numpy_types(prediction_result)
 
-        # Build the response format the frontend expects
-        response_data = {
-            'status': 'success',
-            'data': processed_result
-        }
-
-        # Promote history_data and prediction_data to the top level
-        if 'history_data' in processed_result:
-            response_data['history_data'] = processed_result['history_data']
-
-        if 'prediction_data' in processed_result:
-            response_data['prediction_data'] = processed_result['prediction_data']
-
-        # Debug logging: print the response data structure
-        print("=== 预测API响应数据结构 ===")
-        print(f"响应包含的顶级键: {list(response_data.keys())}")
-        print(f"data字段存在: {'data' in response_data}")
-        print(f"history_data字段存在: {'history_data' in response_data}")
-        print(f"prediction_data字段存在: {'prediction_data' in response_data}")
-        if 'history_data' in response_data:
-            print(f"history_data长度: {len(response_data['history_data'])}")
-        if 'prediction_data' in response_data:
-            print(f"prediction_data长度: {len(response_data['prediction_data'])}")
-        print("========================")
-
-        # Serialize the processed result to JSON
-        return jsonify(response_data)
+        return jsonify({"status": "success", "data": processed_result})
 
     except Exception as e:
-        print(f"预测失败: {str(e)}")
+        logger.error(f"预测失败: {str(e)}")
         import traceback
         traceback.print_exc()
         return jsonify({"status": "error", "error": str(e)}), 500
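The reworked converter above moves the pd.isna call behind a type guard and a try/except because pd.isna applied to a list or array returns an array, and using that result in a bare `if` raises "truth value of an array is ambiguous". A condensed, runnable sketch of the same idea:

    import numpy as np
    import pandas as pd

    def to_json_safe(obj):
        # Recursively convert NumPy/pandas values into JSON-serializable types
        if isinstance(obj, dict):
            return {k: to_json_safe(v) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [to_json_safe(v) for v in obj]
        if isinstance(obj, pd.DataFrame):
            return obj.to_dict(orient="records")
        if isinstance(obj, pd.Series):
            return obj.tolist()
        if isinstance(obj, np.generic):      # NumPy scalar (int64, float32, ...)
            return obj.item()
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        try:
            if pd.isna(obj):                 # only safe on scalars at this point
                return None
        except (TypeError, ValueError):
            pass
        return obj

    print(to_json_safe({"m": np.float32(0.5), "xs": np.array([1, 2]), "d": pd.NaT}))
    # {'m': 0.5, 'xs': [1, 2], 'd': None}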
@@ -2143,7 +2111,7 @@ def delete_prediction(prediction_id):
             'in': 'query',
             'type': 'string',
             'required': False,
-            'description': "按模型类型筛选 (mlstm, kan, transformer, tcn)"
+            'description': "按模型类型筛选 (mlstm, kan, transformer, tcn, xgboost)"
         },
         {
             'name': 'page',
@@ -2205,7 +2173,7 @@ def list_models():
           in: query
           type: string
          required: false
-         description: "按模型类型筛选 (mlstm, kan, transformer, tcn)"
+         description: "按模型类型筛选 (mlstm, kan, transformer, tcn, xgboost)"
        - name: store_id
          in: query
          type: string
@@ -2263,10 +2231,10 @@ def list_models():
         logger.info(f"[API] 目录存在: {os.path.exists(model_manager.model_dir)}")
 
         # Read the query parameters
-        product_id_filter = request.args.get('product_id')
-        model_type_filter = request.args.get('model_type')
-        store_id_filter = request.args.get('store_id')
-        training_mode_filter = request.args.get('training_mode')
+        product_id_filter = request.args.get('product_id') or None
+        model_type_filter = request.args.get('model_type') or None
+        store_id_filter = request.args.get('store_id') or None
+        training_mode_filter = request.args.get('training_mode') or None
 
         # Read the pagination parameters
         page = request.args.get('page', type=int)
@@ -2288,6 +2256,16 @@ def list_models():
         pagination = result['pagination']
 
         # Format the response data
+        # Helper: recursively convert float32 values to float
+        def convert_float32(obj):
+            if isinstance(obj, dict):
+                return {k: convert_float32(v) for k, v in obj.items()}
+            elif isinstance(obj, list):
+                return [convert_float32(i) for i in obj]
+            elif isinstance(obj, np.float32):
+                return float(obj)
+            return obj
+
         formatted_models = []
         for model in models:
             # Generate a unique and meaningful model_id
@@ -2320,7 +2298,7 @@ def list_models():
                 'version': model.get('version', 'v1'),
                 'created_at': model.get('created_at', model.get('modified_at', '')),
                 'file_size': model.get('file_size', 0),
-                'metrics': model.get('metrics', {}),
+                'metrics': convert_float32(model.get('metrics', {})),
                 'config': model.get('config', {})
             }
             formatted_models.append(formatted_model)
@@ -2704,24 +2682,20 @@ def get_product_name(product_id):
         return None
 
 # Helper that runs a prediction
-def run_prediction(model_type, product_id, model_id, future_days, start_date, version=None, store_id=None):
+def run_prediction(model_type, product_id, model_id, future_days, start_date, version=None, store_id=None, model_path=None):
     """执行模型预测"""
     try:
         scope_msg = f", store_id={store_id}" if store_id else ", 全局模型"
         print(f"开始运行预测: model_type={model_type}, product_id={product_id}, model_id={model_id}, version={version}{scope_msg}")
 
-        # Create a predictor instance
-        predictor = PharmacyPredictor()
-
-        # Resolve the model type mapping
-        predictor_model_type = model_type
-        if model_type == 'optimized_kan':
-            predictor_model_type = 'optimized_kan'
-
-        # Generate the prediction
-        prediction_result = predictor.predict(
+        # Import the prediction function
+        from predictors.model_predictor import load_model_and_predict
+
+        # Call the already-updated load-and-predict function directly
+        prediction_result = load_model_and_predict(
             product_id=product_id,
-            model_type=predictor_model_type,
+            model_type=model_type,
+            model_path=model_path,  # pass the correct path
             store_id=store_id,
             future_days=future_days,
             start_date=start_date,
@@ -2802,7 +2776,7 @@ def run_prediction(model_type, product_id, model_id, future_days, start_date, ve
             'in': 'path',
             'type': 'string',
             'required': True,
-            'description': '模型类型,例如mlstm, kan, transformer, tcn, optimized_kan'
+            'description': '模型类型,例如mlstm, kan, transformer, tcn, optimized_kan, xgboost'
         },
         {
             'name': 'product_id',
@@ -3357,11 +3331,11 @@ def save_prediction_result(prediction_result, product_id, product_name, model_ty
 
         cursor.execute('''
             INSERT INTO prediction_history (
-                id, product_id, product_name, model_type, model_id,
+                prediction_id, product_id, product_name, model_type, model_id,
                 start_date, future_days, created_at, file_path
             ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
         ''', (
-            prediction_id, product_id, product_name, model_type, model_id,
+            prediction_id, str(product_id), product_name, model_type, model_id,
             start_date if start_date else datetime.now().strftime('%Y-%m-%d'),
             future_days, datetime.now().isoformat(), file_path
         ))
@@ -3714,6 +3688,12 @@ def get_model_types():
             'name': 'TCN',
             'description': '时间卷积网络,适合处理长序列和平行计算',
             'tag_type': 'danger'
+        },
+        {
+            'id': 'xgboost',
+            'name': 'XGBoost',
+            'description': '一种高效的梯度提升决策树模型,广泛用于各种预测任务。',
+            'tag_type': 'success'
         }
     ]
 
@@ -3767,59 +3747,95 @@ def get_model_types():
     }
 })
 def get_model_versions_api(product_id, model_type):
-    """获取模型版本列表API"""
+    """获取模型版本列表API - 匹配所有范围(全局和特定店铺)"""
     try:
-        versions = get_model_versions(product_id, model_type)
-        latest_version = get_latest_model_version(product_id, model_type)
+        import re
+        # Regex pattern: product_{product_id}_(all|S...)_{model_type}_v(\d+)_model.pth
+        pattern = re.compile(f"product_{product_id}_.*_{model_type}_v\\d+_model\\.pth")
+
+        model_files = os.listdir(DEFAULT_MODEL_DIR)
+
+        found_versions = []
+        for filename in model_files:
+            if pattern.match(filename):
+                version_prefix = filename.replace("_model.pth", "")
+                found_versions.append(version_prefix)
+
+        # Sort by version number, descending
+        sorted_versions = sorted(found_versions, key=lambda s: int(s.split('_v')[-1]), reverse=True)
+
+        latest_version = sorted_versions[0] if sorted_versions else None
 
         return jsonify({
             "status": "success",
             "data": {
                 "product_id": product_id,
                 "model_type": model_type,
-                "versions": versions,
+                "versions": sorted_versions,
                 "latest_version": latest_version
             }
         })
     except Exception as e:
-        print(f"获取模型版本失败: {str(e)}")
+        logger.error(f"获取模型版本失败: {str(e)}")
         import traceback
         traceback.print_exc()
         return jsonify({"status": "error", "message": str(e)}), 500
 
 @app.route('/api/models/store/<store_id>/<model_type>/versions', methods=['GET'])
 def get_store_model_versions_api(store_id, model_type):
-    """获取店铺模型版本列表API"""
+    """获取店铺模型版本列表API - 匹配所有药品范围"""
     try:
-        model_identifier = f"store_{store_id}"
-        versions = get_model_versions(model_identifier, model_type)
-        latest_version = get_latest_model_version(model_identifier, model_type)
+        import re
+        pattern = re.compile(rf"store_{store_id}_.*_{model_type}_v\d+_model\.pth")
+
+        model_files = os.listdir(DEFAULT_MODEL_DIR)
+
+        found_versions = []
+        for filename in model_files:
+            if pattern.match(filename):
+                # Use the filename prefix as the version identifier
+                version_prefix = filename.replace("_model.pth", "")
+                found_versions.append(version_prefix)
+
+        # Sort descending by the numeric part of the version
+        if found_versions:
+            sorted_versions = sorted(
+                found_versions,
+                key=lambda s: int(s.split('_v')[-1]),
+                reverse=True
+            )
+        else:
+            sorted_versions = []
+
+        latest_version = sorted_versions[0] if sorted_versions else None
 
         return jsonify({
             "status": "success",
             "data": {
                 "store_id": store_id,
                 "model_type": model_type,
-                "versions": versions,
+                "versions": sorted_versions,
                 "latest_version": latest_version
             }
         })
     except Exception as e:
-        print(f"获取店铺模型版本失败: {str(e)}")
+        logger.error(f"获取店铺模型版本失败: {str(e)}")
         import traceback
         traceback.print_exc()
         return jsonify({"status": "error", "message": str(e)}), 500
 
 @app.route('/api/models/global/<model_type>/versions', methods=['GET'])
 def get_global_model_versions_api(model_type):
     """获取全局模型版本列表API"""
     try:
-        model_identifier = "global"
-        versions = get_model_versions(model_identifier, model_type)
-        latest_version = get_latest_model_version(model_identifier, model_type)
-
+        # TODO: update this to scan the database or filesystem for version info
+        # For now, return mock data
         return jsonify({
             "status": "success",
             "data": {
                 "model_type": model_type,
-                "versions": versions,
-                "latest_version": latest_version
+                "versions": ["v1"],
+                "latest_version": "v1"
             }
         })
     except Exception as e:
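Both rewritten endpoints above share the same scan-and-sort idiom: list the flat model directory, match filenames with a regex, strip the "_model.pth" suffix, and order by the numeric part after "_v". A standalone sketch of that idiom, with illustrative filenames:

    import re

    model_files = [
        "store_S001_P001_tcn_v2_model.pth",
        "store_S001_P001_tcn_v10_model.pth",
        "store_S001_P002_mlstm_v1_model.pth",
    ]
    pattern = re.compile(r"store_S001_.*_tcn_v\d+_model\.pth")

    found = [f.replace("_model.pth", "") for f in model_files if pattern.match(f)]
    # Numeric sort: "v10" correctly outranks "v2", which a plain string sort would get wrong
    found.sort(key=lambda s: int(s.split("_v")[-1]), reverse=True)
    latest = found[0] if found else None     # "store_S001_P001_tcn_v10"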
@@ -3885,8 +3901,8 @@ def retrain_model():
         else:
             return jsonify({'error': '无效的训练模式'}), 400
 
-        # Generate the new version number
-        new_version = get_next_model_version(model_identifier, model_type)
+        # Generating a new version number here is deprecated; it should go through path_manager
+        new_version = "v_retrain_unknown"
 
         # Generate the task ID
         task_id = str(uuid.uuid4())
@@ -4623,3 +4639,4 @@ def test_models_fix():
             "message": str(e),
             "test_name": "ModelManager修复测试"
         }), 500
+
@@ -36,7 +36,7 @@ DEVICE = get_device()
 # Use os.path.join to build cross-platform paths
 DEFAULT_DATA_PATH = os.path.join(PROJECT_ROOT, 'data', 'timeseries_training_data_sample_10s50p.parquet')
 DEFAULT_MODEL_DIR = os.path.join(PROJECT_ROOT, 'saved_models')
-DEFAULT_FEATURES = ['sales', 'price', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']
+DEFAULT_FEATURES = ['sales', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']
 
 # Time-series parameters
 LOOK_BACK = 5  # use the past 5 days of data (suits a small dataset)
@@ -71,216 +71,6 @@ TRAINING_UPDATE_INTERVAL = 1  # 训练进度更新间隔(秒)
 # Create the model save directory
 os.makedirs(DEFAULT_MODEL_DIR, exist_ok=True)
 
-def get_next_model_version(product_id: str, model_type: str) -> str:
-    """
-    获取指定产品和模型类型的下一个版本号
-
-    Args:
-        product_id: 产品ID
-        model_type: 模型类型
-
-    Returns:
-        下一个版本号,格式如 'v2', 'v3' 等
-    """
-    # New format: files carrying a version number
-    pattern_new = f"{model_type}_model_product_{product_id}_v*.pth"
-    existing_files_new = glob.glob(os.path.join(DEFAULT_MODEL_DIR, pattern_new))
-
-    # Old format: files without a version number (compatibility support)
-    pattern_old = f"{model_type}_model_product_{product_id}.pth"
-    old_file_path = os.path.join(DEFAULT_MODEL_DIR, pattern_old)
-    has_old_format = os.path.exists(old_file_path)
-
-    # If no file in either format exists, return the default version
-    if not existing_files_new and not has_old_format:
-        return DEFAULT_VERSION
-
-    # Extract version numbers from new-format files
-    versions = []
-    for file_path in existing_files_new:
-        filename = os.path.basename(file_path)
-        version_match = re.search(rf"_v(\d+)\.pth$", filename)
-        if version_match:
-            versions.append(int(version_match.group(1)))
-
-    # If an old-format file exists, treat it as v1
-    if has_old_format:
-        versions.append(1)
-        print(f"检测到旧格式模型文件: {old_file_path},将其视为版本v1")
-
-    if versions:
-        next_version_num = max(versions) + 1
-        return f"v{next_version_num}"
-    else:
-        return DEFAULT_VERSION
-
-def get_model_file_path(product_id: str, model_type: str, version: str = None) -> str:
-    """
-    生成模型文件路径
-
-    Args:
-        product_id: 产品ID
-        model_type: 模型类型
-        version: 版本号,如果为None则获取下一个版本
-
-    Returns:
-        模型文件的完整路径
-    """
-    if version is None:
-        version = get_next_model_version(product_id, model_type)
-
-    # Special-case v1: check whether an old-format file exists
-    if version == "v1":
-        # Check whether the old-format file exists
-        old_format_filename = f"{model_type}_model_product_{product_id}.pth"
-        old_format_path = os.path.join(DEFAULT_MODEL_DIR, old_format_filename)
-
-        if os.path.exists(old_format_path):
-            print(f"找到旧格式模型文件: {old_format_path},将其作为v1版本")
-            return old_format_path
-
-    # Use the new-format filename
-    filename = f"{model_type}_model_product_{product_id}_{version}.pth"
-    return os.path.join(DEFAULT_MODEL_DIR, filename)
-
-def get_model_versions(product_id: str, model_type: str) -> list:
-    """
-    获取指定产品和模型类型的所有版本
-
-    Args:
-        product_id: 产品ID
-        model_type: 模型类型
-
-    Returns:
-        版本列表,按版本号排序
-    """
-    # New format: files carrying a version number
-    pattern_new = f"{model_type}_model_product_{product_id}_v*.pth"
-    existing_files_new = glob.glob(os.path.join(DEFAULT_MODEL_DIR, pattern_new))
-
-    # Old format: files without a version number (compatibility support)
-    pattern_old = f"{model_type}_model_product_{product_id}.pth"
-    old_file_path = os.path.join(DEFAULT_MODEL_DIR, pattern_old)
-    has_old_format = os.path.exists(old_file_path)
-
-    versions = []
-
-    # Handle new-format files
-    for file_path in existing_files_new:
-        filename = os.path.basename(file_path)
-        version_match = re.search(rf"_v(\d+)\.pth$", filename)
-        if version_match:
-            version_num = int(version_match.group(1))
-            versions.append(f"v{version_num}")
-
-    # If an old-format file exists, treat it as v1
-    if has_old_format:
-        if "v1" not in versions:  # avoid adding it twice
-            versions.append("v1")
-        print(f"检测到旧格式模型文件: {old_file_path},将其视为版本v1")
-
-    # Sort by version number
-    versions.sort(key=lambda v: int(v[1:]))
-    return versions
-
-def get_latest_model_version(product_id: str, model_type: str) -> str:
-    """
-    获取指定产品和模型类型的最新版本
-
-    Args:
-        product_id: 产品ID
-        model_type: 模型类型
-
-    Returns:
-        最新版本号,如果没有则返回None
-    """
-    versions = get_model_versions(product_id, model_type)
-    return versions[-1] if versions else None
-
-def save_model_version_info(product_id: str, model_type: str, version: str, file_path: str, metrics: dict = None):
-    """
-    保存模型版本信息到数据库
-
-    Args:
-        product_id: 产品ID
-        model_type: 模型类型
-        version: 版本号
-        file_path: 模型文件路径
-        metrics: 模型性能指标
-    """
-    import sqlite3
-    import json
-    from datetime import datetime
-
-    try:
-        conn = sqlite3.connect('prediction_history.db')
-        cursor = conn.cursor()
-
-        # Insert the model version record
-        cursor.execute('''
-            INSERT INTO model_versions (
-                product_id, model_type, version, file_path, created_at, metrics, is_active
-            ) VALUES (?, ?, ?, ?, ?, ?, ?)
-        ''', (
-            product_id,
-            model_type,
-            version,
-            file_path,
-            datetime.now().isoformat(),
-            json.dumps(metrics) if metrics else None,
-            1  # new models are active by default
-        ))
-
-        conn.commit()
-        conn.close()
-        print(f"已保存模型版本信息: {product_id}_{model_type}_{version}")
-
-    except Exception as e:
-        print(f"保存模型版本信息失败: {str(e)}")
-
-def get_model_version_info(product_id: str, model_type: str, version: str = None):
-    """
-    从数据库获取模型版本信息
-
-    Args:
-        product_id: 产品ID
-        model_type: 模型类型
-        version: 版本号,如果为None则获取最新版本
-
-    Returns:
-        模型版本信息字典
-    """
-    import sqlite3
-    import json
-
-    try:
-        conn = sqlite3.connect('prediction_history.db')
-        conn.row_factory = sqlite3.Row
-        cursor = conn.cursor()
-
-        if version:
-            cursor.execute('''
-                SELECT * FROM model_versions
-                WHERE product_id = ? AND model_type = ? AND version = ?
-                ORDER BY created_at DESC LIMIT 1
-            ''', (product_id, model_type, version))
-        else:
-            cursor.execute('''
-                SELECT * FROM model_versions
-                WHERE product_id = ? AND model_type = ?
-                ORDER BY created_at DESC LIMIT 1
-            ''', (product_id, model_type))
-
-        row = cursor.fetchone()
-        conn.close()
-
-        if row:
-            result = dict(row)
-            if result['metrics']:
-                result['metrics'] = json.loads(result['metrics'])
-            return result
-        return None
-
-    except Exception as e:
-        print(f"获取模型版本信息失败: {str(e)}")
-        return None
+# Note: all functions related to model paths and version management (get_next_model_version,
+# get_model_file_path, etc.) have been removed, because that functionality is now handled
+# centrally by server.utils.file_save.ModelPathManager. Centralizing it ensures the whole
+# application follows a single, specification-based flat file-saving strategy.
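For orientation, the shape of the replacement API: the removed helpers are now expected behind server.utils.file_save.ModelPathManager, which is not part of this diff. The following is therefore only a hedged sketch of what a flat-layout path manager consistent with the filenames used elsewhere in this changeset might look like, not the actual implementation:

    import os
    import re

    class FlatModelPathManager:
        """Hypothetical stand-in for ModelPathManager (the real class is not shown in this diff)."""

        def __init__(self, model_dir="saved_models"):
            self.model_dir = model_dir

        def next_version(self, identifier, model_type):
            # Scan flat filenames like {identifier}_{model_type}_v{N}_model.pth
            pattern = re.compile(rf"{re.escape(identifier)}_{model_type}_v(\d+)_model\.pth")
            versions = [int(m.group(1)) for f in os.listdir(self.model_dir)
                        if (m := pattern.match(f))]
            return f"v{max(versions) + 1}" if versions else "v1"

        def get_model_paths(self, training_mode, model_type, store_id=None,
                            product_id=None, **_):
            identifier = (f"store_{store_id}_{product_id or 'all'}"
                          if training_mode == "store" else f"product_{product_id}_all")
            version = self.next_version(identifier, model_type)
            filename = f"{identifier}_{model_type}_{version}_model.pth"
            return {"identifier": identifier, "version": version,
                    "model_path": os.path.join(self.model_dir, filename)}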
@ -15,7 +15,8 @@ from trainers import (
|
||||
train_product_model_with_mlstm,
|
||||
train_product_model_with_kan,
|
||||
train_product_model_with_tcn,
|
||||
train_product_model_with_transformer
|
||||
train_product_model_with_transformer,
|
||||
train_product_model_with_xgboost
|
||||
)
|
||||
from predictors.model_predictor import load_model_and_predict
|
||||
from utils.data_utils import prepare_data, prepare_sequences
|
||||
@@ -64,8 +65,9 @@ class PharmacyPredictor:
               learning_rate=0.001, sequence_length=30, forecast_horizon=7,
               hidden_size=64, num_layers=2, dropout=0.1, use_optimized=False,
               store_id=None, training_mode='product', aggregation_method='sum',
+              product_scope='all', product_ids=None,
               socketio=None, task_id=None, version=None, continue_training=False,
-              progress_callback=None):
+              progress_callback=None, path_info=None):
         """
         训练预测模型 - 支持多店铺训练
 
@@ -123,29 +125,38 @@ class PharmacyPredictor:
                 return None
 
-            # If product_id is 'unknown', train one aggregated model for all of the store's products
-            if product_id == 'unknown':
+            if product_scope == 'specific' and product_ids:
+                # Train on the store's explicitly specified product list
+                try:
+                    # Filter the specified products out of this store's data
+                    store_data = self.data[self.data['store_id'] == store_id]
+                    product_data = store_data[store_data['product_id'].isin(product_ids)].copy()
+                    log_message(f"按店铺-指定药品训练: 店铺 {store_id}, {len(product_ids)}种药品, 数据量: {len(product_data)}")
+                except Exception as e:
+                    log_message(f"获取店铺指定药品数据失败: {e}", 'error')
+                    return None
+            elif product_id == 'unknown' or product_scope == 'all':
+                # Train one aggregated model for all of the store's products
                 try:
                     # Use the new aggregation helper, grouped by store
                     product_data = aggregate_multi_store_data(
                         store_id=store_id,
                         aggregation_method=aggregation_method,
                         file_path=self.data_path
                     )
                     log_message(f"按店铺聚合训练: 店铺 {store_id}, 聚合方法 {aggregation_method}, 数据量: {len(product_data)}")
-                    # Set product_id to the store ID so the saved model gets a meaningful identifier
-                    product_id = store_id
+                    product_id = store_id  # use the store ID as the model identifier
                 except Exception as e:
                     log_message(f"聚合店铺 {store_id} 数据失败: {e}", 'error')
                     return None
             else:
-                # Train for a single specific product in the store
+                # Train for a single specific product in the store (legacy-compatible path)
                 try:
                     product_data = get_store_product_sales_data(
                         store_id=store_id,
                         product_id=product_id,
                         file_path=self.data_path
                     )
-                    log_message(f"按店铺-产品训练: 店铺 {store_id}, 产品 {product_id}, 数据量: {len(product_data)}")
+                    log_message(f"按店铺-单个产品训练: 店铺 {store_id}, 产品 {product_id}, 数据量: {len(product_data)}")
                 except Exception as e:
                     log_message(f"获取店铺产品数据失败: {e}", 'error')
                     return None
@@ -189,7 +200,7 @@ class PharmacyPredictor:
         try:
             log_message(f"🤖 开始调用 {model_type} 训练器")
             if model_type == 'transformer':
-                model_result, metrics, actual_version = train_product_model_with_transformer(
+                model_result, metrics, actual_version, _ = train_product_model_with_transformer(
                     product_id=product_id,
                     product_df=product_data,
                     store_id=store_id,
@ -200,11 +211,12 @@ class PharmacyPredictor:
|
||||
version=version,
|
||||
socketio=socketio,
|
||||
task_id=task_id,
|
||||
continue_training=continue_training
|
||||
continue_training=continue_training,
|
||||
path_info=path_info
|
||||
)
|
||||
log_message(f"✅ {model_type} 训练器返回: metrics={type(metrics)}, version={actual_version}", 'success')
|
||||
elif model_type == 'mlstm':
|
||||
_, metrics, _, _ = train_product_model_with_mlstm(
|
||||
model_result, metrics, _, _ = train_product_model_with_mlstm(
|
||||
product_id=product_id,
|
||||
product_df=product_data,
|
||||
store_id=store_id,
|
||||
@ -214,7 +226,8 @@ class PharmacyPredictor:
|
||||
model_dir=self.model_dir,
|
||||
socketio=socketio,
|
||||
task_id=task_id,
|
||||
progress_callback=progress_callback
|
||||
progress_callback=progress_callback,
|
||||
path_info=path_info
|
||||
)
|
||||
elif model_type == 'kan':
|
||||
_, metrics = train_product_model_with_kan(
|
||||
@ -225,7 +238,7 @@ class PharmacyPredictor:
|
||||
aggregation_method=aggregation_method,
|
||||
epochs=epochs,
|
||||
use_optimized=use_optimized,
|
||||
model_dir=self.model_dir
|
||||
path_info=path_info
|
||||
)
|
||||
elif model_type == 'optimized_kan':
|
||||
_, metrics = train_product_model_with_kan(
|
||||
@ -236,10 +249,10 @@ class PharmacyPredictor:
|
||||
aggregation_method=aggregation_method,
|
||||
epochs=epochs,
|
||||
use_optimized=True,
|
||||
model_dir=self.model_dir
|
||||
path_info=path_info
|
||||
)
|
||||
elif model_type == 'tcn':
|
||||
_, metrics, _, _ = train_product_model_with_tcn(
|
||||
model_result, metrics, _, _ = train_product_model_with_tcn(
|
||||
product_id=product_id,
|
||||
product_df=product_data,
|
||||
store_id=store_id,
|
||||
@ -248,7 +261,18 @@ class PharmacyPredictor:
|
||||
epochs=epochs,
|
||||
model_dir=self.model_dir,
|
||||
socketio=socketio,
|
||||
task_id=task_id
|
||||
task_id=task_id,
|
||||
path_info=path_info
|
||||
)
|
||||
elif model_type == 'xgboost':
|
||||
metrics, _ = train_product_model_with_xgboost(
|
||||
product_id=product_id,
|
||||
store_id=store_id,
|
||||
epochs=epochs,
|
||||
socketio=socketio,
|
||||
task_id=task_id,
|
||||
version=version,
|
||||
path_info=path_info
|
||||
)
|
||||
else:
|
||||
log_message(f"不支持的模型类型: {model_type}", 'error')
|
||||
|
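# Illustrative only: the if/elif chain above could be flattened into a dispatch
# table. The trainer names are the ones imported in this diff; the uniform call
# signature is an assumption, since the real trainers accept different keyword
# arguments and return tuples of different arity.
def _train_dispatch(model_type, **trainer_kwargs):
    trainers = {
        'transformer': train_product_model_with_transformer,
        'mlstm': train_product_model_with_mlstm,
        'tcn': train_product_model_with_tcn,
        'kan': train_product_model_with_kan,
        'xgboost': train_product_model_with_xgboost,
    }
    trainer = trainers.get(model_type)
    if trainer is None:
        raise ValueError(f"不支持的模型类型: {model_type}")
    return trainer(**trainer_kwargs)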
@@ -14,695 +14,31 @@ from .kan_model import KANForecaster

class ModelManager:
    """
-    模型管理类:负责模型的保存、加载、列出和删除等操作
+    模型管理类:此类现在主要负责提供模型类的映射。
+    注意:所有与文件系统交互的逻辑(保存、加载、删除等)已被移除,
+    并由 server.utils.file_save.ModelPathManager 统一处理,
+    以遵循新的扁平化文件存储规范。
    """

-    def __init__(self, models_dir='models'):
+    def __init__(self):
        """
        初始化模型管理器
-
-        参数:
-        models_dir: 模型存储目录
        """
-        self.models_dir = models_dir
-        self._ensure_model_dir()
-
-        # 模型类型映射
+        # 模型类型到其对应类的映射
        self.model_types = {
            'mlstm': MLSTMTransformer,
            'transformer': TimeSeriesTransformer,
            'kan': KANForecaster
        }

-    def _ensure_model_dir(self):
-        """确保模型目录存在"""
-        if not os.path.exists(self.models_dir):
-            try:
-                os.makedirs(self.models_dir, exist_ok=True)
-                print(f"创建模型目录: {os.path.abspath(self.models_dir)}")
-            except Exception as e:
-                print(f"创建模型目录失败: {str(e)}")
-                raise
-
-    def save_model(self, model, model_type, product_id, optimizer=None,
-                   train_loss=None, test_loss=None, scaler_X=None,
-                   scaler_y=None, features=None, look_back=None, T=None,
-                   metrics=None, version=None):
+    def get_model_class(self, model_type: str):
        """
-        保存模型及其相关信息
-
-        参数:
-        model: 训练好的模型
-        model_type: 模型类型 ('mlstm', 'transformer', 'kan')
-        product_id: 产品ID
-        optimizer: 优化器
-        train_loss: 训练损失历史
-        test_loss: 测试损失历史
-        scaler_X: 特征缩放器
-        scaler_y: 目标缩放器
-        features: 使用的特征列表
-        look_back: 回看天数
-        T: 预测天数
-        metrics: 模型评估指标
-        version: 模型版本(可选),如果不提供则使用时间戳
+        根据模型类型字符串获取模型类。
+
+        Args:
+            model_type (str): 模型类型 (e.g., 'mlstm', 'kan')。
+
+        Returns:
+            模型类,如果不存在则返回 None。
        """
-        self._ensure_model_dir()
-
-        # 设置版本
-        if version is None:
-            version = datetime.now().strftime("%Y%m%d_%H%M%S")
-
-        # 设置文件名
-        model_filename = f"{product_id}_{model_type}_model_v{version}.pt"
-        model_path = os.path.join(self.models_dir, model_filename)
-
-        # 准备要保存的数据
-        save_dict = {
-            'model_state_dict': model.state_dict(),
-            'model_type': model_type,
-            'product_id': product_id,
-            'version': version,
-            'created_at': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-            'features': features,
-            'look_back': look_back,
-            'T': T
-        }
-
-        # 添加可选数据
-        if optimizer is not None:
-            save_dict['optimizer_state_dict'] = optimizer.state_dict()
-        if train_loss is not None:
-            save_dict['train_loss'] = train_loss
-        if test_loss is not None:
-            save_dict['test_loss'] = test_loss
-        if scaler_X is not None:
-            save_dict['scaler_X'] = scaler_X
-        if scaler_y is not None:
-            save_dict['scaler_y'] = scaler_y
-        if metrics is not None:
-            save_dict['metrics'] = metrics
-
-        try:
-            # 保存模型
-            torch.save(save_dict, model_path)
-            print(f"模型已成功保存到 {os.path.abspath(model_path)}")
-
-            # 保存模型的元数据到JSON文件,便于查询
-            meta_path = os.path.join(self.models_dir, f"{product_id}_{model_type}_meta_v{version}.json")
-            meta_dict = {k: str(v) if not isinstance(v, (int, float, bool, list, dict, type(None))) else v
-                         for k, v in save_dict.items() if k != 'model_state_dict' and
-                         k != 'optimizer_state_dict' and k != 'scaler_X' and k != 'scaler_y'}
-
-            # 如果有评估指标,添加到元数据
-            if metrics is not None:
-                meta_dict['metrics'] = metrics
-
-            with open(meta_path, 'w') as f:
-                json.dump(meta_dict, f, indent=4)
-
-            return model_path
-        except Exception as e:
-            print(f"保存模型时出错: {str(e)}")
-            raise
-
-    def load_model(self, product_id, model_type='mlstm', version=None, device=None):
-        """
-        加载指定的模型
-
-        参数:
-        product_id: 产品ID
-        model_type: 模型类型 ('mlstm', 'transformer', 'kan')
-        version: 模型版本,如果不指定则加载最新版本
-        device: 设备 (cuda/cpu)
-
-        返回:
-        model: 加载的模型
-        checkpoint: 包含模型信息的字典
-        """
-        if device is None:
-            device = get_device()
-
-        # 查找匹配的模型文件
-        if version is None:
-            # 查找最新版本
-            pattern = os.path.join(self.models_dir, f"{product_id}_{model_type}_model_v*.pt")
-            model_files = glob.glob(pattern)
-
-            if not model_files:
-                print(f"错误: 未找到产品 {product_id} 的 {model_type} 模型文件")
-                return None, None
-
-            # 按照文件修改时间排序,获取最新的
-            model_path = max(model_files, key=os.path.getmtime)
-        else:
-            # 指定版本
-            model_path = os.path.join(self.models_dir, f"{product_id}_{model_type}_model_v{version}.pt")
-            if not os.path.exists(model_path):
-                print(f"错误: 未找到产品 {product_id} 的 {model_type} 模型版本 {version}")
-                return None, None
-
-        try:
-            # 加载模型
-            checkpoint = torch.load(model_path, map_location=device)
-
-            # 创建模型实例
-            if model_type == 'mlstm':
-                model = MLSTMTransformer(
-                    num_features=len(checkpoint['features']),
-                    hidden_size=128,
-                    mlstm_layers=1,
-                    embed_dim=32,
-                    dense_dim=32,
-                    num_heads=4,
-                    dropout_rate=0.1,
-                    num_blocks=3,
-                    output_sequence_length=checkpoint['T']
-                )
-            elif model_type == 'transformer':
-                model = TimeSeriesTransformer(
-                    num_features=len(checkpoint['features']),
-                    d_model=32,
-                    nhead=4,
-                    num_encoder_layers=3,
-                    dim_feedforward=32,
-                    dropout=0.1,
-                    output_sequence_length=checkpoint['T']
-                )
-            elif model_type == 'kan':
-                model = KANForecaster(
-                    input_features=len(checkpoint['features']),
-                    hidden_sizes=[64, 128, 64],
-                    output_size=1,
-                    grid_size=5,
-                    spline_order=3,
-                    dropout_rate=0.1,
-                    output_sequence_length=checkpoint['T']
-                )
-            else:
-                raise ValueError(f"不支持的模型类型: {model_type}")
-
-            # 加载模型参数
-            model.load_state_dict(checkpoint['model_state_dict'])
-            model = model.to(device)
-            model.eval()
-
-            print(f"模型已从 {os.path.abspath(model_path)} 成功加载")
-            return model, checkpoint
-        except Exception as e:
-            print(f"加载模型时出错: {str(e)}")
-            raise
-
-    def list_models(self, product_id=None, model_type=None):
-        """
-        列出所有保存的模型
-
-        参数:
-        product_id: 按产品ID筛选 (可选)
-        model_type: 按模型类型筛选 (可选)
-
-        返回:
-        models_list: 模型信息列表
-        """
-        self._ensure_model_dir()
-
-        # 构建搜索模式
-        if product_id and model_type:
-            pattern = os.path.join(self.models_dir, f"{product_id}_{model_type}_model_v*.pt")
-        elif product_id:
-            pattern = os.path.join(self.models_dir, f"{product_id}_*_model_v*.pt")
-        elif model_type:
-            pattern = os.path.join(self.models_dir, f"*_{model_type}_model_v*.pt")
-        else:
-            pattern = os.path.join(self.models_dir, "*_model_v*.pt")
-
-        model_files = glob.glob(pattern)
-
-        if not model_files:
-            print("未找到匹配的模型文件")
-            return []
-
-        # 收集模型信息
-        models_list = []
-        for model_path in model_files:
-            try:
-                # 从文件名解析信息
-                filename = os.path.basename(model_path)
-                parts = filename.split('_')
-                if len(parts) < 4:
-                    continue
-
-                product_id = parts[0]
-                model_type = parts[1]
-                version = parts[-1].replace('model_v', '').replace('.pt', '')
-
-                # 查找对应的元数据文件
-                meta_path = os.path.join(self.models_dir, f"{product_id}_{model_type}_meta_v{version}.json")
-
-                model_info = {
-                    'product_id': product_id,
-                    'model_type': model_type,
-                    'version': version,
-                    'file_path': model_path,
-                    'created_at': datetime.fromtimestamp(os.path.getctime(model_path)).strftime("%Y-%m-%d %H:%M:%S"),
-                    'file_size': f"{os.path.getsize(model_path) / (1024 * 1024):.2f} MB"
-                }
-
-                # 如果有元数据文件,添加更多信息
-                if os.path.exists(meta_path):
-                    with open(meta_path, 'r') as f:
-                        meta = json.load(f)
-                        model_info.update(meta)
-
-                models_list.append(model_info)
-            except Exception as e:
-                print(f"解析模型文件 {model_path} 时出错: {str(e)}")
-
-        # 按创建时间排序
-        models_list.sort(key=lambda x: x['created_at'], reverse=True)
-
-        return models_list
-
-    def delete_model(self, product_id, model_type, version=None):
-        """
-        删除指定的模型
-
-        参数:
-        product_id: 产品ID
-        model_type: 模型类型
-        version: 模型版本,如果不指定则删除所有版本
-
-        返回:
-        success: 是否成功删除
-        """
-        self._ensure_model_dir()
-
-        if version:
-            # 删除特定版本
-            model_path = os.path.join(self.models_dir, f"{product_id}_{model_type}_model_v{version}.pt")
-            meta_path = os.path.join(self.models_dir, f"{product_id}_{model_type}_meta_v{version}.json")
-
-            if not os.path.exists(model_path):
-                print(f"错误: 未找到产品 {product_id} 的 {model_type} 模型版本 {version}")
-                return False
-
-            try:
-                os.remove(model_path)
-                if os.path.exists(meta_path):
-                    os.remove(meta_path)
-                print(f"已删除产品 {product_id} 的 {model_type} 模型版本 {version}")
-                return True
-            except Exception as e:
-                print(f"删除模型时出错: {str(e)}")
-                return False
-        else:
-            # 删除所有版本
-            pattern = os.path.join(self.models_dir, f"{product_id}_{model_type}_model_v*.pt")
-            meta_pattern = os.path.join(self.models_dir, f"{product_id}_{model_type}_meta_v*.json")
-
-            model_files = glob.glob(pattern)
-            meta_files = glob.glob(meta_pattern)
-
-            if not model_files:
-                print(f"错误: 未找到产品 {product_id} 的 {model_type} 模型文件")
-                return False
-
-            try:
-                for file_path in model_files:
-                    os.remove(file_path)
-
-                for file_path in meta_files:
-                    os.remove(file_path)
-
-                print(f"已删除产品 {product_id} 的所有 {model_type} 模型")
-                return True
-            except Exception as e:
-                print(f"删除模型时出错: {str(e)}")
-                return False
-
-    def get_model_details(self, product_id, model_type, version=None):
-        """
-        获取模型的详细信息
-
-        参数:
-        product_id: 产品ID
-        model_type: 模型类型
-        version: 模型版本,如果不指定则获取最新版本
-
-        返回:
-        details: 模型详细信息字典
-        """
-        # 查找匹配的模型文件
-        if version is None:
-            # 查找最新版本
-            pattern = os.path.join(self.models_dir, f"{product_id}_{model_type}_model_v*.pt")
-            model_files = glob.glob(pattern)
-
-            if not model_files:
-                print(f"错误: 未找到产品 {product_id} 的 {model_type} 模型文件")
-                return None
-
-            # 按照文件修改时间排序,获取最新的
-            model_path = max(model_files, key=os.path.getmtime)
-            # 从文件名解析版本
-            filename = os.path.basename(model_path)
-            version = filename.split('_')[-1].replace('model_v', '').replace('.pt', '')
-
-        # 查找元数据文件
-        meta_path = os.path.join(self.models_dir, f"{product_id}_{model_type}_meta_v{version}.json")
-
-        if not os.path.exists(meta_path):
-            print(f"错误: 未找到产品 {product_id} 的 {model_type} 模型版本 {version} 的元数据")
-            return None
-
-        try:
-            with open(meta_path, 'r') as f:
-                details = json.load(f)
-
-            # 添加文件路径
-            model_path = os.path.join(self.models_dir, f"{product_id}_{model_type}_model_v{version}.pt")
-            details['file_path'] = model_path
-            details['file_size'] = f"{os.path.getsize(model_path) / (1024 * 1024):.2f} MB"
-
-            return details
-        except Exception as e:
-            print(f"获取模型详情时出错: {str(e)}")
-            return None
-
-    def predict_with_model(self, product_id, model_type='mlstm', version=None, future_days=7,
-                           product_df=None, features=None, visualize=True, save_results=True):
-        """
-        使用指定的模型进行预测
-
-        参数:
-        product_id: 产品ID
-        model_type: 模型类型 ('mlstm', 'transformer', 'kan')
-        version: 模型版本,如果不指定则使用最新版本
-        future_days: 要预测的未来天数
-        product_df: 产品数据DataFrame
-        features: 特征列表
-        visualize: 是否可视化结果
-        save_results: 是否保存结果
-
-        返回:
-        predictions_df: 预测结果DataFrame
-        """
-        # 获取设备
-        device = get_device()
-        print(f"使用设备: {device} 进行预测")
-
-        # 加载模型
-        model, checkpoint = self.load_model(product_id, model_type, version, device)
-
-        if model is None or checkpoint is None:
-            return None
-
-        # 如果没有提供产品数据,则从Excel文件加载
-        if product_df is None:
-            try:
-                df = pd.read_excel('pharmacy_sales.xlsx')
-                product_df = df[df['product_id'] == product_id].sort_values('date')
-            except Exception as e:
-                print(f"加载产品数据时出错: {str(e)}")
-                return None
-
-        product_name = product_df['product_name'].iloc[0]
-
-        # 获取模型参数
-        features = checkpoint['features']
-        look_back = checkpoint['look_back']
-        T = checkpoint['T']
-        scaler_X = checkpoint['scaler_X']
-        scaler_y = checkpoint['scaler_y']
-
-        # 获取最近的look_back天数据
-        last_data = product_df[features].values[-look_back:]
-        last_data_scaled = scaler_X.transform(last_data)
-
-        # 准备输入数据
-        X_input = torch.Tensor(last_data_scaled).unsqueeze(0)  # 添加批次维度
-        X_input = X_input.to(device)  # 移动到设备上
-
-        # 进行预测
-        with torch.no_grad():
-            y_pred_scaled = model(X_input).squeeze(0).cpu().numpy()  # 返回到CPU并转换为numpy
-
-        # 反归一化预测结果
-        y_pred = scaler_y.inverse_transform(y_pred_scaled.reshape(-1, 1)).flatten()
-
-        # 创建预测日期范围
-        last_date = product_df['date'].iloc[-1]
-        future_dates = pd.date_range(start=last_date + pd.Timedelta(days=1), periods=T, freq='D')
-
-        # 创建预测结果DataFrame
-        predictions_df = pd.DataFrame({
-            'date': future_dates,
-            'product_id': product_id,
-            'product_name': product_name,
-            'predicted_sales': y_pred
-        })
-
-        print(f"\n{product_name} 未来 {T} 天销售预测 (使用{model_type.upper()}模型):")
-        print(predictions_df[['date', 'predicted_sales']])
-
-        # 可视化预测结果
-        if visualize:
-            plt.figure(figsize=(12, 6))
-
-            # 显示历史数据和预测数据
-            history_days = 30  # 显示最近30天的历史数据
-            history_dates = product_df['date'].iloc[-history_days:].values
-            history_sales = product_df['sales'].iloc[-history_days:].values
-
-            plt.plot(history_dates, history_sales, 'b-', label='历史销量')
-            plt.plot(future_dates, y_pred, 'r--', label=f'{model_type.upper()}预测销量')
-
-            plt.title(f'{product_name} - {model_type.upper()}销量预测 (未来{T}天)')
-            plt.xlabel('日期')
-            plt.ylabel('销量')
-            plt.legend()
-            plt.grid(True)
-            plt.xticks(rotation=45)
-            plt.tight_layout()
-
-            # 保存和显示图表
-            forecast_chart = f'{product_id}_{model_type}_forecast.png'
-            plt.savefig(forecast_chart)
-            print(f"预测图表已保存为: {forecast_chart}")
-
-        # 保存预测结果到CSV
-        if save_results:
-            forecast_csv = f'{product_id}_{model_type}_forecast.csv'
-            predictions_df.to_csv(forecast_csv, index=False)
-            print(f"预测结果已保存到: {forecast_csv}")
-
-        return predictions_df
-
-    def compare_models(self, product_id, model_types=None, versions=None, product_df=None, visualize=True):
-        """
-        比较不同模型的预测结果
-
-        参数:
-        product_id: 产品ID
-        model_types: 要比较的模型类型列表
-        versions: 对应的模型版本列表,如果不指定则使用最新版本
-        product_df: 产品数据DataFrame
-        visualize: 是否可视化结果
-
-        返回:
-        比较结果DataFrame
-        """
-        if model_types is None:
-            model_types = ['mlstm', 'transformer', 'kan']
-
-        if versions is None:
-            versions = [None] * len(model_types)
-
-        if len(versions) != len(model_types):
-            print("错误: 模型类型和版本列表长度不匹配")
-            return None
-
-        # 如果没有提供产品数据,则从Excel文件加载
-        if product_df is None:
-            try:
-                df = pd.read_excel('pharmacy_sales.xlsx')
-                product_df = df[df['product_id'] == product_id].sort_values('date')
-            except Exception as e:
-                print(f"加载产品数据时出错: {str(e)}")
-                return None
-
-        product_name = product_df['product_name'].iloc[0]
-
-        # 存储所有模型的预测结果
-        predictions = {}
-
-        # 对每个模型进行预测
-        for i, model_type in enumerate(model_types):
-            version = versions[i]
-
-            try:
-                pred_df = self.predict_with_model(
-                    product_id,
-                    model_type=model_type,
-                    version=version,
-                    product_df=product_df,
-                    visualize=False,
-                    save_results=False
-                )
-
-                if pred_df is not None:
-                    predictions[model_type] = pred_df
-            except Exception as e:
-                print(f"{model_type} 模型预测出错: {str(e)}")
-
-        if not predictions:
-            print("没有成功的预测结果")
-            return None
-
-        # 合并预测结果
-        result_df = predictions[list(predictions.keys())[0]][['date', 'product_id', 'product_name']].copy()
-
-        for model_type, pred_df in predictions.items():
-            result_df[f'{model_type}_prediction'] = pred_df['predicted_sales'].values
-
-        # 可视化比较结果
-        if visualize and len(predictions) > 0:
-            plt.figure(figsize=(12, 6))
-
-            # 显示历史数据
-            history_days = 30  # 显示最近30天的历史数据
-            history_dates = product_df['date'].iloc[-history_days:].values
-            history_sales = product_df['sales'].iloc[-history_days:].values
-
-            plt.plot(history_dates, history_sales, 'k-', label='历史销量')
-
-            # 显示预测数据
-            colors = ['r', 'g', 'b', 'c', 'm', 'y']
-            future_dates = result_df['date'].values
-
-            for i, (model_type, pred_df) in enumerate(predictions.items()):
-                color = colors[i % len(colors)]
-                plt.plot(future_dates, pred_df['predicted_sales'].values,
-                         f'{color}--', label=f'{model_type.upper()}预测')
-
-            plt.title(f'{product_name} - 不同模型预测结果比较')
-            plt.xlabel('日期')
-            plt.ylabel('销量')
-            plt.legend()
-            plt.grid(True)
-            plt.xticks(rotation=45)
-            plt.tight_layout()
-
-            # 保存和显示图表
-            compare_chart = f'{product_id}_model_comparison.png'
-            plt.savefig(compare_chart)
-            print(f"比较图表已保存为: {compare_chart}")
-
-            # 保存比较结果到CSV
-            compare_csv = f'{product_id}_model_comparison.csv'
-            result_df.to_csv(compare_csv, index=False)
-            print(f"比较结果已保存到: {compare_csv}")
-
-        return result_df
-
-    def export_model(self, product_id, model_type, version=None, export_dir='exported_models'):
-        """
-        导出模型到指定目录
-
-        参数:
-        product_id: 产品ID
-        model_type: 模型类型
-        version: 模型版本,如果不指定则导出最新版本
-        export_dir: 导出目录
-
-        返回:
-        export_path: 导出的文件路径
-        """
-        # 确保导出目录存在
-        if not os.path.exists(export_dir):
-            os.makedirs(export_dir, exist_ok=True)
-
-        # 查找匹配的模型文件
-        if version is None:
-            # 查找最新版本
-            pattern = os.path.join(self.models_dir, f"{product_id}_{model_type}_model_v*.pt")
-            model_files = glob.glob(pattern)
-
-            if not model_files:
-                print(f"错误: 未找到产品 {product_id} 的 {model_type} 模型文件")
-                return None
-
-            # 按照文件修改时间排序,获取最新的
-            model_path = max(model_files, key=os.path.getmtime)
-            # 从文件名解析版本
-            filename = os.path.basename(model_path)
-            version = filename.split('_')[-1].replace('model_v', '').replace('.pt', '')
-        else:
-            model_path = os.path.join(self.models_dir, f"{product_id}_{model_type}_model_v{version}.pt")
-            if not os.path.exists(model_path):
-                print(f"错误: 未找到产品 {product_id} 的 {model_type} 模型版本 {version}")
-                return None
-
-        # 元数据文件
-        meta_path = os.path.join(self.models_dir, f"{product_id}_{model_type}_meta_v{version}.json")
-
-        # 导出路径
-        export_model_path = os.path.join(export_dir, f"{product_id}_{model_type}_model_v{version}.pt")
-        export_meta_path = os.path.join(export_dir, f"{product_id}_{model_type}_meta_v{version}.json")
-
-        try:
-            # 复制文件
-            shutil.copy2(model_path, export_model_path)
-            if os.path.exists(meta_path):
-                shutil.copy2(meta_path, export_meta_path)
-
-            print(f"模型已导出到 {os.path.abspath(export_model_path)}")
-            return export_model_path
-        except Exception as e:
-            print(f"导出模型时出错: {str(e)}")
-            return None
-
-    def import_model(self, import_file, overwrite=False):
-        """
-        导入模型文件
-
-        参数:
-        import_file: 要导入的模型文件路径
-        overwrite: 如果存在同名文件是否覆盖
-
-        返回:
-        import_path: 导入后的文件路径
-        """
-        self._ensure_model_dir()
-
-        if not os.path.exists(import_file):
-            print(f"错误: 导入文件 {import_file} 不存在")
-            return None
-
-        # 获取文件名
-        filename = os.path.basename(import_file)
-
-        # 目标路径
-        target_path = os.path.join(self.models_dir, filename)
-
-        # 检查是否存在同名文件
-        if os.path.exists(target_path) and not overwrite:
-            print(f"错误: 目标文件 {target_path} 已存在,如需覆盖请设置overwrite=True")
-            return None
-
-        try:
-            # 复制文件
-            shutil.copy2(import_file, target_path)
-
-            # 如果有对应的元数据文件,也一并导入
-            meta_filename = filename.replace('_model_v', '_meta_v')
-            meta_import_file = import_file.replace('_model_v', '_meta_v').replace('.pt', '.json')
-            meta_target_path = os.path.join(self.models_dir, meta_filename.replace('.pt', '.json'))
-
-            if os.path.exists(meta_import_file):
-                shutil.copy2(meta_import_file, meta_target_path)
-
-            print(f"模型已导入到 {os.path.abspath(target_path)}")
-            return target_path
-        except Exception as e:
-            print(f"导入模型时出错: {str(e)}")
-            return None
+        return self.model_types.get(model_type)
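# Usage sketch for the slimmed-down manager above: it now only resolves a model
# class from a type string; all file I/O lives in ModelPathManager. Purely
# illustrative, under the assumption the class is instantiated with no arguments.
manager = ModelManager()
model_cls = manager.get_model_class('kan')
if model_cls is None:
    raise ValueError("未知的模型类型")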
@@ -8,8 +8,10 @@ import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
+import xgboost as xgb
from sklearn.preprocessing import MinMaxScaler
import sklearn.preprocessing._data  # 添加这一行以支持MinMaxScaler的反序列化
+import joblib

from models.transformer_model import TimeSeriesTransformer
from models.slstm_model import sLSTM as ScalarLSTM
@@ -21,82 +23,50 @@ from models.optimized_kan_forecaster import OptimizedKANForecaster
from analysis.trend_analysis import analyze_prediction_result
from utils.visualization import plot_prediction_results
from utils.multi_store_data_utils import get_store_product_sales_data, aggregate_multi_store_data
-from core.config import DEVICE, get_model_file_path
+from core.config import DEVICE
+from utils.file_save import ModelPathManager

-def load_model_and_predict(product_id, model_type, store_id=None, future_days=7, start_date=None, analyze_result=False, version=None):
+def load_model_and_predict(product_id, model_type, model_path=None, store_id=None, future_days=7, start_date=None, analyze_result=False, version=None, training_mode='product', **kwargs):
    """
    加载已训练的模型并进行预测

    参数:
    product_id: 产品ID
-    model_type: 模型类型 ('transformer', 'mlstm', 'kan', 'tcn', 'optimized_kan')
+    model_type: 模型类型 ('transformer', 'mlstm', 'kan', 'tcn', 'optimized_kan', 'xgboost')
+    model_path: 模型的完整文件路径
    store_id: 店铺ID,为None时使用全局模型
    future_days: 预测未来天数
    start_date: 预测起始日期,如果为None则使用最后一个已知日期
    analyze_result: 是否分析预测结果
-    version: 模型版本,如果为None则使用最新版本
+    version: 模型版本

    返回:
    预测结果和分析(如果analyze_result为True)
    """
    try:
-        # 确定模型文件路径(支持多店铺)
-        model_path = None
-
-        if version:
-            # 使用版本管理系统获取正确的文件路径
-            model_path = get_model_file_path(product_id, model_type, version)
-        else:
-            # 根据store_id确定搜索目录
-            if store_id:
-                # 查找特定店铺的模型
-                possible_dirs = [
-                    os.path.join('saved_models', model_type, store_id),
-                    os.path.join('models', model_type, store_id)
-                ]
-            else:
-                # 查找全局模型
-                possible_dirs = [
-                    os.path.join('saved_models', model_type, 'global'),
-                    os.path.join('models', model_type, 'global'),
-                    os.path.join('saved_models', model_type),  # 后向兼容
-                    'saved_models'  # 最基本的目录
-                ]
-
-            # 文件名模式
-            model_suffix = '_optimized' if model_type == 'optimized_kan' else ''
-            file_model_type = 'kan' if model_type == 'optimized_kan' else model_type
-
-            possible_names = [
-                f"{product_id}_{model_type}_v1_model.pt",  # 新多店铺格式
-                f"{product_id}_{model_type}_v1_global_model.pt",  # 全局模型格式
-                f"{product_id}_{model_type}_v1.pth",  # 旧版本格式
-                f"{file_model_type}{model_suffix}_model_product_{product_id}.pth",  # 原始格式
-                f"{model_type}_model_product_{product_id}.pth"  # 简化格式
-            ]
-
-            # 搜索模型文件
-            for dir_path in possible_dirs:
-                if not os.path.exists(dir_path):
-                    continue
-                for name in possible_names:
-                    test_path = os.path.join(dir_path, name)
-                    if os.path.exists(test_path):
-                        model_path = test_path
-                        break
-                if model_path:
-                    break
-
-            if not model_path:
-                scope_msg = f"店铺 {store_id}" if store_id else "全局"
-                print(f"找不到产品 {product_id} 的 {model_type} 模型文件 ({scope_msg})")
-                print(f"搜索目录: {possible_dirs}")
-                return None
-
-        print(f"尝试加载模型文件: {model_path}")
-
-        if not os.path.exists(model_path):
-            print(f"模型文件 {model_path} 不存在")
+        # 如果没有提供 model_path,则使用 ModelPathManager 动态生成
+        if not model_path:
+            if version is None:
+                raise ValueError("使用动态路径加载时必须提供 'version'。")
+
+            path_manager = ModelPathManager()
+            # 传递所有必要的参数以重构路径
+            path_params = {
+                'product_id': product_id,
+                'store_id': store_id,
+                **kwargs
+            }
+            model_path = path_manager.get_model_path_for_prediction(
+                training_mode=training_mode,
+                model_type=model_type,
+                version=version,
+                **path_params
+            )
+
+        if not model_path or not os.path.exists(model_path):
+            print(f"模型文件 {model_path} 不存在或无法生成。")
            return None
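# A minimal sketch of what ModelPathManager.get_model_path_for_prediction might
# do under the flat-storage specification this diff describes. The directory
# layout and filename pattern below are assumptions for illustration only, not
# the project's actual implementation in server/utils/file_save.py.
import os

class FlatPathManagerSketch:
    def __init__(self, base_dir='saved_models'):
        self.base_dir = base_dir

    def get_model_path_for_prediction(self, training_mode, model_type, version,
                                      product_id=None, store_id=None, **_ignored):
        # Flat layout: one file per (mode, scope, model type, version) tuple.
        scope = store_id if training_mode == 'store' else (product_id or 'global')
        filename = f"{training_mode}_{scope}_{model_type}_v{version}_model.pth"
        return os.path.join(self.base_dir, filename)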
        # 加载销售数据(支持多店铺)
@@ -104,9 +74,9 @@ def load_model_and_predict(product_id, model_type, store_id=None, future_days=7,
        if store_id:
            # 加载特定店铺的数据
            product_df = get_store_product_sales_data(
-                store_id,
+                store_id,
                product_id,
-                'pharmacy_sales_multi_store.csv'
+                None  # 使用默认数据路径
            )
            store_name = product_df['store_name'].iloc[0] if 'store_name' in product_df.columns else f"店铺{store_id}"
            prediction_scope = f"店铺 '{store_name}' ({store_id})"
@@ -115,14 +85,16 @@ def load_model_and_predict(product_id, model_type, store_id=None, future_days=7,
            product_df = aggregate_multi_store_data(
                product_id,
                aggregation_method='sum',
-                file_path='pharmacy_sales_multi_store.csv'
+                file_path=None  # 使用默认数据路径
            )
            prediction_scope = "全部店铺(聚合数据)"
    except Exception as e:
        print(f"多店铺数据加载失败,尝试使用原始数据格式: {e}")
        # 后向兼容:尝试加载原始数据格式
        try:
-            df = pd.read_excel('pharmacy_sales.xlsx')
+            from core.config import DEFAULT_DATA_PATH
+            from utils.multi_store_data_utils import load_multi_store_data
+            df = load_multi_store_data(DEFAULT_DATA_PATH)
            product_df = df[df['product_id'] == product_id].sort_values('date')
            if store_id:
                print(f"警告:原始数据不支持店铺过滤,将使用所有数据预测")
@@ -148,177 +120,241 @@ def load_model_and_predict(product_id, model_type, store_id=None, future_days=7,
    # 加载模型和配置
    try:
-        # 首先尝试使用weights_only=False加载
-        try:
-            print("尝试使用 weights_only=False 加载模型")
-            checkpoint = torch.load(model_path, map_location=DEVICE, weights_only=False)
-        except Exception as e:
-            print(f"使用weights_only=False加载失败: {str(e)}")
-            print("尝试使用默认参数加载模型")
-            checkpoint = torch.load(model_path, map_location=DEVICE)
+        if model_type == 'xgboost':
+            if not os.path.exists(model_path):
+                print(f"XGBoost模型文件不存在: {model_path}")
+                return None
+            # 加载元数据
+            metadata = joblib.load(model_path)
+            model_file_path = metadata['model_file']

-        print(f"模型加载成功,检查checkpoint类型: {type(checkpoint)}")
-        if isinstance(checkpoint, dict):
-            print(f"checkpoint包含的键: {list(checkpoint.keys())}")
+            if not os.path.exists(model_file_path):
+                print(f"引用的XGBoost模型文件不存在: {model_file_path}")
+                return None
+
+            # 加载原生Booster模型
+            model = xgb.Booster()
+            model.load_model(model_file_path)
+
+            config = metadata['config']
+            metrics = metadata['metrics']
+            scaler_X = metadata['scaler_X']
+            scaler_y = metadata['scaler_y']
+            print("XGBoost原生模型及元数据加载成功")
        else:
-            print(f"checkpoint不是字典类型,而是: {type(checkpoint)}")
-            return None
+            try:
+                print("尝试使用 weights_only=False 加载模型")
+                checkpoint = torch.load(model_path, map_location=DEVICE, weights_only=False)
+            except Exception as e:
+                print(f"使用weights_only=False加载失败: {str(e)}")
+                print("尝试使用默认参数加载模型")
+                checkpoint = torch.load(model_path, map_location=DEVICE)
+
+            print(f"模型加载成功,检查checkpoint类型: {type(checkpoint)}")
+            if isinstance(checkpoint, dict):
+                print(f"checkpoint包含的键: {list(checkpoint.keys())}")
+            else:
+                print(f"checkpoint不是字典类型,而是: {type(checkpoint)}")
+                return None
    except Exception as e:
        print(f"加载模型失败: {str(e)}")
        return None

-    # 检查并获取配置
-    if 'config' not in checkpoint:
-        print("模型文件中没有配置信息")
-        return None
+    # XGBoost有不同的处理逻辑
+    if model_type == 'xgboost':
+        look_back = config['look_back']
+        features = config['features']

-    config = checkpoint['config']
-    print(f"模型配置: {config}")
-
-    # 检查并获取缩放器
-    if 'scaler_X' not in checkpoint or 'scaler_y' not in checkpoint:
-        print("模型文件中没有缩放器信息")
-        return None
+        # 准备输入数据
+        recent_data = product_df.iloc[-look_back:].copy()

-    scaler_X = checkpoint['scaler_X']
-    scaler_y = checkpoint['scaler_y']
-
-    # 创建模型实例
-    try:
-        if model_type == 'transformer':
-            model = TimeSeriesTransformer(
-                num_features=config['input_dim'],
-                d_model=config['hidden_size'],
-                nhead=config['num_heads'],
-                num_encoder_layers=config['num_layers'],
-                dim_feedforward=config['hidden_size'] * 2,
-                dropout=config['dropout'],
-                output_sequence_length=config['output_dim'],
-                seq_length=config['sequence_length'],
-                batch_size=32
-            ).to(DEVICE)
-        elif model_type == 'slstm':
-            model = ScalarLSTM(
-                input_dim=config['input_dim'],
-                hidden_dim=config['hidden_size'],
-                output_dim=config['output_dim'],
-                num_layers=config['num_layers'],
-                dropout=config['dropout']
-            ).to(DEVICE)
-        elif model_type == 'mlstm':
-            # 获取配置参数,如果不存在则使用默认值
-            embed_dim = config.get('embed_dim', 32)
-            dense_dim = config.get('dense_dim', 32)
-            num_heads = config.get('num_heads', 4)
-            num_blocks = config.get('num_blocks', 3)
+        predictions = []
+        current_input_df = recent_data[features].copy()

+        for _ in range(future_days):
+            # 归一化输入数据并展平
+            input_scaled = scaler_X.transform(current_input_df.values)
+            input_vector = input_scaled.flatten().reshape(1, -1)

-            model = MatrixLSTM(
-                num_features=config['input_dim'],
-                hidden_size=config['hidden_size'],
-                mlstm_layers=config['num_layers'],
-                embed_dim=embed_dim,
-                dense_dim=dense_dim,
-                num_heads=num_heads,
-                dropout_rate=config['dropout'],
-                num_blocks=num_blocks,
-                output_sequence_length=config['output_dim']
-            ).to(DEVICE)
-        elif model_type == 'kan':
-            model = KANForecaster(
-                input_features=config['input_dim'],
-                hidden_sizes=[config['hidden_size'], config['hidden_size']*2, config['hidden_size']],
-                output_sequence_length=config['output_dim']
-            ).to(DEVICE)
-        elif model_type == 'optimized_kan':
-            model = OptimizedKANForecaster(
-                input_features=config['input_dim'],
-                hidden_sizes=[config['hidden_size'], config['hidden_size']*2, config['hidden_size']],
-                output_sequence_length=config['output_dim']
-            ).to(DEVICE)
-        elif model_type == 'tcn':
-            model = TCNForecaster(
-                num_features=config['input_dim'],
-                output_sequence_length=config['output_dim'],
-                num_channels=[config['hidden_size']] * config['num_layers'],
-                kernel_size=3,
-                dropout=config['dropout']
-            ).to(DEVICE)
-        else:
-            print(f"不支持的模型类型: {model_type}")
-            return None
+            # 预测缩放后的值
+            dpredict = xgb.DMatrix(input_vector)
+            prediction_scaled = model.predict(dpredict)

-        print(f"模型实例创建成功: {type(model)}")
-    except Exception as e:
-        print(f"创建模型实例失败: {str(e)}")
-        return None
-
-    # 加载模型参数
-    try:
-        model.load_state_dict(checkpoint['model_state_dict'])
-        model.eval()
-        print("模型参数加载成功")
-    except Exception as e:
-        print(f"加载模型参数失败: {str(e)}")
-        return None
-
-    # 准备输入数据
-    try:
-        features = ['sales', 'price', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']
-        sequence_length = config['sequence_length']
-
-        # 获取最近的sequence_length天数据作为输入
-        recent_data = product_df.iloc[-sequence_length:].copy()
-
-        # 如果指定了起始日期,则使用该日期之后的数据
-        if start_date:
-            if isinstance(start_date, str):
-                start_date = datetime.strptime(start_date, '%Y-%m-%d')
-            recent_data = product_df[product_df['date'] >= start_date].iloc[:sequence_length].copy()
-            if len(recent_data) < sequence_length:
-                print(f"警告: 从指定日期 {start_date} 开始的数据少于所需的 {sequence_length} 天")
-                # 补充数据
-                missing_days = sequence_length - len(recent_data)
-                additional_data = product_df[product_df['date'] < start_date].iloc[-missing_days:].copy()
-                recent_data = pd.concat([additional_data, recent_data]).reset_index(drop=True)
-
-        print(f"输入数据准备完成,形状: {recent_data.shape}")
-    except Exception as e:
-        print(f"准备输入数据失败: {str(e)}")
-        return None
-
-    # 归一化输入数据
-    try:
-        X = recent_data[features].values
-        X_scaled = scaler_X.transform(X)
-
-        # 转换为模型输入格式
-        X_input = torch.tensor(X_scaled.reshape(1, sequence_length, -1), dtype=torch.float32).to(DEVICE)
-        print(f"输入张量准备完成,形状: {X_input.shape}")
-    except Exception as e:
-        print(f"归一化输入数据失败: {str(e)}")
-        return None
-
-    # 预测
-    try:
-        with torch.no_grad():
-            y_pred_scaled = model(X_input).cpu().numpy()
-            print(f"原始预测输出形状: {y_pred_scaled.shape}")
-
-            # 处理TCN、Transformer、mLSTM和KAN模型的输出,确保形状正确
-            if model_type in ['tcn', 'transformer', 'mlstm', 'kan', 'optimized_kan'] and len(y_pred_scaled.shape) == 3:
-                y_pred_scaled = y_pred_scaled.squeeze(-1)
-                print(f"处理后的预测输出形状: {y_pred_scaled.shape}")
-
-        # 反归一化预测结果
-        y_pred = scaler_y.inverse_transform(y_pred_scaled.reshape(-1, 1)).flatten()
-        print(f"反归一化后的预测结果: {y_pred}")
+            # 反归一化得到真实预测值
+            prediction = scaler_y.inverse_transform(prediction_scaled.reshape(-1, 1)).flatten()[0]
+            predictions.append(prediction)

+            # 更新输入窗口以进行下一次预测
+            # 创建新的一行,包含真实的预测值
+            new_row_values = current_input_df.iloc[-1].copy()
+            new_row_values['sales'] = prediction
+            # 可以在这里添加更复杂的未来特征生成逻辑(例如,根据新日期更新weekday, month等)

+            new_row_df = pd.DataFrame([new_row_values], columns=features)

+            # 滚动窗口
+            current_input_df = pd.concat([current_input_df.iloc[1:], new_row_df], ignore_index=True)

+        # 生成预测日期
+        last_date = recent_data['date'].iloc[-1]
-        pred_dates = [(last_date + timedelta(days=i+1)) for i in range(len(y_pred))]
-        print(f"预测日期: {pred_dates}")
-    except Exception as e:
-        print(f"执行预测失败: {str(e)}")
-        return None
+        pred_dates = [last_date + timedelta(days=i+1) for i in range(future_days)]

+        y_pred = np.array(predictions)
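# Self-contained sketch of the rolling one-step forecast the XGBoost branch above
# implements: predict one day, feed the prediction back into the window, drop the
# oldest day, repeat. The fitted scalers and Booster are stand-ins; the future
# feature updates (weekday/month per new date) are omitted here, as in the diff.
import numpy as np
import xgboost as xgb

def rolling_forecast(model: xgb.Booster, window: np.ndarray, steps: int,
                     scaler_X, scaler_y, sales_col: int = 0) -> np.ndarray:
    preds = []
    window = window.copy()  # shape: (look_back, n_features)
    for _ in range(steps):
        flat = scaler_X.transform(window).flatten().reshape(1, -1)
        scaled = model.predict(xgb.DMatrix(flat))
        value = scaler_y.inverse_transform(scaled.reshape(-1, 1)).flatten()[0]
        preds.append(value)
        new_row = window[-1].copy()
        new_row[sales_col] = value                  # feed the prediction back in
        window = np.vstack([window[1:], new_row])   # roll the window forward
    return np.array(preds)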
+    else:  # 原有的PyTorch模型逻辑
+        # 检查并获取配置
+        if 'config' not in checkpoint:
+            print("模型文件中没有配置信息")
+            return None
+
+        config = checkpoint['config']
+        print(f"模型配置: {config}")
+
+        # 检查并获取缩放器
+        if 'scaler_X' not in checkpoint or 'scaler_y' not in checkpoint:
+            print("模型文件中没有缩放器信息")
+            return None
+
+        scaler_X = checkpoint['scaler_X']
+        scaler_y = checkpoint['scaler_y']
+
+        # 创建模型实例
+        try:
+            if model_type == 'transformer':
+                model = TimeSeriesTransformer(
+                    num_features=config['input_dim'],
+                    d_model=config['hidden_size'],
+                    nhead=config['num_heads'],
+                    num_encoder_layers=config['num_layers'],
+                    dim_feedforward=config['hidden_size'] * 2,
+                    dropout=config['dropout'],
+                    output_sequence_length=config['output_dim'],
+                    seq_length=config['sequence_length'],
+                    batch_size=32
+                ).to(DEVICE)
+            elif model_type == 'slstm':
+                model = ScalarLSTM(
+                    input_dim=config['input_dim'],
+                    hidden_dim=config['hidden_size'],
+                    output_dim=config['output_dim'],
+                    num_layers=config['num_layers'],
+                    dropout=config['dropout']
+                ).to(DEVICE)
+            elif model_type == 'mlstm':
+                # 获取配置参数,如果不存在则使用默认值
+                embed_dim = config.get('embed_dim', 32)
+                dense_dim = config.get('dense_dim', 32)
+                num_heads = config.get('num_heads', 4)
+                num_blocks = config.get('num_blocks', 3)
+
+                model = MatrixLSTM(
+                    num_features=config['input_dim'],
+                    hidden_size=config['hidden_size'],
+                    mlstm_layers=config['num_layers'],
+                    embed_dim=embed_dim,
+                    dense_dim=dense_dim,
+                    num_heads=num_heads,
+                    dropout_rate=config['dropout'],
+                    num_blocks=num_blocks,
+                    output_sequence_length=config['output_dim']
+                ).to(DEVICE)
+            elif model_type == 'kan':
+                model = KANForecaster(
+                    input_features=config['input_dim'],
+                    hidden_sizes=[config['hidden_size'], config['hidden_size']*2, config['hidden_size']],
+                    output_sequence_length=config['output_dim']
+                ).to(DEVICE)
+            elif model_type == 'optimized_kan':
+                model = OptimizedKANForecaster(
+                    input_features=config['input_dim'],
+                    hidden_sizes=[config['hidden_size'], config['hidden_size']*2, config['hidden_size']],
+                    output_sequence_length=config['output_dim']
+                ).to(DEVICE)
+            elif model_type == 'tcn':
+                model = TCNForecaster(
+                    num_features=config['input_dim'],
+                    output_sequence_length=config['output_dim'],
+                    num_channels=[config['hidden_size']] * config['num_layers'],
+                    kernel_size=3,
+                    dropout=config['dropout']
+                ).to(DEVICE)
+            else:
+                print(f"不支持的模型类型: {model_type}")
+                return None
+
+            print(f"模型实例创建成功: {type(model)}")
+        except Exception as e:
+            print(f"创建模型实例失败: {str(e)}")
+            return None
+
+        # 加载模型参数
+        try:
+            model.load_state_dict(checkpoint['model_state_dict'])
+            model.eval()
+            print("模型参数加载成功")
+        except Exception as e:
+            print(f"加载模型参数失败: {str(e)}")
+            return None
+
+        # 准备输入数据
+        try:
+            features = ['sales', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']
+            sequence_length = config['sequence_length']
+
+            # 获取最近的sequence_length天数据作为输入
+            recent_data = product_df.iloc[-sequence_length:].copy()
+
+            # 如果指定了起始日期,则使用该日期之后的数据
+            if start_date:
+                if isinstance(start_date, str):
+                    start_date = datetime.strptime(start_date, '%Y-%m-%d')
+                recent_data = product_df[product_df['date'] >= start_date].iloc[:sequence_length].copy()
+                if len(recent_data) < sequence_length:
+                    print(f"警告: 从指定日期 {start_date} 开始的数据少于所需的 {sequence_length} 天")
+                    # 补充数据
+                    missing_days = sequence_length - len(recent_data)
+                    additional_data = product_df[product_df['date'] < start_date].iloc[-missing_days:].copy()
+                    recent_data = pd.concat([additional_data, recent_data]).reset_index(drop=True)
+
+            print(f"输入数据准备完成,形状: {recent_data.shape}")
+        except Exception as e:
+            print(f"准备输入数据失败: {str(e)}")
+            return None
+
+        # 归一化输入数据
+        try:
+            X = recent_data[features].values
+            X_scaled = scaler_X.transform(X)
+
+            # 转换为模型输入格式
+            X_input = torch.tensor(X_scaled.reshape(1, sequence_length, -1), dtype=torch.float32).to(DEVICE)
+            print(f"输入张量准备完成,形状: {X_input.shape}")
+        except Exception as e:
+            print(f"归一化输入数据失败: {str(e)}")
+            return None
+
+        # 预测
+        try:
+            with torch.no_grad():
+                y_pred_scaled = model(X_input).cpu().numpy()
+                print(f"原始预测输出形状: {y_pred_scaled.shape}")
+
+                # 处理TCN、Transformer、mLSTM和KAN模型的输出,确保形状正确
+                if model_type in ['tcn', 'transformer', 'mlstm', 'kan', 'optimized_kan'] and len(y_pred_scaled.shape) == 3:
+                    y_pred_scaled = y_pred_scaled.squeeze(-1)
+                    print(f"处理后的预测输出形状: {y_pred_scaled.shape}")
+
+            # 反归一化预测结果
+            y_pred = scaler_y.inverse_transform(y_pred_scaled.reshape(-1, 1)).flatten()
+            print(f"反归一化后的预测结果: {y_pred}")
+
+            # 生成预测日期
+            last_date = recent_data['date'].iloc[-1]
+            pred_dates = [(last_date + timedelta(days=i+1)) for i in range(len(y_pred))]
+            print(f"预测日期: {pred_dates}")
+        except Exception as e:
+            print(f"执行预测失败: {str(e)}")
+            return None

    # 创建预测结果DataFrame
    try:
@@ -378,4 +414,4 @@ def load_model_and_predict(product_id, model_type, store_id=None, future_days=7,
        print(f"预测过程中出现未捕获的异常: {str(e)}")
        import traceback
        traceback.print_exc()
-        return None
+        return None
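# Why the squeeze step above matters: the sequence models here emit a
# (batch, horizon, 1) array, while MinMaxScaler.inverse_transform expects a
# 2-D (n, 1) array. A compact, runnable restatement of that shape handling
# (the random array is a stand-in for the model output):
import numpy as np
from sklearn.preprocessing import MinMaxScaler

y_scaled = np.random.rand(1, 7, 1)            # stand-in for model(X_input)
if y_scaled.ndim == 3:
    y_scaled = y_scaled.squeeze(-1)           # -> (1, 7)
scaler_y = MinMaxScaler().fit(np.arange(10, dtype=float).reshape(-1, 1))
y = scaler_y.inverse_transform(y_scaled.reshape(-1, 1)).flatten()  # -> (7,)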
@@ -6,6 +6,7 @@ from .mlstm_trainer import train_product_model_with_mlstm
from .kan_trainer import train_product_model_with_kan
from .tcn_trainer import train_product_model_with_tcn
from .transformer_trainer import train_product_model_with_transformer
+from .xgboost_trainer import train_product_model_with_xgboost

# 默认训练函数
from .mlstm_trainer import train_product_model_with_mlstm as train_product_model
@@ -15,5 +16,6 @@ __all__ = [
    'train_product_model_with_mlstm',
    'train_product_model_with_kan',
    'train_product_model_with_tcn',
-    'train_product_model_with_transformer'
+    'train_product_model_with_transformer',
+    'train_product_model_with_xgboost'
]
@@ -21,7 +21,7 @@ from utils.visualization import plot_loss_curve
from analysis.metrics import evaluate_model
from core.config import DEVICE, DEFAULT_MODEL_DIR, LOOK_BACK, FORECAST_HORIZON

-def train_product_model_with_kan(product_id, product_df=None, store_id=None, training_mode='product', aggregation_method='sum', epochs=50, use_optimized=False, model_dir=DEFAULT_MODEL_DIR):
+def train_product_model_with_kan(product_id, product_df=None, store_id=None, training_mode='product', aggregation_method='sum', epochs=50, use_optimized=False, path_info=None, **kwargs):
    """
    使用KAN模型训练产品销售预测模型

@@ -29,12 +29,14 @@ def train_product_model_with_kan(product_id, product_df=None, store_id=None, tra
    product_id: 产品ID
    epochs: 训练轮次
    use_optimized: 是否使用优化版KAN
-    model_dir: 模型保存目录,默认使用配置中的DEFAULT_MODEL_DIR
+    path_info: 包含所有路径信息的字典

    返回:
    model: 训练好的模型
    metrics: 模型评估指标
    """
+    if not path_info:
+        raise ValueError("train_product_model_with_kan 需要 'path_info' 参数。")
    # 如果没有传入product_df,则根据训练模式加载数据
    if product_df is None:
        from utils.multi_store_data_utils import load_multi_store_data, get_store_product_sales_data, aggregate_multi_store_data
@@ -101,7 +103,7 @@ def train_product_model_with_kan(product_id, product_df=None, store_id=None, tra
    print(f"使用{model_type}模型训练产品 '{product_name}' (ID: {product_id}) 的销售预测模型")
    print(f"训练范围: {training_scope}")
    print(f"使用设备: {DEVICE}")
-    print(f"模型将保存到目录: {model_dir}")
+    print(f"模型将保存到: {path_info['base_dir']}")

    # 创建特征和目标变量
    features = ['sales', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']
@@ -234,12 +236,13 @@ def train_product_model_with_kan(product_id, product_df=None, store_id=None, tra

    # 绘制损失曲线并保存到模型目录
    model_name = 'optimized_kan' if use_optimized else 'kan'
-    loss_curve_path = plot_loss_curve(
-        train_losses,
-        test_losses,
-        product_name,
-        model_type,
-        model_dir=model_dir
+    loss_curve_path = path_info['loss_curve_path']
+    plot_loss_curve(
+        train_losses,
+        test_losses,
+        product_name,
+        model_type,
+        save_path=loss_curve_path
    )
    print(f"损失曲线已保存到: {loss_curve_path}")

@@ -269,9 +272,6 @@ def train_product_model_with_kan(product_id, product_df=None, store_id=None, tra
    print(f"MAPE: {metrics['mape']:.2f}%")
    print(f"训练时间: {training_time:.2f}秒")

-    # 使用统一模型管理器保存模型
-    from utils.model_manager import model_manager
-
    model_type_name = 'optimized_kan' if use_optimized else 'kan'

    model_data = {
@@ -297,15 +297,14 @@ def train_product_model_with_kan(product_id, product_df=None, store_id=None, tra
        'loss_curve_path': loss_curve_path
    }

-    model_path = model_manager.save_model(
-        model_data=model_data,
-        product_id=product_id,
-        model_type=model_type_name,
-        version='v1',  # KAN训练器默认使用v1
-        store_id=store_id,
-        training_mode=training_mode,
-        aggregation_method=aggregation_method,
-        product_name=product_name
-    )
+    # 检查模型性能是否达标
+    # 移除R2检查,始终保存模型
+    if metrics:
+        # 使用 path_info 中的路径保存模型
+        model_path = path_info['model_path']
+        torch.save(model_data, model_path)
+        print(f"模型已保存到: {model_path}")
+    else:
+        print(f"训练过程中未生成评估指标,不保存最终模型。")

-    return model, metrics
+    return model, metrics
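# The keys a trainer reads out of path_info in this diff are 'version',
# 'base_dir', 'model_path', 'loss_curve_path', 'best_checkpoint_path' and
# 'epoch_checkpoint_template'. A hand-built example dict (the concrete path
# values are illustrative; the real dict comes from ModelPathManager):
path_info = {
    'version': 2,
    'base_dir': 'saved_models/product_P001_mlstm_v2',
    'model_path': 'saved_models/product_P001_mlstm_v2/model.pth',
    'loss_curve_path': 'saved_models/product_P001_mlstm_v2/loss_curve.png',
    'best_checkpoint_path': 'saved_models/product_P001_mlstm_v2/checkpoint_best.pth',
    'epoch_checkpoint_template': 'saved_models/product_P001_mlstm_v2/checkpoint_epoch_{N}.pth',
}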
@@ -20,90 +20,35 @@ from utils.multi_store_data_utils import get_store_product_sales_data, aggregate
from utils.visualization import plot_loss_curve
from analysis.metrics import evaluate_model
from core.config import (
-    DEVICE, DEFAULT_MODEL_DIR, LOOK_BACK, FORECAST_HORIZON,
-    get_next_model_version, get_model_file_path, get_latest_model_version
+    DEVICE, DEFAULT_MODEL_DIR, LOOK_BACK, FORECAST_HORIZON
)
from utils.training_progress import progress_manager

-def save_checkpoint(checkpoint_data: dict, epoch_or_label, product_id: str,
-                    model_type: str, model_dir: str, store_id=None,
-                    training_mode: str = 'product', aggregation_method=None):
+def save_checkpoint(checkpoint_data: dict, epoch_or_label, path_info: dict):
    """
-    保存训练检查点
+    保存训练检查点 (已适配扁平化路径规范)

    Args:
        checkpoint_data: 检查点数据
-        epoch_or_label: epoch编号或标签(如'best')
-        product_id: 产品ID
-        model_type: 模型类型
-        model_dir: 模型保存目录
-        store_id: 店铺ID
-        training_mode: 训练模式
-        aggregation_method: 聚合方法
+        epoch_or_label: epoch编号或标签(如'best'或整数)
+        path_info (dict): 包含所有路径信息的字典
    """
-    # 创建检查点目录
-    checkpoint_dir = os.path.join(model_dir, 'checkpoints')
-    os.makedirs(checkpoint_dir, exist_ok=True)
-
-    # 生成检查点文件名
-    if training_mode == 'store' and store_id:
-        filename = f"{model_type}_store_{store_id}_{product_id}_epoch_{epoch_or_label}.pth"
-    elif training_mode == 'global' and aggregation_method:
-        filename = f"{model_type}_global_{product_id}_{aggregation_method}_epoch_{epoch_or_label}.pth"
+    if epoch_or_label == 'best':
+        # 使用由 ModelPathManager 直接提供的最佳检查点路径
+        checkpoint_path = path_info['best_checkpoint_path']
    else:
-        filename = f"{model_type}_product_{product_id}_epoch_{epoch_or_label}.pth"
-
-    checkpoint_path = os.path.join(checkpoint_dir, filename)
+        # 使用 epoch 检查点模板生成路径
+        template = path_info.get('epoch_checkpoint_template')
+        if not template:
+            raise ValueError("路径信息 'path_info' 中缺少 'epoch_checkpoint_template'。")
+        checkpoint_path = template.format(N=epoch_or_label)

    # 保存检查点
    torch.save(checkpoint_data, checkpoint_path)
    print(f"[mLSTM] 检查点已保存: {checkpoint_path}", flush=True)

    return checkpoint_path

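# How the template-based naming above resolves in practice: 'N' is the
# placeholder the trainer fills with either an epoch number or a label.
# (The concrete path is illustrative, matching the path_info example earlier.)
template = 'saved_models/product_P001_mlstm_v2/checkpoint_epoch_{N}.pth'
print(template.format(N=15))        # .../checkpoint_epoch_15.pth
print(template.format(N='final'))   # .../checkpoint_epoch_final.pth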
def load_checkpoint(product_id: str, model_type: str, epoch_or_label,
|
||||
model_dir: str, store_id=None, training_mode: str = 'product',
|
||||
aggregation_method=None):
|
||||
"""
|
||||
加载训练检查点
|
||||
|
||||
Args:
|
||||
product_id: 产品ID
|
||||
model_type: 模型类型
|
||||
epoch_or_label: epoch编号或标签
|
||||
model_dir: 模型保存目录
|
||||
store_id: 店铺ID
|
||||
training_mode: 训练模式
|
||||
aggregation_method: 聚合方法
|
||||
|
||||
Returns:
|
||||
checkpoint_data: 检查点数据,如果未找到返回None
|
||||
"""
|
||||
checkpoint_dir = os.path.join(model_dir, 'checkpoints')
|
||||
|
||||
# 生成检查点文件名
|
||||
if training_mode == 'store' and store_id:
|
||||
filename = f"{model_type}_store_{store_id}_{product_id}_epoch_{epoch_or_label}.pth"
|
||||
elif training_mode == 'global' and aggregation_method:
|
||||
filename = f"{model_type}_global_{product_id}_{aggregation_method}_epoch_{epoch_or_label}.pth"
|
||||
else:
|
||||
filename = f"{model_type}_product_{product_id}_epoch_{epoch_or_label}.pth"
|
||||
|
||||
checkpoint_path = os.path.join(checkpoint_dir, filename)
|
||||
|
||||
if os.path.exists(checkpoint_path):
|
||||
try:
|
||||
checkpoint_data = torch.load(checkpoint_path, map_location=DEVICE)
|
||||
print(f"[mLSTM] 检查点已加载: {checkpoint_path}", flush=True)
|
||||
return checkpoint_data
|
||||
except Exception as e:
|
||||
print(f"[mLSTM] 加载检查点失败: {e}", flush=True)
|
||||
return None
|
||||
else:
|
||||
print(f"[mLSTM] 检查点文件不存在: {checkpoint_path}", flush=True)
|
||||
return None
|
||||
|
||||
def train_product_model_with_mlstm(
|
||||
product_id,
|
||||
product_df,
|
||||
@ -111,12 +56,13 @@ def train_product_model_with_mlstm(
|
||||
training_mode='product',
|
||||
aggregation_method='sum',
|
||||
epochs=50,
|
||||
model_dir=DEFAULT_MODEL_DIR,
|
||||
version=None,
|
||||
model_dir=DEFAULT_MODEL_DIR, # 将被 path_info 替代
|
||||
version=None, # 将被 path_info 替代
|
||||
socketio=None,
|
||||
task_id=None,
|
||||
continue_training=False,
|
||||
progress_callback=None,
|
||||
path_info=None, # 新增参数
|
||||
patience=10,
|
||||
learning_rate=0.001,
|
||||
clip_norm=1.0
|
||||
@ -138,6 +84,12 @@ def train_product_model_with_mlstm(
|
||||
progress_callback: 进度回调函数,用于多进程训练
|
||||
"""
|
||||
|
||||
# 验证 path_info 是否提供
|
||||
if not path_info:
|
||||
raise ValueError("train_product_model_with_mlstm 需要 'path_info' 参数。")
|
||||
|
||||
version = path_info['version']

# 创建WebSocket进度反馈函数,支持多进程
def emit_progress(message, progress=None, metrics=None):
"""发送训练进度到前端"""

@ -171,18 +123,7 @@ def train_product_model_with_mlstm(
sys.stdout.flush()
sys.stderr.flush()

emit_progress("开始mLSTM模型训练...")

# 确定版本号
if version is None:
if continue_training:
version = get_latest_model_version(product_id, 'mlstm')
if version is None:
version = get_next_model_version(product_id, 'mlstm')
else:
version = get_next_model_version(product_id, 'mlstm')

emit_progress(f"开始训练 mLSTM 模型版本 {version}")
emit_progress(f"开始训练 mLSTM 模型版本 v{version}")

# 初始化训练进度管理器(如果还未初始化)
if socketio and task_id:

@ -235,9 +176,9 @@ def train_product_model_with_mlstm(

print(f"[mLSTM] 使用mLSTM模型训练产品 '{product_name}' (ID: {product_id}) 的销售预测模型", flush=True)
print(f"[mLSTM] 训练范围: {training_scope}", flush=True)
print(f"[mLSTM] 版本: {version}", flush=True)
print(f"[mLSTM] 版本: v{version}", flush=True)
print(f"[mLSTM] 使用设备: {DEVICE}", flush=True)
print(f"[mLSTM] 模型将保存到目录: {model_dir}", flush=True)
print(f"[mLSTM] 模型将保存到: {path_info['base_dir']}", flush=True)
print(f"[mLSTM] 数据量: {len(product_df)} 条记录", flush=True)

emit_progress(f"训练产品: {product_name} (ID: {product_id}) - {training_scope}")

@ -324,23 +265,16 @@ def train_product_model_with_mlstm(

# 如果是继续训练,加载现有模型
if continue_training and version != 'v1':
try:
existing_model_path = get_model_file_path(product_id, 'mlstm', version)
if os.path.exists(existing_model_path):
checkpoint = torch.load(existing_model_path, map_location=DEVICE)
model.load_state_dict(checkpoint['model_state_dict'])
print(f"加载现有模型: {existing_model_path}")
emit_progress(f"加载现有模型版本 {version} 进行继续训练")
except Exception as e:
print(f"无法加载现有模型,将重新开始训练: {e}")
emit_progress("无法加载现有模型,重新开始训练")
# TODO: 继续训练的逻辑需要调整以适应新的路径结构
# 例如,加载上一个版本的 best checkpoint
emit_progress("继续训练功能待适配新路径结构,暂时作为新训练开始。")

# 将模型移动到设备上
model = model.to(DEVICE)

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=patience // 2, factor=0.5, verbose=True)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=patience // 2, factor=0.5)

emit_progress("数据预处理完成,开始模型训练...", progress=10)

@ -452,14 +386,12 @@ def train_product_model_with_mlstm(
}

# 保存检查点
save_checkpoint(checkpoint_data, epoch + 1, product_id, 'mlstm',
model_dir, store_id, training_mode, aggregation_method)
save_checkpoint(checkpoint_data, epoch + 1, path_info)

# 如果是最佳模型,额外保存一份
if test_loss < best_loss:
best_loss = test_loss
save_checkpoint(checkpoint_data, 'best', product_id, 'mlstm',
model_dir, store_id, training_mode, aggregation_method)
save_checkpoint(checkpoint_data, 'best', path_info)
emit_progress(f"💾 保存最佳模型检查点 (epoch {epoch+1}, test_loss: {test_loss:.4f})")
epochs_no_improve = 0
else:

@ -480,26 +412,15 @@ def train_product_model_with_mlstm(

emit_progress("生成损失曲线...", progress=95)

# 确定模型保存目录(支持多店铺)
if store_id:
# 为特定店铺创建子目录
store_model_dir = os.path.join(model_dir, 'mlstm', store_id)
os.makedirs(store_model_dir, exist_ok=True)
loss_curve_filename = f"{product_id}_mlstm_{version}_loss_curve.png"
loss_curve_path = os.path.join(store_model_dir, loss_curve_filename)
else:
# 全局模型保存在global目录
global_model_dir = os.path.join(model_dir, 'mlstm', 'global')
os.makedirs(global_model_dir, exist_ok=True)
loss_curve_filename = f"{product_id}_mlstm_{version}_global_loss_curve.png"
loss_curve_path = os.path.join(global_model_dir, loss_curve_filename)
# 从 path_info 获取损失曲线保存路径
loss_curve_path = path_info['loss_curve_path']

# 绘制损失曲线并保存到模型目录
plt.figure(figsize=(10, 6))
plt.plot(train_losses, label='Training Loss')
plt.plot(test_losses, label='Test Loss')
title_suffix = f" - {training_scope}" if store_id else " - 全局模型"
plt.title(f'mLSTM 模型训练损失曲线 - {product_name} ({version}){title_suffix}')
plt.title(f'mLSTM 模型训练损失曲线 - {product_name} (v{version}){title_suffix}')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

@ -575,12 +496,17 @@ def train_product_model_with_mlstm(
}
}

# 保存最终模型(使用epoch标识)
final_model_path = save_checkpoint(
final_model_data, f"final_epoch_{epochs}", product_id, 'mlstm',
model_dir, store_id, training_mode, aggregation_method
)

# 检查模型性能是否达标
# 移除R2检查,始终保存模型
if metrics:
# 保存最终模型到 model.pth
final_model_path = path_info['model_path']
torch.save(final_model_data, final_model_path)
print(f"[mLSTM] 最终模型已保存: {final_model_path}", flush=True)
else:
final_model_path = None
print(f"[mLSTM] 训练过程中未生成评估指标,不保存最终模型。", flush=True)

# 发送训练完成消息
final_metrics = {
'mse': metrics['mse'],

@ -593,6 +519,9 @@ def train_product_model_with_mlstm(
'model_path': final_model_path
}

emit_progress(f"✅ mLSTM模型训练完成!最终epoch: {epochs} 已保存", progress=100, metrics=final_metrics)

return model, metrics, epochs, final_model_path
if final_model_path:
emit_progress(f"✅ mLSTM模型训练完成!最终epoch: {epochs} 已保存", progress=100, metrics=final_metrics)
else:
emit_progress(f"❌ mLSTM模型训练失败:性能不达标", progress=100, metrics={'error': '模型性能不佳'})

return model, metrics, epochs, final_model_path
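For reference, the refactored trainers above no longer derive save paths themselves; they consume a `path_info` dict. A minimal sketch of the contract they assume, with keys mirroring `ModelPathManager.get_model_paths` further down (the example values here are hypothetical):

# Minimal sketch of the path_info contract the refactored trainers rely on.
# Key names come from ModelPathManager.get_model_paths(); values are made up.
path_info = {
    'version': 2,
    'base_dir': 'saved_models',
    'model_path': 'saved_models/product_P001_all_mlstm_v2_model.pth',
    'loss_curve_path': 'saved_models/product_P001_all_mlstm_v2_loss_curve.png',
    'best_checkpoint_path': 'saved_models/checkpoints/product_P001_all_mlstm_v2_checkpoint_best.pth',
    'epoch_checkpoint_template': 'saved_models/checkpoints/product_P001_all_mlstm_v2_checkpoint_epoch_{N}.pth',
}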
@ -21,36 +21,25 @@ from analysis.metrics import evaluate_model
from core.config import DEVICE, DEFAULT_MODEL_DIR, LOOK_BACK, FORECAST_HORIZON
from utils.training_progress import progress_manager

def save_checkpoint(checkpoint_data: dict, epoch_or_label, product_id: str,
model_type: str, model_dir: str, store_id=None,
training_mode: str = 'product', aggregation_method=None):
def save_checkpoint(checkpoint_data: dict, epoch_or_label, path_info: dict):
"""
保存训练检查点

Args:
checkpoint_data: 检查点数据
epoch_or_label: epoch编号或标签(如'best')
product_id: 产品ID
model_type: 模型类型
model_dir: 模型保存目录
store_id: 店铺ID
training_mode: 训练模式
aggregation_method: 聚合方法
epoch_or_label: epoch编号或标签(如'best', 'final', 50)
path_info (dict): 包含所有路径信息的字典
"""
# 创建检查点目录
checkpoint_dir = os.path.join(model_dir, 'checkpoints')
os.makedirs(checkpoint_dir, exist_ok=True)

# 生成检查点文件名
if training_mode == 'store' and store_id:
filename = f"{model_type}_store_{store_id}_{product_id}_epoch_{epoch_or_label}.pth"
elif training_mode == 'global' and aggregation_method:
filename = f"{model_type}_global_{product_id}_{aggregation_method}_epoch_{epoch_or_label}.pth"
if epoch_or_label == 'best':
# 使用由 ModelPathManager 直接提供的最佳检查点路径
checkpoint_path = path_info['best_checkpoint_path']
else:
filename = f"{model_type}_product_{product_id}_epoch_{epoch_or_label}.pth"

checkpoint_path = os.path.join(checkpoint_dir, filename)

# 使用 epoch 检查点模板生成路径
template = path_info.get('epoch_checkpoint_template')
if not template:
raise ValueError("路径信息 'path_info' 中缺少 'epoch_checkpoint_template'。")
checkpoint_path = template.format(N=epoch_or_label)

# 保存检查点
torch.save(checkpoint_data, checkpoint_path)
print(f"[TCN] 检查点已保存: {checkpoint_path}", flush=True)

@ -64,11 +53,13 @@ def train_product_model_with_tcn(
training_mode='product',
aggregation_method='sum',
epochs=50,
model_dir=DEFAULT_MODEL_DIR,
version=None,
model_dir=DEFAULT_MODEL_DIR, # 将被 path_info 替代
version=None, # 将被 path_info 替代
socketio=None,
task_id=None,
continue_training=False
continue_training=False,
path_info=None, # 新增参数
**kwargs
):
"""
使用TCN模型训练产品销售预测模型

@ -89,6 +80,11 @@ def train_product_model_with_tcn(
model_path: 模型文件路径
"""

if not path_info:
raise ValueError("train_product_model_with_tcn 需要 'path_info' 参数。")

version = path_info['version']

def emit_progress(message, progress=None, metrics=None):
"""发送训练进度到前端"""
if socketio and task_id:

@ -103,17 +99,7 @@ def train_product_model_with_tcn(
data['metrics'] = metrics
socketio.emit('training_progress', data, namespace='/training')

# 确定版本号
if version is None:
from core.config import get_latest_model_version, get_next_model_version
if continue_training:
version = get_latest_model_version(product_id, 'tcn')
if version is None:
version = get_next_model_version(product_id, 'tcn')
else:
version = get_next_model_version(product_id, 'tcn')

emit_progress(f"开始训练 TCN 模型版本 {version}")
emit_progress(f"开始训练 TCN 模型版本 v{version}")

# 如果没有传入product_df,则根据训练模式加载数据
if product_df is None:

@ -180,9 +166,9 @@ def train_product_model_with_tcn(

print(f"使用TCN模型训练产品 '{product_name}' (ID: {product_id}) 的销售预测模型")
print(f"训练范围: {training_scope}")
print(f"版本: {version}")
print(f"版本: v{version}")
print(f"使用设备: {DEVICE}")
print(f"模型将保存到目录: {model_dir}")
print(f"模型将保存到: {path_info['base_dir']}")

emit_progress(f"训练产品: {product_name} (ID: {product_id})")

@ -255,18 +241,9 @@ def train_product_model_with_tcn(
)

# 如果是继续训练,加载现有模型
if continue_training and version != 'v1':
try:
from core.config import get_model_file_path
existing_model_path = get_model_file_path(product_id, 'tcn', version)
if os.path.exists(existing_model_path):
checkpoint = torch.load(existing_model_path, map_location=DEVICE)
model.load_state_dict(checkpoint['model_state_dict'])
print(f"加载现有模型: {existing_model_path}")
emit_progress(f"加载现有模型版本 {version} 进行继续训练")
except Exception as e:
print(f"无法加载现有模型,将重新开始训练: {e}")
emit_progress("无法加载现有模型,重新开始训练")
if continue_training:
# TODO: 继续训练的逻辑需要调整以适应新的路径结构
emit_progress("继续训练功能待适配新路径结构,暂时作为新训练开始。")

# 将模型移动到设备上
model = model.to(DEVICE)

@ -399,14 +376,12 @@ def train_product_model_with_tcn(
}

# 保存检查点
save_checkpoint(checkpoint_data, epoch + 1, product_id, 'tcn',
model_dir, store_id, training_mode, aggregation_method)
save_checkpoint(checkpoint_data, epoch + 1, path_info)

# 如果是最佳模型,额外保存一份
if test_loss < best_loss:
best_loss = test_loss
save_checkpoint(checkpoint_data, 'best', product_id, 'tcn',
model_dir, store_id, training_mode, aggregation_method)
save_checkpoint(checkpoint_data, 'best', path_info)
emit_progress(f"💾 保存最佳模型检查点 (epoch {epoch+1}, test_loss: {test_loss:.4f})")

emit_progress(f"💾 保存训练检查点 epoch_{epoch+1}")

@ -422,12 +397,13 @@ def train_product_model_with_tcn(
emit_progress("训练完成,正在保存模型...")

# 绘制损失曲线并保存到模型目录
loss_curve_path = plot_loss_curve(
train_losses,
test_losses,
product_name,
'TCN',
model_dir=model_dir
loss_curve_path = path_info['loss_curve_path']
plot_loss_curve(
train_losses,
test_losses,
product_name,
'TCN',
save_path=loss_curve_path
)
print(f"损失曲线已保存到: {loss_curve_path}")

@ -493,13 +469,17 @@ def train_product_model_with_tcn(

progress_manager.set_stage("model_saving", 50)

# 保存最终模型(使用epoch标识)
final_model_path = save_checkpoint(
final_model_data, f"final_epoch_{epochs}", product_id, 'tcn',
model_dir, store_id, training_mode, aggregation_method
)

progress_manager.set_stage("model_saving", 100)
# 检查模型性能是否达标
# 移除R2检查,始终保存模型
if metrics:
# 保存最终模型
final_model_path = path_info['model_path']
torch.save(final_model_data, final_model_path)
print(f"[TCN] 最终模型已保存: {final_model_path}", flush=True)
progress_manager.set_stage("model_saving", 100)
else:
final_model_path = None
print(f"[TCN] 训练过程中未生成评估指标,不保存最终模型。", flush=True)

final_metrics = {
'mse': metrics['mse'],

@ -511,6 +491,9 @@ def train_product_model_with_tcn(
'final_epoch': epochs
}

emit_progress(f"模型训练完成!最终epoch: {epochs}", progress=100, metrics=final_metrics)
if final_model_path:
emit_progress(f"模型训练完成!最终epoch: {epochs}", progress=100, metrics=final_metrics)
else:
emit_progress(f"❌ TCN模型训练失败:性能不达标", progress=100, metrics={'error': '模型性能不佳'})

return model, metrics, epochs, final_model_path
return model, metrics, epochs, final_model_path
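The rewritten `save_checkpoint` routes the label 'best' to `best_checkpoint_path` and every other value through `epoch_checkpoint_template`. A quick sketch of the resulting paths (template string assumed to be the one ModelPathManager produces):

# Assumed template, as emitted by ModelPathManager.get_model_paths().
template = 'saved_models/checkpoints/product_P001_all_tcn_v1_checkpoint_epoch_{N}.pth'
print(template.format(N=5))    # ..._checkpoint_epoch_5.pth   (periodic epoch checkpoint)
print(template.format(N=50))   # ..._checkpoint_epoch_50.pth  ('best' bypasses the template entirely)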
@ -21,42 +21,30 @@ from utils.multi_store_data_utils import get_store_product_sales_data, aggregate
from utils.visualization import plot_loss_curve
from analysis.metrics import evaluate_model
from core.config import (
DEVICE, DEFAULT_MODEL_DIR, LOOK_BACK, FORECAST_HORIZON,
get_next_model_version, get_model_file_path, get_latest_model_version
DEVICE, DEFAULT_MODEL_DIR, LOOK_BACK, FORECAST_HORIZON
)
from utils.training_progress import progress_manager
from utils.model_manager import model_manager

def save_checkpoint(checkpoint_data: dict, epoch_or_label, product_id: str,
model_type: str, model_dir: str, store_id=None,
training_mode: str = 'product', aggregation_method=None):
def save_checkpoint(checkpoint_data: dict, epoch_or_label, path_info: dict):
"""
保存训练检查点

Args:
checkpoint_data: 检查点数据
epoch_or_label: epoch编号或标签(如'best')
product_id: 产品ID
model_type: 模型类型
model_dir: 模型保存目录
store_id: 店铺ID
training_mode: 训练模式
aggregation_method: 聚合方法
epoch_or_label: epoch编号或标签(如'best', 'final', 50)
path_info (dict): 包含所有路径信息的字典
"""
# 创建检查点目录
checkpoint_dir = os.path.join(model_dir, 'checkpoints')
os.makedirs(checkpoint_dir, exist_ok=True)

# 生成检查点文件名
if training_mode == 'store' and store_id:
filename = f"{model_type}_store_{store_id}_{product_id}_epoch_{epoch_or_label}.pth"
elif training_mode == 'global' and aggregation_method:
filename = f"{model_type}_global_{product_id}_{aggregation_method}_epoch_{epoch_or_label}.pth"
if epoch_or_label == 'best':
# 使用由 ModelPathManager 直接提供的最佳检查点路径
checkpoint_path = path_info['best_checkpoint_path']
else:
filename = f"{model_type}_product_{product_id}_epoch_{epoch_or_label}.pth"

checkpoint_path = os.path.join(checkpoint_dir, filename)

# 使用 epoch 检查点模板生成路径
template = path_info.get('epoch_checkpoint_template')
if not template:
raise ValueError("路径信息 'path_info' 中缺少 'epoch_checkpoint_template'。")
checkpoint_path = template.format(N=epoch_or_label)

# 保存检查点
torch.save(checkpoint_data, checkpoint_path)
print(f"[Transformer] 检查点已保存: {checkpoint_path}", flush=True)

@ -70,11 +58,12 @@ def train_product_model_with_transformer(
training_mode='product',
aggregation_method='sum',
epochs=50,
model_dir=DEFAULT_MODEL_DIR,
version=None,
model_dir=DEFAULT_MODEL_DIR, # 将被 path_info 替代
version=None, # 将被 path_info 替代
socketio=None,
task_id=None,
continue_training=False,
path_info=None, # 新增参数
patience=10,
learning_rate=0.001,
clip_norm=1.0

@ -97,6 +86,11 @@ def train_product_model_with_transformer(
version: 实际使用的版本号
"""

if not path_info:
raise ValueError("train_product_model_with_transformer 需要 'path_info' 参数。")

version = path_info['version']

# WebSocket进度反馈函数
def emit_progress(message, progress=None, metrics=None):
"""发送训练进度到前端"""

@ -117,7 +111,7 @@ def train_product_model_with_transformer(
sys.stdout.flush()
sys.stderr.flush()

emit_progress("开始Transformer模型训练...")
emit_progress(f"开始Transformer模型训练... 版本 v{version}")

# 获取训练进度管理器实例
try:

@ -158,12 +152,26 @@ def train_product_model_with_transformer(
# 默认:加载所有店铺的产品数据
product_df = load_multi_store_data('pharmacy_sales_multi_store.csv', product_id=product_id)
training_scope = "所有店铺"
except ValueError as e:
if "No objects to concatenate" in str(e):
err_msg = f"聚合数据失败 (product: {product_id}, store: {store_id}, mode: {training_mode}): 没有找到可聚合的数据。"
emit_progress(err_msg)
# 在这种情况下,我们不能继续,所以抛出异常
raise ValueError(err_msg) from e
# 对于其他 ValueError,也打印并重新抛出
emit_progress(f"数据加载时发生值错误: {e}")
raise e
except Exception as e:
print(f"多店铺数据加载失败: {e}")
emit_progress(f"多店铺数据加载失败: {e}, 尝试后备方案...")
# 后备方案:尝试原始数据
df = pd.read_excel('pharmacy_sales.xlsx')
product_df = df[df['product_id'] == product_id].sort_values('date')
training_scope = "原始数据"
try:
df = pd.read_excel('pharmacy_sales.xlsx')
product_df = df[df['product_id'] == product_id].sort_values('date')
training_scope = "原始数据"
emit_progress("成功从 'pharmacy_sales.xlsx' 加载后备数据。")
except Exception as fallback_e:
emit_progress(f"后备数据加载失败: {fallback_e}")
raise fallback_e from e
else:
# 如果传入了product_df,直接使用
if training_mode == 'store' and store_id:

@ -197,7 +205,7 @@ def train_product_model_with_transformer(

print(f"[Transformer] 训练产品 '{product_name}' (ID: {product_id}) 的销售预测模型", flush=True)
print(f"[Device] 使用设备: {DEVICE}", flush=True)
print(f"[Model] 模型将保存到目录: {model_dir}", flush=True)
print(f"[Model] 模型将保存到: {path_info['base_dir']}", flush=True)

# 创建特征和目标变量
features = ['sales', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']

@ -279,7 +287,7 @@ def train_product_model_with_transformer(

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=patience // 2, factor=0.5, verbose=True)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=patience // 2, factor=0.5)

# 训练模型
train_losses = []

@ -401,20 +409,19 @@ def train_product_model_with_transformer(
}
}

# 保存检查点
save_checkpoint(checkpoint_data, epoch + 1, product_id, 'transformer',
model_dir, store_id, training_mode, aggregation_method)

# 如果是最佳模型,额外保存一份
if test_loss < best_loss:
# 检查是否为最佳模型
is_best = test_loss < best_loss
if is_best:
best_loss = test_loss
save_checkpoint(checkpoint_data, 'best', product_id, 'transformer',
model_dir, store_id, training_mode, aggregation_method)
emit_progress(f"💾 保存最佳模型检查点 (epoch {epoch+1}, test_loss: {test_loss:.4f})")
epochs_no_improve = 0
# 保存最佳模型检查点
save_checkpoint(checkpoint_data, 'best', path_info)
emit_progress(f"💾 保存最佳模型检查点 (epoch {epoch+1}, test_loss: {test_loss:.4f})")
else:
epochs_no_improve += 1

# 保存定期的epoch检查点(如果不是最佳模型,或者即使是最佳也保存一份epoch版本)
save_checkpoint(checkpoint_data, epoch + 1, path_info)
emit_progress(f"💾 保存训练检查点 epoch_{epoch+1}")

if (epoch + 1) % 10 == 0:

@ -433,12 +440,13 @@ def train_product_model_with_transformer(
emit_progress("训练完成,正在保存模型...")

# 绘制损失曲线并保存到模型目录
loss_curve_path = plot_loss_curve(
train_losses,
test_losses,
product_name,
'Transformer',
model_dir=model_dir
loss_curve_path = path_info['loss_curve_path']
plot_loss_curve(
train_losses,
test_losses,
product_name,
'Transformer',
save_path=loss_curve_path
)
print(f"📈 损失曲线已保存到: {loss_curve_path}", flush=True)

@ -502,16 +510,18 @@ def train_product_model_with_transformer(

progress_manager.set_stage("model_saving", 50)

# 保存最终模型(使用epoch标识)
final_model_path = save_checkpoint(
final_model_data, f"final_epoch_{epochs}", product_id, 'transformer',
model_dir, store_id, training_mode, aggregation_method
)

progress_manager.set_stage("model_saving", 100)
emit_progress(f"模型已保存到 {final_model_path}")

print(f"💾 模型已保存到 {final_model_path}", flush=True)
# 检查模型性能是否达标
# 移除R2检查,始终保存模型
if metrics:
# 保存最终模型
final_model_path = path_info['model_path']
torch.save(final_model_data, final_model_path)
progress_manager.set_stage("model_saving", 100)
emit_progress(f"模型已保存到 {final_model_path}")
print(f"💾 模型已保存到 {final_model_path}", flush=True)
else:
final_model_path = None
print(f"[Transformer] 训练过程中未生成评估指标,不保存最终模型。", flush=True)

# 准备最终返回的指标
final_metrics = {

@ -523,5 +533,10 @@ def train_product_model_with_transformer(
'training_time': training_time,
'final_epoch': epochs
}

if final_model_path:
emit_progress(f"✅ Transformer模型训练完成!", progress=100, metrics=final_metrics)
else:
emit_progress(f"❌ Transformer模型训练失败:性能不达标", progress=100, metrics={'error': '模型性能不佳'})

return model, final_metrics, epochs
return model, metrics, epochs, final_model_path
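All three trainers drop `verbose=True` from `ReduceLROnPlateau`; that keyword has been deprecated in recent PyTorch releases, and the scheduler is otherwise used in the standard step-on-validation-loss way. A minimal runnable sketch of that pattern (model and losses here are stand-ins):

import torch
import torch.nn as nn

model = nn.Linear(4, 1)                                   # stand-in model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# verbose=True removed, matching the diff; the flag is deprecated upstream.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5, factor=0.5)

for epoch in range(20):
    test_loss = 1.0 / (epoch + 1)                         # stand-in for the real validation loss
    scheduler.step(test_loss)                             # LR halves after `patience` stagnant epochs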
server/trainers/xgboost_trainer.py (Normal file)
@ -0,0 +1,296 @@
import numpy as np
import pandas as pd
import os
import joblib
import xgboost as xgb
from xgboost.callback import EarlyStopping
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.preprocessing import MinMaxScaler

# 从项目中导入正确的工具函数和配置
from utils.multi_store_data_utils import load_multi_store_data
from core.config import DEFAULT_DATA_PATH
from utils.file_save import ModelPathManager
from analysis.metrics import evaluate_model

# 重构后的原生API兼容回调
class EpochCheckpointCallback(xgb.callback.TrainingCallback):
def __init__(self, save_period, payload, base_path):
super().__init__()
self.save_period = save_period
self.payload = payload
self.base_path = base_path
self.best_score = float('inf')

def _save_checkpoint(self, model, path_suffix):
"""辅助函数,用于保存模型和元数据检查点"""
metadata_path = self.base_path.replace('_model.pth', f'_{path_suffix}.pth')
model_file_path = metadata_path.replace('.pth', '.xgb')

# 确保目录存在
os.makedirs(os.path.dirname(metadata_path), exist_ok=True)

# 保存原生Booster模型
model.save_model(model_file_path)

# 更新payload中的模型文件引用
self.payload['model_file'] = model_file_path
joblib.dump(self.payload, metadata_path)

print(f"[Checkpoint] 已保存检查点到: {metadata_path}")

def after_iteration(self, model, epoch, evals_log):
# 获取当前验证集的分数 (假设'test'是验证集)
current_score = evals_log['test']['rmse'][-1]

# 保存最佳模型
if current_score < self.best_score:
self.best_score = current_score
self._save_checkpoint(model, 'checkpoint_best')

# 保存周期性检查点
if (epoch + 1) % self.save_period == 0:
self._save_checkpoint(model, f'checkpoint_epoch_{epoch + 1}')

return False # 继续训练

def create_dataset(data, look_back=7):
"""
将时间序列数据转换为监督学习格式。
:param data: 输入的DataFrame,包含特征和目标。
:param look_back: 用于预测的时间窗口大小。
:return: X (特征), y (目标)
"""
X, y = [], []
feature_columns = [col for col in data.columns if col != 'date']

for i in range(len(data) - look_back):
# 展平look_back窗口内的所有特征
features = data[feature_columns].iloc[i:(i + look_back)].values.flatten()
X.append(features)
# 目标是窗口后的第一个销售值
y.append(data['sales'].iloc[i + look_back])

return np.array(X), np.array(y)
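A quick sanity check of `create_dataset` on a toy frame (values are arbitrary): with 10 rows and `look_back=7` it yields 3 samples, each flattening 7 days of every non-`date` column, with the target being the sales value just after each window.

import pandas as pd
import numpy as np

toy = pd.DataFrame({
    'date': pd.date_range('2024-01-01', periods=10),
    'sales': np.arange(10, dtype=float),
    'temperature': np.full(10, 20.0),
})
X, y = create_dataset(toy, look_back=7)
print(X.shape, y.shape)   # (3, 14) and (3,): 7 days * 2 features; y == [7., 8., 9.]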

def train_product_model_with_xgboost(
product_id,
store_id=None,
epochs=100, # XGBoost中n_estimators更常用
look_back=7,
socketio=None,
task_id=None,
version='v1',
path_info=None,
**kwargs):
"""
使用XGBoost训练产品销售预测模型。
"""

def emit_progress(message, progress=None):
if socketio and task_id:
payload = {'task_id': task_id, 'message': message}
if progress is not None:
payload['progress'] = progress
socketio.emit('training_update', payload, namespace='/api/training', room=task_id)
print(f"[{task_id}] {message}")

try:
model_path = None
emit_progress("开始XGBoost模型训练...", 0)

# 1. 加载数据
# 使用正确的函数并从config导入路径
full_df = load_multi_store_data(DEFAULT_DATA_PATH)

# 根据 store_id 和 product_id 筛选数据
if store_id:
df = full_df[(full_df['product_id'] == product_id) & (full_df['store_id'] == store_id)].copy()
else:
# 如果没有store_id,则聚合该产品在所有店铺的数据
df = full_df[full_df['product_id'] == product_id].groupby('date').agg({
'sales': 'sum',
'weekday': 'first',
'month': 'first',
'is_holiday': 'max',
'is_weekend': 'max',
'is_promotion': 'max',
'temperature': 'mean'
}).reset_index()

if df.empty:
raise ValueError(f"加载的数据为空 (product: {product_id}, store: {store_id}),无法进行训练。")

# 确保数据按日期排序
df = df.sort_values('date').reset_index(drop=True)

emit_progress("数据加载完成。", 10)

# 2. 创建数据集
features_to_use = ['sales', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']
# 确保所有需要的特征都存在
for col in features_to_use:
if col not in df.columns:
# 如果特征不存在,用0填充
df[col] = 0

df_features = df[['date'] + features_to_use]

X, y = create_dataset(df_features, look_back)
if X.shape[0] == 0:
raise ValueError("创建数据集后样本数量为0,请检查数据量和look_back参数。")

emit_progress(f"数据集创建完成,样本数: {X.shape[0]}", 20)

# 3. 划分训练集和测试集
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# 数据缩放
scaler_X = MinMaxScaler()
X_train_scaled = scaler_X.fit_transform(X_train)
X_test_scaled = scaler_X.transform(X_test)

scaler_y = MinMaxScaler()
y_train_scaled = scaler_y.fit_transform(y_train.reshape(-1, 1))
# y_test is not scaled, used for metric calculation against inverse_transformed predictions

emit_progress("数据划分和缩放完成。", 30)

# 4. 切换到XGBoost原生API
params = {
'learning_rate': kwargs.get('learning_rate', 0.1),
'max_depth': kwargs.get('max_depth', 5),
'subsample': kwargs.get('subsample', 0.8),
'colsample_bytree': kwargs.get('colsample_bytree', 0.8),
'objective': 'reg:squarederror',
'eval_metric': 'rmse',
'random_state': 42
}

dtrain = xgb.DMatrix(X_train_scaled, label=y_train_scaled.ravel())
dtest = xgb.DMatrix(X_test_scaled, label=scaler_y.transform(y_test.reshape(-1, 1)).ravel())

emit_progress("开始模型训练...", 40)

# 定义验证集
evals = [(dtrain, 'train'), (dtest, 'test')]

# 准备回调
callbacks = []
checkpoint_interval = kwargs.get('checkpoint_interval', 10) # 默认每10轮保存一次
if path_info and path_info.get('model_path') and checkpoint_interval > 0:
# 准备用于保存的payload,模型对象将在回调中动态更新
checkpoint_payload = {
'metrics': {}, # 检查点不保存最终指标
'scaler_X': scaler_X,
'scaler_y': scaler_y,
'config': {
'look_back': look_back,
'features': features_to_use,
'product_id': product_id,
'store_id': store_id,
'version': version
}
}
checkpoint_callback = EpochCheckpointCallback(
save_period=checkpoint_interval,
payload=checkpoint_payload,
base_path=path_info['model_path']
)
callbacks.append(checkpoint_callback)

# 添加早停回调 (移除save_best)
callbacks.append(EarlyStopping(rounds=10))

# 用于存储评估结果
evals_result = {}

model = xgb.train(
params=params,
dtrain=dtrain,
num_boost_round=epochs,
evals=evals,
callbacks=callbacks,
evals_result=evals_result,
verbose_eval=False
)
emit_progress("模型训练完成。", 80)

# 绘制并保存损失曲线
if path_info and path_info.get('model_path'):
try:
loss_curve_path = path_info['model_path'].replace('_model.pth', '_loss_curve.png')
results = evals_result
train_rmse = results['train']['rmse']
test_rmse = results['test']['rmse']
num_epochs = len(train_rmse)
x_axis = range(0, num_epochs)

fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(x_axis, train_rmse, label='Train')
ax.plot(x_axis, test_rmse, label='Test')
ax.legend()
plt.ylabel('RMSE')
plt.xlabel('Epoch')
plt.title('XGBoost RMSE Loss Curve')
plt.savefig(loss_curve_path)
plt.close(fig)
emit_progress(f"损失曲线图已保存到: {loss_curve_path}")
except Exception as e:
emit_progress(f"警告: 绘制损失曲线失败: {str(e)}")

# 5. 评估模型
dtest_pred = xgb.DMatrix(X_test_scaled)
y_pred_scaled = model.predict(dtest_pred)
y_pred = scaler_y.inverse_transform(y_pred_scaled.reshape(-1, 1)).flatten()

metrics = {
'RMSE': np.sqrt(mean_squared_error(y_test, y_pred)),
'MAE': mean_absolute_error(y_test, y_pred),
'R2': r2_score(y_test, y_pred)
}
emit_progress(f"模型评估完成: {metrics}", 90)

# 6. 保存模型 (原生API方式)
if path_info and path_info.get('model_path'):
metadata_path = path_info['model_path']
# 使用 .xgb 扩展名保存原生Booster模型
model_file_path = metadata_path.replace('.pth', '.xgb')

# 确保目录存在
os.makedirs(os.path.dirname(metadata_path), exist_ok=True)

# 使用原生方法保存Booster模型
model.save_model(model_file_path)
emit_progress(f"原生XGBoost模型已保存到: {model_file_path}")

# 保存元数据(包括模型文件路径)
metadata_payload = {
'model_file': model_file_path, # 保存模型文件的引用
'metrics': metrics,
'scaler_X': scaler_X,
'scaler_y': scaler_y,
'config': {
'look_back': look_back,
'features': features_to_use,
'product_id': product_id,
'store_id': store_id,
'version': version
}
}
joblib.dump(metadata_payload, metadata_path)
model_path = metadata_path # 确保model_path被赋值
emit_progress(f"模型元数据已保存到: {metadata_path}", 100)
else:
emit_progress("警告: 未提供path_info,模型未保存。", 100)

return metrics, model_path

except Exception as e:
emit_progress(f"XGBoost训练失败: {str(e)}", 100)
import traceback
traceback.print_exc()
return {'error': str(e)}, None
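Because the trainer writes the native Booster to a `.xgb` file and the scalers/config to a joblib-pickled `.pth` sidecar, a consumer would load the pair roughly like this. This is a sketch under those assumptions; the file name follows the `_model.pth` convention above, and `X_new` stands in for a flattened look-back window:

import joblib
import xgboost as xgb

# Hypothetical metadata path produced by ModelPathManager for an xgboost run.
meta = joblib.load('saved_models/product_P001_all_xgboost_v1_model.pth')
booster = xgb.Booster()
booster.load_model(meta['model_file'])                    # the .xgb file saved alongside

X_scaled = meta['scaler_X'].transform(X_new)              # X_new: flattened look_back window
y_scaled = booster.predict(xgb.DMatrix(X_scaled))
y_pred = meta['scaler_y'].inverse_transform(y_scaled.reshape(-1, 1)).ravel()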
@ -60,7 +60,7 @@ def prepare_data(product_data, sequence_length=30, forecast_horizon=7):
scaler_X, scaler_y: 特征和目标的归一化器
"""
# 创建特征和目标变量
features = ['sales', 'price', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']
features = ['sales', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']

# 预处理数据
X_raw = product_data[features].values
server/utils/file_save.py (Normal file)
@ -0,0 +1,256 @@
import os
import json
import hashlib
from threading import Lock
from typing import List, Dict, Any, Optional

class ModelPathManager:
"""
根据定义的规则管理模型训练产物的保存路径。
此类旨在集中处理所有与文件系统交互的路径生成逻辑,
确保整个应用程序遵循统一的模型保存标准。
"""
def __init__(self, base_dir: str = 'saved_models'):
"""
初始化路径管理器。

Args:
base_dir (str): 所有模型保存的根目录。
"""
# 始终使用相对于项目根目录的相对路径
self.base_dir = base_dir
self.versions_file = os.path.join(self.base_dir, 'versions.json')
self.lock = Lock()

# 确保根目录存在
os.makedirs(self.base_dir, exist_ok=True)

def _hash_ids(self, ids: List[str]) -> str:
"""
对ID列表进行排序和哈希,生成一个稳定的、简短的哈希值。

Args:
ids (List[str]): 需要哈希的ID列表。

Returns:
str: 代表该ID集合的10位短哈希字符串。
"""
if not ids:
return 'none'
# 排序以确保对于相同集合的ID,即使顺序不同,结果也一样
sorted_ids = sorted([str(i) for i in ids])
id_string = ",".join(sorted_ids)

# 使用SHA256生成哈希值并截取前10位
return hashlib.sha256(id_string.encode('utf-8')).hexdigest()[:10]

def _generate_identifier(self, training_mode: str, **kwargs: Any) -> str:
"""
根据训练模式和参数生成模型的唯一标识符 (identifier)。
这个标识符将作为版本文件中的key,并用于构建目录路径。

Args:
training_mode (str): 训练模式 ('product', 'store', 'global')。
**kwargs: 从API请求中传递的参数字典。

Returns:
str: 模型的唯一标识符。

Raises:
ValueError: 如果缺少必要的参数。
"""
if training_mode == 'product':
product_id = kwargs.get('product_id')
if not product_id:
raise ValueError("按药品训练模式需要 'product_id'。")
# 对于药品训练,数据范围由 store_id 定义
store_id = kwargs.get('store_id')
scope = store_id if store_id is not None else 'all'
return f"product_{product_id}_scope_{scope}"

elif training_mode == 'store':
store_id = kwargs.get('store_id')
if not store_id:
raise ValueError("按店铺训练模式需要 'store_id'。")

product_scope = kwargs.get('product_scope', 'all')
if product_scope == 'specific':
product_ids = kwargs.get('product_ids')
if not product_ids:
raise ValueError("店铺训练选择 specific 范围时需要 'product_ids'。")
# 如果只有一个ID,直接使用ID;否则使用哈希
scope = product_ids[0] if len(product_ids) == 1 else self._hash_ids(product_ids)
else:
scope = 'all'
return f"store_{store_id}_products_{scope}"

elif training_mode == 'global':
training_scope = kwargs.get('training_scope', 'all')

if training_scope in ['all', 'all_stores_all_products']:
scope_part = 'all'
elif training_scope == 'selected_stores':
store_ids = kwargs.get('store_ids')
if not store_ids:
raise ValueError("全局训练选择 selected_stores 范围时需要 'store_ids'。")
# 如果只有一个ID,直接使用ID;否则使用哈希
scope_id = store_ids[0] if len(store_ids) == 1 else self._hash_ids(store_ids)
scope_part = f"stores_{scope_id}"
elif training_scope == 'selected_products':
product_ids = kwargs.get('product_ids')
if not product_ids:
raise ValueError("全局训练选择 selected_products 范围时需要 'product_ids'。")
# 如果只有一个ID,直接使用ID;否则使用哈希
scope_id = product_ids[0] if len(product_ids) == 1 else self._hash_ids(product_ids)
scope_part = f"products_{scope_id}"
elif training_scope == 'custom':
store_ids = kwargs.get('store_ids')
product_ids = kwargs.get('product_ids')
if not store_ids or not product_ids:
raise ValueError("全局训练选择 custom 范围时需要 'store_ids' 和 'product_ids'。")
s_id = store_ids[0] if len(store_ids) == 1 else self._hash_ids(store_ids)
p_id = product_ids[0] if len(product_ids) == 1 else self._hash_ids(product_ids)
scope_part = f"custom_s_{s_id}_p_{p_id}"
else:
raise ValueError(f"未知的全局训练范围: {training_scope}")

aggregation_method = kwargs.get('aggregation_method', 'sum')
return f"global_{scope_part}_{aggregation_method}"

else:
raise ValueError(f"未知的训练模式: {training_mode}")

def get_next_version(self, identifier: str) -> int:
"""
获取指定标识符的下一个版本号。
此方法是线程安全的。

Args:
identifier (str): 模型的唯一标识符。

Returns:
int: 下一个可用的版本号 (从1开始)。
"""
with self.lock:
try:
if os.path.exists(self.versions_file):
with open(self.versions_file, 'r', encoding='utf-8') as f:
versions_data = json.load(f)
else:
versions_data = {}

# 如果标识符不存在,当前版本为0,下一个版本即为1
current_version = versions_data.get(identifier, 0)
return current_version + 1
except (IOError, json.JSONDecodeError) as e:
# 如果文件损坏或读取失败,从0开始
print(f"警告: 读取版本文件 '{self.versions_file}' 失败: {e}。将从版本1开始。")
return 1

def save_version_info(self, identifier: str, new_version: int):
"""
训练成功后,更新版本文件。
此方法是线程安全的。

Args:
identifier (str): 模型的唯一标识符。
new_version (int): 要保存的新的版本号。
"""
with self.lock:
try:
if os.path.exists(self.versions_file):
with open(self.versions_file, 'r', encoding='utf-8') as f:
versions_data = json.load(f)
else:
versions_data = {}

versions_data[identifier] = new_version

with open(self.versions_file, 'w', encoding='utf-8') as f:
json.dump(versions_data, f, indent=4, ensure_ascii=False)
except (IOError, json.JSONDecodeError) as e:
print(f"错误: 保存版本信息到 '{self.versions_file}' 失败: {e}")
# 在这种情况下,可以选择抛出异常或采取其他恢复措施
raise

def get_model_paths(self, training_mode: str, model_type: str, **kwargs: Any) -> Dict[str, Any]:
"""
主入口函数:为一次新的训练获取所有相关路径和版本信息。
此方法遵循扁平化文件存储规范,将逻辑路径编码到文件名中。

Args:
training_mode (str): 训练模式 ('product', 'store', 'global')。
model_type (str): 模型类型 (e.g., 'mlstm', 'kan')。
**kwargs: 从API请求中传递的参数字典。

Returns:
Dict[str, Any]: 一个包含所有路径和关键信息的字典。
"""
# 1. 生成不含模型类型和版本的核心标识符,并将其中的分隔符替换为下划线
# 例如:product/P001/all -> product_P001_all
base_identifier = self._generate_identifier(training_mode, **kwargs)

# 规范化处理,将 'scope' 'products' 等关键字替换为更简洁的形式
# 例如 product_P001_scope_all -> product_P001_all
core_prefix = base_identifier.replace('_scope_', '_').replace('_products_', '_')

# 2. 构建用于版本控制的完整标识符 (不含版本号)
# 例如: product_P001_all_mlstm
version_control_identifier = f"{core_prefix}_{model_type}"

# 3. 获取下一个版本号
next_version = self.get_next_version(version_control_identifier)
version_str = f"v{next_version}"

# 4. 构建最终的文件名前缀,包含版本号
# 例如: product_P001_all_mlstm_v2
filename_prefix = f"{version_control_identifier}_{version_str}"

# 5. 确保 `saved_models` 和 `saved_models/checkpoints` 目录存在
checkpoints_base_dir = os.path.join(self.base_dir, 'checkpoints')
os.makedirs(self.base_dir, exist_ok=True)
os.makedirs(checkpoints_base_dir, exist_ok=True)

# 6. 构建并返回包含所有扁平化路径和关键信息的字典
return {
"identifier": version_control_identifier, # 用于版本控制的key
"filename_prefix": filename_prefix, # 用于数据库和文件查找
"version": next_version,
"base_dir": self.base_dir,
"model_path": os.path.join(self.base_dir, f"{filename_prefix}_model.pth"),
"metadata_path": os.path.join(self.base_dir, f"{filename_prefix}_metadata.json"),
"loss_curve_path": os.path.join(self.base_dir, f"{filename_prefix}_loss_curve.png"),
"checkpoint_dir": checkpoints_base_dir, # 指向公共的检查点目录
"best_checkpoint_path": os.path.join(checkpoints_base_dir, f"{filename_prefix}_checkpoint_best.pth"),
# 为动态epoch检查点提供一个格式化模板
"epoch_checkpoint_template": os.path.join(checkpoints_base_dir, f"{filename_prefix}_checkpoint_epoch_{{N}}.pth")
}

def get_model_path_for_prediction(self, training_mode: str, model_type: str, version: int, **kwargs: Any) -> Optional[str]:
"""
获取用于预测的已存在模型的完整路径 (遵循扁平化规范)。

Args:
training_mode (str): 训练模式。
model_type (str): 模型类型。
version (int): 模型版本号。
**kwargs: 其他用于定位模型的参数。

Returns:
Optional[str]: 模型的完整路径,如果不存在则返回None。
"""
# 1. 生成不含模型类型和版本的核心标识符
base_identifier = self._generate_identifier(training_mode, **kwargs)
core_prefix = base_identifier.replace('_scope_', '_').replace('_products_', '_')

# 2. 构建用于版本控制的标识符
version_control_identifier = f"{core_prefix}_{model_type}"

# 3. 构建完整的文件名前缀
version_str = f"v{version}"
filename_prefix = f"{version_control_identifier}_{version_str}"

# 4. 构建模型文件的完整路径
model_path = os.path.join(self.base_dir, f"{filename_prefix}_model.pth")

return model_path if os.path.exists(model_path) else None
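Putting the class together, a short usage sketch of the flat-naming scheme it implements (paths in comments are illustrative; they depend on what is already recorded in versions.json):

pm = ModelPathManager(base_dir='saved_models')

# _hash_ids sorts before hashing, so the digest is order-independent.
assert pm._hash_ids(['P001', 'P005']) == pm._hash_ids(['P005', 'P001'])

paths = pm.get_model_paths(training_mode='product', model_type='mlstm',
                           product_id='P001', store_id=None)
print(paths['identifier'])   # e.g. product_P001_all_mlstm
print(paths['model_path'])   # e.g. saved_models/product_P001_all_mlstm_v1_model.pth

# get_model_paths only peeks at the next version; persist it after a successful run.
pm.save_version_info(paths['identifier'], paths['version'])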
@ -321,6 +321,24 @@ class ModelManager:
'aggregation_method': aggregation_method
}

# 兼容以 _model.pth 结尾的格式
elif base_name.endswith('_model'):
name_part = base_name.rsplit('_model', 1)[0]
parts = name_part.split('_')
# 假设格式为 {product_id}_{...}_{model_type}_{version}
if len(parts) >= 3:
version = parts[-1]
model_type = parts[-2]
product_id = '_'.join(parts[:-2]) # The rest is product_id + scope
return {
'model_type': model_type,
'product_id': product_id,
'version': version,
'training_mode': 'product', # Assumption
'store_id': None,
'aggregation_method': None
}

# 兼容旧格式
else:
# 尝试解析其他格式

@ -345,7 +363,7 @@ class ModelManager:
'store_id': None,
'aggregation_method': None
}

except Exception as e:
print(f"解析文件名失败 {filename}: {e}")
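The new `_model` branch effectively reverses the flat naming scheme. A sketch of just the split logic, outside ModelManager, on a representative filename:

base_name = 'product_P001_all_mlstm_v2_model'
name_part = base_name.rsplit('_model', 1)[0]   # 'product_P001_all_mlstm_v2'
parts = name_part.split('_')
version, model_type = parts[-1], parts[-2]     # 'v2', 'mlstm'
product_id = '_'.join(parts[:-2])              # 'product_P001_all' (id plus scope)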
@ -268,7 +268,7 @@ def get_store_product_sales_data(store_id: str,

# 数据标准化已在load_multi_store_data中完成
# 验证必要的列是否存在
required_columns = ['sales', 'price', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']
required_columns = ['sales', 'weekday', 'month', 'is_holiday', 'is_weekend', 'is_promotion', 'temperature']
missing_columns = [col for col in required_columns if col not in df.columns]

if missing_columns:

@ -324,29 +324,21 @@ def aggregate_multi_store_data(product_id: Optional[str] = None,
grouping_entity = "所有产品"

# 按日期聚合(使用标准化后的列名)
agg_dict = {}
if aggregation_method == 'sum':
agg_dict = {
'sales': 'sum', # 标准化后的销量列
'sales_amount': 'sum',
'price': 'mean' # 标准化后的价格列,取平均值
}
elif aggregation_method == 'mean':
agg_dict = {
'sales': 'mean',
'sales_amount': 'mean',
'price': 'mean'
}
elif aggregation_method == 'median':
agg_dict = {
'sales': 'median',
'sales_amount': 'median',
'price': 'median'
}
# 定义一个更健壮的聚合规范,以保留所有特征
agg_spec = {
'sales': aggregation_method,
'sales_amount': aggregation_method,
'price': 'mean',
'weekday': 'first',
'month': 'first',
'is_holiday': 'first',
'is_weekend': 'first',
'is_promotion': 'first',
'temperature': 'mean'
}

# 确保列名存在
available_cols = df.columns.tolist()
agg_dict = {k: v for k, v in agg_dict.items() if k in available_cols}
# 只聚合DataFrame中存在的列
agg_dict = {k: v for k, v in agg_spec.items() if k in df.columns}

# 聚合数据
aggregated_df = df.groupby('date').agg(agg_dict).reset_index()
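The practical effect of the new `agg_spec` is that calendar features survive aggregation instead of being dropped. A toy example of the same groupby/agg pattern:

import pandas as pd

df = pd.DataFrame({
    'date': ['2024-01-01', '2024-01-01'],
    'sales': [3.0, 5.0],
    'price': [10.0, 12.0],
    'weekday': [0, 0],
    'temperature': [20.0, 22.0],
})
agg_spec = {'sales': 'sum', 'price': 'mean', 'weekday': 'first', 'temperature': 'mean'}
agg_dict = {k: v for k, v in agg_spec.items() if k in df.columns}
out = df.groupby('date').agg(agg_dict).reset_index()
print(out)   # one row: sales=8.0, price=11.0, weekday=0, temperature=21.0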
@ -24,6 +24,7 @@ server_dir = os.path.dirname(current_dir)
sys.path.append(server_dir)

from utils.logging_config import setup_api_logging, get_training_logger, log_training_progress
from utils.file_save import ModelPathManager
import numpy as np

def convert_numpy_types(obj):

@ -44,6 +45,9 @@ class TrainingTask:
model_type: str
training_mode: str
store_id: Optional[str] = None
aggregation_method: Optional[str] = None # 新增:聚合方式
product_scope: str = 'all'
product_ids: Optional[list] = None
epochs: int = 100
status: str = "pending" # pending, running, completed, failed
start_time: Optional[str] = None

@ -53,6 +57,8 @@ class TrainingTask:
error: Optional[str] = None
metrics: Optional[Dict[str, Any]] = None
process_id: Optional[int] = None
path_info: Optional[Dict[str, Any]] = None # 新增字段
version: Optional[int] = None # 新增版本字段

class TrainingWorker:
"""训练工作进程"""

@ -137,16 +143,20 @@ class TrainingWorker:
except Exception as e:
training_logger.error(f"进度回调失败: {e}")

# 执行真正的训练,传递进度回调
# 执行真正的训练,传递进度回调和路径信息
metrics = predictor.train_model(
product_id=task.product_id,
model_type=task.model_type,
epochs=task.epochs,
store_id=task.store_id,
training_mode=task.training_mode,
aggregation_method=task.aggregation_method, # 传递聚合方式
product_scope=task.product_scope, # 传递药品范围
product_ids=task.product_ids, # 传递药品ID列表
socketio=None, # 子进程中不能直接使用socketio
task_id=task.task_id,
progress_callback=progress_callback # 传递进度回调函数
progress_callback=progress_callback, # 传递进度回调函数
path_info=task.path_info # 传递路径信息
)

# 发送训练完成日志到主控制台

@ -157,11 +167,25 @@ class TrainingWorker:
})

if metrics:
self.progress_queue.put({
'task_id': task.task_id,
'log_type': 'info',
'message': f"📊 训练指标: MSE={metrics.get('mse', 'N/A'):.4f}, RMSE={metrics.get('rmse', 'N/A'):.4f}"
})
if 'error' in metrics:
self.progress_queue.put({
'task_id': task.task_id,
'log_type': 'error',
'message': f"❌ 训练返回错误: {metrics['error']}"
})
else:
# 只有在没有错误时才格式化指标
mse_val = metrics.get('mse', 'N/A')
rmse_val = metrics.get('rmse', 'N/A')

mse_str = f"{mse_val:.4f}" if isinstance(mse_val, (int, float)) else mse_val
rmse_str = f"{rmse_val:.4f}" if isinstance(rmse_val, (int, float)) else rmse_val

self.progress_queue.put({
'task_id': task.task_id,
'log_type': 'info',
'message': f"📊 训练指标: MSE={mse_str}, RMSE={rmse_str}"
})
except ImportError as e:
training_logger.error(f"❌ 导入训练器失败: {e}")
# 返回模拟的训练结果用于测试

@ -176,18 +200,29 @@ class TrainingWorker:
}
training_logger.warning("⚠️ 使用模拟训练结果")

# 训练完成
task.status = "completed"
task.end_time = time.strftime('%Y-%m-%d %H:%M:%S')
task.progress = 100.0
task.metrics = metrics
task.message = "训练完成"

training_logger.success(f"✅ 训练任务完成 - 耗时: {task.end_time}")
# 检查训练是否成功
if metrics:
# 训练成功
task.status = "completed"
task.end_time = time.strftime('%Y-%m-%d %H:%M:%S')
task.progress = 100.0
task.metrics = metrics
task.message = "训练完成"

training_logger.success(f"✅ 训练任务完成 - 耗时: {task.end_time}")
training_logger.info(f"📊 训练指标: {metrics}")

self.result_queue.put(('complete', asdict(task)))

self.result_queue.put(('complete', asdict(task)))
else:
# 训练失败(性能不佳)
# 即使性能不佳,也标记为完成,让用户决定是否使用
task.status = "completed"
task.end_time = time.strftime('%Y-%m-%d %H:%M:%S')
task.metrics = metrics if metrics else {}
task.message = "训练完成(性能可能不佳)"

training_logger.warning(f"⚠️ 训练完成,但性能可能不佳 (metrics: {metrics})")
self.result_queue.put(('complete', asdict(task)))

except Exception as e:
error_msg = str(e)

@ -235,6 +270,7 @@ class TrainingProcessManager:

# 设置日志
self.logger = setup_api_logging()
self.path_manager = ModelPathManager() # 实例化

def start(self):
"""启动进程管理器"""

@ -281,18 +317,26 @@ class TrainingProcessManager:

self.logger.info("✅ 训练进程管理器已停止")

def submit_task(self, product_id: str, model_type: str, training_mode: str = "product",
store_id: str = None, epochs: int = 100, **kwargs) -> str:
"""提交训练任务"""
def submit_task(self, training_params: Dict[str, Any], path_info: Dict[str, Any]) -> str:
"""
提交训练任务
Args:
training_params (Dict[str, Any]): 来自API请求的原始参数
path_info (Dict[str, Any]): 由ModelPathManager生成的路径和版本信息
"""
task_id = str(uuid.uuid4())

task = TrainingTask(
task_id=task_id,
product_id=product_id,
model_type=model_type,
training_mode=training_mode,
store_id=store_id,
epochs=epochs
product_id=training_params.get('product_id'),
model_type=training_params.get('model_type'),
training_mode=training_params.get('training_mode', 'product'),
store_id=training_params.get('store_id'),
epochs=training_params.get('epochs', 100),
aggregation_method=training_params.get('aggregation_method'), # 新增
product_scope=training_params.get('product_scope', 'all'),
product_ids=training_params.get('product_ids'),
path_info=path_info # 存储路径信息
)

with self.lock:

@ -301,7 +345,7 @@ class TrainingProcessManager:
# 将任务放入队列
self.task_queue.put(asdict(task))

self.logger.info(f"📋 训练任务已提交: {task_id[:8]} | {model_type} | {product_id}")
self.logger.info(f"📋 训练任务已提交: {task_id[:8]} | {task.model_type} | {task.product_id}")
return task_id

def get_task_status(self, task_id: str) -> Optional[Dict[str, Any]]:

@ -341,14 +385,41 @@ class TrainingProcessManager:

with self.lock:
if task_id in self.tasks:
task = self.tasks[task_id]
# 使用转换后的数据更新任务状态
for key, value in serializable_task_data.items():
setattr(self.tasks[task_id], key, value)
if hasattr(task, key):
setattr(task, key, value)

# 如果任务成功完成,则更新版本文件和任务对象中的版本号
if action == 'complete':
# 只有在训练成功(metrics有效)时才保存版本信息
if task.metrics and task.metrics.get('r2', -1) >= 0:
if task.path_info:
# 确保使用正确的、经过规范化处理的标识符
version_control_identifier = task.path_info.get('identifier')
version = task.path_info.get('version')
if version_control_identifier and version:
try:
self.path_manager.save_version_info(version_control_identifier, version)
self.logger.info(f"✅ 版本信息已更新: identifier={version_control_identifier}, version={version}")
task.version = version # 关键修复:将版本号保存到任务对象中
except Exception as e:
self.logger.error(f"❌ 更新版本文件失败: {e}")
else:
self.logger.warning(f"⚠️ 任务 {task_id} 训练性能不佳或失败,不保存版本信息。")

# WebSocket通知 - 使用已转换的数据
if self.websocket_callback:
try:
if action == 'complete':
# 从任务对象中获取权威的版本号
version = None
with self.lock:
task = self.tasks.get(task_id)
if task:
version = task.version

# 训练完成 - 发送完成状态
self.websocket_callback('training_update', {
'task_id': task_id,

@ -359,7 +430,10 @@ class TrainingProcessManager:
'metrics': serializable_task_data.get('metrics'),
'end_time': serializable_task_data.get('end_time'),
'product_id': serializable_task_data.get('product_id'),
'model_type': serializable_task_data.get('model_type')
'model_type': serializable_task_data.get('model_type'),
'version': version, # 添加版本号
'product_scope': serializable_task_data.get('product_scope'),
'product_ids': serializable_task_data.get('product_ids')
})
# 额外发送一个完成事件,确保前端能收到
self.websocket_callback('training_completed', {

@ -369,7 +443,10 @@ class TrainingProcessManager:
'message': serializable_task_data.get('message', '训练完成'),
'metrics': serializable_task_data.get('metrics'),
'product_id': serializable_task_data.get('product_id'),
'model_type': serializable_task_data.get('model_type')
'model_type': serializable_task_data.get('model_type'),
'version': version, # 添加版本号
'product_scope': serializable_task_data.get('product_scope'),
'product_ids': serializable_task_data.get('product_ids')
})
elif action == 'error':
# 训练失败

@ -381,7 +458,9 @@ class TrainingProcessManager:
'message': serializable_task_data.get('message', '训练失败'),
'error': serializable_task_data.get('error'),
'product_id': serializable_task_data.get('product_id'),
'model_type': serializable_task_data.get('model_type')
'model_type': serializable_task_data.get('model_type'),
'product_scope': serializable_task_data.get('product_scope'),
'product_ids': serializable_task_data.get('product_ids')
})
else:
# 状态更新

@ -393,7 +472,9 @@ class TrainingProcessManager:
'message': serializable_task_data.get('message', ''),
'metrics': serializable_task_data.get('metrics'),
'product_id': serializable_task_data.get('product_id'),
'model_type': serializable_task_data.get('model_type')
'model_type': serializable_task_data.get('model_type'),
'product_scope': serializable_task_data.get('product_scope'),
'product_ids': serializable_task_data.get('product_ids')
})
except Exception as e:
self.logger.error(f"WebSocket通知失败: {e}")
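The intended call sequence on the API side follows from the new `submit_task` signature: resolve paths first, then enqueue. A sketch under that assumption (`path_manager` and `training_manager` are hypothetical handles to the two singletons; the actual route wiring is not part of this diff):

# Hypothetical API-side glue: resolve paths first, then enqueue the task.
training_params = {
    'product_id': 'P001',
    'model_type': 'mlstm',
    'training_mode': 'product',
    'epochs': 50,
}
path_info = path_manager.get_model_paths(
    training_mode=training_params['training_mode'],
    model_type=training_params['model_type'],
    product_id=training_params['product_id'],
)
task_id = training_manager.submit_task(training_params, path_info)
# versions.json is only written later, when the update loop sees action == 'complete'.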
test/test_file_save_logic.py (Normal file)
@ -0,0 +1,259 @@
import os
import sys
import shutil
import json

# 将项目根目录添加到系统路径,以便导入server模块
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, project_root)

from server.utils.file_save import ModelPathManager

def run_tests():
"""执行所有路径生成逻辑的测试"""

# --- 测试设置 ---
test_base_dir = 'test_saved_models'
if os.path.exists(test_base_dir):
shutil.rmtree(test_base_dir) # 清理旧的测试目录

path_manager = ModelPathManager(base_dir=test_base_dir)
model_type = 'mlstm'

print("="*50)
print("🚀 开始测试 ModelPathManager 路径生成逻辑...")
print(f"测试根目录: {os.path.abspath(test_base_dir)}")
print("="*50)

# --- 1. 按店铺训练 (Store Training) 测试 ---
print("\n--- 🧪 1. 按店铺训练 (Store Training) ---")

# a) 店铺训练 - 所有药品
print("\n[1a] 场景: 店铺训练 - 所有药品")
store_payload_all = {
'store_id': 'S001',
'model_type': model_type,
'training_mode': 'store',
'product_scope': 'all'
}
payload = store_payload_all.copy()
payload.pop('model_type', None)
payload.pop('training_mode', None)
paths_store_all = path_manager.get_model_paths(training_mode='store', model_type=model_type, **payload)
print(f" - Identifier: {paths_store_all['identifier']}")
print(f" - Version Dir: {paths_store_all['version_dir']}")
assert f"store_S001_products_all_{model_type}" == paths_store_all['identifier']
expected_path = os.path.join(test_base_dir, 'store', 'S001_all', model_type, 'v1')
assert os.path.normpath(expected_path) == os.path.normpath(paths_store_all['version_dir'])

# b) 店铺训练 - 特定药品 (使用哈希)
print("\n[1b] 场景: 店铺训练 - 特定药品 (使用哈希)")
store_payload_specific = {
'store_id': 'S002',
'model_type': model_type,
'training_mode': 'store',
'product_scope': 'specific',
'product_ids': ['P001', 'P005', 'P002']
}
payload = store_payload_specific.copy()
payload.pop('model_type', None)
payload.pop('training_mode', None)
paths_store_specific = path_manager.get_model_paths(training_mode='store', model_type=model_type, **payload)
hashed_ids = path_manager._hash_ids(['P001', 'P005', 'P002'])
print(f" - Hashed IDs: {hashed_ids}")
print(f" - Identifier: {paths_store_specific['identifier']}")
print(f" - Version Dir: {paths_store_specific['version_dir']}")
assert f"store_S002_products_{hashed_ids}_{model_type}" == paths_store_specific['identifier']
expected_path = os.path.join(test_base_dir, 'store', f'S002_{hashed_ids}', model_type, 'v1')
assert os.path.normpath(expected_path) == os.path.normpath(paths_store_specific['version_dir'])

# c) 店铺训练 - 单个指定药品
print("\n[1c] 场景: 店铺训练 - 单个指定药品")
store_payload_single_product = {
'store_id': 'S003',
'model_type': model_type,
'training_mode': 'store',
'product_scope': 'specific',
'product_ids': ['P789']
}
payload = store_payload_single_product.copy()
payload.pop('model_type', None)
payload.pop('training_mode', None)
paths_store_single_product = path_manager.get_model_paths(training_mode='store', model_type=model_type, **payload)
print(f" - Identifier: {paths_store_single_product['identifier']}")
print(f" - Version Dir: {paths_store_single_product['version_dir']}")
assert f"store_S003_products_P789_{model_type}" == paths_store_single_product['identifier']
expected_path = os.path.join(test_base_dir, 'store', 'S003_P789', model_type, 'v1')
assert os.path.normpath(expected_path) == os.path.normpath(paths_store_single_product['version_dir'])

# --- 2. 按药品训练 (Product Training) 测试 ---
print("\n--- 🧪 2. 按药品训练 (Product Training) ---")

# a) 药品训练 - 所有店铺
print("\n[2a] 场景: 药品训练 - 所有店铺")
product_payload_all = {
'product_id': 'P123',
'model_type': model_type,
'training_mode': 'product',
'store_id': None # 明确测试 None 的情况
}
payload = product_payload_all.copy()
payload.pop('model_type', None)
payload.pop('training_mode', None)
paths_product_all = path_manager.get_model_paths(training_mode='product', model_type=model_type, **payload)
print(f" - Identifier: {paths_product_all['identifier']}")
print(f" - Version Dir: {paths_product_all['version_dir']}")
assert f"product_P123_scope_all_{model_type}" == paths_product_all['identifier']
expected_path = os.path.join(test_base_dir, 'product', 'P123_all', model_type, 'v1')
assert os.path.normpath(expected_path) == os.path.normpath(paths_product_all['version_dir'])

# b) 药品训练 - 特定店铺
print("\n[2b] 场景: 药品训练 - 特定店铺")
product_payload_specific = {
'product_id': 'P456',
'store_id': 'S003',
'model_type': model_type,
'training_mode': 'product'
}
payload = product_payload_specific.copy()
payload.pop('model_type', None)
payload.pop('training_mode', None)
paths_product_specific = path_manager.get_model_paths(training_mode='product', model_type=model_type, **payload)
print(f" - Identifier: {paths_product_specific['identifier']}")
print(f" - Version Dir: {paths_product_specific['version_dir']}")
assert f"product_P456_scope_S003_{model_type}" == paths_product_specific['identifier']
expected_path = os.path.join(test_base_dir, 'product', 'P456_S003', model_type, 'v1')
assert os.path.normpath(expected_path) == os.path.normpath(paths_product_specific['version_dir'])

# --- 3. 全局训练 (Global Training) 测试 ---
print("\n--- 🧪 3. 全局训练 (Global Training) ---")

# a) 全局训练 - 所有数据
print("\n[3a] 场景: 全局训练 - 所有数据")
global_payload_all = {
'model_type': model_type,
'training_mode': 'global',
'training_scope': 'all',
'aggregation_method': 'sum'
}
payload = global_payload_all.copy()
payload.pop('model_type', None)
payload.pop('training_mode', None)
paths_global_all = path_manager.get_model_paths(training_mode='global', model_type=model_type, **payload)
print(f" - Identifier: {paths_global_all['identifier']}")
print(f" - Version Dir: {paths_global_all['version_dir']}")
assert f"global_all_agg_sum_{model_type}" == paths_global_all['identifier']
expected_path = os.path.join(test_base_dir, 'global', 'all', 'sum', model_type, 'v1')
assert os.path.normpath(expected_path) == os.path.normpath(paths_global_all['version_dir'])

# a2) 全局训练 - 所有数据 (使用 all_stores_all_products)
print("\n[3a2] 场景: 全局训练 - 所有数据 (使用 'all_stores_all_products')")
global_payload_all_alt = {
'model_type': model_type,
'training_mode': 'global',
'training_scope': 'all_stores_all_products',
'aggregation_method': 'sum'
}
payload = global_payload_all_alt.copy()
payload.pop('model_type', None)
payload.pop('training_mode', None)
paths_global_all_alt = path_manager.get_model_paths(training_mode='global', model_type=model_type, **payload)
assert f"global_all_agg_sum_{model_type}" == paths_global_all_alt['identifier']
assert os.path.normpath(expected_path) == os.path.normpath(paths_global_all_alt['version_dir'])

# b) 全局训练 - 自定义范围 (使用哈希)
print("\n[3b] 场景: 全局训练 - 自定义范围 (使用哈希)")
global_payload_custom = {
'model_type': model_type,
'training_mode': 'global',
'training_scope': 'custom',
'aggregation_method': 'mean',
'store_ids': ['S001', 'S003'],
'product_ids': ['P001', 'P002']
}
payload = global_payload_custom.copy()
payload.pop('model_type', None)
payload.pop('training_mode', None)
paths_global_custom = path_manager.get_model_paths(training_mode='global', model_type=model_type, **payload)
s_hash = path_manager._hash_ids(['S001', 'S003'])
p_hash = path_manager._hash_ids(['P001', 'P002'])
print(f" - Store Hash: {s_hash}, Product Hash: {p_hash}")
print(f" - Identifier: {paths_global_custom['identifier']}")
print(f" - Version Dir: {paths_global_custom['version_dir']}")
assert f"global_custom_s_{s_hash}_p_{p_hash}_agg_mean_{model_type}" == paths_global_custom['identifier']
expected_path = os.path.join(test_base_dir, 'global', 'custom', s_hash, p_hash, 'mean', model_type, 'v1')
assert os.path.normpath(expected_path) == os.path.normpath(paths_global_custom['version_dir'])

# c) 全局训练 - 单个店铺
print("\n[3c] 场景: 全局训练 - 单个店铺")
global_payload_single_store = {
'model_type': model_type,
'training_mode': 'global',
'training_scope': 'selected_stores',
|
||||
'aggregation_method': 'mean',
|
||||
'store_ids': ['S007']
|
||||
}
|
||||
payload = global_payload_single_store.copy()
|
||||
payload.pop('model_type', None)
|
||||
payload.pop('training_mode', None)
|
||||
paths_global_single_store = path_manager.get_model_paths(training_mode='global', model_type=model_type, **payload)
|
||||
print(f" - Identifier: {paths_global_single_store['identifier']}")
|
||||
print(f" - Version Dir: {paths_global_single_store['version_dir']}")
|
||||
assert f"global_stores_S007_agg_mean_{model_type}" == paths_global_single_store['identifier']
|
||||
expected_path = os.path.join(test_base_dir, 'global', 'stores', 'S007', 'mean', model_type, 'v1')
|
||||
assert os.path.normpath(expected_path) == os.path.normpath(paths_global_single_store['version_dir'])
|
||||
|
||||
# d) 全局训练 - 自定义范围 (单ID)
|
||||
print("\n[3d] 场景: 全局训练 - 自定义范围 (单ID)")
|
||||
global_payload_custom_single = {
|
||||
'model_type': model_type,
|
||||
'training_mode': 'global',
|
||||
'training_scope': 'custom',
|
||||
'aggregation_method': 'mean',
|
||||
'store_ids': ['S008'],
|
||||
'product_ids': ['P888']
|
||||
}
|
||||
payload = global_payload_custom_single.copy()
|
||||
payload.pop('model_type', None)
|
||||
payload.pop('training_mode', None)
|
||||
paths_global_custom_single = path_manager.get_model_paths(training_mode='global', model_type=model_type, **payload)
|
||||
print(f" - Identifier: {paths_global_custom_single['identifier']}")
|
||||
print(f" - Version Dir: {paths_global_custom_single['version_dir']}")
|
||||
assert f"global_custom_s_S008_p_P888_agg_mean_{model_type}" == paths_global_custom_single['identifier']
|
||||
expected_path = os.path.join(test_base_dir, 'global', 'custom', 'S008', 'P888', 'mean', model_type, 'v1')
|
||||
assert os.path.normpath(expected_path) == os.path.normpath(paths_global_custom_single['version_dir'])
|
||||
|
||||
# --- 4. 版本管理测试 ---
|
||||
print("\n--- 🧪 4. 版本管理测试 ---")
|
||||
print("\n[4a] 场景: 多次调用同一训练,版本号递增")
|
||||
|
||||
# 第一次训练
|
||||
path_manager.save_version_info(paths_store_all['identifier'], paths_store_all['version'])
|
||||
print(f" - 保存版本: {paths_store_all['identifier']} -> v{paths_store_all['version']}")
|
||||
|
||||
# 第二次训练
|
||||
payload = store_payload_all.copy()
|
||||
payload.pop('model_type', None)
|
||||
payload.pop('training_mode', None)
|
||||
paths_store_all_v2 = path_manager.get_model_paths(training_mode='store', model_type=model_type, **payload)
|
||||
print(f" - 获取新版本: {paths_store_all_v2['identifier']} -> v{paths_store_all_v2['version']}")
|
||||
assert paths_store_all_v2['version'] == 2
|
||||
expected_path = os.path.join(test_base_dir, 'store', 'S001_all', model_type, 'v2')
|
||||
assert os.path.normpath(expected_path) == os.path.normpath(paths_store_all_v2['version_dir'])
|
||||
|
||||
# 验证 versions.json 文件
|
||||
with open(path_manager.versions_file, 'r') as f:
|
||||
versions_data = json.load(f)
|
||||
print(f" - versions.json 内容: {versions_data}")
|
||||
assert versions_data[paths_store_all['identifier']] == 1
|
||||
|
||||
print("\n="*50)
|
||||
print("✅ 所有测试用例通过!")
|
||||
print("="*50)
|
||||
|
||||
# --- 清理 ---
|
||||
shutil.rmtree(test_base_dir)
|
||||
print(f"🗑️ 测试目录 '{test_base_dir}' 已清理。")
|
||||
|
||||
if __name__ == '__main__':
|
||||
run_tests()
118 test/verify_save_logic.py Normal file
@ -0,0 +1,118 @@
import unittest
import os
import shutil
import sys

# Add the project root to sys.path so that imports resolve.
# This lets the test script run directly without extra path configuration.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

from server.utils.file_save import ModelPathManager


class TestModelPathManager(unittest.TestCase):
    """
    Verify that ModelPathManager strictly follows the flat file-storage specification.
    """
    def setUp(self):
        """Set up the test environment before each test case."""
        self.test_base_dir = 'test_saved_models'
        # Remove any leftover test directory and files from previous runs
        if os.path.exists(self.test_base_dir):
            shutil.rmtree(self.test_base_dir)
        self.path_manager = ModelPathManager(base_dir=self.test_base_dir)

    def tearDown(self):
        """Clean up the test environment after each test case."""
        if os.path.exists(self.test_base_dir):
            shutil.rmtree(self.test_base_dir)

    def test_product_mode_path_generation(self):
        """Paths generated in 'product' mode must match the specification."""
        print("\n--- Testing 'product' mode ---")
        params = {
            'training_mode': 'product',
            'model_type': 'mlstm',
            'product_id': 'P001',
            'store_id': 'all'
        }

        # First call: version should be 1
        paths_v1 = self.path_manager.get_model_paths(**params)

        # Check the version number
        self.assertEqual(paths_v1['version'], 1)

        # Check the filename prefix
        expected_prefix_v1 = 'product_P001_all_mlstm_v1'
        self.assertEqual(paths_v1['filename_prefix'], expected_prefix_v1)

        # Check the full path of each artifact
        self.assertEqual(paths_v1['model_path'], os.path.join(self.test_base_dir, f'{expected_prefix_v1}_model.pth'))
        self.assertEqual(paths_v1['metadata_path'], os.path.join(self.test_base_dir, f'{expected_prefix_v1}_metadata.json'))
        self.assertEqual(paths_v1['loss_curve_path'], os.path.join(self.test_base_dir, f'{expected_prefix_v1}_loss_curve.png'))

        # Check the checkpoint paths
        checkpoint_dir = os.path.join(self.test_base_dir, 'checkpoints')
        self.assertEqual(paths_v1['checkpoint_dir'], checkpoint_dir)
        self.assertEqual(paths_v1['best_checkpoint_path'], os.path.join(checkpoint_dir, f'{expected_prefix_v1}_checkpoint_best.pth'))
        self.assertEqual(paths_v1['epoch_checkpoint_template'], os.path.join(checkpoint_dir, f'{expected_prefix_v1}_checkpoint_epoch_{{N}}.pth'))

        print(f"Generated filename prefix: {paths_v1['filename_prefix']}")
        print(f"Generated model path: {paths_v1['model_path']}")
        print("Passed!")

        # Simulate a successful training run to trigger the version bump
        self.path_manager.save_version_info(paths_v1['identifier'], paths_v1['version'])

        # Second call: version should be 2
        paths_v2 = self.path_manager.get_model_paths(**params)
        self.assertEqual(paths_v2['version'], 2)
        expected_prefix_v2 = 'product_P001_all_mlstm_v2'
        self.assertEqual(paths_v2['filename_prefix'], expected_prefix_v2)
        print(f"\nPrefix after version bump: {paths_v2['filename_prefix']}")
        print("Version bump verified!")

    def test_store_mode_path_generation_with_hash(self):
        """Paths generated in 'store' mode must hash multiple product IDs."""
        print("\n--- Testing 'store' mode (multiple product IDs hashed) ---")
        params = {
            'training_mode': 'store',
            'model_type': 'kan',
            'store_id': 'S008',
            'product_scope': 'specific',
            'product_ids': ['P002', 'P005', 'P003']  # deliberately unsorted
        }

        paths = self.path_manager.get_model_paths(**params)

        # The hash is deterministic because the ID list is sorted before hashing
        expected_hash = self.path_manager._hash_ids(sorted(['P002', 'P005', 'P003']))
        expected_prefix = f'store_S008_{expected_hash}_kan_v1'

        self.assertEqual(paths['filename_prefix'], expected_prefix)
        self.assertEqual(paths['model_path'], os.path.join(self.test_base_dir, f'{expected_prefix}_model.pth'))
        print(f"Generated filename prefix: {paths['filename_prefix']}")
        print("Passed!")

    def test_global_mode_path_generation(self):
        """Paths generated in 'global' mode must match the specification."""
        print("\n--- Testing 'global' mode ---")
        params = {
            'training_mode': 'global',
            'model_type': 'transformer',
            'training_scope': 'all',
            'aggregation_method': 'mean'
        }

        paths = self.path_manager.get_model_paths(**params)

        expected_prefix = 'global_all_agg_mean_transformer_v1'
        self.assertEqual(paths['filename_prefix'], expected_prefix)
        self.assertEqual(paths['model_path'], os.path.join(self.test_base_dir, f'{expected_prefix}_model.pth'))
        print(f"Generated filename prefix: {paths['filename_prefix']}")
        print("Passed!")


if __name__ == '__main__':
    unittest.main()
322 xz修改记录日志和启动依赖.md
@ -1,10 +1,9 @@
### Launch from the project root

**1**: `uv venv`
**2**: `uv pip install loguru numpy pandas torch matplotlib flask flask_cors flask_socketio flasgger scikit-learn tqdm pytorch_tcn pyarrow xgboost -i https://pypi.tuna.tsinghua.edu.cn/simple`
**3**: `uv run .\server\api.py`

### UI

**1**: `npm install`, then `npm run dev`

# UI refactor changelog for the "预测分析" (Prediction Analysis) module

@ -755,4 +754,315 @@
# ... subsequent processing logic remains unchanged ...
```

With the steps above, you can switch the data source from local files to the server database without touching any other part of the project.

---
**Date**: 2025-07-15 11:43
**Subject**: Fix training failures caused by PyTorch version incompatibility

### Problem
After the earlier path and dependency fixes, model training crashed on some machines with `TypeError: ReduceLROnPlateau.__init__() got an unexpected keyword argument 'verbose'`, while running fine on the local development machine.

### Root cause
A classic **environment mismatch** compatibility error.
1. **PyTorch version gap**: the local development environment had an older PyTorch whose `ReduceLROnPlateau` learning-rate scheduler still accepted a `verbose` argument (used to log learning-rate changes).
2. **Fresh environments**: other machines and newly created virtual environments installed a newer PyTorch, in which the `verbose` argument of `ReduceLROnPlateau` has been removed.
3. **Code issue**: `server/trainers/mlstm_trainer.py` and `server/trainers/transformer_trainer.py` hard-coded `verbose=True` when constructing `ReduceLROnPlateau`, so the call raised a `TypeError` on newer PyTorch.

### Fix: remove the deprecated argument
1. **Audit**: checked every trainer file in the project (`mlstm_trainer.py`, `transformer_trainer.py`, `kan_trainer.py`, `tcn_trainer.py`).
2. **Pinpoint**: confirmed that only `mlstm_trainer.py` and `transformer_trainer.py` use `ReduceLROnPlateau` and pass `verbose`.
3. **Apply the fix**:
    * **Files**: `server/trainers/mlstm_trainer.py` and `server/trainers/transformer_trainer.py`
    * **Location**: the `ReduceLROnPlateau` constructor call.
    * **Change**: removed the `verbose=True` argument.
    ```diff
    - scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', ..., verbose=True)
    + scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', ...)
    ```
    * **Rationale**: dropping an argument that no longer exists in newer PyTorch removes the `TypeError` at its source and keeps the code working across PyTorch versions, without affecting the scheduler's core behavior.

### Outcome
Removing the deprecated `verbose` argument resolved the version-compatibility problem caused by environment differences, so training now runs reliably on all target machines.
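For future upgrades of the same kind, a small shim can keep one call site working across PyTorch versions. This is a sketch for reference, not the fix that was applied; it assumes only the standard library and `torch`:

```python
import inspect

import torch


def make_plateau_scheduler(optimizer, **kwargs):
    """Build ReduceLROnPlateau, passing `verbose` only if this PyTorch supports it."""
    params = inspect.signature(
        torch.optim.lr_scheduler.ReduceLROnPlateau.__init__
    ).parameters
    if 'verbose' not in params:
        kwargs.pop('verbose', None)  # silently drop the deprecated argument
    return torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, **kwargs)


# Usage: identical call sites on old and new PyTorch
# scheduler = make_plateau_scheduler(optimizer, mode='min', patience=5, verbose=True)
```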
---
**Date**: 2025-07-15 14:05
**Subject**: Dashboard UI adjustment

### Description
Per user request, the "数据管理" (Data Management) card on the dashboard was replaced with "店铺管理" (Store Management).

### Main changes
* **File**: `UI/src/views/DashboardView.vue`
* **Changes**:
    1. In the `featureCards` array, the former "Data Management" entry was rewritten as "Store Management".
    2. The card's `title`, `description`, `icon`, and `path` were updated to point at the store-management page (`/store-management`).
    3. The new `Shop` icon was imported in the script.

### Result
The dashboard now links directly to the Store Management page, making the workflow quicker.
---
**Date**: 2025-07-18
**Subject**: Refactor and centralize the model-saving logic

### Goal
Following `xz训练模型保存规则.md`, unify the model-file saving logic that was scattered across the system into a single, robust, and testable path-management module.

### Key results
1. **Created the `server/utils/file_save.py` module**: this new module is now the single authoritative source for model file paths in the system.
2. **Implemented path generation for all three training modes**: the system now generates hierarchical, traceable directory structures for "store", "product", and "global" training.
3. **Integrated smart ID handling**:
    * For training scopes that contain **multiple IDs**, the system computes a short hash to use as the directory name.
    * For global training scopes that contain a **single store or product ID**, the ID itself is used as the directory name, which keeps the path readable.
4. **Refactored the whole training pipeline**: the API layer, the process manager, and every model trainer were updated to cooperate through the new path-management module.
5. **Added automated tests**: created `test/test_file_save_logic.py` to verify all path-generation and version-management logic.

### Detailed file changes

1. **`server/utils/file_save.py`**
    * **Action**: created
    * **Content**: implements the `ModelPathManager` class with the following core methods (a structural sketch follows this list):
        * `_hash_ids`: sorts and hashes a list of IDs.
        * `_generate_identifier`: builds the unique model identifier from the training mode and parameters.
        * `get_next_version` / `save_version_info`: manage `versions.json` in a thread-safe way to read and bump version numbers.
        * `get_model_paths`: the main entry point that coordinates the methods above and returns a dict with the paths of all artifacts.

2. **`server/api.py`**
    * **Action**: modified
    * **Location**: the `start_training` function (`/api/training` endpoint).
    * **Content**:
        * Imports and instantiates `ModelPathManager`.
        * On receiving a training request, calls `path_manager.get_model_paths()` to obtain all path information.
        * Passes the resulting `path_info` dict together with the raw request parameters `training_params` to the background training task manager.
        * Fixed a `TypeError` caused by passing the keyword arguments `model_type` and `training_mode` twice.
        * Fixed an `UnboundLocalError` in the `except` block caused by the missing `traceback` import.

3. **`server/utils/training_process_manager.py`**
    * **Action**: modified
    * **Content**:
        * `submit_task` now accepts the `training_params` and `path_info` dicts.
        * The `TrainingTask` dataclass gained a `path_info` field to carry the path information.
        * `TrainingWorker` passes `path_info` through to the actual training function.
        * In `_monitor_results`, when a task completes successfully, `path_manager.save_version_info` is called to update `versions.json`, closing the version-management loop.

4. **All trainer files** (`mlstm_trainer.py`, `kan_trainer.py`, `tcn_trainer.py`, `transformer_trainer.py`)
    * **Action**: modified
    * **Content**:
        * The main training functions now share a uniform signature with an added `path_info=None` parameter.
        * All internal, hand-built file-path logic was removed.
        * Every save operation (final model, checkpoints, loss-curve plot) now reads its pre-generated path directly from the `path_info` dict.
        * The `save_checkpoint` helper was simplified to rely on `path_info` as well.

5. **`test/test_file_save_logic.py`**
    * **Action**: created
    * **Content**:
        * A standalone test script that exercises every `ModelPathManager` feature.
        * Covers all training modes and their sub-scenarios (including single-ID and multi-ID hashing).
        * Verifies correct version bumping and `versions.json` writes.
        * Fixed several `AssertionError`s and `TypeError`s in the script itself caused by absolute/relative path mismatches and duplicated keyword arguments.
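For orientation, here is a minimal structural sketch of such a manager. It is a reconstruction for illustration, not the actual `file_save.py`: the method names follow the list above, while the hash length, lock granularity, and storage details are assumptions.

```python
import hashlib
import json
import os
import threading


class ModelPathManager:
    """Sketch: single authority for model artifact paths (details assumed)."""

    def __init__(self, base_dir='saved_models'):
        self.base_dir = base_dir
        self.versions_file = os.path.join(base_dir, 'versions.json')
        self._lock = threading.Lock()  # assumed: in-process thread safety
        os.makedirs(base_dir, exist_ok=True)

    def _hash_ids(self, ids):
        # Sort first so the hash is independent of selection order
        digest = hashlib.md5(','.join(sorted(ids)).encode('utf-8'))
        return digest.hexdigest()[:8]  # assumed hash length

    def get_next_version(self, identifier):
        with self._lock:
            versions = {}
            if os.path.exists(self.versions_file):
                with open(self.versions_file, 'r') as f:
                    versions = json.load(f)
            return versions.get(identifier, 0) + 1

    def save_version_info(self, identifier, version):
        with self._lock:
            versions = {}
            if os.path.exists(self.versions_file):
                with open(self.versions_file, 'r') as f:
                    versions = json.load(f)
            versions[identifier] = version
            with open(self.versions_file, 'w') as f:
                json.dump(versions, f, indent=2)

    # get_model_paths(...) builds the identifier via _generate_identifier,
    # asks get_next_version, and returns the dict of artifact paths (omitted).
```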
---
**Date**: 2025-07-18 (follow-up fix)
**Subject**: Fix a `TypeError` when the API layer calls the path manager

### Problem
With the refactor and tests done, actually running the API made `POST /api/training` crash inside `path_manager.get_model_paths` with `TypeError: get_model_paths() got multiple values for keyword argument 'training_mode'`.

### Root cause
A regression. When fixing the same issue in the test script `test_file_save_logic.py`, the fix was not carried back into `server/api.py`. The call passed `training_mode=...` explicitly as a keyword argument and then passed it again via `**data`, causing the conflict.

### Fix
1. **File**: `server/api.py`
2. **Location**: the `start_training` function.
3. **Action**: changed how `get_model_paths` is called.
4. **Content**:
    ```python
    # Remove model_type and training_mode to avoid duplicate keyword arguments
    data_for_path = data.copy()
    data_for_path.pop('model_type', None)
    data_for_path.pop('training_mode', None)
    path_info = path_manager.get_model_paths(
        training_mode=training_mode,
        model_type=model_type,
        **data_for_path  # pass the rest of the payload
    )
    ```
5. **Rationale**: removing every explicitly named keyword from the dict copy before unpacking it with `**` guarantees a valid call signature.
---
**Date**: 2025-07-18 (final fix)
**Subject**: Fix a `TypeError` caused by an un-updated middle-layer function signature

### Problem
After the refactor, triggering a training task through the API crashed the background process with `TypeError: train_model() got an unexpected keyword argument 'path_info'`.

### Root cause
A classic "middle-man" omission. Both ends of the call chain were updated (`api.py` -> `training_process_manager.py`, and the `*_trainer.py` files), but the layer between them, the `train_model` method in `server/core/predictor.py`, was not. `training_process_manager` tried to pass `path_info` to `predictor.train_model`, whose signature did not yet accept the new parameter, hence the `TypeError`.

### Fix
1. **File**: `server/core/predictor.py`
2. **Location**: the definition of `train_model`.
3. **Action**: added a `path_info=None` parameter to the signature.
4. **Content**:
    ```python
    def train_model(self, ..., progress_callback=None, path_info=None):
        # ...
    ```
5. **Location**: inside `train_model`, at every call to a concrete trainer (`train_product_model_with_mlstm`, `_with_kan`, etc.).
6. **Action**: forwarded the received `path_info` parameter in every call.
7. **Content**:
    ```python
    # ...
    metrics = train_product_model_with_transformer(
        ...,
        path_info=path_info
    )
    # ...
    ```
8. **Rationale**: threading the `path_info` parameter through the middle layer completes the data flow from the API layer down to the trainers and resolves the `TypeError`.
---
**Date**: 2025-07-18 (final fix)
**Subject**: Fix path generation in "product training - aggregate all stores" mode

### Problem
In practice, running "product training" with "aggregate all stores" produced model save paths with an incorrect `_None` suffix instead of the expected `_all` (e.g. `.../17002608_None/...`).

### Root cause
In `_generate_identifier` in `server/utils/file_save.py`, the expression `scope = store_id if store_id else 'all'` correctly maps a `store_id` of `None` to `'all'`. In `get_model_paths`, however, the code used `kwargs.get('store_id', 'all')`, which still returns `None` when the `store_id` key exists but holds `None`, so the path was assembled with `None`.

### Fix
1. **File**: `server/utils/file_save.py`
2. **Location**: the `product` training-mode branches of `_generate_identifier` and `get_model_paths`.
3. **Action**: replaced `scope = kwargs.get('store_id', 'all')` with the stricter `scope = store_id if store_id is not None else 'all'`.
4. **Content**:
    ```python
    # in _generate_identifier
    scope = store_id if store_id is not None else 'all'

    # in get_model_paths
    store_id = kwargs.get('store_id')
    scope = store_id if store_id is not None else 'all'
    scope_folder = f"{product_id}_{scope}"
    ```
5. **Rationale**: this form handles both cases correctly, the `store_id` key being absent and the key being present with a `None` value, so `scope` ends up as `'all'` either way and the generated path conforms to the specification.
---
**Date**: 2025-07-18 (final fix)
**Subject**: Fix `KeyError: 'price'` and single-ID hashing

### Problem
After the large refactor, two hidden bugs surfaced at runtime:
1. In "store training" mode, training failed with `KeyError: 'price'`.
2. In "store training" mode with a single "specified product" selected, the system still hashed that product's ID instead of using the ID directly.

### Root cause
1. **`KeyError`**: `get_store_product_sales_data` in `server/utils/multi_store_data_utils.py` contained a hard-coded column check requiring a `price` column, which the current data source does not have.
2. **Hashing bug**: the `store`-mode branch of `get_model_paths` in `server/utils/file_save.py` did not reuse the single-ID check already implemented in `_generate_identifier`, so the two were inconsistent.

### Fix
1. **`KeyError`**:
    * **File**: `server/utils/multi_store_data_utils.py`
    * **Location**: the `get_store_product_sales_data` function.
    * **Action**: removed `'price'` from the `required_columns` list, eliminating the hard dependency.
2. **Hashing logic** (a small sketch of the shared rule follows this entry):
    * **File**: `server/utils/file_save.py`
    * **Location**: the `store`-mode branches of `_generate_identifier` and `get_model_paths`.
    * **Action**: unified both places on `scope = product_ids[0] if len(product_ids) == 1 else self._hash_ids(product_ids)`, so a single selected product is used verbatim.
3. **Tests**:
    * **File**: `test/test_file_save_logic.py`
    * **Action**: added a test case for path generation in the "store training - single specified product" scenario.
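The shared rule as a standalone sketch; `_hash_ids` and the 8-character digest are assumptions carried over from the manager sketch above:

```python
import hashlib


def _hash_ids(ids):
    """Assumed helper: stable short hash of a sorted ID list."""
    return hashlib.md5(','.join(sorted(ids)).encode('utf-8')).hexdigest()[:8]


def product_scope(product_ids):
    # One ID is used verbatim for readability; several IDs collapse to a short hash.
    return product_ids[0] if len(product_ids) == 1 else _hash_ids(product_ids)


assert product_scope(['P789']) == 'P789'
```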
---
**Date**: 2025-07-18 (final fix)
**Subject**: Fix a `ValueError` caused by a mismatched global training-scope value

### Problem
Running "global training - all stores, all products" through the API crashed with `ValueError: 未知的全局训练范围: all_stores_all_products`.

### Root cause
The frontend sends `training_scope` as `all_stores_all_products`, but `_generate_identifier` and `get_model_paths` in `server/utils/file_save.py` only handled the value `all`, so the branch check failed.

### Fix
1. **File**: `server/utils/file_save.py`
2. **Location**: the `global`-mode branches of `_generate_identifier` and `get_model_paths`.
3. **Action**: changed the check from `if training_scope == 'all':` to `if training_scope in ['all', 'all_stores_all_products']:`.
4. **Rationale**: the code now accepts both strings that mean "everything", so the frontend request is handled correctly (see the sketch below).
5. **Tests**:
    * **File**: `test/test_file_save_logic.py`
    * **Action**: added a test case for path generation when `training_scope` is `all_stores_all_products`.
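Sketched as a helper for clarity (the project applies the check inline in both methods; the function name is illustrative):

```python
def is_all_scope(training_scope: str) -> bool:
    # Accept both spellings the system uses for "all stores, all products"
    return training_scope in ['all', 'all_stores_all_products']
```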
---
**Date**: 2025-07-18 (final optimization)
**Subject**: Use raw IDs for single-ID custom scopes in global training

### Problem
User feedback: in the global-training "custom scope" mode, when only a single store and/or a single product is selected, the path should use the ID itself rather than a hash, for readability.

### Fix
1. **File**: `server/utils/file_save.py`
2. **Location**: the `custom`-scope branch of the `global` training mode in `_generate_identifier` and `get_model_paths`.
3. **Action**: added a single-ID check for both `store_ids` and `product_ids`.
4. **Content**:
    ```python
    # in _generate_identifier
    s_id = store_ids[0] if len(store_ids) == 1 else self._hash_ids(store_ids)
    p_id = product_ids[0] if len(product_ids) == 1 else self._hash_ids(product_ids)
    scope_part = f"custom_s_{s_id}_p_{p_id}"

    # in get_model_paths
    store_ids = kwargs.get('store_ids', [])
    product_ids = kwargs.get('product_ids', [])
    s_id = store_ids[0] if len(store_ids) == 1 else self._hash_ids(store_ids)
    p_id = product_ids[0] if len(product_ids) == 1 else self._hash_ids(product_ids)
    scope_parts.extend(['custom', s_id, p_id])
    ```
5. **Rationale**: this aligns `custom`-scope path generation with the `selected_stores` and `selected_products` modes, preferring the raw ID when only one is selected, which improves readability and consistency.
6. **Tests**:
    * **File**: `test/test_file_save_logic.py`
    * **Action**: added a test case for "global training - custom scope - single ID" path generation.
---
**Date**: 2025-07-18
**Subject**: Unify the training-page UI and fix backend data passing

### Problem
1. In the task lists of the "store training" and "global training" pages, version numbers were shown without the 'v' prefix, inconsistent with the "product training" page.
2. On the "global training" page, the "aggregation method" column of the task list was always empty.

### Root cause
1. **UI layer**: `UI/src/views/StoreTrainingView.vue` and `UI/src/views/training/GlobalTrainingView.vue` rendered the version number without the 'v'-prefix template used by `ProductTrainingView.vue`.
2. **Backend layer**: the `TrainingTask` dataclass in `server/utils/training_process_manager.py` lacked an `aggregation_method` field, so the value was lost along the whole path from task submission to data retrieval.

### Fix
1. **Frontend UI**:
    * **Files**: `UI/src/views/StoreTrainingView.vue`, `UI/src/views/training/GlobalTrainingView.vue`
    * **Action**: changed the `el-table-column` for `version` to use a `<template>` that renders `<el-tag>v{{ row.version }}</el-tag>`, making the display format consistent.

2. **Backend data flow** (a dataclass sketch follows this entry):
    * **File**: `server/utils/training_process_manager.py`
    * **Actions**:
        1. Added an `aggregation_method: Optional[str] = None` field to the `TrainingTask` dataclass.
        2. Updated `submit_task` to accept and set `aggregation_method` when creating a `TrainingTask`.
        3. Updated `run_training_task` to pass `task.aggregation_method` into `predictor.train_model`.

### Outcome
With the frontend and backend fixed together, all training pages now render identically, and the global-training "aggregation method" is recorded and displayed correctly.
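A sketch of the relevant slice of the dataclass; every field except `path_info` and `aggregation_method` is an assumption:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class TrainingTask:
    task_id: str                                # assumed field
    model_type: str                             # assumed field
    training_mode: str                          # assumed field
    path_info: Optional[dict] = None            # added in the save-logic refactor
    aggregation_method: Optional[str] = None    # added by this fix; fills the UI column
```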
61 xz模型添加流程.md Normal file
@ -0,0 +1,61 @@
# Standard process for adding a new model to the system

This document summarizes the standard process for adding a new forecasting model (using XGBoost as the example) to this project, as a clear, reusable roadmap for future work.

---

### Step 1: Create the model trainer

This is the core step: implementing the training logic of the new model. (A condensed sketch follows this list.)

1. **Create a new file**: under [`server/trainers/`](server/trainers/), create a new Python file, e.g. `new_model_trainer.py`.

2. **Define the training function**: in that file, define the core training function with the project's standard signature, accepting parameters such as `product_id`, `store_id`, `epochs`, and `path_info`.

3. **Implement the function body**:
    * **Data loading**: load data with [`utils.multi_store_data_utils.load_multi_store_data`](server/utils/multi_store_data_utils.py) and filter by `product_id` and `store_id`.
    * **Preprocessing**: convert the time series into a supervised-learning format. For a model like XGBoost this means building a "sliding window" (like the `create_dataset` function we implemented).
    * **Scaling (critical)**: you **must** normalize the features (`X`) and the target (`y`) with `sklearn.preprocessing.MinMaxScaler`, creating and fitting two scalers, `scaler_X` and `scaler_y`.
    * **Training**: initialize your new model and train it on the **normalized** data.
    * **Loss curve (optional but recommended)**: if the model supports it, capture training and validation losses during training, plot them with `matplotlib`, and save the figure as `..._loss_curve.png`.
    * **Checkpoints (optional but recommended)**: if the model supports callbacks, implement a custom callback that saves checkpoints at a fixed epoch interval (`..._checkpoint_epoch_{N}.pth`).
    * **Evaluation**: compute the evaluation metrics (RMSE, R2, etc.) on the **denormalized** predictions.
    * **Saving (critical)**:
        * Build a dict (payload) that **must** contain: `'model'` (the trained model object), `'config'` (the training configuration), `'scaler_X'` (the feature scaler), and `'scaler_y'` (the target scaler).
        * Save this dict to the path given by `path_info['model_path']`, using the appropriate library (`torch.save` for PyTorch models, `joblib.dump` for others such as XGBoost). **All files use the `.pth` extension.**
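A condensed sketch of the trainer skeleton described above, under stated assumptions: it takes an already-filtered sales series instead of calling the project's data loaders, and the window size and hyperparameters are illustrative.

```python
import os

import joblib
import numpy as np
import xgboost as xgb
from sklearn.preprocessing import MinMaxScaler


def create_dataset(series: np.ndarray, window: int):
    """Slide a window over the series: X = last `window` values, y = the next value."""
    X, y = [], []
    for i in range(len(series) - window):
        X.append(series[i:i + window])
        y.append(series[i + window])
    return np.array(X), np.array(y)


def train_product_model_with_new_model(sales: np.ndarray, path_info: dict, window: int = 7):
    # Scale features and target separately; both scalers must be saved for prediction
    X, y = create_dataset(sales.astype(np.float32), window)
    scaler_X, scaler_y = MinMaxScaler(), MinMaxScaler()
    X_scaled = scaler_X.fit_transform(X)
    y_scaled = scaler_y.fit_transform(y.reshape(-1, 1)).ravel()

    model = xgb.XGBRegressor(n_estimators=100)  # illustrative hyperparameters
    model.fit(X_scaled, y_scaled)

    # Save model + config + both scalers in one payload, .pth by project convention
    payload = {'model': model, 'config': {'window': window},
               'scaler_X': scaler_X, 'scaler_y': scaler_y}
    os.makedirs(os.path.dirname(path_info['model_path']) or '.', exist_ok=True)
    joblib.dump(payload, path_info['model_path'])
    return model
```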
---
### Step 2: Wire the trainer into the system

1. **Register the trainer**: open [`server/trainers/__init__.py`](server/trainers/__init__.py).
    * At the top of the file, import the training function from your new trainer file, e.g. `from .new_model_trainer import train_product_model_with_new_model`.
    * Add the new function name to the `__all__` list at the bottom of the file.

2. **Add dispatch logic**: open [`server/core/predictor.py`](server/core/predictor.py).
    * In the `train_model` method, find the `if/elif` chain and add a new `elif model_type == 'new_model':` branch that calls your new training function.
---
### Step 3: Implement the prediction logic

1. **Edit the predictor**: open [`server/predictors/model_predictor.py`](server/predictors/model_predictor.py).
2. **Add a prediction branch**: in `load_model_and_predict`, find the `if/elif` chain and add a new `elif model_type == 'new_model':` branch.
3. **Implement the branch** (a sketch follows this list):
    * Load the `.pth` model file with the same library used for saving (e.g. `joblib.load`).
    * From the loaded dict, you **must** extract `model`, `config`, `scaler_X`, and `scaler_y`.
    * Prepare the prediction input (e.g. the most recent N days of data).
    * Before predicting, you **must** normalize the input with `scaler_X.transform`.
    * After the model produces its output, you **must** denormalize it with `scaler_y.inverse_transform` to obtain real-valued predictions.
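A minimal sketch of such a branch, assuming the payload layout from Step 1:

```python
import joblib
import numpy as np


def predict_with_new_model(model_path: str, recent_values: np.ndarray):
    """Load the payload saved by the trainer and run one scaled prediction."""
    payload = joblib.load(model_path)
    model, scaler_X, scaler_y = payload['model'], payload['scaler_X'], payload['scaler_y']

    X = scaler_X.transform(recent_values.reshape(1, -1))  # normalize the input window
    y_scaled = model.predict(X)
    # Map the scaled output back to real units
    return scaler_y.inverse_transform(y_scaled.reshape(-1, 1)).ravel()
```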
---
### Step 4: Update the API and dependencies

1. **API endpoints**: open [`server/api.py`](server/api.py).
    * Add your new model ID (e.g. `'new_model'`) to the `valid_model_types` list in the `/api/training` route (the `start_training` function).
    * Add the new model's description to the list returned by the `/api/model_types` route (the `get_model_types` function) so it shows up in the frontend.

2. **Dependencies**: add the Python libraries your new model needs (e.g. `xgboost`) to [`requirements.txt`](requirements.txt).

Following these four steps, any new forecasting model can be integrated into the existing system consistently and robustly.
50 xz模型预测修改.md Normal file
@ -0,0 +1,50 @@
# Model prediction path fix log

**Modified**: 2025-07-18 18:43:50

## 1. Background

The system raised "file not found" errors during model prediction. Analysis showed the root cause: the model-loading logic (prediction time) and the model-saving logic (training time) followed inconsistent path rules.

- **Save rule (new)**: follows `xz训练模型保存规则.md`, saving models in a structured directory hierarchy, e.g. `saved_models/product/{product_id}_all/mlstm/v1/model.pth`.
- **Load logic (old)**: the code hard-coded a flat lookup, e.g. searching the `saved_models` root for a file named `{product_id}_{model_type}_v1.pth`.

This mismatch meant prediction could not locate the already-trained models.

## 2. Approach

To fix this, we centralized path management so that the whole application generates and resolves model paths through a single manager.

## 3. Code changes

### Change 1: Extend the path manager

- **File**: [`server/utils/file_save.py`](server/utils/file_save.py)
- **Action**: added a `get_model_path_for_prediction` method to the `ModelPathManager` class.
- **Purpose**:
    - Provides a dedicated function for resolving model paths **at prediction time**.
    - The function builds the full model path strictly according to the hierarchy defined in `xz训练模型保存规则.md` (an illustrative sketch follows).
    - Path-generation logic is thereby managed in one place, with no hard-coding scattered through the codebase.
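A sketch of what the resolution can look like; the signature and argument names are assumptions, only the directory layout comes from this document:

```python
import os


def get_model_path_for_prediction(base_dir, training_mode, scope, model_type, version):
    """Rebuild the hierarchical model path used at save time (illustrative signature)."""
    return os.path.join(base_dir, training_mode, scope, model_type, f'v{version}', 'model.pth')


# get_model_path_for_prediction('saved_models', 'product', 'P001_all', 'mlstm', 1)
# -> 'saved_models/product/P001_all/mlstm/v1/model.pth'
```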
### Change 2: Fix the API prediction endpoint

- **File**: [`server/api.py`](server/api.py)
- **Actions**:
    1. Reworked the internals of the `/api/prediction` endpoint (the `predict` function).
    2. Updated the definition and implementation of the helper `run_prediction`.
- **Purpose**:
    - **`predict`**: removed all of the old, incorrect logic that hand-assembled model filenames. It now instantiates `ModelPathManager` and calls the new `get_model_path_for_prediction` method to obtain the single correct model path.
    - **`run_prediction`**: its signature gained a `model_path` parameter so it can receive and forward the path resolved by `predict`; its internals were simplified to call `load_model_and_predict` directly.

### Change 3: Fix the model loader

- **File**: [`server/predictors/model_predictor.py`](server/predictors/model_predictor.py)
- **Action**: modified `load_model_and_predict`.
- **Purpose**:
    - Added a `model_path` parameter to the signature.
    - **Removed entirely** the old internal logic that tried to guess where the model file lives.
    - The function now relies solely on the `model_path` passed in from `api.py`, guaranteeing the load path is correct.

## 4. Conclusion

These three changes align the whole chain, from API request to model-file load, on a single structured path rule, eliminating the read failures caused by mismatched paths.
93 xz训练模型保存规则.md Normal file
@ -0,0 +1,93 @@
# Flat model-data handling specification (final)

**Version**: 4.0 (final)
**Core idea**: the logical path is folded into the filename, giving a fully flat file layout.

---

## 1. File-saving rules

### 1.1. Core principle

All metadata is encoded in the filename. A logical hierarchical path (e.g. `product/P001_all/mlstm/v2`) is converted into an underscore-joined filename prefix (`product_P001_all_mlstm_v2`).

### 1.2. Storage locations

- **Final artifacts**: all final models, metadata files, loss plots, etc. live directly in the `saved_models/` root.
- **Intermediate files**: all training checkpoints live in `saved_models/checkpoints/`.

### 1.3. Filename generation

1. **Build the logical path** from the training parameters (mode, scope, type, version).
    - *Example*: `product/P001_all/mlstm/v2`

2. **Derive the filename prefix** by replacing every `/` in the logical path with `_`.
    - *Example*: `product_P001_all_mlstm_v2`

3. **Append the file-type suffix**:
    - `_model.pth`
    - `_loss_curve.png`
    - `_checkpoint_best.pth`
    - `_checkpoint_epoch_{N}.pth`

#### **Complete examples:**

- **Final model**: `saved_models/product_P001_all_mlstm_v2_model.pth`
- **Best checkpoint**: `saved_models/checkpoints/product_P001_all_mlstm_v2_checkpoint_best.pth`
- **Epoch-50 checkpoint**: `saved_models/checkpoints/product_P001_all_mlstm_v2_checkpoint_epoch_50.pth`
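The prefix rule of step 2 is a one-liner; a sketch:

```python
def filename_prefix(logical_path: str) -> str:
    """'product/P001_all/mlstm/v2' -> 'product_P001_all_mlstm_v2'."""
    return logical_path.replace('/', '_')


# filename_prefix('product/P001_all/mlstm/v2') + '_model.pth'
# -> 'product_P001_all_mlstm_v2_model.pth'
```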
---
## 2. File-reading rules

1. **Determine the model metadata**: the training mode, scope, type, and version of the model to load.
2. **Build the filename prefix** with the same logic used at save time (e.g. `product_P001_all_mlstm_v2`).
3. **Locate the files**:
    - Final model: `saved_models/{prefix}_model.pth`.
    - Best checkpoint: `saved_models/checkpoints/{prefix}_checkpoint_best.pth`.

---

## 3. Database storage rules

The database serves as an index; it stores just enough metadata to reconstruct the filename prefix.

#### **Suggested `models` table:**

| Column | Type | Description | Example |
| :--- | :--- | :--- | :--- |
| `id` | INTEGER | Primary key | 1 |
| `filename_prefix` | TEXT | **Full filename prefix; usable as a unique key** | `product_P001_all_mlstm_v2` |
| `model_identifier` | TEXT | Version-control identifier (without the version) | `product_P001_all_mlstm` |
| `version` | INTEGER | Version number | `2` |
| `status` | TEXT | Model status | `completed`, `training`, `failed` |
| `created_at` | TEXT | Creation time | `2025-07-21 02:29:00` |
| `metrics_summary` | TEXT | JSON string of key metrics | `{"rmse": 10.5, "r2": 0.89}` |

#### **Write logic:**
- After training completes, insert one row. The `filename_prefix` column is the key for finding every file produced by that run (a sqlite sketch follows).
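A minimal sqlite sketch of this table; the database filename is an assumption:

```python
import sqlite3

conn = sqlite3.connect('prediction_history.db')  # database name is an assumption
conn.execute("""
CREATE TABLE IF NOT EXISTS models (
    id INTEGER PRIMARY KEY,
    filename_prefix TEXT UNIQUE,
    model_identifier TEXT,
    version INTEGER,
    status TEXT,
    created_at TEXT,
    metrics_summary TEXT
)
""")
# One row per completed training run; the prefix locates all of its files
conn.execute(
    "INSERT OR REPLACE INTO models "
    "(filename_prefix, model_identifier, version, status, created_at, metrics_summary) "
    "VALUES (?, ?, ?, ?, ?, ?)",
    ('product_P001_all_mlstm_v2', 'product_P001_all_mlstm', 2, 'completed',
     '2025-07-21 02:29:00', '{"rmse": 10.5, "r2": 0.89}')
)
conn.commit()
```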
---
## 4. Version-tracking rules

Version management relies on a `versions.json` file in the save root, giving atomic, thread-safe version increments.

- **Filename**: `versions.json`
- **Location**: `saved_models/versions.json`
- **Structure**: a JSON object whose `key` is the identifier without a version number and whose `value` is the latest version (an integer) for that identifier.
    - **Key**: `{prefix_core}_{model_type}` (e.g. `product_P001_all_mlstm`)
    - **Value**: `Integer`

#### **`versions.json` example:**
```json
{
  "product_P001_all_mlstm": 2,
  "store_S001_P002_transformer": 1
}
```

#### **Version-management flow:**

1. **Get a new version**: before training, build the `key`, read `versions.json`, and look up that key's `value`. The new version is `value + 1` (or `1` if the key is absent).
2. **Update the version**: after training succeeds, write the new version back to `versions.json`. This step **must use a file lock** to prevent concurrent conflicts. (A locking sketch follows.)
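A sketch of this flow, assuming the third-party `filelock` package (any equivalent file-locking mechanism works):

```python
import json
import os

from filelock import FileLock  # assumed third-party dependency


def get_next_version(versions_path: str, key: str) -> int:
    """Read versions.json under a lock and return value + 1 (1 if key is absent)."""
    with FileLock(versions_path + '.lock'):
        versions = {}
        if os.path.exists(versions_path):
            with open(versions_path, 'r') as f:
                versions = json.load(f)
        return versions.get(key, 0) + 1


def save_version(versions_path: str, key: str, version: int) -> None:
    """After a successful training run, write the new version back under the same lock."""
    with FileLock(versions_path + '.lock'):
        versions = {}
        if os.path.exists(versions_path):
            with open(versions_path, 'r') as f:
                versions = json.load(f)
        versions[key] = version
        with open(versions_path, 'w') as f:
            json.dump(versions, f, indent=2)
```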