微信号作品数据同步 (WeChat Channels work data sync)

Ethanfly · 1 day ago · commit 98a44b2191

+ 1 - 0
client/src/components.d.ts

@@ -20,6 +20,7 @@ declare module 'vue' {
     ElContainer: typeof import('element-plus/es')['ElContainer']
     ElDatePicker: typeof import('element-plus/es')['ElDatePicker']
     ElDialog: typeof import('element-plus/es')['ElDialog']
+    ElDivider: typeof import('element-plus/es')['ElDivider']
     ElDrawer: typeof import('element-plus/es')['ElDrawer']
     ElDropdown: typeof import('element-plus/es')['ElDropdown']
     ElDropdownItem: typeof import('element-plus/es')['ElDropdownItem']

+ 57 - 0
client/src/views/Analytics/Work/index.vue

@@ -243,6 +243,23 @@
                     </div>
                   </template>
 
+                  <!-- 视频号:播放、点赞、评论、分享、收藏、关注数、涨粉量、平均观看时长、完播率、2s退出率 -->
+                  <template v-else-if="selectedWork.platform === 'weixin_video'">
+                    <div
+                      v-for="item in weixinMetricCards"
+                      :key="item.label"
+                      class="data-card"
+                      :class="{ highlight: item.key && activeTrendMetric === item.key }"
+                      role="button"
+                      tabindex="0"
+                      @click="item.key && setTrendMetric(item.key)"
+                      @keyup.enter="item.key && setTrendMetric(item.key)"
+                    >
+                      <div class="card-label">{{ item.label }}</div>
+                      <div class="card-value">{{ item.value }}</div>
+                    </div>
+                  </template>
+
                   <!-- 其他平台:保持原口径 -->
                   <template v-else>
                     <div
@@ -454,6 +471,7 @@ interface WorkDetailData {
   collectCount: number;
   shareCount: number;
   fansIncrease: number;
+  followCount: number; // 视频号:关注数
   coverClickRate: string;
   avgWatchDuration: string;
   completionRate: string;
@@ -471,6 +489,7 @@ const workDetailData = ref<WorkDetailData>({
   collectCount: 0,
   shareCount: 0,
   fansIncrease: 0,
+  followCount: 0,
   coverClickRate: '0',
   avgWatchDuration: '0',
   completionRate: '0',
@@ -494,6 +513,7 @@ type TrendMetricKey =
   | 'completionRate'
   | 'twoSecondExitRate'
   | 'fansIncrease'
+  | 'followCount'
   | 'totalWatchDuration';
 
 const activeTrendMetric = ref<TrendMetricKey>('playCount');
@@ -516,6 +536,7 @@ const trendTitle = computed(() => {
       collectCount: '收藏量趋势',
       shareCount: '分享量趋势',
       fansIncrease: '涨粉量趋势',
+      followCount: '关注量趋势',
       exposureCount: '曝光量趋势',
       coverClickRate: '封面点击率趋势',
       avgWatchDuration: '平均观看时长趋势',
@@ -536,6 +557,7 @@ const trendTitle = computed(() => {
     completionRate: '完播率趋势',
     twoSecondExitRate: '2s退出率趋势',
     fansIncrease: '涨粉量趋势',
+    followCount: '关注量趋势',
     totalWatchDuration: '播放总时长趋势',
   };
   return map[activeTrendMetric.value] || '趋势';
@@ -797,6 +819,24 @@ const douyinMetricCards = computed<MetricCardConfig[]>(() => {
   ];
 });
 
+// 视频号:播放、点赞、评论、分享、收藏、关注数、涨粉量、平均观看时长、完播率、2s退出率
+const weixinMetricCards = computed<MetricCardConfig[]>(() => {
+  const d = workDetailData.value;
+  const base = selectedWork.value;
+  return [
+    { key: 'playCount', label: '播放量', value: formatNumber(d.playCount || base?.viewsCount || 0) },
+    { key: 'likeCount', label: '点赞量', value: formatNumber(d.likeCount || base?.likesCount || 0) },
+    { key: 'commentCount', label: '评论量', value: formatNumber(d.commentCount || base?.commentsCount || 0) },
+    { key: 'shareCount', label: '分享量', value: formatNumber(d.shareCount || base?.sharesCount || 0) },
+    { key: 'collectCount', label: '收藏量', value: formatNumber(d.collectCount || base?.collectsCount || 0) },
+    { key: 'followCount', label: '关注数', value: formatNumber(d.followCount || 0) },
+    // { key: 'fansIncrease', label: '涨粉量', value: formatNumber(d.fansIncrease || 0) },
+    { key: 'avgWatchDuration', label: '平均观看时长', value: formatAvgWatchDurationSeconds(d.avgWatchDuration) },
+    { key: 'completionRate', label: '完播率', value: formatRate(d.completionRate) },
+    // { key: 'twoSecondExitRate', label: '2s退出率', value: formatRate(d.twoSecondExitRate) },
+  ];
+});
+
 // 查看详情
 async function handleView(row: WorkData) {
   selectedWork.value = row;
@@ -805,6 +845,21 @@ async function handleView(row: WorkData) {
   workStatsHistory.value = [];
   drawerVisible.value = true;
   activeTab.value = 'core';
+
+  // [已注释] 视频号:点击详情时先同步每日数据(浏览器自动化 + CSV 导入)
+  // if (row.platform === 'weixin_video') {
+  //   detailLoading.value = true;
+  //   try {
+  //     const syncRes = await request.post(`/api/work-day-statistics/sync-weixin-video/${row.id}`);
+  //     if (syncRes?.success && (syncRes?.data?.inserted > 0 || syncRes?.data?.updated > 0)) {
+  //       ElMessage.success(syncRes.message || `已同步 ${(syncRes.data?.inserted || 0) + (syncRes.data?.updated || 0)} 条日数据`);
+  //     }
+  //   } catch (e) {
+  //     console.warn('视频号日数据同步失败(可忽略):', e);
+  //   } finally {
+  //     detailLoading.value = false;
+  //   }
+  // }
   
   // 先用列表行做“瞬时占位”(列表来自区间汇总,可能不等于 works 表累计值)
   workDetailData.value = {
@@ -816,6 +871,7 @@ async function handleView(row: WorkData) {
     collectCount: row.collectsCount || 0,
     shareCount: row.sharesCount || 0,
     fansIncrease: 0,
+    followCount: 0,
     coverClickRate: '0',
     avgWatchDuration: '0',
     completionRate: '0',
@@ -855,6 +911,7 @@ async function loadWorkBase(workId: number) {
       collectCount: toIntSafe(data.yesterdayCollectCount ?? 0),
       shareCount: toIntSafe(data.yesterdayShareCount ?? 0),
       fansIncrease: toIntSafe(data.yesterdayFansIncrease ?? 0),
+      followCount: toIntSafe(data.yesterdayFollowCount ?? 0),
       coverClickRate: String(data.yesterdayCoverClickRate ?? '0'),
       avgWatchDuration: String(data.yesterdayAvgWatchDuration ?? '0'),
       completionRate: String(data.yesterdayCompletionRate ?? '0'),
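
The template and computed above rely on MetricCardConfig, formatNumber, formatRate, formatAvgWatchDurationSeconds and setTrendMetric, which are defined elsewhere in index.vue and not part of this hunk. A minimal TypeScript sketch of the assumed card shape and click handler (names taken from the hunk; the reload behaviour of setTrendMetric is an assumption):

interface MetricCardConfig {
  key?: TrendMetricKey; // present only for cards that can drive the trend chart
  label: string;        // e.g. '播放量'
  value: string;        // pre-formatted display value
}

function setTrendMetric(key: TrendMetricKey) {
  activeTrendMetric.value = key; // assumed: switching the key re-renders the trend series for that metric
}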

+ 38 - 0
database/migrations/fix_platform_accounts_timestamp_format.sql

@@ -0,0 +1,38 @@
+-- 修复 platform_accounts 表的 created_at 和 updated_at 时间格式
+-- 将 TIMESTAMP 类型改为 DATETIME 类型,确保时间格式为 2026-02-05 12:22:22
+-- 执行日期: 2026-02-05
+
+USE media_manager;
+
+-- 步骤1: 设置会话时区为东八区(确保时间转换正确)
+SET time_zone = '+08:00';
+
+-- 步骤2: 修改 created_at 字段类型为 DATETIME
+-- MySQL 会自动将 TIMESTAMP 转换为 DATETIME,保持时间值不变
+ALTER TABLE platform_accounts 
+MODIFY COLUMN created_at DATETIME NULL;
+
+ALTER TABLE platform_accounts 
+MODIFY COLUMN created_at DATETIME DEFAULT CURRENT_TIMESTAMP;
+
+-- 步骤3: 修改 updated_at 字段类型为 DATETIME
+ALTER TABLE platform_accounts 
+MODIFY COLUMN updated_at DATETIME NULL;
+
+ALTER TABLE platform_accounts 
+MODIFY COLUMN updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP;
+
+-- 步骤4: 验证修改结果
+DESCRIBE platform_accounts;
+
+SELECT 
+    id, 
+    account_name, 
+    platform,
+    DATE_FORMAT(created_at, '%Y-%m-%d %H:%i:%s') AS created_at_formatted,
+    DATE_FORMAT(updated_at, '%Y-%m-%d %H:%i:%s') AS updated_at_formatted,
+    created_at,
+    updated_at
+FROM platform_accounts 
+ORDER BY id DESC 
+LIMIT 10;

+ 2 - 2
database/schema.sql

@@ -80,8 +80,8 @@ CREATE TABLE IF NOT EXISTS platform_accounts (
     status ENUM('active','expired','disabled') DEFAULT 'active',
     proxy_config JSON,
     group_id INT,
-    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
     FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE,
     FOREIGN KEY (group_id) REFERENCES account_groups(id) ON DELETE SET NULL,
     UNIQUE KEY uk_user_platform_account (user_id, platform, account_id)

+ 128 - 0
server/python/app.py

@@ -927,6 +927,134 @@ def get_works():
         return jsonify({"success": False, "error": str(e)}), 500
 
 
+# ==================== 视频号同步作品每日数据(浏览器自动化) ====================
+
+@app.route("/sync_weixin_account_works_daily_stats", methods=["POST"])
+def sync_weixin_account_works_daily_stats():
+    """
+    纯浏览器批量同步视频号账号下所有(在库)作品的每日数据到 work_day_statistics。
+    流程:打开 statistic/post → 单篇视频 → 近30天 → 遍历列表,按 exportId 匹配作品,
+    匹配则点击查看 → 详情页近30天 → 下载表格 → 解析 CSV 存入 work_day_statistics。
+    请求体: {
+        "works": [{"work_id": 906, "platform_video_id": "export/xxx"}, ...],
+        "cookie": "...",
+        "show_browser": false
+    }
+    """
+    try:
+        data = request.json or {}
+        works = data.get("works", [])
+        cookie_str = data.get("cookie", "")
+
+        if not works or not cookie_str:
+            return jsonify({
+                "success": False,
+                "error": "缺少 works 或 cookie 参数",
+            }), 400
+
+        show_browser = data.get("show_browser", False)
+        if isinstance(show_browser, str):
+            show_browser = show_browser.lower() in ("true", "1", "yes")
+        headless = not show_browser
+
+        def save_fn(stats_list):
+            if not stats_list:
+                return {"inserted": 0, "updated": 0}
+            return call_nodejs_api("POST", "/work-day-statistics/batch-dates", {"statistics": stats_list})
+
+        def update_works_fn(updates):
+            if not updates:
+                return {"updated": 0}
+            return call_nodejs_api("POST", "/works/batch-update-from-csv", {"updates": updates})
+
+        publisher = WeixinPublisher(headless=headless)
+        result = asyncio.run(publisher.sync_account_works_daily_stats_via_browser(
+            cookie_str, works, save_fn=save_fn, update_works_fn=update_works_fn, headless=headless
+        ))
+
+        if not result.get("success"):
+            return jsonify({
+                "success": False,
+                "error": result.get("error", "同步失败"),
+            }), 200
+
+        works_updated = result.get("works_updated", 0)
+        msg = f"批量同步完成: 处理 {result.get('total_processed', 0)} 个作品, 跳过 {result.get('total_skipped', 0)} 个, 新增 {result.get('inserted', 0)} 条, 更新 {result.get('updated', 0)} 条"
+        if works_updated > 0:
+            msg += f", works 表更新 {works_updated} 条"
+        return jsonify({
+            "success": True,
+            "message": msg,
+            "total_processed": result.get("total_processed", 0),
+            "total_skipped": result.get("total_skipped", 0),
+            "inserted": result.get("inserted", 0),
+            "updated": result.get("updated", 0),
+            "works_updated": works_updated,
+        })
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"success": False, "error": str(e)}), 500
+
+
+@app.route("/sync_weixin_work_daily_stats", methods=["POST"])
+def sync_weixin_work_daily_stats():
+    """
+    通过浏览器自动化同步单个视频号作品的每日数据到 work_day_statistics。
+    请求体: { "work_id": 906, "platform_video_id": "export/xxx", "cookie": "..." }
+    """
+    try:
+        data = request.json or {}
+        work_id = data.get("work_id")
+        platform_video_id = (data.get("platform_video_id") or "").strip()
+        cookie_str = data.get("cookie", "")
+
+        if not work_id or not platform_video_id or not cookie_str:
+            return jsonify({
+                "success": False,
+                "error": "缺少 work_id、platform_video_id 或 cookie 参数",
+            }), 400
+
+        work_id = int(work_id)
+        # show_browser=True 时显示浏览器窗口,便于观察点击操作
+        show_browser = data.get("show_browser", True)
+        if isinstance(show_browser, str):
+            show_browser = show_browser.lower() in ("true", "1", "yes")
+        headless = not show_browser
+        print(f"[SyncWXDailyStats] headless={headless} (show_browser={show_browser})", flush=True)
+        publisher = WeixinPublisher(headless=headless)
+        result = asyncio.run(publisher.sync_work_daily_stats_via_browser(
+            cookie_str, work_id, platform_video_id
+        ))
+
+        if not result.get("success"):
+            return jsonify({
+                "success": False,
+                "error": result.get("error", "同步失败"),
+            }), 200
+
+        stats = result.get("statistics") or []
+        if not stats:
+            return jsonify({
+                "success": True,
+                "message": "无新数据需要保存",
+                "inserted": 0,
+                "updated": 0,
+            })
+
+        save_result = call_nodejs_api("POST", "/work-day-statistics/batch-dates", {
+            "statistics": stats,
+        })
+        return jsonify({
+            "success": True,
+            "message": f"同步成功: 新增 {save_result.get('inserted', 0)} 条, 更新 {save_result.get('updated', 0)} 条",
+            "inserted": save_result.get("inserted", 0),
+            "updated": save_result.get("updated", 0),
+        })
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"success": False, "error": str(e)}), 500
+
+
 # ==================== 保存作品日统计数据接口 ====================
 
 @app.route("/work_day_statistics", methods=["POST"])

+ 596 - 8
server/python/platforms/weixin.py

@@ -322,16 +322,20 @@ class WeixinPublisher(BasePublisher):
         playwright = await async_playwright().start()
         
         # 参考 matrix: 使用系统内的 Chrome 浏览器,避免 H264 编码错误
-        # 如果没有安装 Chrome,则使用默认 Chromium
+        # 非 headless 时添加 slow_mo 便于观察点击操作
+        launch_opts = {"headless": self.headless}
+        if not self.headless:
+            launch_opts["slow_mo"] = 400  # 每个操作间隔 400ms,便于观看
+            print(f"[{self.platform_name}] 有头模式 + slow_mo=400ms,浏览器将可见", flush=True)
         try:
-            self.browser = await playwright.chromium.launch(
-                headless=self.headless,
-                channel="chrome"  # 使用系统 Chrome
-            )
-            print(f"[{self.platform_name}] 使用系统 Chrome 浏览器")
+            launch_opts["channel"] = "chrome"
+            self.browser = await playwright.chromium.launch(**launch_opts)
+            print(f"[{self.platform_name}] 使用系统 Chrome 浏览器", flush=True)
         except Exception as e:
-            print(f"[{self.platform_name}] Chrome 不可用,使用 Chromium: {e}")
-            self.browser = await playwright.chromium.launch(headless=self.headless)
+            print(f"[{self.platform_name}] Chrome 不可用,使用 Chromium: {e}", flush=True)
+            if "channel" in launch_opts:
+                del launch_opts["channel"]
+            self.browser = await playwright.chromium.launch(**launch_opts)
         
         # 设置 HTTP Headers 防止重定向
         headers = {
@@ -1202,6 +1206,590 @@ class WeixinPublisher(BasePublisher):
         return WorksResult(success=True, platform=self.platform_name, works=works, total=total, has_more=has_more, next_page=next_page)
     
 
+    async def sync_work_daily_stats_via_browser(
+        self, cookies: str, work_id: int, platform_video_id: str
+    ) -> dict:
+        """
+        通过浏览器自动化同步单个作品的每日数据到 work_day_statistics。
+        流程:
+        1. 打开 statistic/post 页,点击单篇视频 tab,点击近30天
+        2. 监听 post_list 接口,根据 exportId 匹配 platform_video_id 得到 objectId
+        3. 找到 data-row-key=objectId 的行,点击「查看」
+        4. 进入详情页,点击数据详情的近30天,点击下载表格
+        5. 解析 CSV 并返回 statistics 列表(供 Node 保存)
+        """
+        import csv
+        import tempfile
+        from pathlib import Path
+
+        result = {"success": False, "error": "", "statistics": [], "inserted": 0, "updated": 0}
+        post_list_data = {"list": []}
+
+        async def handle_response(response):
+            try:
+                if "statistic/post_list" in response.url and response.request.method == "POST":
+                    try:
+                        body = await response.json()
+                        if body.get("errCode") == 0 and body.get("data"):
+                            post_list_data["list"] = body.get("data", {}).get("list", [])
+                    except Exception:
+                        pass
+            except Exception:
+                pass
+
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            await self.set_cookies(cookie_list)
+            if not self.page:
+                raise Exception("Page not initialized")
+
+            self.page.on("response", handle_response)
+
+            # 1. 打开数据分析-作品数据页
+            print(f"[{self.platform_name}] 打开数据分析页...", flush=True)
+            await self.page.goto("https://channels.weixin.qq.com/platform/statistic/post", timeout=30000)
+            if not self.headless:
+                print(f"[{self.platform_name}] 浏览器已打开,请将窗口置于前台观看操作(等待 5 秒)...", flush=True)
+                await asyncio.sleep(5)
+            else:
+                await asyncio.sleep(3)
+            if "login" in self.page.url:
+                raise Exception("Cookie 已过期,请重新登录")
+
+            # 2. 点击「单篇视频」tab
+            tab_sel = "div.weui-desktop-tab__navs ul li:nth-child(2) a"
+            try:
+                await self.page.wait_for_selector(tab_sel, timeout=8000)
+                await self.page.click(tab_sel)
+            except Exception:
+                tab_sel = "a:has-text('单篇视频')"
+                await self.page.click(tab_sel)
+            await asyncio.sleep(2)
+
+            # 3. 点击「近30天」(单篇视频页的日期范围筛选)
+            # 选择器优先级:精确匹配单篇视频区域内的日期范围 radio 组
+            radio_selectors = [
+                "div.post-single-wrap div.weui-desktop-radio-group.radio-group label:has-text('近30天')",
+                "div.post-single-wrap div.filter-wrap div.weui-desktop-radio-group label:nth-child(2)",
+                "div.post-single-wrap div.card-body div.filter-wrap div:nth-child(2) label:nth-child(2)",
+                "div.post-single-wrap label:has-text('近30天')",
+                "div.weui-desktop-radio-group label:has-text('近30天')",
+                "label:has-text('近30天')",
+            ]
+            clicked = False
+            for sel in radio_selectors:
+                try:
+                    el = self.page.locator(sel).first
+                    if await el.count() > 0:
+                        await el.click()
+                        clicked = True
+                        print(f"[{self.platform_name}] 已点击近30天按钮 (selector: {sel[:50]}...)", flush=True)
+                        break
+                except Exception as e:
+                    continue
+            if not clicked:
+                print(f"[{self.platform_name}] 警告: 未找到近30天按钮,继续尝试...", flush=True)
+            await asyncio.sleep(3)
+
+            # 4. 从 post_list 响应中找 exportId -> objectId
+            export_id_to_object = {}
+            for item in post_list_data["list"]:
+                eid = (item.get("exportId") or "").strip()
+                oid = (item.get("objectId") or "").strip()
+                if eid and oid:
+                    export_id_to_object[eid] = oid
+
+            object_id = export_id_to_object.get(platform_video_id) or export_id_to_object.get(
+                platform_video_id.strip()
+            )
+            if not object_id:
+                # 尝试宽松匹配(platform_video_id 可能带前缀)
+                for eid, oid in export_id_to_object.items():
+                    if platform_video_id in eid or eid in platform_video_id:
+                        object_id = oid
+                        break
+            if not object_id:
+                result["error"] = f"未在 post_list 中匹配到 exportId={platform_video_id}"
+                print(f"[{self.platform_name}] {result['error']}", flush=True)
+                return result
+
+            # 5. 找到 data-row-key=objectId 的行,点击「查看」
+            view_btn = self.page.locator(f'tr[data-row-key="{object_id}"] a.detail-wrap, tr[data-row-key="{object_id}"] a:has-text("查看")')
+            try:
+                await view_btn.first.wait_for(timeout=5000)
+                await view_btn.first.click()
+            except Exception as e:
+                view_btn = self.page.locator(f'tr[data-row-key="{object_id}"] a')
+                if await view_btn.count() > 0:
+                    await view_btn.first.click()
+                else:
+                    raise Exception(f"未找到 objectId={object_id} 的查看按钮: {e}")
+            await asyncio.sleep(3)
+
+            # 6. 详情页:点击数据详情的「近30天」,再点击「下载表格」
+            detail_radio = "div.post-statistic-common div.filter-wrap label:nth-child(2)"
+            for sel in [detail_radio, "div.main-body label:has-text('近30天')"]:
+                try:
+                    el = self.page.locator(sel).first
+                    if await el.count() > 0:
+                        await el.click()
+                        break
+                except Exception:
+                    continue
+            await asyncio.sleep(2)
+
+            # 保存到 server/tmp 目录
+            download_dir = Path(__file__).resolve().parent.parent.parent / "tmp"
+            download_dir.mkdir(parents=True, exist_ok=True)
+
+            async with self.page.expect_download(timeout=15000) as download_info:
+                download_btn = self.page.locator("div.post-statistic-common div.filter-extra a, a:has-text('下载表格')")
+                if await download_btn.count() == 0:
+                    raise Exception("未找到「下载表格」按钮")
+                await download_btn.first.click()
+            download = await download_info.value
+            save_path = download_dir / f"work_{work_id}_{int(time.time())}.csv"
+            await download.save_as(save_path)
+
+            # 7. 解析 CSV -> statistics
+            stats_list = []
+            with open(save_path, "r", encoding="utf-8-sig", errors="replace") as f:
+                reader = csv.DictReader(f)
+                rows = list(reader)
+            for row in rows:
+                date_val = (
+                    row.get("日期")
+                    or row.get("date")
+                    or row.get("时间")
+                    or row.get("时间周期", "")
+                ).strip()
+                if not date_val:
+                    continue
+                dt = None
+                norm = date_val[:10].replace("年", "-").replace("月", "-").replace("日", "-").replace("/", "-")
+                if len(norm) >= 8 and norm.count("-") >= 2:
+                    parts = norm.split("-")
+                    if len(parts) == 3:
+                        try:
+                            y, m, d = int(parts[0]), int(parts[1]), int(parts[2])
+                            if 2000 <= y <= 2100 and 1 <= m <= 12 and 1 <= d <= 31:
+                                dt = datetime(y, m, d)
+                        except (ValueError, IndexError):
+                            pass
+                if not dt:
+                    for fmt in ["%Y-%m-%d", "%Y/%m/%d", "%m/%d/%Y"]:
+                        try:
+                            dt = datetime.strptime((date_val.split()[0] if date_val else "")[:10], fmt)
+                            break
+                        except (ValueError, IndexError):
+                            dt = None
+                if not dt:
+                    continue
+                rec_date = dt.strftime("%Y-%m-%d")
+                play = self._parse_count(row.get("播放", "") or row.get("播放量", "") or row.get("play_count", "0"))
+                like = self._parse_count(row.get("点赞", "") or row.get("like_count", "0"))
+                comment = self._parse_count(row.get("评论", "") or row.get("comment_count", "0"))
+                share = self._parse_count(row.get("分享", "") or row.get("share_count", "0"))
+                collect = self._parse_count(row.get("收藏", "") or row.get("collect_count", "0"))
+                comp_rate = (row.get("完播率", "") or row.get("completion_rate", "0")).strip().rstrip("%") or "0"
+                avg_dur = (row.get("平均播放时长", "") or row.get("avg_watch_duration", "0")).strip()
+                stats_list.append({
+                    "work_id": work_id,
+                    "record_date": rec_date,
+                    "play_count": play,
+                    "like_count": like,
+                    "comment_count": comment,
+                    "share_count": share,
+                    "collect_count": collect,
+                    "completion_rate": comp_rate,
+                    "avg_watch_duration": avg_dur,
+                })
+            result["statistics"] = stats_list
+            result["success"] = True
+            try:
+                os.remove(save_path)
+            except Exception:
+                pass
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            result["error"] = str(e)
+        finally:
+            try:
+                await self.close_browser()
+            except Exception:
+                pass
+        return result
+
+    async def sync_account_works_daily_stats_via_browser(
+        self,
+        cookies: str,
+        works: List[dict],
+        save_fn=None,
+        update_works_fn=None,
+        headless: bool = True,
+    ) -> dict:
+        """
+        纯浏览器批量同步账号下所有作品(在库的)的每日数据到 work_day_statistics。
+        流程:
+        1. 打开 statistic/post → 点击单篇视频 → 点击近30天
+        2. 【首次】监听 post_list 接口 → 解析响应更新 works 表 yesterday_* 字段
+        3. 监听 post_list 获取 exportId->objectId 映射
+        4. 遍历 post_list 的每一条:
+           - 若 exportId 在 works 的 platform_video_id 中无匹配 → 跳过
+           - 若匹配 → 找到 data-row-key=objectId 的行,点击「查看」
+           - 详情页:默认近7天,直接监听 feed_aggreagate_data_by_tab_type 接口
+           - 从「全部」tab 解析 browse/like/comment/forward/fav/follow,日期从昨天往前推
+           - 通过 save_fn 存入 work_day_statistics
+           - 返回列表页,继续下一条
+        works: [{"work_id": int, "platform_video_id": str}, ...]
+        save_fn: (stats_list: List[dict]) -> {inserted, updated},由调用方传入,用于调用 Node batch-dates
+        update_works_fn: (updates: List[dict]) -> {updated},由调用方传入,用于将 post_list 解析数据更新到 works 表(仅首次调用)
+        """
+        from pathlib import Path
+        from datetime import timedelta
+
+        result = {
+            "success": True,
+            "error": "",
+            "total_processed": 0,
+            "total_skipped": 0,
+            "inserted": 0,
+            "updated": 0,
+            "works_updated": 0,
+        }
+        # platform_video_id(exportId) -> work_id
+        export_id_to_work = {}
+        for w in works:
+            pvid = (w.get("platform_video_id") or w.get("platformVideoId") or "").strip()
+            wid = w.get("work_id") or w.get("workId")
+            if pvid and wid is not None:
+                export_id_to_work[pvid] = int(wid)
+                # 兼容可能带/不带前缀(如 export/xxx vs xxx)
+                if "/" in pvid:
+                    export_id_to_work[pvid.split("/")[-1]] = int(wid)
+
+        post_list_data = {"list": []}
+        feed_aggreagate_data = {"body": None}
+
+        async def handle_response(response):
+            try:
+                url = response.url
+                if "statistic/post_list" in url:
+                    try:
+                        body = await response.json()
+                        if body.get("errCode") == 0 and body.get("data"):
+                            post_list_data["list"] = body.get("data", {}).get("list", [])
+                    except Exception:
+                        pass
+                elif "feed_aggreagate_data_by_tab_type" in url:
+                    try:
+                        body = await response.json()
+                        if body.get("errCode") == 0 and body.get("data"):
+                            feed_aggreagate_data["body"] = body
+                    except Exception:
+                        pass
+            except Exception:
+                pass
+
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            await self.set_cookies(cookie_list)
+            if not self.page:
+                raise Exception("Page not initialized")
+
+            self.page.on("response", handle_response)
+
+            # 1. 打开数据分析-作品数据页
+            print(f"[{self.platform_name}] 打开数据分析页...", flush=True)
+            await self.page.goto("https://channels.weixin.qq.com/platform/statistic/post", timeout=30000)
+            if not headless:
+                print(f"[{self.platform_name}] 浏览器已打开,请将窗口置于前台观看操作(等待 5 秒)...", flush=True)
+                await asyncio.sleep(5)
+            else:
+                await asyncio.sleep(3)
+            if "login" in self.page.url:
+                raise Exception("Cookie 已过期,请重新登录")
+
+            # 2. 点击「单篇视频」tab
+            tab_sel = "div.weui-desktop-tab__navs ul li:nth-child(2) a"
+            try:
+                await self.page.wait_for_selector(tab_sel, timeout=8000)
+                await self.page.click(tab_sel)
+            except Exception:
+                tab_sel = "a:has-text('单篇视频')"
+                await self.page.click(tab_sel)
+            await asyncio.sleep(2)
+
+            # 3. 点击「近30天」前清空 list,点击后等待 handler 捕获带 fullPlayRate 的 post_list
+            post_list_data["list"] = []
+            radio_selectors = [
+                "div.post-single-wrap div.weui-desktop-radio-group.radio-group label:has-text('近30天')",
+                "div.post-single-wrap div.filter-wrap div.weui-desktop-radio-group label:nth-child(2)",
+                "div.post-single-wrap label:has-text('近30天')",
+                "div.weui-desktop-radio-group label:has-text('近30天')",
+                "label:has-text('近30天')",
+            ]
+            clicked = False
+            for sel in radio_selectors:
+                try:
+                    el = self.page.locator(sel).first
+                    if await el.count() > 0:
+                        await el.click()
+                        clicked = True
+                        print(f"[{self.platform_name}] 已点击近30天 (selector: {sel[:40]}...)", flush=True)
+                        break
+                except Exception:
+                    continue
+            if not clicked:
+                print(f"[{self.platform_name}] 警告: 未找到近30天按钮", flush=True)
+            await asyncio.sleep(5)
+
+            # 4. 从 post_list 获取列表
+            items = post_list_data["list"]
+            if not items:
+                result["error"] = "未监听到 post_list 或列表为空"
+                print(f"[{self.platform_name}] {result['error']}", flush=True)
+                return result
+
+            # 4.5 【仅首次】从 post_list 接口响应解析数据 → 更新 works 表(不再下载 CSV)
+            # post_list 返回字段映射: readCount->播放量, likeCount->点赞, commentCount->评论, forwardCount->分享,
+            # fullPlayRate->完播率(0-1小数), avgPlayTimeSec->平均播放时长(秒), exportId->匹配 work_id
+            if update_works_fn and items:
+                try:
+                    updates = []
+                    for it in items:
+                        eid = (it.get("exportId") or "").strip()
+                        if not eid:
+                            continue
+                        work_id = export_id_to_work.get(eid)
+                        if work_id is None:
+                            for k, v in export_id_to_work.items():
+                                if eid in k or k in eid:
+                                    work_id = v
+                                    break
+                        if work_id is None:
+                            continue
+                        read_count = int(it.get("readCount") or 0)
+                        like_count = int(it.get("likeCount") or 0)
+                        comment_count = int(it.get("commentCount") or 0)
+                        forward_count = int(it.get("forwardCount") or 0)
+                        follow_count = int(it.get("followCount") or 0)
+                        full_play_rate = it.get("fullPlayRate")
+                        if full_play_rate is not None:
+                            comp_rate = f"{float(full_play_rate) * 100:.2f}%"
+                        else:
+                            comp_rate = "0"
+                        avg_sec = it.get("avgPlayTimeSec")
+                        if avg_sec is not None:
+                            avg_dur = f"{float(avg_sec):.2f}秒"
+                        else:
+                            avg_dur = "0"
+                        updates.append({
+                            "work_id": work_id,
+                            "yesterday_play_count": read_count,
+                            "yesterday_like_count": like_count,
+                            "yesterday_comment_count": comment_count,
+                            "yesterday_share_count": forward_count,
+                            "yesterday_follow_count": follow_count,
+                            "yesterday_completion_rate": comp_rate,
+                            "yesterday_avg_watch_duration": avg_dur,
+                        })
+                    if updates:
+                        try:
+                            save_result = update_works_fn(updates)
+                            result["works_updated"] = save_result.get("updated", 0)
+                        except Exception as api_err:
+                            import traceback
+                            traceback.print_exc()
+                except Exception as e:
+                    import traceback
+                    traceback.print_exc()
+                    print(f"[{self.platform_name}] 解析 post_list 更新 works 失败: {e}", flush=True)
+
+            # 辅助:点击单篇视频 + 近30天,恢复列表视图(go_back 后会回到全部视频页)
+            async def ensure_single_video_near30():
+                tab_sel = "div.weui-desktop-tab__navs ul li:nth-child(2) a"
+                try:
+                    await self.page.wait_for_selector(tab_sel, timeout=8000)
+                    await self.page.click(tab_sel)
+                except Exception:
+                    await self.page.click("a:has-text('单篇视频')")
+                await asyncio.sleep(2)
+                for sel in [
+                    "div.post-single-wrap div.weui-desktop-radio-group.radio-group label:has-text('近30天')",
+                    "div.post-single-wrap label:has-text('近30天')",
+                    "div.weui-desktop-radio-group label:has-text('近30天')",
+                    "label:has-text('近30天')",
+                ]:
+                    try:
+                        el = self.page.locator(sel).first
+                        if await el.count() > 0:
+                            await el.click()
+                            break
+                    except Exception:
+                        continue
+                await asyncio.sleep(3)
+
+            # 5. 遍历每一条,按 exportId 匹配作品
+            processed_export_ids = set()
+
+            for idx, item in enumerate(items):
+                eid = (item.get("exportId") or "").strip()
+                oid = (item.get("objectId") or "").strip()
+                if not oid:
+                    continue
+
+                # 已处理过的跳过(理论上循环顺序即处理顺序,此处做双重保险)
+                if eid in processed_export_ids:
+                    print(f"[{self.platform_name}] 跳过 [{idx+1}] exportId={eid} (已处理)", flush=True)
+                    continue
+
+                # go_back 后回到全部视频页,需重新点击单篇视频+近30天
+                if idx > 0:
+                    await ensure_single_video_near30()
+
+                # 匹配 work_id
+                work_id = export_id_to_work.get(eid)
+                if work_id is None:
+                    for k, v in export_id_to_work.items():
+                        if eid in k or k in eid:
+                            work_id = v
+                            break
+                if work_id is None:
+                    result["total_skipped"] += 1
+                    print(f"[{self.platform_name}] 跳过 [{idx+1}] exportId={eid} (库中无对应作品)", flush=True)
+                    continue
+
+                # 点击「查看」:Ant Design 表格 tr[data-row-key] > td > div.slot-wrap > a.detail-wrap
+                # 操作列可能在 ant-table-fixed-right 内,优先尝试
+                view_selectors = [
+                    f'div.ant-table-fixed-right tr[data-row-key="{oid}"] a.detail-wrap',
+                    f'tr[data-row-key="{oid}"] a.detail-wrap',
+                    f'tr[data-row-key="{oid}"] td a.detail-wrap',
+                    f'tr[data-row-key="{oid}"] a:has-text("查看")',
+                    f'tr[data-row-key="{oid}"] a',
+                ]
+                clicked = False
+                for sel in view_selectors:
+                    view_btn = self.page.locator(sel)
+                    if await view_btn.count() > 0:
+                        try:
+                            await view_btn.first.wait_for(timeout=3000)
+                            await view_btn.first.click()
+                            clicked = True
+                            print(f"[{self.platform_name}] 已点击查看 (selector: {sel[:40]}...)", flush=True)
+                            break
+                        except Exception as e:
+                            continue
+                if not clicked:
+                    print(f"[{self.platform_name}] 未找到 objectId={oid} 的查看按钮", flush=True)
+                    result["total_skipped"] += 1
+                    continue
+                await asyncio.sleep(3)
+
+                # 详情页:默认展示近7天,页面加载时自动请求 feed_aggreagate,不清空 body 避免覆盖已监听到的响应
+                await asyncio.sleep(4)
+
+                # 从 feed_aggreagate 响应解析「全部」数据
+                # 数据结构: data.dataByFanstype[].dataByTabtype[] 中 tabTypeName="全部" 或 tabType=999
+                # 日期:从昨天往前推 N 天(含昨天),数组从最早到最晚排列
+                body = feed_aggreagate_data.get("body")
+                if not body or not body.get("data"):
+                    print(f"[{self.platform_name}] work_id={work_id} 未监听到 feed_aggreagate 有效响应", flush=True)
+                    await self.page.go_back()
+                    await asyncio.sleep(2)
+                    continue
+
+                tab_all = None
+                for fan_item in body.get("data", {}).get("dataByFanstype", []):
+                    for tab_item in fan_item.get("dataByTabtype", []):
+                        if tab_item.get("tabTypeName") == "全部" or tab_item.get("tabType") == 999:
+                            tab_all = tab_item.get("data")
+                            break
+                    if tab_all is not None:
+                        break
+                if not tab_all:
+                    tab_all = body.get("data", {}).get("feedData", [{}])[0].get("totalData")
+                if not tab_all:
+                    print(f"[{self.platform_name}] work_id={work_id} 未找到「全部」数据", flush=True)
+                    await self.page.go_back()
+                    await asyncio.sleep(2)
+                    continue
+
+                browse = tab_all.get("browse", [])
+                n = len(browse)
+                if n == 0:
+                    print(f"[{self.platform_name}] work_id={work_id} browse 为空", flush=True)
+                    await self.page.go_back()
+                    await asyncio.sleep(2)
+                    continue
+
+                # 日期:昨天往前推 n 天,index 0 = 最早日
+                today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
+                yesterday = today - timedelta(days=1)
+                start_date = yesterday - timedelta(days=n - 1)
+
+                like_arr = tab_all.get("like", [])
+                comment_arr = tab_all.get("comment", [])
+                forward_arr = tab_all.get("forward", [])
+                fav_arr = tab_all.get("fav", [])
+                follow_arr = tab_all.get("follow", [])
+
+                stats_list = []
+                for i in range(n):
+                    rec_dt = start_date + timedelta(days=i)
+                    rec_date = rec_dt.strftime("%Y-%m-%d")
+                    play = self._parse_count(browse[i] if i < len(browse) else "0")
+                    like = self._parse_count(like_arr[i] if i < len(like_arr) else "0")
+                    comment = self._parse_count(comment_arr[i] if i < len(comment_arr) else "0")
+                    share = self._parse_count(forward_arr[i] if i < len(forward_arr) else "0")
+                    follow = self._parse_count(follow_arr[i] if i < len(follow_arr) else "0")
+                    # fav[i] 不入库,follow[i] 入 follow_count
+                    stats_list.append({
+                        "work_id": work_id,
+                        "record_date": rec_date,
+                        "play_count": play,
+                        "like_count": like,
+                        "comment_count": comment,
+                        "share_count": share,
+                        "collect_count": 0,
+                        "follow_count": follow,
+                        "completion_rate": "0",
+                        "avg_watch_duration": "0",
+                    })
+                print(f"[{self.platform_name}] work_id={work_id} 从 feed_aggreagate 解析得到 {len(stats_list)} 条日统计", flush=True)
+
+                # 存入 work_day_statistics(通过 save_fn 调用 Node)
+                if save_fn and stats_list:
+                    try:
+                        save_result = save_fn(stats_list)
+                        result["inserted"] += save_result.get("inserted", 0)
+                        result["updated"] += save_result.get("updated", 0)
+                    except Exception as e:
+                        print(f"[{self.platform_name}] work_id={work_id} 保存失败: {e}", flush=True)
+
+                result["total_processed"] += 1
+                processed_export_ids.add(eid)
+
+                # 返回列表页,继续下一条(会回到全部视频页,下次循环会重新点击单篇视频+近30天)
+                await self.page.go_back()
+                await asyncio.sleep(2)
+            print(f"[{self.platform_name}] 批量同步完成: 处理 {result['total_processed']} 个作品, 跳过 {result['total_skipped']} 个", flush=True)
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            result["success"] = False
+            result["error"] = str(e)
+        finally:
+            try:
+                await self.close_browser()
+            except Exception:
+                pass
+        return result
+
     async def get_comments(self, cookies: str, work_id: str, cursor: str = "") -> CommentsResult:
         """
         获取视频号作品评论(完全参考 get_weixin_work_comments.py 的接口监听逻辑)
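
In sync_account_works_daily_stats_via_browser above, the feed_aggreagate response yields plain per-day arrays (browse/like/comment/forward/fav/follow) ordered oldest-to-newest with no dates attached, so record_date is derived by counting back from yesterday. A small TypeScript sketch of the same mapping, assuming that convention holds:

// Given n per-day values whose last element is yesterday, index 0 is the earliest day.
function datesEndingYesterday(n: number): string[] {
  const fmt = (d: Date) =>
    `${d.getFullYear()}-${String(d.getMonth() + 1).padStart(2, '0')}-${String(d.getDate()).padStart(2, '0')}`;
  const yesterday = new Date();
  yesterday.setDate(yesterday.getDate() - 1);
  return Array.from({ length: n }, (_, i) => {
    const d = new Date(yesterday);
    d.setDate(yesterday.getDate() - (n - 1 - i)); // i = n-1 → yesterday, i = 0 → n-1 days before yesterday
    return fmt(d);
  });
}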

+ 2 - 2
server/src/models/entities/PlatformAccount.ts

@@ -63,11 +63,11 @@ export class PlatformAccount {
   @Column({ type: 'int', nullable: true, name: 'group_id' })
   groupId!: number | null;
 
-  @Column({ type: 'timestamp', name: 'created_at', default: () => 'CURRENT_TIMESTAMP' })
+  @Column({ type: 'datetime', name: 'created_at', default: () => 'CURRENT_TIMESTAMP' })
   createdAt!: Date;
 
   @Column({
-    type: 'timestamp',
+    type: 'datetime',
     name: 'updated_at',
     default: () => 'CURRENT_TIMESTAMP',
     onUpdate: 'CURRENT_TIMESTAMP',

+ 3 - 0
server/src/models/entities/Work.ts

@@ -78,6 +78,9 @@ export class Work {
   @Column({ name: 'yesterday_fans_increase', type: 'int', default: 0 })
   yesterdayFansIncrease!: number;
 
+  @Column({ name: 'yesterday_follow_count', type: 'int', default: 0 })
+  yesterdayFollowCount!: number;
+
   @Column({ name: 'yesterday_cover_click_rate', type: 'varchar', length: 50, default: '0' })
   yesterdayCoverClickRate!: string;
 

+ 3 - 0
server/src/models/entities/WorkDayStatistics.ts

@@ -33,6 +33,9 @@ export class WorkDayStatistics {
   @Column({ name: 'fans_increase', type: 'int', default: 0, comment: '涨粉数' })
   fansIncrease!: number;
 
+  @Column({ name: 'follow_count', type: 'int', default: 0, comment: '关注数' })
+  followCount!: number;
+
   @Column({ name: 'cover_click_rate', type: 'varchar', length: 50, default: '0', comment: '封面点击率' })
   coverClickRate!: string;
 

+ 110 - 1
server/src/routes/internal.ts

@@ -11,7 +11,7 @@ import { validateRequest } from '../middleware/validate.js';
 import { config } from '../config/index.js';
 import { wsManager } from '../websocket/index.js';
 import { WS_EVENTS } from '@media-manager/shared';
-import { AppDataSource, PlatformAccount } from '../models/index.js';
+import { AppDataSource, PlatformAccount, Work } from '../models/index.js';
 import { CookieManager } from '../automation/cookie.js';
 
 const router = Router();
@@ -88,6 +88,56 @@ router.post(
 );
 
 /**
+ * POST /api/internal/work-day-statistics/batch-dates
+ * 按日期批量保存作品日统计数据(用于视频号 CSV 导入等)
+ */
+router.post(
+  '/work-day-statistics/batch-dates',
+  [
+    body('statistics').isArray().withMessage('statistics 必须是数组'),
+    body('statistics.*.workId').optional().isNumeric().withMessage('workId 必须是数字'),
+    body('statistics.*.work_id').optional().isNumeric().withMessage('work_id 必须是数字'),
+    body('statistics.*.recordDate').optional().isString().withMessage('recordDate 必须是字符串'),
+    body('statistics.*.record_date').optional().isString().withMessage('record_date 必须是字符串'),
+    validateRequest,
+  ],
+  asyncHandler(async (req, res) => {
+    const items = req.body.statistics.map((item: any) => {
+      const workId = item.workId ?? item.work_id;
+      const recordDateStr = item.recordDate ?? item.record_date ?? '';
+      const recordDate = recordDateStr ? new Date(recordDateStr) : new Date();
+      recordDate.setHours(0, 0, 0, 0);
+      return {
+        workId: Number(workId),
+        recordDate,
+        playCount: item.playCount ?? item.play_count ?? 0,
+        exposureCount: item.exposureCount ?? item.exposure_count ?? 0,
+        likeCount: item.likeCount ?? item.like_count ?? 0,
+        commentCount: item.commentCount ?? item.comment_count ?? 0,
+        shareCount: item.shareCount ?? item.share_count ?? 0,
+        collectCount: item.collectCount ?? item.collect_count ?? 0,
+        fansIncrease: item.fansIncrease ?? item.fans_increase ?? 0,
+        followCount: item.followCount ?? item.follow_count ?? 0,
+        coverClickRate: String(item.coverClickRate ?? item.cover_click_rate ?? '0'),
+        avgWatchDuration: String(item.avgWatchDuration ?? item.avg_watch_duration ?? '0'),
+        totalWatchDuration: String(item.totalWatchDuration ?? item.total_watch_duration ?? '0'),
+        completionRate: String(item.completionRate ?? item.completion_rate ?? '0'),
+        twoSecondExitRate: String(item.twoSecondExitRate ?? item.two_second_exit_rate ?? '0'),
+      };
+    });
+
+    const result = await workDayStatisticsService.saveStatisticsForDateBatch(items);
+
+    res.json({
+      success: true,
+      inserted: result.inserted,
+      updated: result.updated,
+      message: `保存成功: 新增 ${result.inserted} 条, 更新 ${result.updated} 条`,
+    });
+  })
+);
+
+/**
  * GET /api/internal/work-day-statistics/trend
  * 获取数据趋势
  */
@@ -196,6 +246,65 @@ router.get(
 );
 
 /**
+ * POST /api/internal/works/batch-update-from-csv
+ * 根据视频号 CSV 明细批量更新 works 表 yesterday_* 字段
+ * 请求体: { "updates": [{ work_id, yesterday_play_count, yesterday_like_count, yesterday_comment_count, yesterday_share_count, yesterday_completion_rate, yesterday_avg_watch_duration }, ...] }
+ */
+router.post(
+  '/works/batch-update-from-csv',
+  [
+    body('updates').isArray().withMessage('updates 必须是数组'),
+    validateRequest,
+  ],
+  asyncHandler(async (req, res) => {
+    const raw = req.body.updates as Array<any>;
+    const updates = raw
+      .map((item) => ({
+        work_id: item.work_id ?? item.workId,
+        yesterday_play_count: item.yesterday_play_count ?? item.yesterdayPlayCount,
+        yesterday_like_count: item.yesterday_like_count ?? item.yesterdayLikeCount,
+        yesterday_comment_count: item.yesterday_comment_count ?? item.yesterdayCommentCount,
+        yesterday_share_count: item.yesterday_share_count ?? item.yesterdayShareCount,
+        yesterday_follow_count: item.yesterday_follow_count ?? item.yesterdayFollowCount,
+        yesterday_completion_rate: item.yesterday_completion_rate ?? item.yesterdayCompletionRate,
+        yesterday_avg_watch_duration: item.yesterday_avg_watch_duration ?? item.yesterdayAvgWatchDuration,
+      }))
+      .filter((item) => item.work_id != null);
+
+    const workRepository = AppDataSource.getRepository(Work);
+    let updated = 0;
+    for (const item of updates) {
+      const patch: Partial<{
+        yesterdayPlayCount: number;
+        yesterdayLikeCount: number;
+        yesterdayCommentCount: number;
+        yesterdayShareCount: number;
+        yesterdayFollowCount: number;
+        yesterdayCompletionRate: string;
+        yesterdayAvgWatchDuration: string;
+      }> = {};
+      if (item.yesterday_play_count !== undefined) patch.yesterdayPlayCount = item.yesterday_play_count;
+      if (item.yesterday_like_count !== undefined) patch.yesterdayLikeCount = item.yesterday_like_count;
+      if (item.yesterday_comment_count !== undefined) patch.yesterdayCommentCount = item.yesterday_comment_count;
+      if (item.yesterday_share_count !== undefined) patch.yesterdayShareCount = item.yesterday_share_count;
+      if (item.yesterday_follow_count !== undefined) patch.yesterdayFollowCount = item.yesterday_follow_count;
+      if (item.yesterday_completion_rate !== undefined) patch.yesterdayCompletionRate = String(item.yesterday_completion_rate);
+      if (item.yesterday_avg_watch_duration !== undefined) patch.yesterdayAvgWatchDuration = String(item.yesterday_avg_watch_duration);
+      if (Object.keys(patch).length === 0) continue;
+
+      const result = await workRepository.update(item.work_id, patch);
+      if (result.affected && result.affected > 0) updated += result.affected;
+    }
+
+    res.json({
+      success: true,
+      updated,
+      message: `成功更新 ${updated} 条 works 记录`,
+    });
+  })
+);
+
+/**
  * POST /api/internal/captcha/request
  * Python 发布服务请求前端输入验证码(短信/图形)
  */
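
The batch-dates route above normalizes both camelCase and snake_case keys, so the Python CSV and feed parsers can post their records unchanged. An illustrative payload (values are made up):

// POST /api/internal/work-day-statistics/batch-dates
const body = {
  statistics: [
    // snake_case, as produced by the Python side
    { work_id: 906, record_date: '2026-02-04', play_count: 1234, like_count: 56, share_count: 7, follow_count: 3 },
    // camelCase works equally well
    { workId: 906, recordDate: '2026-02-05', playCount: 980, likeCount: 41, shareCount: 5, followCount: 1 },
  ],
};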

+ 99 - 2
server/src/routes/workDayStatistics.ts

@@ -1,5 +1,5 @@
 import { Router } from 'express';
-import { query } from 'express-validator';
+import { query, param } from 'express-validator';
 import { spawn } from 'child_process';
 import path from 'path';
 import { fileURLToPath } from 'url';
@@ -7,8 +7,9 @@ import { authenticate } from '../middleware/auth.js';
 import { asyncHandler } from '../middleware/error.js';
 import { validateRequest } from '../middleware/validate.js';
 import { WorkDayStatisticsService } from '../services/WorkDayStatisticsService.js';
-import { AppDataSource, Work } from '../models/index.js';
+import { AppDataSource, Work, PlatformAccount } from '../models/index.js';
 import { logger } from '../utils/logger.js';
+import { CookieManager } from '../automation/cookie.js';
 
 /**
  * Work day statistics(原 Python 统计接口的 Node 版本)
@@ -536,6 +537,102 @@ router.get(
   })
 );
 
+const PYTHON_SERVICE_URL = process.env.PYTHON_PUBLISH_SERVICE_URL || process.env.XHS_SERVICE_URL || 'http://localhost:5005';
+
+/**
+ * POST /api/work-day-statistics/sync-weixin-video/:workId
+ * 同步视频号作品的每日数据(浏览器自动化 + CSV 导入)
+ */
+router.post(
+  '/sync-weixin-video/:workId',
+  [param('workId').isInt().withMessage('workId 必须是整数'), validateRequest],
+  asyncHandler(async (req, res) => {
+    const workId = Number(req.params.workId);
+    const workRepository = AppDataSource.getRepository(Work);
+    const work = await workRepository.findOne({
+      where: { id: workId },
+      relations: ['account'],
+    });
+
+    if (!work) {
+      return res.status(404).json({ success: false, message: '作品不存在' });
+    }
+    if (work.account.userId !== req.user!.userId) {
+      return res.status(403).json({ success: false, message: '无权访问该作品' });
+    }
+    if (work.platform !== 'weixin_video') {
+      return res.status(400).json({ success: false, message: '仅支持视频号作品' });
+    }
+
+    const account = work.account as PlatformAccount;
+    if (!account.cookieData) {
+      return res.status(400).json({ success: false, message: '账号未配置 Cookie' });
+    }
+
+    let cookieStr: string;
+    try {
+      cookieStr = CookieManager.decrypt(account.cookieData);
+    } catch {
+      cookieStr = account.cookieData;
+    }
+
+    let cookieForPython: string;
+    try {
+      JSON.parse(cookieStr);
+      cookieForPython = cookieStr;
+    } catch {
+      cookieForPython = JSON.stringify(
+        cookieStr.split(';').filter(Boolean).map((s) => {
+          const idx = s.trim().indexOf('=');
+          const name = idx >= 0 ? s.trim().slice(0, idx) : s.trim();
+          const value = idx >= 0 ? s.trim().slice(idx + 1) : '';
+          return { name, value, domain: '.weixin.qq.com', path: '/' };
+        })
+      );
+    }
+
+    const pyRes = await fetch(`${PYTHON_SERVICE_URL}/sync_weixin_work_daily_stats`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({
+        work_id: workId,
+        platform_video_id: work.platformVideoId,
+        cookie: cookieForPython,
+        show_browser: true, // 显示浏览器窗口,便于观察点击操作
+      }),
+      signal: AbortSignal.timeout(120000),
+    });
+
+    const data = (await pyRes.json().catch(() => ({}))) as {
+      success?: boolean;
+      error?: string;
+      message?: string;
+      inserted?: number;
+      updated?: number;
+    };
+
+    if (!pyRes.ok) {
+      return res.status(500).json({
+        success: false,
+        message: data.error || 'Python 服务请求失败',
+      });
+    }
+
+    if (!data.success) {
+      return res.json({
+        success: false,
+        message: data.error || '同步失败',
+      });
+    }
+
+    return res.json({
+      success: true,
+      message: data.message || '同步成功',
+      data: { inserted: data.inserted ?? 0, updated: data.updated ?? 0 },
+    });
+  })
+);
+
 /**
  * GET /api/work-day-statistics/work/:workId
  * 获取单个作品的历史统计数据(用于作品详情页)
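
The sync-weixin-video route above is what the (currently commented-out) client call in Work/index.vue targets; a minimal usage sketch mirroring that call:

// Assumed client usage (same request helper as the rest of Work/index.vue):
const res = await request.post(`/api/work-day-statistics/sync-weixin-video/${workId}`);
if (res?.success) {
  const { inserted = 0, updated = 0 } = res.data ?? {};
  ElMessage.success(res.message || `已同步 ${inserted + updated} 条日数据`);
}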

+ 6 - 7
server/src/scheduler/index.ts

@@ -65,12 +65,11 @@ export class TaskScheduler {
     this.scheduleJob('wx-video-data-center-import', '30 12 * * *', this.importWeixinVideoDataCenterLast30Days.bind(this));
 
     // 每天 12:35:同步视频号作品维度的「作品列表 + 按天聚合-全部」数据,写入 work_day_statistics
-    // [已中止] 暂时禁用,等待接口问题解决
-    // this.scheduleJob(
-    //   'wx-video-work-statistics-import',
-    //   '35 12 * * *',
-    //   this.importWeixinVideoWorkStatistics.bind(this)
-    // );
+    this.scheduleJob(
+      'wx-video-work-statistics-import',
+      '35 12 * * *',
+      this.importWeixinVideoWorkStatistics.bind(this)
+    );
 
     this.scheduleJob('auto-reply-messages', '* * * * *', this.autoReplyMessages.bind(this));
     // 注意:账号刷新由客户端定时触发,不在服务端自动执行
@@ -89,7 +88,7 @@ export class TaskScheduler {
     logger.info('[Scheduler]   - dy-work-statistics-import:  daily at 12:50 (50 12 * * *)');
     logger.info('[Scheduler]   - bj-content-overview-import: daily at 12:20 (20 12 * * *)');
     logger.info('[Scheduler]   - wx-video-data-center-import: daily at 12:30 (30 12 * * *)');
-    // logger.info('[Scheduler]   - wx-video-work-statistics-import: daily at 12:35 (35 12 * * *)');
+    logger.info('[Scheduler]   - wx-video-work-statistics-import: daily at 12:35 (35 12 * * *)');
     logger.info('[Scheduler]   - auto-reply-messages: every minute (* * * * *)');
     logger.info('[Scheduler] Note: Account refresh is triggered by client, not server');
     logger.info('[Scheduler] ========================================');

+ 77 - 0
server/src/scripts/fix-platform-accounts-timestamp.ts

@@ -0,0 +1,77 @@
+#!/usr/bin/env tsx
+/**
+ * 修复 platform_accounts 表的 created_at 和 updated_at 字段格式
+ * 将 timestamp 类型改为 datetime 类型,时间格式为 2026-02-05 12:22:22
+ *
+ * 运行: cd server && pnpm exec tsx src/scripts/fix-platform-accounts-timestamp.ts
+ */
+import { initDatabase, AppDataSource } from '../models/index.js';
+import { logger } from '../utils/logger.js';
+
+async function fixPlatformAccountsTimestamp() {
+  try {
+    await initDatabase();
+    logger.info('数据库连接已初始化');
+
+    const queryRunner = AppDataSource.createQueryRunner();
+    await queryRunner.connect();
+
+    try {
+      logger.info('\n========================================');
+      logger.info('修复 platform_accounts 时间字段格式...');
+      logger.info('========================================\n');
+
+      await queryRunner.query(`SET time_zone = '+08:00'`);
+
+      logger.info('1. 修改 created_at 字段类型...');
+      await queryRunner.query(`
+        ALTER TABLE platform_accounts 
+        MODIFY COLUMN created_at DATETIME NULL
+      `);
+      await queryRunner.query(`
+        ALTER TABLE platform_accounts 
+        MODIFY COLUMN created_at DATETIME DEFAULT CURRENT_TIMESTAMP
+      `);
+      logger.info('   ✓ created_at 修改完成');
+
+      logger.info('2. 修改 updated_at 字段类型...');
+      await queryRunner.query(`
+        ALTER TABLE platform_accounts 
+        MODIFY COLUMN updated_at DATETIME NULL
+      `);
+      await queryRunner.query(`
+        ALTER TABLE platform_accounts 
+        MODIFY COLUMN updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+      `);
+      logger.info('   ✓ updated_at 修改完成');
+
+      logger.info('\n3. 验证修复结果...');
+      const rows = await queryRunner.query(`
+        SELECT id, account_name, created_at, updated_at 
+        FROM platform_accounts 
+        ORDER BY id DESC 
+        LIMIT 5
+      `);
+      rows.forEach((row: any) => {
+        logger.info(`   ID ${row.id}: created_at=${row.created_at}, updated_at=${row.updated_at}`);
+      });
+
+      logger.info('\n========================================');
+      logger.info('platform_accounts 时间字段修复完成!');
+      logger.info('========================================\n');
+    } catch (error: any) {
+      logger.error('修复过程中出错:', error);
+      throw error;
+    } finally {
+      await queryRunner.release();
+    }
+  } catch (error: any) {
+    logger.error('修复失败:', error);
+    process.exit(1);
+  } finally {
+    await AppDataSource.destroy();
+    process.exit(0);
+  }
+}
+
+fixPlatformAccountsTimestamp().catch(console.error);
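
Editor's note: if you want the script to prove the column types actually changed (rather than only printing sample rows), a small check against `information_schema` can be appended to step 3. This is a sketch meant to be pasted inside the existing try block, reusing the `queryRunner` and `logger` already in scope; it assumes MySQL/MariaDB, where `DATABASE()` returns the current schema.

```ts
// Optional verification sketch: confirm created_at / updated_at are now DATETIME.
const columns = await queryRunner.query(
  `SELECT COLUMN_NAME, COLUMN_TYPE
     FROM information_schema.COLUMNS
    WHERE TABLE_SCHEMA = DATABASE()
      AND TABLE_NAME = 'platform_accounts'
      AND COLUMN_NAME IN ('created_at', 'updated_at')`
);
columns.forEach((c: any) => logger.info(`   ${c.COLUMN_NAME}: ${c.COLUMN_TYPE}`));
```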

+ 20 - 2
server/src/scripts/run-weixin-video-work-stats-import.ts

@@ -2,11 +2,29 @@ import { initDatabase } from '../models/index.js';
 import { logger } from '../utils/logger.js';
 import { WeixinVideoWorkStatisticsImportService } from '../services/WeixinVideoWorkStatisticsImportService.js';
 
+/**
+ * 用法: pnpm exec tsx server/src/scripts/run-weixin-video-work-stats-import.ts [accountId] [show]
+ * 不传 accountId 则同步所有视频号账号
+ * 第二个参数传 show 则打开浏览器可视模式,便于观察点击操作
+ */
 async function main() {
   try {
     await initDatabase();
-    logger.info('[WX WorkStats] Manual run start...');
-    await WeixinVideoWorkStatisticsImportService.runDailyImport();
+    const accountIdArg = process.argv[2];
+    const showArg = process.argv[3];
+    const showBrowser = (showArg || '').toLowerCase() === 'show';
+    if (accountIdArg) {
+      const accountId = parseInt(accountIdArg, 10);
+      if (isNaN(accountId)) {
+        logger.error('[WX WorkStats] accountId 必须是数字');
+        process.exit(1);
+      }
+      logger.info(`[WX WorkStats] 单账号同步 accountId=${accountId}${showBrowser ? ' (可视模式)' : ''} start...`);
+      await WeixinVideoWorkStatisticsImportService.runDailyImportForAccount(accountId, showBrowser);
+    } else {
+      logger.info('[WX WorkStats] 全量同步 start...');
+      await WeixinVideoWorkStatisticsImportService.runDailyImport();
+    }
     logger.info('[WX WorkStats] Manual run done.');
     process.exit(0);
   } catch (e) {

+ 92 - 0
server/src/scripts/test-weixin-work-daily-sync.ts

@@ -0,0 +1,92 @@
+#!/usr/bin/env tsx
+/**
+ * 测试视频号作品每日数据同步(work_id=906)
+ * 用法: cd server && pnpm exec tsx src/scripts/test-weixin-work-daily-sync.ts [workId]
+ */
+import { initDatabase, AppDataSource, Work, PlatformAccount } from '../models/index.js';
+import { logger } from '../utils/logger.js';
+import { CookieManager } from '../automation/cookie.js';
+
+const PYTHON_SERVICE_URL = process.env.PYTHON_PUBLISH_SERVICE_URL || 'http://localhost:5005';
+
+async function main() {
+  const workId = Number(process.argv[2] || 906);
+  await initDatabase();
+
+  const workRepo = AppDataSource.getRepository(Work);
+  const work = await workRepo.findOne({
+    where: { id: workId },
+    relations: ['account'],
+  });
+
+  if (!work) {
+    logger.error(`作品 ${workId} 不存在`);
+    process.exit(1);
+  }
+  if (work.platform !== 'weixin_video') {
+    logger.error(`作品 ${workId} 不是视频号,platform=${work.platform}`);
+    process.exit(1);
+  }
+
+  const account = work.account as PlatformAccount | undefined;
+  if (!account || !account.cookieData) {
+    logger.error('账号不存在或未配置 Cookie');
+    process.exit(1);
+  }
+
+  let cookieStr: string;
+  try {
+    cookieStr = CookieManager.decrypt(account.cookieData);
+  } catch {
+    cookieStr = account.cookieData;
+  }
+
+  let cookieForPython: string;
+  try {
+    JSON.parse(cookieStr);
+    cookieForPython = cookieStr;
+  } catch {
+    cookieForPython = JSON.stringify(
+      cookieStr
+        .split(';')
+        .filter(Boolean)
+        .map((s) => {
+          const idx = s.trim().indexOf('=');
+          const name = idx >= 0 ? s.trim().slice(0, idx) : s.trim();
+          const value = idx >= 0 ? s.trim().slice(idx + 1) : '';
+          return { name, value, domain: '.weixin.qq.com', path: '/' };
+        })
+    );
+  }
+
+  logger.info(`测试同步 work_id=${workId}, platform_video_id=${work.platformVideoId}`);
+  logger.info(`调用 Python: ${PYTHON_SERVICE_URL}/sync_weixin_work_daily_stats`);
+
+  const res = await fetch(`${PYTHON_SERVICE_URL}/sync_weixin_work_daily_stats`, {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify({
+      work_id: workId,
+      platform_video_id: work.platformVideoId,
+      cookie: cookieForPython,
+      show_browser: true,
+    }),
+    signal: AbortSignal.timeout(120000),
+  });
+
+  const data = (await res.json().catch(() => ({}))) as any;
+  logger.info('Python 响应:', JSON.stringify(data, null, 2));
+
+  if (data.success) {
+    logger.info(`成功: ${data.message}, inserted=${data.inserted}, updated=${data.updated}`);
+  } else {
+    logger.error(`失败: ${data.error}`);
+  }
+  await AppDataSource.destroy();
+  process.exit(data.success ? 0 : 1);
+}
+
+main().catch((e) => {
+  logger.error(e);
+  process.exit(1);
+});
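
Editor's note: the shape of the Python response here is only inferred from the fields this test script reads (`success`, `message`, `error`, `inserted`, `updated`). A typed helper makes that assumption explicit; it is a hypothetical field list, not a documented contract of `/sync_weixin_work_daily_stats`.

```ts
// Hypothetical response type for POST /sync_weixin_work_daily_stats,
// inferred from the fields accessed above; the Python service may return more.
interface WeixinWorkDailySyncResponse {
  success?: boolean;
  message?: string;
  error?: string;
  inserted?: number;
  updated?: number;
}

function parseSyncResponse(raw: unknown): WeixinWorkDailySyncResponse {
  return typeof raw === 'object' && raw !== null ? (raw as WeixinWorkDailySyncResponse) : {};
}
```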

+ 80 - 363
server/src/services/WeixinVideoWorkStatisticsImportService.ts

@@ -1,56 +1,19 @@
 /**
- * 视频号:作品维度「作品列表 + 按天聚合数据」→ 导入 work_day_statistics
+ * 视频号:作品维度「纯浏览器自动化」→ 导入 work_day_statistics
  *
- * 流程:
- * 1. 获取 works 表中 platform=weixin_video 的作品(platform_video_id 存的是 exportId)
- * 2. 调用 post_list 接口获取作品列表,通过 exportId 匹配得到 objectId
- * 3. 对每个作品调用 feed_aggreagate_data_by_tab_type,取「全部」tab 的按天数据
- * 4. 将 browse→播放、like→点赞、comment→评论 写入 work_day_statistics(follow=关注、fav/forward 暂不入库)
+ * 流程:调用 Python 纯浏览器接口,由 Python 完成:
+ * 1. 打开 statistic/post → 点击单篇视频 → 点击近30天
+ * 2. 监听 post_list 获取 exportId->objectId
+ * 3. 遍历列表,按 exportId 匹配 DB 作品,匹配则点击查看 → 详情页近30天 → 下载表格
+ * 4. 解析 CSV 存入 work_day_statistics
  */
 
-import crypto from 'crypto';
 import { AppDataSource, PlatformAccount, Work } from '../models/index.js';
 import { logger } from '../utils/logger.js';
-import { WorkDayStatisticsService } from './WorkDayStatisticsService.js';
 import { CookieManager } from '../automation/cookie.js';
 
-const POST_LIST_BASE =
-  'https://channels.weixin.qq.com/micro/statistic/cgi-bin/mmfinderassistant-bin/statistic/post_list';
-const FEED_AGGREGATE_BASE =
-  'https://channels.weixin.qq.com/micro/statistic/cgi-bin/mmfinderassistant-bin/statistic/feed_aggreagate_data_by_tab_type';
-
-/** 列表页 _pageUrl(与浏览器「数据统计-作品」列表一致) */
-const POST_LIST_PAGE_URL = 'https://channels.weixin.qq.com/micro/statistic/post';
-/** 详情页 _pageUrl(与浏览器 postDetail 一致,feed_aggreagate 用) */
-const POST_DETAIL_PAGE_URL = 'https://channels.weixin.qq.com/micro/statistic/postDetail';
-
-/** 生成随机 _rid(格式如 6982df69-ff6e46a5,8hex-8hex) */
-function generateRandomRid(): string {
-  const a = crypto.randomBytes(4).toString('hex');
-  const b = crypto.randomBytes(4).toString('hex');
-  return `${a}-${b}`;
-}
-
-/**
- * 构建带 _aid、_rid、_pageUrl 的 URL。
- * 若传入 sessionAid/sessionRid 则优先使用(本账号 post_list 生成的,复用于 feed_aggreagate);
- * 否则读环境变量 WX_VIDEO_AID、WX_VIDEO_RID。
- */
-function buildUrlWithAidRid(
-  base: string,
-  pageUrl: string,
-  sessionAid?: string,
-  sessionRid?: string
-): string {
-  const aid = sessionAid ?? process.env.WX_VIDEO_AID?.trim() ?? '';
-  const rid = sessionRid ?? process.env.WX_VIDEO_RID?.trim() ?? '';
-  const params = new URLSearchParams();
-  if (aid) params.set('_aid', aid);
-  if (rid) params.set('_rid', rid);
-  params.set('_pageUrl', pageUrl);
-  const qs = params.toString();
-  return qs ? `${base}?${qs}` : base;
-}
+const PYTHON_SERVICE_URL =
+  process.env.PYTHON_PUBLISH_SERVICE_URL || process.env.XHS_SERVICE_URL || 'http://localhost:5005';
 
 function tryDecryptCookieData(cookieData: string | null): string | null {
   if (!cookieData) return null;
@@ -63,107 +26,52 @@ function tryDecryptCookieData(cookieData: string | null): string | null {
   }
 }
 
-/** 将账号 cookie_data 转为 HTTP Cookie 头字符串 */
-function getCookieHeaderString(cookieData: string | null): string {
+/** 将 cookie 转为 Python 接口所需格式(JSON 数组或原始字符串) */
+function getCookieForPython(cookieData: string | null): string {
   const raw = tryDecryptCookieData(cookieData);
   if (!raw) return '';
   const s = raw.trim();
   if (!s) return '';
-  if (s.startsWith('[') || s.startsWith('{')) {
-    try {
-      const parsed = JSON.parse(s);
-      const arr = Array.isArray(parsed) ? parsed : parsed?.cookies ?? [];
-      if (!Array.isArray(arr)) return '';
-      return arr
-        .map((c: { name?: string; value?: string }) => {
-          const name = String(c?.name ?? '').trim();
-          const value = String(c?.value ?? '').trim();
-          return name ? `${name}=${value}` : '';
-        })
+  try {
+    JSON.parse(s);
+    return s; // 已是 JSON
+  } catch {
+    return JSON.stringify(
+      s
+        .split(';')
         .filter(Boolean)
-        .join('; ');
-    } catch {
-      return s;
-    }
-  }
-  return s;
-}
-
-/** 从 Cookie 字符串中解析 x-wechat-uin(可选) */
-function getXWechatUinFromCookie(cookieHeader: string): string | undefined {
-  const match = cookieHeader.match(/\bwxuin=(\d+)/i);
-  return match ? match[1] : undefined;
-}
-
-/** 从账号 account_id 得到 _log_finder_id(去掉 weixin_video_ 前缀,保证以 @finder 结尾) */
-function getLogFinderId(accountId: string | null): string {
-  if (!accountId) return '';
-  const s = String(accountId).trim();
-  const prefix = 'weixin_video_';
-  const id = s.startsWith(prefix) ? s.slice(prefix.length) : s;
-  if (!id) return '';
-  return id.endsWith('@finder') ? id : `${id}@finder`;
-}
-
-function buildPostListUrl(sessionAid?: string, sessionRid?: string): string {
-  return buildUrlWithAidRid(POST_LIST_BASE, POST_LIST_PAGE_URL, sessionAid, sessionRid);
-}
-
-function buildFeedAggregateUrl(sessionAid?: string, sessionRid?: string): string {
-  return buildUrlWithAidRid(FEED_AGGREGATE_BASE, POST_DETAIL_PAGE_URL, sessionAid, sessionRid);
-}
-
-/** 近30天到昨天:返回 [startTime, endTime] Unix 秒(中国时间 00:00:00 起算) */
-function getLast30DaysRange(): { startTime: number; endTime: number; startDate: Date; endDate: Date } {
-  const now = new Date();
-  const yesterday = new Date(now.getFullYear(), now.getMonth(), now.getDate() - 1);
-  const startDate = new Date(yesterday.getFullYear(), yesterday.getMonth(), yesterday.getDate() - 30);
-  startDate.setHours(0, 0, 0, 0);
-  yesterday.setHours(23, 59, 59, 999);
-  const startTime = Math.floor(startDate.getTime() / 1000);
-  const endTime = Math.floor(yesterday.getTime() / 1000);
-  const endDateNorm = new Date(yesterday.getFullYear(), yesterday.getMonth(), yesterday.getDate());
-  endDateNorm.setHours(0, 0, 0, 0);
-  return { startTime, endTime, startDate, endDate: endDateNorm };
-}
-
-function toInt(val: unknown, defaultVal = 0): number {
-  if (typeof val === 'number') return Number.isFinite(val) ? Math.round(val) : defaultVal;
-  if (typeof val === 'string') {
-    const n = parseInt(val, 10);
-    return Number.isFinite(n) ? n : defaultVal;
+        .map((part) => {
+          const idx = part.trim().indexOf('=');
+          const name = idx >= 0 ? part.trim().slice(0, idx) : part.trim();
+          const value = idx >= 0 ? part.trim().slice(idx + 1) : '';
+          return { name, value, domain: '.weixin.qq.com', path: '/' };
+        })
+    );
   }
-  return defaultVal;
-}
-
-interface PostListItem {
-  objectId?: string;
-  exportId?: string;
-}
-
-interface FeedAggregateDataByTabType {
-  tabType?: number;
-  tabTypeName?: string;
-  data?: {
-    browse?: string[];
-    like?: string[];
-    comment?: string[];
-    forward?: string[];
-    fav?: string[];
-    follow?: string[];
-  };
 }
 
 export class WeixinVideoWorkStatisticsImportService {
   private accountRepository = AppDataSource.getRepository(PlatformAccount);
   private workRepository = AppDataSource.getRepository(Work);
-  private workDayStatisticsService = new WorkDayStatisticsService();
 
   static async runDailyImport(): Promise<void> {
     const svc = new WeixinVideoWorkStatisticsImportService();
     await svc.runDailyImportForAllWeixinVideoAccounts();
   }
 
+  /** 仅同步指定账号(用于测试),showBrowser=true 时显示浏览器窗口 */
+  static async runDailyImportForAccount(accountId: number, showBrowser = false): Promise<void> {
+    const svc = new WeixinVideoWorkStatisticsImportService();
+    const account = await svc.accountRepository.findOne({
+      where: { id: accountId, platform: 'weixin_video' as any },
+    });
+    if (!account) {
+      throw new Error(`未找到视频号账号 id=${accountId}`);
+    }
+    logger.info(`[WX WorkStats] 单账号同步 accountId=${accountId} showBrowser=${showBrowser}`);
+    await svc.importAccountWorksStatistics(account, showBrowser);
+  }
+
   async runDailyImportForAllWeixinVideoAccounts(): Promise<void> {
     const accounts = await this.accountRepository.find({
       where: { platform: 'weixin_video' as any },
@@ -182,9 +90,9 @@ export class WeixinVideoWorkStatisticsImportService {
     logger.info('[WX WorkStats] All accounts done');
   }
 
-  private async importAccountWorksStatistics(account: PlatformAccount): Promise<void> {
-    const cookieHeader = getCookieHeaderString(account.cookieData);
-    if (!cookieHeader) {
+  private async importAccountWorksStatistics(account: PlatformAccount, showBrowser = false): Promise<void> {
+    const cookieForPython = getCookieForPython(account.cookieData);
+    if (!cookieForPython) {
       logger.warn(`[WX WorkStats] accountId=${account.id} cookieData 为空或无法解析,跳过`);
       return;
     }
@@ -197,251 +105,60 @@ export class WeixinVideoWorkStatisticsImportService {
       return;
     }
 
-    const { startTime, endTime, startDate, endDate } = getLast30DaysRange();
-    const logFinderId = getLogFinderId(account.accountId);
-    const xWechatUin = getXWechatUinFromCookie(cookieHeader);
+    const worksPayload = works
+      .filter((w) => (w.platformVideoId ?? '').trim())
+      .map((w) => ({ work_id: w.id, platform_video_id: (w.platformVideoId ?? '').trim() }));
 
-    // _aid:post_list 时生成一次,本批次请求数据接口(feed_aggreagate)时复用;_rid 每次请求随机
-    const sessionAid =
-      process.env.WX_VIDEO_AID?.trim() || crypto.randomUUID();
-    logger.info(`[WX WorkStats] accountId=${account.id} post_list 生成 aid=${sessionAid},数据接口复用此 aid,rid 每次随机`);
-
-    const headers: Record<string, string> = {
-      accept: '*/*',
-      'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-      'content-type': 'application/json',
-      cookie: cookieHeader,
-      origin: 'https://channels.weixin.qq.com',
-      referer: 'https://channels.weixin.qq.com/micro/statistic/post',
-      'user-agent':
-        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/144.0.0.0 Safari/537.36 Edg/144.0.0.0',
-    };
-    if (xWechatUin) headers['x-wechat-uin'] = xWechatUin;
-
-    const postListBody = {
-      pageSize: 100,
-      currentPage: 1,
-      sort: 0,
-      order: 0,
-      startTime,
-      endTime,
-      timestamp: String(Date.now()),
-      _log_finder_uin: '',
-      _log_finder_id: logFinderId,
-      rawKeyBuff: null,
-      pluginSessionId: null,
-      scene: 7,
-      reqScene: 7,
-    };
-
-    const postListUrl = buildPostListUrl(sessionAid, generateRandomRid());
-    let res: Response;
-    try {
-      res = await fetch(postListUrl, {
-        method: 'POST',
-        headers,
-        body: JSON.stringify(postListBody),
-        signal: AbortSignal.timeout(30_000),
-      });
-    } catch (e) {
-      logger.error(`[WX WorkStats] post_list request failed. accountId=${account.id}`, e);
-      throw e;
-    }
-
-    if (!res.ok) {
-      logger.warn(`[WX WorkStats] post_list HTTP ${res.status}. accountId=${account.id}`);
+    if (!worksPayload.length) {
+      logger.info(`[WX WorkStats] accountId=${account.id} 无有效 platform_video_id,跳过`);
       return;
     }
 
-    const postListJson = (await res.json().catch(() => null)) as {
-      errCode?: number;
-      errMsg?: string;
-      data?: { list?: PostListItem[]; totalCount?: number };
-    } | null;
-
-    if (!postListJson || postListJson.errCode !== 0) {
-      logger.warn(
-        `[WX WorkStats] post_list errCode=${postListJson?.errCode} errMsg=${postListJson?.errMsg}. accountId=${account.id}`
-      );
-      return;
-    }
-
-    const list = postListJson.data?.list ?? [];
-    const totalCount = postListJson.data?.totalCount ?? list.length;
-    const exportIdToObjectId = new Map<string, string>();
-    for (const item of list) {
-      const exportId = item.exportId ?? '';
-      const objectId = item.objectId ?? '';
-      if (exportId && objectId) exportIdToObjectId.set(exportId, objectId);
-    }
-
-    // 日志:对比 API 与 DB 的 exportId
     logger.info(
-      `[WX WorkStats] accountId=${account.id} post_list 返回 totalCount=${totalCount} list.length=${list.length}`
+      `[WX WorkStats] accountId=${account.id} 调用 Python 纯浏览器同步,共 ${worksPayload.length} 个作品`
     );
-    const apiExportIds: string[] = [];
-    for (let i = 0; i < list.length; i++) {
-      const item = list[i];
-      const eid = (item.exportId ?? '').trim();
-      const oid = (item.objectId ?? '').trim();
-      apiExportIds.push(eid);
-      logger.info(`[WX WorkStats] post_list[${i}] exportId=${eid} objectId=${oid}`);
-    }
-    for (const work of works) {
-      const dbExportId = (work.platformVideoId ?? '').trim();
-      if (!dbExportId) continue;
-      const matched = exportIdToObjectId.has(dbExportId);
-      logger.info(
-        `[WX WorkStats] DB workId=${work.id} platform_video_id(exportId)=${dbExportId} 匹配post_list=${matched}`
-      );
-      if (!matched && apiExportIds.length > 0) {
-        const sameLength = apiExportIds.filter((e) => e.length === dbExportId.length).length;
-        const containsDb = apiExportIds.some((e) => e === dbExportId || e.includes(dbExportId) || dbExportId.includes(e));
-        logger.info(
-          `[WX WorkStats] 对比: DB长度=${dbExportId.length} API条数=${apiExportIds.length} 同长API条数=${sameLength} 是否包含关系=${containsDb}`
-        );
-      }
-    }
 
-    let totalInserted = 0;
-    let totalUpdated = 0;
-
-    const feedHeaders: Record<string, string> = {
-      ...headers,
-      referer: 'https://channels.weixin.qq.com/micro/statistic/postDetail?isImageMode=0',
-      'finger-print-device-id':
-        process.env.WX_VIDEO_FINGERPRINT_DEVICE_ID?.trim() ||
-        '4605bc28ad3962eb9ee791897b199217',
-    };
-
-    for (const work of works) {
-      const exportId = (work.platformVideoId ?? '').trim();
-      if (!exportId) continue;
-
-      const objectId = exportIdToObjectId.get(exportId);
-      if (!objectId) {
-        logger.debug(`[WX WorkStats] workId=${work.id} exportId=${exportId} 未在 post_list 中匹配到 objectId,跳过`);
-        continue;
-      }
+    try {
+      const pyRes = await fetch(`${PYTHON_SERVICE_URL}/sync_weixin_account_works_daily_stats`, {
+        method: 'POST',
+        headers: { 'Content-Type': 'application/json' },
+        body: JSON.stringify({
+          works: worksPayload,
+          cookie: cookieForPython,
+          show_browser: showBrowser,
+        }),
+        signal: AbortSignal.timeout(600_000), // 10 分钟,批量可能较久
+      });
 
-      const feedBody = {
-        startTs: String(startTime),
-        endTs: String(endTime),
-        interval: 3,
-        feedId: objectId,
-        timestamp: String(Date.now()),
-        _log_finder_uin: '',
-        _log_finder_id: logFinderId,
-        rawKeyBuff: null,
-        pluginSessionId: null,
-        scene: 7,
-        reqScene: 7,
+      const data = (await pyRes.json().catch(() => ({}))) as {
+        success?: boolean;
+        error?: string;
+        message?: string;
+        total_processed?: number;
+        total_skipped?: number;
+        inserted?: number;
+        updated?: number;
+        works_updated?: number;
       };
 
-      const feedUrl = buildFeedAggregateUrl(sessionAid, generateRandomRid());
-      let feedRes: Response;
-      try {
-        feedRes = await fetch(feedUrl, {
-          method: 'POST',
-          headers: feedHeaders,
-          body: JSON.stringify(feedBody),
-          signal: AbortSignal.timeout(30_000),
-        });
-      } catch (e) {
-        logger.error(`[WX WorkStats] feed_aggreagate request failed. workId=${work.id} feedId=${objectId}`, e);
-        continue;
-      }
-
-      if (!feedRes.ok) {
-        logger.warn(`[WX WorkStats] feed_aggreagate HTTP ${feedRes.status}. workId=${work.id}`);
-        continue;
+      if (!pyRes.ok) {
+        logger.warn(`[WX WorkStats] accountId=${account.id} Python 请求失败: ${pyRes.status} ${data.error || ''}`);
+        return;
       }
 
-      const feedJson = (await feedRes.json().catch(() => null)) as {
-        errCode?: number;
-        data?: {
-          dataByFanstype?: { dataByTabtype?: FeedAggregateDataByTabType[] }[];
-          feedData?: { dataByTabtype?: FeedAggregateDataByTabType[] }[];
-        };
-      } | null;
-
-      const isTestWork = work.id === 866 || work.id === 867 || work.id === 902 || work.id === 903;
-      if (isTestWork) {
-        logger.info(`[WX WorkStats] feed_aggreagate 原始响应 workId=${work.id} errCode=${feedJson?.errCode} errMsg=${(feedJson as any)?.errMsg} 完整body=${JSON.stringify(feedJson ?? null)}`);
-      }
-
-      if (!feedJson || feedJson.errCode !== 0) {
-        if (isTestWork) {
-          logger.warn(`[WX WorkStats] workId=${work.id} feed_aggreagate 非成功 errCode=${feedJson?.errCode} 跳过`);
-        }
-        continue;
-      }
-
-      const dataByFanstype = feedJson.data?.dataByFanstype ?? [];
-      const firstFans = dataByFanstype[0];
-      const dataByTabtype = firstFans?.dataByTabtype ?? feedJson.data?.feedData?.[0]?.dataByTabtype ?? [];
-      const tabAll = dataByTabtype.find((t) => t.tabTypeName === '全部' || t.tabType === 999);
-      if (isTestWork) {
-        logger.info(
-          `[WX WorkStats] workId=${work.id} dataByTabtype.length=${dataByTabtype.length} tabAll=${!!tabAll} tabAll.tabTypeName=${tabAll?.tabTypeName}`
-        );
+      if (!data.success) {
+        logger.warn(`[WX WorkStats] accountId=${account.id} 同步失败: ${data.error || ''}`);
+        return;
       }
-      if (!tabAll?.data) continue;
-
-      const data = tabAll.data;
-      const browse = data.browse ?? [];
-      const like = data.like ?? [];
-      const comment = data.comment ?? [];
-      if (isTestWork) {
-        logger.info(
-          `[WX WorkStats] workId=${work.id} 「全部」data: browse.length=${browse.length} like.length=${like.length} comment.length=${comment.length}`
-        );
-        logger.info(`[WX WorkStats] workId=${work.id} browse=${JSON.stringify(browse)}`);
-        logger.info(`[WX WorkStats] workId=${work.id} like=${JSON.stringify(like)}`);
-        logger.info(`[WX WorkStats] workId=${work.id} comment=${JSON.stringify(comment)}`);
-      }
-
-      const len = Math.max(browse.length, like.length, comment.length);
-      if (len === 0) continue;
-
-      const patches: Array<{
-        workId: number;
-        recordDate: Date;
-        playCount?: number;
-        likeCount?: number;
-        commentCount?: number;
-      }> = [];
-
-      for (let i = 0; i < len; i++) {
-        const recordDate = new Date(startDate);
-        recordDate.setDate(recordDate.getDate() + i);
-        recordDate.setHours(0, 0, 0, 0);
-        if (recordDate > endDate) break;
 
-        patches.push({
-          workId: work.id,
-          recordDate,
-          playCount: toInt(browse[i], 0),
-          likeCount: toInt(like[i], 0),
-          commentCount: toInt(comment[i], 0),
-        });
-      }
-
-      if (isTestWork) {
-        logger.info(`[WX WorkStats] workId=${work.id} 生成 patches.length=${patches.length} 前3条=${JSON.stringify(patches.slice(0, 3))}`);
-      }
-      if (patches.length) {
-        const result = await this.workDayStatisticsService.saveStatisticsForDateBatch(patches);
-        if (isTestWork) {
-          logger.info(`[WX WorkStats] workId=${work.id} saveStatisticsForDateBatch inserted=${result.inserted} updated=${result.updated}`);
-        }
-        totalInserted += result.inserted;
-        totalUpdated += result.updated;
-      }
+      const worksUpdated = data.works_updated ?? 0;
+      logger.info(
+        `[WX WorkStats] accountId=${account.id} 完成: 处理 ${data.total_processed ?? 0} 个, 跳过 ${data.total_skipped ?? 0} 个, 新增 ${data.inserted ?? 0} 条, 更新 ${data.updated ?? 0} 条` +
+          (worksUpdated > 0 ? `, works 表更新 ${worksUpdated} 条` : '')
+      );
+    } catch (e) {
+      logger.error(`[WX WorkStats] accountId=${account.id} 调用 Python 失败:`, e);
+      throw e;
     }
-
-    logger.info(
-      `[WX WorkStats] accountId=${account.id} completed. inserted=${totalInserted} updated=${totalUpdated}`
-    );
   }
 }
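
Editor's note: the cookie conversion above appears in both this service and the test script. A standalone sketch of the header-string → JSON-array transform is shown below; the `.weixin.qq.com` / `/` defaults are the ones hard-coded in this diff. Structured cookies (name, value, domain, path) are what a browser-automation context needs to inject cookies, which is presumably why the Python side takes a JSON array rather than a raw `Cookie` header.

```ts
// Standalone illustration of the conversion used by getCookieForPython above.
// Input:  "wxuin=123; sessionid=abc"
// Output: JSON string of [{ name, value, domain: '.weixin.qq.com', path: '/' }, ...]
function cookieHeaderToJson(header: string): string {
  return JSON.stringify(
    header
      .split(';')
      .map((part) => part.trim())
      .filter(Boolean)
      .map((part) => {
        const idx = part.indexOf('=');
        return {
          name: idx >= 0 ? part.slice(0, idx) : part,
          value: idx >= 0 ? part.slice(idx + 1) : '',
          domain: '.weixin.qq.com',
          path: '/',
        };
      })
  );
}

// Example:
// cookieHeaderToJson('wxuin=123; sessionid=abc')
// -> '[{"name":"wxuin","value":"123","domain":".weixin.qq.com","path":"/"},{"name":"sessionid","value":"abc","domain":".weixin.qq.com","path":"/"}]'
```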

+ 8 - 0
server/src/services/WorkDayStatisticsService.ts

@@ -11,6 +11,7 @@ interface StatisticsItem {
   shareCount?: number;
   collectCount?: number;
   fansIncrease?: number;
+  followCount?: number;
   coverClickRate?: string;
   avgWatchDuration?: string;
   totalWatchDuration?: string;
@@ -59,6 +60,7 @@ interface WorkStatisticsItem {
   shareCount: number;
   collectCount: number;
   fansIncrease?: number;
+  followCount?: number; // 视频号:关注数
   totalWatchDuration?: string;
   avgWatchDuration?: string;
   coverClickRate?: string;
@@ -161,6 +163,7 @@ export class WorkDayStatisticsService {
           shareCount: stat.shareCount ?? existing.shareCount,
           collectCount: stat.collectCount ?? existing.collectCount,
           fansIncrease: stat.fansIncrease ?? existing.fansIncrease,
+          followCount: stat.followCount ?? existing.followCount,
           coverClickRate: stat.coverClickRate ?? existing.coverClickRate ?? '0',
           avgWatchDuration: stat.avgWatchDuration ?? existing.avgWatchDuration ?? '0',
           totalWatchDuration: stat.totalWatchDuration ?? existing.totalWatchDuration ?? '0',
@@ -180,6 +183,7 @@ export class WorkDayStatisticsService {
           shareCount: stat.shareCount ?? 0,
           collectCount: stat.collectCount ?? 0,
           fansIncrease: stat.fansIncrease ?? 0,
+          followCount: stat.followCount ?? 0,
           coverClickRate: stat.coverClickRate ?? '0',
           avgWatchDuration: stat.avgWatchDuration ?? '0',
           totalWatchDuration: stat.totalWatchDuration ?? '0',
@@ -238,6 +242,7 @@ export class WorkDayStatisticsService {
         shareCount: patch.shareCount ?? existing.shareCount,
         collectCount: patch.collectCount ?? existing.collectCount,
         fansIncrease: patch.fansIncrease ?? existing.fansIncrease,
+        followCount: patch.followCount ?? existing.followCount,
         coverClickRate: patch.coverClickRate ?? existing.coverClickRate ?? '0',
         avgWatchDuration: patch.avgWatchDuration ?? existing.avgWatchDuration ?? '0',
         totalWatchDuration: patch.totalWatchDuration ?? existing.totalWatchDuration ?? '0',
@@ -257,6 +262,7 @@ export class WorkDayStatisticsService {
       shareCount: patch.shareCount ?? 0,
       collectCount: patch.collectCount ?? 0,
       fansIncrease: patch.fansIncrease ?? 0,
+      followCount: patch.followCount ?? 0,
       coverClickRate: patch.coverClickRate ?? '0',
       avgWatchDuration: patch.avgWatchDuration ?? '0',
       totalWatchDuration: patch.totalWatchDuration ?? '0',
@@ -609,6 +615,7 @@ export class WorkDayStatisticsService {
       .addSelect('wds.share_count', 'shareCount')
       .addSelect('wds.collect_count', 'collectCount')
       .addSelect('wds.fans_increase', 'fansIncrease')
+      .addSelect('wds.follow_count', 'followCount')
       .addSelect('wds.total_watch_duration', 'totalWatchDuration')
       .addSelect('wds.avg_watch_duration', 'avgWatchDuration')
       .addSelect('wds.cover_click_rate', 'coverClickRate')
@@ -649,6 +656,7 @@ export class WorkDayStatisticsService {
         shareCount: parseInt(row.shareCount) || 0,
         collectCount: parseInt(row.collectCount) || 0,
         fansIncrease: parseInt(row.fansIncrease) || 0,
+        followCount: parseInt(row.followCount) || 0,
         totalWatchDuration: row.totalWatchDuration || '0',
         avgWatchDuration: row.avgWatchDuration || '0',
         coverClickRate: row.coverClickRate || '0',
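
Editor's note: a minimal sketch of how the new `followCount` field travels through the existing batch API, based on the patch shape visible in the removed HTTP-based importer above. The import path, work id and counts are illustrative; the fragment assumes an async ESM context.

```ts
// Illustrative fragment: writing one per-day row that now carries followCount.
import { WorkDayStatisticsService } from './WorkDayStatisticsService.js'; // path assumed

const svc = new WorkDayStatisticsService();
const { inserted, updated } = await svc.saveStatisticsForDateBatch([
  {
    workId: 906,
    recordDate: new Date('2026-02-05T00:00:00+08:00'),
    playCount: 149,
    likeCount: 0,
    commentCount: 0,
    followCount: 0, // 新增字段:视频号关注数;缺省回退到 0 或已有值,如上方 diff 所示
  },
]);
console.log(`inserted=${inserted} updated=${updated}`);
```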

+ 4 - 0
server/tmp/feed_aggreagate_account61_work906.json

@@ -0,0 +1,4 @@
+{
+  "errCode": 300800,
+  "errMsg": "request failed"
+}

+ 4 - 0
server/tmp/feed_aggreagate_account61_work907.json

@@ -0,0 +1,4 @@
+{
+  "errCode": 300800,
+  "errMsg": "request failed"
+}

+ 32 - 0
server/tmp/post_list_params.json

@@ -0,0 +1,32 @@
+[
+  {
+    "index": 0,
+    "exportId": "export/UzFfAgtgekIEAQAAAAAA7t0VvbweUgAAAAstQy6ubaLX4KHWvLEZgBPEuaBYKCQxUe6NzNPgMJpPX5kDlbwQURlqNnMbvAgw",
+    "objectId": "14850108409273518573",
+    "createTime": 1770270873,
+    "desc": "雪落下的声音",
+    "readCount": 149,
+    "likeCount": 0,
+    "commentCount": 0,
+    "forwardCount": 0,
+    "favCount": 0,
+    "followCount": 0,
+    "fullPlayRate": 0.20945945945945946,
+    "avgPlayTimeSec": 5.5675675675675675
+  },
+  {
+    "index": 1,
+    "exportId": "export/UzFfAgtgekIEAQAAAAAA2kAUwIZSfQAAAAstQy6ubaLX4KHWvLEZgBPEg6BYPH5pROKNzNPgMJpFgSGS9JNqdISSEBUW5Tvu",
+    "objectId": "14847937164213881303",
+    "createTime": 1770012040,
+    "desc": "",
+    "readCount": 173,
+    "likeCount": 0,
+    "commentCount": 0,
+    "forwardCount": 0,
+    "favCount": 1,
+    "followCount": 0,
+    "fullPlayRate": 0.023121387283236993,
+    "avgPlayTimeSec": 3.2254335260115607
+  }
+]
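
Editor's note: the captured post_list sample above maps onto a small TypeScript type. This is inferred from the dumped JSON only and may not cover every field the endpoint can return; the `exportId` ↔ `platform_video_id` relationship comes from the removed importer's doc comment earlier in this commit.

```ts
// Inferred from server/tmp/post_list_params.json; not an official schema.
interface CapturedPostListItem {
  index: number;
  exportId: string;      // matches works.platform_video_id in this project
  objectId: string;      // internal feed id, previously used by the HTTP importer
  createTime: number;    // Unix seconds
  desc: string;
  readCount: number;
  likeCount: number;
  commentCount: number;
  forwardCount: number;
  favCount: number;
  followCount: number;
  fullPlayRate: number;  // 0..1
  avgPlayTimeSec: number;
}
```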