
Implement new API endpoints for fetching works and comments

- Added `/works` endpoint to retrieve a list of works from specified platforms, supporting pagination and cookie authentication.
- Introduced `/comments` endpoint to fetch comments for a specific work, with error handling for missing parameters.
- Updated the README.md to include documentation for the new API endpoints, detailing request and response formats.
- Enhanced the base platform classes to support the new functionality, including data models for works and comments.
- Improved logging for API requests to facilitate debugging and monitoring.
Ethanfly · 12 hours ago
parent commit a3ce30620a

+ 83 - 0
server/python/README.md

@@ -220,6 +220,89 @@ Content-Type: application/json
 }
 ```
 
+### Get works list
+
+```
+POST /works
+Content-Type: application/json
+
+{
+  "platform": "douyin",           // douyin | xiaohongshu | kuaishou | weixin
+  "cookie": "cookie字符串或JSON",
+  "page": 0,                      // 页码(从0开始,可选,默认0)
+  "page_size": 20                 // 每页数量(可选,默认20)
+}
+
+Response:
+{
+  "success": true,
+  "platform": "douyin",
+  "works": [
+    {
+      "work_id": "作品ID",
+      "title": "作品标题",
+      "cover_url": "封面URL",
+      "video_url": "视频URL",
+      "duration": 60,
+      "status": "published",      // published | reviewing | rejected | draft
+      "publish_time": "2024-01-20 12:00:00",
+      "play_count": 1000,
+      "like_count": 100,
+      "comment_count": 50,
+      "share_count": 20,
+      "collect_count": 10
+    }
+  ],
+  "total": 100,
+  "has_more": true
+}
+```
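+
+A minimal Python client sketch for this endpoint (the base URL `http://localhost:5000`, the timeout, and the cookie value are placeholders; adjust them to your deployment):
+
+```python
+import requests
+
+resp = requests.post(
+    "http://localhost:5000/works",
+    json={
+        "platform": "douyin",
+        "cookie": "<your cookie string or JSON>",
+        "page": 0,
+        "page_size": 20,
+    },
+    timeout=120,  # server-side browser automation can take a while
+)
+data = resp.json()
+if data.get("success"):
+    for work in data["works"]:
+        print(work["work_id"], work["title"], work["play_count"])
+else:
+    print("error:", data.get("error"))
+```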
+
+### Get work comments
+
+```
+POST /comments
+Content-Type: application/json
+
+{
+  "platform": "douyin",           // douyin | xiaohongshu | kuaishou | weixin
+  "cookie": "cookie字符串或JSON",
+  "work_id": "作品ID",
+  "cursor": ""                    // 分页游标(可选,用于翻页)
+}
+
+Response:
+{
+  "success": true,
+  "platform": "douyin",
+  "work_id": "作品ID",
+  "comments": [
+    {
+      "comment_id": "评论ID",
+      "work_id": "作品ID",
+      "content": "评论内容",
+      "author_id": "作者ID",
+      "author_name": "作者昵称",
+      "author_avatar": "作者头像",
+      "like_count": 10,
+      "reply_count": 5,
+      "create_time": "2024-01-20 12:00:00",
+      "is_author": false,
+      "replies": [
+        {
+          "comment_id": "子评论ID",
+          "content": "回复内容",
+          ...
+        }
+      ]
+    }
+  ],
+  "total": 50,
+  "has_more": true,
+  "cursor": "下一页游标"
+}
+```
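+
+A paging sketch for this endpoint that follows the returned cursor until `has_more` is false (base URL, cookie, and work ID are placeholders):
+
+```python
+import requests
+
+BASE = "http://localhost:5000"
+payload = {"platform": "douyin", "cookie": "<your cookie string or JSON>",
+           "work_id": "<work ID>", "cursor": ""}
+while True:
+    data = requests.post(f"{BASE}/comments", json=payload, timeout=120).json()
+    if not data.get("success"):
+        print("error:", data.get("error"))
+        break
+    for c in data["comments"]:
+        print(c["author_name"], c["content"], f"({c['reply_count']} replies)")
+    if not data.get("has_more"):
+        break
+    payload["cursor"] = data.get("cursor", "")
+```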
+
 ## Adding a new platform
 
 To add a new platform, follow these steps:

BIN
server/python/__pycache__/app.cpython-313.pyc


+ 189 - 1
server/python/app.py

@@ -359,6 +359,191 @@ def check_cookie():
         return jsonify({"valid": False, "error": str(e)})
 
 
+# ==================== Works list endpoint ====================
+
+@app.route("/works", methods=["POST"])
+def get_works():
+    """
+    Get the works list
+    
+    Request body:
+    {
+        "platform": "douyin",           # douyin | xiaohongshu | kuaishou
+        "cookie": "cookie string or JSON",
+        "page": 0,                       # page number (0-based, optional, default 0)
+        "page_size": 20                  # items per page (optional, default 20)
+    }
+    
+    Response:
+    {
+        "success": true,
+        "platform": "douyin",
+        "works": [...],
+        "total": 100,
+        "has_more": true
+    }
+    """
+    try:
+        data = request.json
+        
+        platform = data.get("platform", "").lower()
+        cookie_str = data.get("cookie", "")
+        page = data.get("page", 0)
+        page_size = data.get("page_size", 20)
+        
+        print(f"[Works] 收到请求: platform={platform}, page={page}, page_size={page_size}")
+        
+        if not platform:
+            return jsonify({"success": False, "error": "缺少 platform 参数"}), 400
+        if platform not in PLATFORM_MAP:
+            return jsonify({
+                "success": False,
+                "error": f"不支持的平台: {platform},支持: {list(PLATFORM_MAP.keys())}"
+            }), 400
+        if not cookie_str:
+            return jsonify({"success": False, "error": "缺少 cookie 参数"}), 400
+        
+        # Get the publisher class for the requested platform
+        PublisherClass = get_publisher(platform)
+        publisher = PublisherClass(headless=HEADLESS_MODE)
+        
+        # Run the works fetch
+        result = asyncio.run(publisher.run_get_works(cookie_str, page, page_size))
+        
+        return jsonify(result.to_dict())
+        
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"success": False, "error": str(e)}), 500
+
+
+# ==================== Comments list endpoint ====================
+
+@app.route("/comments", methods=["POST"])
+def get_comments():
+    """
+    Get comments for a work
+    
+    Request body:
+    {
+        "platform": "douyin",           # douyin | xiaohongshu | kuaishou
+        "cookie": "cookie string or JSON",
+        "work_id": "work ID",
+        "cursor": ""                    # pagination cursor (optional)
+    }
+    
+    Response:
+    {
+        "success": true,
+        "platform": "douyin",
+        "work_id": "xxx",
+        "comments": [...],
+        "total": 50,
+        "has_more": true,
+        "cursor": "xxx"
+    }
+    """
+    try:
+        data = request.json
+        
+        platform = data.get("platform", "").lower()
+        cookie_str = data.get("cookie", "")
+        work_id = data.get("work_id", "")
+        cursor = data.get("cursor", "")
+        
+        print(f"[Comments] 收到请求: platform={platform}, work_id={work_id}")
+        
+        if not platform:
+            return jsonify({"success": False, "error": "缺少 platform 参数"}), 400
+        if platform not in PLATFORM_MAP:
+            return jsonify({
+                "success": False,
+                "error": f"不支持的平台: {platform},支持: {list(PLATFORM_MAP.keys())}"
+            }), 400
+        if not cookie_str:
+            return jsonify({"success": False, "error": "缺少 cookie 参数"}), 400
+        if not work_id:
+            return jsonify({"success": False, "error": "缺少 work_id 参数"}), 400
+        
+        # Get the publisher class for the requested platform
+        PublisherClass = get_publisher(platform)
+        publisher = PublisherClass(headless=HEADLESS_MODE)
+        
+        # Run the comments fetch
+        result = asyncio.run(publisher.run_get_comments(cookie_str, work_id, cursor))
+        
+        result_dict = result.to_dict()
+        # Add the pagination cursor to the response
+        if hasattr(result, '__dict__') and 'cursor' in result.__dict__:
+            result_dict['cursor'] = result.__dict__['cursor']
+        
+        return jsonify(result_dict)
+        
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"success": False, "error": str(e)}), 500
+
+
+# ==================== All-works comments endpoint ====================
+
+@app.route("/all_comments", methods=["POST"])
+def get_all_comments():
+    """
+    Get comments for all works (fetched in one pass)
+    
+    Request body:
+    {
+        "platform": "douyin",           # douyin | xiaohongshu
+        "cookie": "cookie string or JSON"
+    }
+    
+    Response:
+    {
+        "success": true,
+        "platform": "douyin",
+        "work_comments": [
+            {
+                "work_id": "xxx",
+                "title": "作品标题",
+                "cover_url": "封面URL",
+                "comments": [...]
+            }
+        ],
+        "total": 5
+    }
+    """
+    try:
+        data = request.json
+        
+        platform = data.get("platform", "").lower()
+        cookie_str = data.get("cookie", "")
+        
+        print(f"[AllComments] 收到请求: platform={platform}")
+        
+        if not platform:
+            return jsonify({"success": False, "error": "缺少 platform 参数"}), 400
+        if platform not in ['douyin', 'xiaohongshu']:
+            return jsonify({
+                "success": False,
+                "error": f"该接口只支持 douyin 和 xiaohongshu 平台"
+            }), 400
+        if not cookie_str:
+            return jsonify({"success": False, "error": "缺少 cookie 参数"}), 400
+        
+        # Get the publisher class for the requested platform
+        PublisherClass = get_publisher(platform)
+        publisher = PublisherClass(headless=HEADLESS_MODE)
+        
+        # Run the all-comments fetch
+        result = asyncio.run(publisher.get_all_comments(cookie_str))
+        
+        return jsonify(result)
+        
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"success": False, "error": str(e)}), 500
+
+
 # ==================== Health check ====================
 
 @app.route("/health", methods=["GET"])
@@ -385,12 +570,15 @@ def index():
     """首页"""
     return jsonify({
         "name": "多平台视频发布服务",
-        "version": "1.0.0",
+        "version": "1.1.0",
         "endpoints": {
             "GET /": "服务信息",
             "GET /health": "健康检查",
             "POST /publish": "发布视频",
             "POST /publish/batch": "批量发布",
+            "POST /works": "获取作品列表",
+            "POST /comments": "获取作品评论",
+            "POST /all_comments": "获取所有作品评论",
             "POST /check_cookie": "检查 Cookie",
             "POST /sign": "小红书签名"
         },
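
The new handlers reject bad input before any browser work starts, so the validation paths can be exercised cheaply. A sketch using Flask's test client; it assumes `app.py` imports cleanly (i.e. server start-up sits behind an `if __name__ == "__main__"` guard) and that it is run from `server/python/`:

```python
# Sketch only: exercises the 400 validation branches of the new endpoints.
# No browser is launched because every request below is rejected before a
# publisher instance is created.
from app import app

client = app.test_client()

# Missing platform -> 400
assert client.post("/works", json={}).status_code == 400

# Unsupported platform -> 400
assert client.post("/comments", json={"platform": "foo", "cookie": "x", "work_id": "1"}).status_code == 400

# /all_comments only accepts douyin and xiaohongshu
assert client.post("/all_comments", json={"platform": "kuaishou", "cookie": "x"}).status_code == 400

print("validation paths behave as documented")
```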

BIN
server/python/platforms/__pycache__/base.cpython-313.pyc


BIN
server/python/platforms/__pycache__/douyin.cpython-313.pyc


BIN
server/python/platforms/__pycache__/kuaishou.cpython-313.pyc


BIN
server/python/platforms/__pycache__/weixin.cpython-313.pyc


BIN
server/python/platforms/__pycache__/xiaohongshu.cpython-313.pyc


+ 181 - 1
server/python/platforms/base.py

@@ -10,7 +10,7 @@ import os
 from abc import ABC, abstractmethod
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import List, Optional, Callable
+from typing import List, Optional, Callable, Dict, Any
 from playwright.async_api import async_playwright, Browser, BrowserContext, Page
 
 
@@ -41,6 +41,114 @@ class PublishResult:
     error: str = ""
 
 
+@dataclass
+class WorkItem:
+    """作品数据"""
+    work_id: str
+    title: str
+    cover_url: str = ""
+    video_url: str = ""
+    duration: int = 0  # seconds
+    status: str = "published"  # published, reviewing, rejected, draft
+    publish_time: str = ""
+    play_count: int = 0
+    like_count: int = 0
+    comment_count: int = 0
+    share_count: int = 0
+    collect_count: int = 0
+    
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            "work_id": self.work_id,
+            "title": self.title,
+            "cover_url": self.cover_url,
+            "video_url": self.video_url,
+            "duration": self.duration,
+            "status": self.status,
+            "publish_time": self.publish_time,
+            "play_count": self.play_count,
+            "like_count": self.like_count,
+            "comment_count": self.comment_count,
+            "share_count": self.share_count,
+            "collect_count": self.collect_count,
+        }
+
+
+@dataclass
+class CommentItem:
+    """评论数据"""
+    comment_id: str
+    work_id: str
+    content: str
+    author_id: str = ""
+    author_name: str = ""
+    author_avatar: str = ""
+    like_count: int = 0
+    reply_count: int = 0
+    create_time: str = ""
+    is_author: bool = False  # whether the comment was left by the work's author
+    replies: List['CommentItem'] = field(default_factory=list)
+    
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            "comment_id": self.comment_id,
+            "work_id": self.work_id,
+            "content": self.content,
+            "author_id": self.author_id,
+            "author_name": self.author_name,
+            "author_avatar": self.author_avatar,
+            "like_count": self.like_count,
+            "reply_count": self.reply_count,
+            "create_time": self.create_time,
+            "is_author": self.is_author,
+            "replies": [r.to_dict() for r in self.replies],
+        }
+
+
+@dataclass
+class WorksResult:
+    """作品列表结果"""
+    success: bool
+    platform: str
+    works: List[WorkItem] = field(default_factory=list)
+    total: int = 0
+    has_more: bool = False
+    error: str = ""
+    
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            "success": self.success,
+            "platform": self.platform,
+            "works": [w.to_dict() for w in self.works],
+            "total": self.total,
+            "has_more": self.has_more,
+            "error": self.error,
+        }
+
+
+@dataclass
+class CommentsResult:
+    """评论列表结果"""
+    success: bool
+    platform: str
+    work_id: str
+    comments: List[CommentItem] = field(default_factory=list)
+    total: int = 0
+    has_more: bool = False
+    error: str = ""
+    
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            "success": self.success,
+            "platform": self.platform,
+            "work_id": self.work_id,
+            "comments": [c.to_dict() for c in self.comments],
+            "total": self.total,
+            "has_more": self.has_more,
+            "error": self.error,
+        }
+
+
 class BasePublisher(ABC):
     """
     Platform publisher base class
@@ -166,6 +274,43 @@ class BasePublisher(ABC):
         """
         pass
     
+    async def get_works(self, cookies: str, page: int = 0, page_size: int = 20) -> WorksResult:
+        """
+        Get the works list - subclasses may override
+        
+        Args:
+            cookies: cookie string or JSON
+            page: page number (0-based)
+            page_size: items per page
+            
+        Returns:
+            WorksResult: works list result
+        """
+        return WorksResult(
+            success=False,
+            platform=self.platform_name,
+            error="该平台暂不支持获取作品列表"
+        )
+    
+    async def get_comments(self, cookies: str, work_id: str, cursor: str = "") -> CommentsResult:
+        """
+        Get comments for a work - subclasses may override
+        
+        Args:
+            cookies: cookie string or JSON
+            work_id: work ID
+            cursor: pagination cursor
+            
+        Returns:
+            CommentsResult: comments list result
+        """
+        return CommentsResult(
+            success=False,
+            platform=self.platform_name,
+            work_id=work_id,
+            error="该平台暂不支持获取评论"
+        )
+    
     async def run(self, cookies: str, params: PublishParams) -> PublishResult:
         """
         Run the publish task
@@ -183,3 +328,38 @@ class BasePublisher(ABC):
             )
         finally:
             await self.close_browser()
+    
+    async def run_get_works(self, cookies: str, page: int = 0, page_size: int = 20) -> WorksResult:
+        """
+        Run the works-fetch task
+        """
+        try:
+            return await self.get_works(cookies, page, page_size)
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return WorksResult(
+                success=False,
+                platform=self.platform_name,
+                error=str(e)
+            )
+        finally:
+            await self.close_browser()
+    
+    async def run_get_comments(self, cookies: str, work_id: str, cursor: str = "") -> CommentsResult:
+        """
+        Run the comments-fetch task
+        """
+        try:
+            return await self.get_comments(cookies, work_id, cursor)
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return CommentsResult(
+                success=False,
+                platform=self.platform_name,
+                work_id=work_id,
+                error=str(e)
+            )
+        finally:
+            await self.close_browser()
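
A small usage sketch for the result dataclasses added above (the values are illustrative and the import path assumes it runs from `server/python/`). The hand-written `to_dict` methods mirror what `dataclasses.asdict` produces, including the recursive serialization of nested replies:

```python
# Sketch: build a nested CommentsResult and serialize it.
from dataclasses import asdict

from platforms.base import CommentItem, CommentsResult

reply = CommentItem(comment_id="2", work_id="w1", content="reply text")
top = CommentItem(comment_id="1", work_id="w1", content="top comment",
                  author_name="alice", reply_count=1, replies=[reply])
result = CommentsResult(success=True, platform="douyin", work_id="w1",
                        comments=[top], total=1, has_more=False)

assert result.to_dict() == asdict(result)   # same nested dict shape
print(result.to_dict()["comments"][0]["replies"][0]["content"])  # -> reply text
```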

+ 439 - 1
server/python/platforms/douyin.py

@@ -6,8 +6,13 @@
 
 import asyncio
 import os
+import json
 from datetime import datetime
-from .base import BasePublisher, PublishParams, PublishResult
+from typing import List
+from .base import (
+    BasePublisher, PublishParams, PublishResult,
+    WorkItem, WorksResult, CommentItem, CommentsResult
+)
 
 
 class DouyinPublisher(BasePublisher):
@@ -242,3 +247,436 @@ class DouyinPublisher(BasePublisher):
         await self.page.screenshot(path=screenshot_path, full_page=True)
         print(f"[{self.platform_name}] 发布超时,截图保存到: {screenshot_path}")
         raise Exception(f"发布超时(截图: {screenshot_path})")
+    
+    async def get_works(self, cookies: str, page: int = 0, page_size: int = 20) -> WorksResult:
+        """获取抖音作品列表"""
+        print(f"\n{'='*60}")
+        print(f"[{self.platform_name}] 获取作品列表")
+        print(f"[{self.platform_name}] page={page}, page_size={page_size}")
+        print(f"{'='*60}")
+        
+        works: List[WorkItem] = []
+        total = 0
+        has_more = False
+        
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            await self.set_cookies(cookie_list)
+            
+            if not self.page:
+                raise Exception("Page not initialized")
+            
+            # Visit the creator-center home page to trigger the login check
+            await self.page.goto("https://creator.douyin.com/creator-micro/home")
+            await asyncio.sleep(3)
+            
+            # Check login state
+            current_url = self.page.url
+            if "login" in current_url or "passport" in current_url:
+                raise Exception("Cookie 已过期,请重新登录")
+            
+            # Call the works-list API
+            cursor = page * page_size
+            api_url = f"https://creator.douyin.com/janus/douyin/creator/pc/work_list?scene=star_atlas&device_platform=android&count={page_size}&max_cursor={cursor}&cookie_enabled=true&browser_language=zh-CN&browser_platform=Win32&browser_name=Mozilla&browser_online=true&timezone_name=Asia%2FShanghai&aid=1128"
+            
+            response = await self.page.evaluate(f'''
+                async () => {{
+                    const resp = await fetch("{api_url}", {{
+                        credentials: 'include',
+                        headers: {{ 'Accept': 'application/json' }}
+                    }});
+                    return await resp.json();
+                }}
+            ''')
+            
+            print(f"[{self.platform_name}] API 响应: has_more={response.get('has_more')}, aweme_list={len(response.get('aweme_list', []))}")
+            
+            aweme_list = response.get('aweme_list', [])
+            has_more = response.get('has_more', False)
+            
+            for aweme in aweme_list:
+                aweme_id = str(aweme.get('aweme_id', ''))
+                if not aweme_id:
+                    continue
+                
+                statistics = aweme.get('statistics', {})
+                
+                # Cover image
+                cover_url = ''
+                if aweme.get('Cover', {}).get('url_list'):
+                    cover_url = aweme['Cover']['url_list'][0]
+                elif aweme.get('video', {}).get('cover', {}).get('url_list'):
+                    cover_url = aweme['video']['cover']['url_list'][0]
+                
+                # Title
+                title = aweme.get('item_title', '') or aweme.get('desc', '').split('\n')[0][:50] or '无标题'
+                
+                # Duration (milliseconds to seconds)
+                duration = aweme.get('video', {}).get('duration', 0) // 1000
+                
+                # Publish time
+                create_time = aweme.get('create_time', 0)
+                publish_time = datetime.fromtimestamp(create_time).strftime('%Y-%m-%d %H:%M:%S') if create_time else ''
+                
+                works.append(WorkItem(
+                    work_id=aweme_id,
+                    title=title,
+                    cover_url=cover_url,
+                    duration=duration,
+                    status='published',
+                    publish_time=publish_time,
+                    play_count=int(statistics.get('play_count', 0)),
+                    like_count=int(statistics.get('digg_count', 0)),
+                    comment_count=int(statistics.get('comment_count', 0)),
+                    share_count=int(statistics.get('share_count', 0)),
+                ))
+            
+            total = len(works)
+            print(f"[{self.platform_name}] 获取到 {total} 个作品")
+            
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return WorksResult(
+                success=False,
+                platform=self.platform_name,
+                error=str(e)
+            )
+        
+        return WorksResult(
+            success=True,
+            platform=self.platform_name,
+            works=works,
+            total=total,
+            has_more=has_more
+        )
+    
+    async def get_comments(self, cookies: str, work_id: str, cursor: str = "") -> CommentsResult:
+        """获取抖音作品评论 - 通过访问视频详情页拦截评论 API"""
+        print(f"\n{'='*60}")
+        print(f"[{self.platform_name}] 获取作品评论")
+        print(f"[{self.platform_name}] work_id={work_id}, cursor={cursor}")
+        print(f"{'='*60}")
+        
+        comments: List[CommentItem] = []
+        total = 0
+        has_more = False
+        next_cursor = ""
+        captured_data = {}
+        
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            await self.set_cookies(cookie_list)
+            
+            if not self.page:
+                raise Exception("Page not initialized")
+            
+            # Set up the API response listener
+            async def handle_response(response):
+                nonlocal captured_data
+                url = response.url
+                # Watch for the comment-list API used by the Douyin video page:
+                # /aweme/v1/web/comment/list/ or /comment/list/
+                if '/comment/list' in url and ('aweme_id' in url or work_id in url):
+                    try:
+                        json_data = await response.json()
+                        print(f"[{self.platform_name}] 捕获到评论 API: {url[:100]}...", flush=True)
+                        # Check whether the response succeeded
+                        if json_data.get('status_code') == 0 or json_data.get('comments'):
+                            captured_data = json_data
+                            comment_count = len(json_data.get('comments', []))
+                            print(f"[{self.platform_name}] 评论 API 响应成功: comments={comment_count}, has_more={json_data.get('has_more')}", flush=True)
+                    except Exception as e:
+                        print(f"[{self.platform_name}] 解析评论响应失败: {e}", flush=True)
+            
+            self.page.on('response', handle_response)
+            print(f"[{self.platform_name}] 已注册评论 API 响应监听器", flush=True)
+            
+            # Visit the video detail page - this automatically triggers the comment API request
+            video_url = f"https://www.douyin.com/video/{work_id}"
+            print(f"[{self.platform_name}] 访问视频详情页: {video_url}", flush=True)
+            await self.page.goto(video_url, wait_until="domcontentloaded", timeout=30000)
+            await asyncio.sleep(5)
+            
+            # Check login state
+            current_url = self.page.url
+            if "login" in current_url or "passport" in current_url:
+                raise Exception("Cookie 已过期,请重新登录")
+            
+            # Wait for the comments to load
+            if not captured_data:
+                print(f"[{self.platform_name}] 等待评论 API 响应...", flush=True)
+                # Try scrolling to trigger comment loading
+                await self.page.evaluate('window.scrollBy(0, 300)')
+                await asyncio.sleep(3)
+            
+            if not captured_data:
+                # Wait a little longer
+                await asyncio.sleep(3)
+            
+            # Remove the listener
+            self.page.remove_listener('response', handle_response)
+            
+            # Parse the captured comment data
+            if captured_data:
+                comment_list = captured_data.get('comments') or []
+                has_more = bool(captured_data.get('has_more', False))
+                next_cursor = str(captured_data.get('cursor', ''))
+                total = captured_data.get('total', 0) or len(comment_list)
+                
+                print(f"[{self.platform_name}] 解析评论: total={total}, has_more={has_more}, comments={len(comment_list)}", flush=True)
+                
+                for comment in comment_list:
+                    cid = str(comment.get('cid', ''))
+                    if not cid:
+                        continue
+                    
+                    user = comment.get('user', {})
+                    
+                    # Parse the reply list
+                    replies = []
+                    reply_list = comment.get('reply_comment', []) or []
+                    for reply in reply_list:
+                        reply_user = reply.get('user', {})
+                        replies.append(CommentItem(
+                            comment_id=str(reply.get('cid', '')),
+                            work_id=work_id,
+                            content=reply.get('text', ''),
+                            author_id=str(reply_user.get('uid', '')),
+                            author_name=reply_user.get('nickname', ''),
+                            author_avatar=reply_user.get('avatar_thumb', {}).get('url_list', [''])[0] if reply_user.get('avatar_thumb') else '',
+                            like_count=int(reply.get('digg_count', 0)),
+                            create_time=datetime.fromtimestamp(reply.get('create_time', 0)).strftime('%Y-%m-%d %H:%M:%S') if reply.get('create_time') else '',
+                            is_author=reply.get('is_author', False),
+                        ))
+                    
+                    comments.append(CommentItem(
+                        comment_id=cid,
+                        work_id=work_id,
+                        content=comment.get('text', ''),
+                        author_id=str(user.get('uid', '')),
+                        author_name=user.get('nickname', ''),
+                        author_avatar=user.get('avatar_thumb', {}).get('url_list', [''])[0] if user.get('avatar_thumb') else '',
+                        like_count=int(comment.get('digg_count', 0)),
+                        reply_count=int(comment.get('reply_comment_total', 0)),
+                        create_time=datetime.fromtimestamp(comment.get('create_time', 0)).strftime('%Y-%m-%d %H:%M:%S') if comment.get('create_time') else '',
+                        is_author=comment.get('is_author', False),
+                        replies=replies,
+                    ))
+                
+                print(f"[{self.platform_name}] 解析到 {len(comments)} 条评论", flush=True)
+            else:
+                print(f"[{self.platform_name}] 未捕获到评论 API 响应", flush=True)
+            
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return CommentsResult(
+                success=False,
+                platform=self.platform_name,
+                work_id=work_id,
+                error=str(e)
+            )
+        finally:
+            await self.close_browser()
+        
+        result = CommentsResult(
+            success=True,
+            platform=self.platform_name,
+            work_id=work_id,
+            comments=comments,
+            total=total,
+            has_more=has_more
+        )
+        result.__dict__['cursor'] = next_cursor
+        return result
+    
+    async def get_all_comments(self, cookies: str) -> dict:
+        """获取所有作品的评论 - 通过评论管理页面"""
+        print(f"\n{'='*60}")
+        print(f"[{self.platform_name}] 获取所有作品评论")
+        print(f"{'='*60}")
+        
+        all_work_comments = []
+        captured_comments = []
+        captured_works = {}  # work_id -> work_info
+        
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            await self.set_cookies(cookie_list)
+            
+            if not self.page:
+                raise Exception("Page not initialized")
+            
+            # Set up the API response listener
+            async def handle_response(response):
+                nonlocal captured_comments, captured_works
+                url = response.url
+                try:
+                    # Watch for the comment-list API - several URL formats:
+                    # /comment/list/select/, /comment/read, or /creator/comment/list
+                    if '/comment/list' in url or '/comment/read' in url or 'comment_list' in url:
+                        json_data = await response.json()
+                        print(f"[{self.platform_name}] 捕获到评论 API: {url[:100]}...", flush=True)
+                        
+                        # Format 1: "comments" field
+                        comments = json_data.get('comments', [])
+                        # Format 2: "comment_info_list" field
+                        if not comments:
+                            comments = json_data.get('comment_info_list', [])
+                        
+                        if comments:
+                            # Extract aweme_id from the URL
+                            import re
+                            aweme_id_match = re.search(r'aweme_id=(\d+)', url)
+                            aweme_id = aweme_id_match.group(1) if aweme_id_match else ''
+                            
+                            for comment in comments:
+                                # Attach aweme_id to the comment
+                                if aweme_id and 'aweme_id' not in comment:
+                                    comment['aweme_id'] = aweme_id
+                                captured_comments.append(comment)
+                            
+                            print(f"[{self.platform_name}] 捕获到 {len(comments)} 条评论 (aweme_id={aweme_id}),总计: {len(captured_comments)}", flush=True)
+                    
+                    # Watch for the works-list API
+                    if '/work_list' in url or '/item/list' in url or '/creator/item' in url:
+                        json_data = await response.json()
+                        aweme_list = json_data.get('aweme_list', []) or json_data.get('item_info_list', []) or json_data.get('item_list', [])
+                        print(f"[{self.platform_name}] 捕获到作品列表 API: {len(aweme_list)} 个作品", flush=True)
+                        for aweme in aweme_list:
+                            aweme_id = str(aweme.get('aweme_id', '') or aweme.get('item_id', '') or aweme.get('item_id_plain', ''))
+                            if aweme_id:
+                                cover_url = ''
+                                if aweme.get('Cover', {}).get('url_list'):
+                                    cover_url = aweme['Cover']['url_list'][0]
+                                elif aweme.get('video', {}).get('cover', {}).get('url_list'):
+                                    cover_url = aweme['video']['cover']['url_list'][0]
+                                elif aweme.get('cover_image_url'):
+                                    cover_url = aweme['cover_image_url']
+                                
+                                captured_works[aweme_id] = {
+                                    'title': aweme.get('item_title', '') or aweme.get('title', '') or aweme.get('desc', ''),
+                                    'cover': cover_url,
+                                    'comment_count': aweme.get('statistics', {}).get('comment_count', 0) or aweme.get('comment_count', 0),
+                                }
+                except Exception as e:
+                    print(f"[{self.platform_name}] 解析响应失败: {e}", flush=True)
+            
+            self.page.on('response', handle_response)
+            print(f"[{self.platform_name}] 已注册 API 响应监听器", flush=True)
+            
+            # Visit the comment management page
+            print(f"[{self.platform_name}] 访问评论管理页面...", flush=True)
+            await self.page.goto("https://creator.douyin.com/creator-micro/interactive/comment", wait_until="domcontentloaded", timeout=30000)
+            await asyncio.sleep(5)
+            
+            # Check login state
+            current_url = self.page.url
+            if "login" in current_url or "passport" in current_url:
+                raise Exception("Cookie 已过期,请重新登录")
+            
+            print(f"[{self.platform_name}] 页面加载完成,当前捕获: {len(captured_comments)} 条评论, {len(captured_works)} 个作品", flush=True)
+            
+            # Try clicking the "选择作品" (select works) button to load the works list
+            try:
+                select_btn = await self.page.query_selector('text="选择作品"')
+                if select_btn:
+                    print(f"[{self.platform_name}] 点击选择作品按钮...", flush=True)
+                    await select_btn.click()
+                    await asyncio.sleep(3)
+                    
+                    # Collect the works list
+                    work_items = await self.page.query_selector_all('[class*="work-item"], [class*="video-item"], [class*="aweme-item"]')
+                    print(f"[{self.platform_name}] 找到 {len(work_items)} 个作品元素", flush=True)
+                    
+                    # Click each work to load its comments
+                    for i, item in enumerate(work_items[:10]):  # handle at most 10 works
+                        try:
+                            await item.click()
+                            await asyncio.sleep(2)
+                            print(f"[{self.platform_name}] 已点击作品 {i+1}/{min(len(work_items), 10)}", flush=True)
+                        except Exception:
+                            pass
+                    
+                    # Close the work-selection dialog
+                    close_btn = await self.page.query_selector('[class*="close"], [class*="cancel"]')
+                    if close_btn:
+                        await close_btn.click()
+                        await asyncio.sleep(1)
+            except Exception as e:
+                print(f"[{self.platform_name}] 选择作品操作失败: {e}", flush=True)
+            
+            # Scroll to load more comments
+            for i in range(5):
+                await self.page.evaluate('window.scrollBy(0, 500)')
+                await asyncio.sleep(1)
+            
+            await asyncio.sleep(3)
+            
+            # Remove the listener
+            self.page.remove_listener('response', handle_response)
+            
+            print(f"[{self.platform_name}] 最终捕获: {len(captured_comments)} 条评论, {len(captured_works)} 个作品", flush=True)
+            
+            # Group comments by work
+            work_comments_map = {}  # work_id -> work_comments
+            for comment in captured_comments:
+                # Pull work info from the comment payload
+                aweme = comment.get('aweme', {}) or comment.get('item', {})
+                aweme_id = str(comment.get('aweme_id', '') or aweme.get('aweme_id', '') or aweme.get('item_id', ''))
+                
+                if not aweme_id:
+                    continue
+                
+                if aweme_id not in work_comments_map:
+                    work_info = captured_works.get(aweme_id, {})
+                    work_comments_map[aweme_id] = {
+                        'work_id': aweme_id,
+                        'title': aweme.get('title', '') or aweme.get('desc', '') or work_info.get('title', ''),
+                        'cover_url': aweme.get('cover', {}).get('url_list', [''])[0] if aweme.get('cover') else work_info.get('cover', ''),
+                        'comments': []
+                    }
+                
+                cid = str(comment.get('cid', ''))
+                if not cid:
+                    continue
+                
+                user = comment.get('user', {})
+                
+                work_comments_map[aweme_id]['comments'].append({
+                    'comment_id': cid,
+                    'author_id': str(user.get('uid', '')),
+                    'author_name': user.get('nickname', ''),
+                    'author_avatar': user.get('avatar_thumb', {}).get('url_list', [''])[0] if user.get('avatar_thumb') else '',
+                    'content': comment.get('text', ''),
+                    'like_count': int(comment.get('digg_count', 0)),
+                    'create_time': datetime.fromtimestamp(comment.get('create_time', 0)).strftime('%Y-%m-%d %H:%M:%S') if comment.get('create_time') else '',
+                    'is_author': comment.get('is_author', False),
+                })
+            
+            all_work_comments = list(work_comments_map.values())
+            total_comments = sum(len(w['comments']) for w in all_work_comments)
+            print(f"[{self.platform_name}] 获取到 {len(all_work_comments)} 个作品的 {total_comments} 条评论", flush=True)
+            
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return {
+                'success': False,
+                'platform': self.platform_name,
+                'error': str(e),
+                'work_comments': []
+            }
+        finally:
+            await self.close_browser()
+        
+        return {
+            'success': True,
+            'platform': self.platform_name,
+            'work_comments': all_work_comments,
+            'total': len(all_work_comments)
+        }
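
The Douyin comment fetch above does not call the comment API directly; it captures the JSON responses the video page requests on its own. The pattern in isolation, as a hedged sketch (the target URL and the filter substring are placeholders):

```python
# Minimal, standalone sketch of the response-interception pattern used above.
import asyncio

from playwright.async_api import async_playwright


async def capture_json(target_url: str, api_substring: str) -> list[dict]:
    captured: list[dict] = []

    async def on_response(response):
        # Keep any JSON response whose URL contains the filter substring.
        if api_substring in response.url:
            try:
                captured.append(await response.json())
            except Exception:
                pass  # not JSON, or the body is unavailable

    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        page = await browser.new_page()
        page.on("response", on_response)
        await page.goto(target_url, wait_until="domcontentloaded")
        await asyncio.sleep(5)  # give the page time to fire its own API calls
        page.remove_listener("response", on_response)
        await browser.close()
    return captured

# e.g. asyncio.run(capture_json("https://www.douyin.com/video/<work id>", "/comment/list"))
```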

+ 189 - 1
server/python/platforms/kuaishou.py

@@ -7,7 +7,11 @@
 import asyncio
 import os
 from datetime import datetime
-from .base import BasePublisher, PublishParams, PublishResult
+from typing import List
+from .base import (
+    BasePublisher, PublishParams, PublishResult,
+    WorkItem, WorksResult, CommentItem, CommentsResult
+)
 
 
 class KuaishouPublisher(BasePublisher):
@@ -163,3 +167,187 @@ class KuaishouPublisher(BasePublisher):
                 await asyncio.sleep(1)
         
         raise Exception("发布超时")
+
+    async def get_works(self, cookies: str, page: int = 0, page_size: int = 20) -> WorksResult:
+        """获取快手作品列表"""
+        print(f"\n{'='*60}")
+        print(f"[{self.platform_name}] 获取作品列表")
+        print(f"[{self.platform_name}] page={page}, page_size={page_size}")
+        print(f"{'='*60}")
+        
+        works: List[WorkItem] = []
+        total = 0
+        has_more = False
+        
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            await self.set_cookies(cookie_list)
+            
+            if not self.page:
+                raise Exception("Page not initialized")
+            
+            # Visit the creator center
+            await self.page.goto("https://cp.kuaishou.com/")
+            await asyncio.sleep(3)
+            
+            # Check login state
+            current_url = self.page.url
+            if "passport" in current_url or "login" in current_url:
+                raise Exception("Cookie 已过期,请重新登录")
+            
+            # Call the works-list API
+            pcursor = "" if page == 0 else str(page)
+            api_url = f"https://cp.kuaishou.com/rest/cp/works/v2/video/pc/photo/list?count={page_size}&pcursor={pcursor}&status=public"
+            
+            js_code = f"""
+                async () => {{
+                    const resp = await fetch("{api_url}", {{
+                        credentials: 'include',
+                        headers: {{ 'Accept': 'application/json' }}
+                    }});
+                    return await resp.json();
+                }}
+            """
+            
+            response = await self.page.evaluate(js_code)
+            
+            if response.get('result') == 1:
+                data = response.get('data', {})
+                photo_list = data.get('list', [])
+                has_more = len(photo_list) >= page_size
+                
+                for photo in photo_list:
+                    photo_id = photo.get('photoId', '')
+                    if not photo_id:
+                        continue
+                    
+                    # Cover
+                    cover_url = photo.get('coverUrl', '')
+                    if cover_url.startswith('http://'):
+                        cover_url = cover_url.replace('http://', 'https://')
+                    
+                    # Duration
+                    duration = photo.get('duration', 0) // 1000  # milliseconds to seconds
+                    
+                    # Publish time
+                    create_time = photo.get('timestamp', 0) // 1000
+                    publish_time = ''
+                    if create_time:
+                        publish_time = datetime.fromtimestamp(create_time).strftime('%Y-%m-%d %H:%M:%S')
+                    
+                    works.append(WorkItem(
+                        work_id=str(photo_id),
+                        title=photo.get('caption', '') or '无标题',
+                        cover_url=cover_url,
+                        duration=duration,
+                        status='published',
+                        publish_time=publish_time,
+                        play_count=photo.get('viewCount', 0),
+                        like_count=photo.get('likeCount', 0),
+                        comment_count=photo.get('commentCount', 0),
+                        share_count=photo.get('shareCount', 0),
+                    ))
+                
+                print(f"[{self.platform_name}] 获取到 {len(works)} 个作品")
+            
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return WorksResult(success=False, platform=self.platform_name, error=str(e))
+        
+        return WorksResult(success=True, platform=self.platform_name, works=works, total=total or len(works), has_more=has_more)
+    
+    async def get_comments(self, cookies: str, work_id: str, cursor: str = "") -> CommentsResult:
+        """获取快手作品评论"""
+        print(f"\n{'='*60}")
+        print(f"[{self.platform_name}] 获取作品评论")
+        print(f"[{self.platform_name}] work_id={work_id}")
+        print(f"{'='*60}")
+        
+        comments: List[CommentItem] = []
+        total = 0
+        has_more = False
+        
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            await self.set_cookies(cookie_list)
+            
+            if not self.page:
+                raise Exception("Page not initialized")
+            
+            await self.page.goto("https://cp.kuaishou.com/")
+            await asyncio.sleep(3)
+            
+            current_url = self.page.url
+            if "passport" in current_url or "login" in current_url:
+                raise Exception("Cookie 已过期,请重新登录")
+            
+            # Call the comment-list API
+            pcursor = cursor or ""
+            api_url = f"https://cp.kuaishou.com/rest/cp/works/comment/list?photoId={work_id}&pcursor={pcursor}&count=20"
+            
+            js_code = f"""
+                async () => {{
+                    const resp = await fetch("{api_url}", {{
+                        credentials: 'include',
+                        headers: {{ 'Accept': 'application/json' }}
+                    }});
+                    return await resp.json();
+                }}
+            """
+            
+            response = await self.page.evaluate(js_code)
+            
+            if response.get('result') == 1:
+                data = response.get('data', {})
+                comment_list = data.get('list', [])
+                has_more = data.get('pcursor', '') != ''
+                
+                for comment in comment_list:
+                    cid = comment.get('commentId', '')
+                    if not cid:
+                        continue
+                    
+                    author = comment.get('author', {})
+                    
+                    # Parse sub-comments (replies)
+                    replies = []
+                    sub_list = comment.get('subComments', []) or []
+                    for sub in sub_list:
+                        sub_author = sub.get('author', {})
+                        replies.append(CommentItem(
+                            comment_id=str(sub.get('commentId', '')),
+                            work_id=work_id,
+                            content=sub.get('content', ''),
+                            author_id=str(sub_author.get('id', '')),
+                            author_name=sub_author.get('name', ''),
+                            author_avatar=sub_author.get('headurl', ''),
+                            like_count=sub.get('likeCount', 0),
+                            create_time=str(sub.get('timestamp', '')),
+                        ))
+                    
+                    comments.append(CommentItem(
+                        comment_id=str(cid),
+                        work_id=work_id,
+                        content=comment.get('content', ''),
+                        author_id=str(author.get('id', '')),
+                        author_name=author.get('name', ''),
+                        author_avatar=author.get('headurl', ''),
+                        like_count=comment.get('likeCount', 0),
+                        reply_count=comment.get('subCommentCount', 0),
+                        create_time=str(comment.get('timestamp', '')),
+                        replies=replies,
+                    ))
+                
+                total = len(comments)
+                print(f"[{self.platform_name}] 获取到 {total} 条评论")
+            
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return CommentsResult(success=False, platform=self.platform_name, work_id=work_id, error=str(e))
+        
+        return CommentsResult(success=True, platform=self.platform_name, work_id=work_id, comments=comments, total=total, has_more=has_more)
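
The Kuaishou methods build their in-page `fetch` calls by interpolating the URL into the JavaScript source with an f-string, which works for these URLs. Playwright's `evaluate` also accepts an argument, which avoids quoting issues entirely; a hedged sketch of that variant (`fetch_json` is a hypothetical helper, not part of the codebase):

```python
# Sketch: the same in-page fetch, with the URL passed as an evaluate() argument.
async def fetch_json(page, api_url: str) -> dict:
    return await page.evaluate(
        """async (url) => {
            const resp = await fetch(url, {
                credentials: 'include',
                headers: { 'Accept': 'application/json' }
            });
            return await resp.json();
        }""",
        api_url,
    )

# Inside get_works() this would replace the inline js_code block:
#   response = await fetch_json(self.page, api_url)
```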

+ 221 - 1
server/python/platforms/weixin.py

@@ -7,7 +7,11 @@
 import asyncio
 import os
 from datetime import datetime
-from .base import BasePublisher, PublishParams, PublishResult
+from typing import List
+from .base import (
+    BasePublisher, PublishParams, PublishResult,
+    WorkItem, WorksResult, CommentItem, CommentsResult
+)
 
 
 def format_short_title(origin_title: str) -> str:
@@ -288,3 +292,219 @@ class WeixinPublisher(BasePublisher):
                 await asyncio.sleep(1)
         
         raise Exception("发布超时")
+
+    async def get_works(self, cookies: str, page: int = 0, page_size: int = 20) -> WorksResult:
+        """获取视频号作品列表"""
+        print(f"\n{'='*60}")
+        print(f"[{self.platform_name}] 获取作品列表")
+        print(f"[{self.platform_name}] page={page}, page_size={page_size}")
+        print(f"{'='*60}")
+        
+        works: List[WorkItem] = []
+        total = 0
+        has_more = False
+        
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            await self.set_cookies(cookie_list)
+            
+            if not self.page:
+                raise Exception("Page not initialized")
+            
+            # Visit the Channels creator center
+            await self.page.goto("https://channels.weixin.qq.com/platform/post/list")
+            await asyncio.sleep(5)
+            
+            # Check login state
+            current_url = self.page.url
+            if "login" in current_url:
+                raise Exception("Cookie 已过期,请重新登录")
+            
+            # For Channels, the works list is scraped from the page itself
+            # Wait for the post list to load
+            await self.page.wait_for_selector('div.post-feed-wrap', timeout=10000)
+            
+            # Collect all post items
+            post_items = self.page.locator('div.post-feed-item')
+            item_count = await post_items.count()
+            
+            print(f"[{self.platform_name}] 找到 {item_count} 个作品项")
+            
+            for i in range(min(item_count, page_size)):
+                try:
+                    item = post_items.nth(i)
+                    
+                    # Cover
+                    cover_el = item.locator('div.cover-wrap img').first
+                    cover_url = ''
+                    if await cover_el.count() > 0:
+                        cover_url = await cover_el.get_attribute('src') or ''
+                    
+                    # Title
+                    title_el = item.locator('div.content').first
+                    title = ''
+                    if await title_el.count() > 0:
+                        title = await title_el.text_content() or ''
+                        title = title.strip()[:50]
+                    
+                    # Statistics
+                    stats_el = item.locator('div.post-data')
+                    play_count = 0
+                    like_count = 0
+                    comment_count = 0
+                    
+                    if await stats_el.count() > 0:
+                        stats_text = await stats_el.text_content() or ''
+                        # Parse the stats text (roughly: 播放 100 点赞 50 评论 10, i.e. plays / likes / comments)
+                        import re
+                        play_match = re.search(r'播放[\s]*([\d.]+[万]?)', stats_text)
+                        like_match = re.search(r'点赞[\s]*([\d.]+[万]?)', stats_text)
+                        comment_match = re.search(r'评论[\s]*([\d.]+[万]?)', stats_text)
+                        
+                        def parse_count(match):
+                            if not match:
+                                return 0
+                            val = match.group(1)
+                            if '万' in val:
+                                return int(float(val.replace('万', '')) * 10000)
+                            return int(float(val))  # tolerate decimal strings like "1.5"
+                        
+                        play_count = parse_count(play_match)
+                        like_count = parse_count(like_match)
+                        comment_count = parse_count(comment_match)
+                    
+                    # Publish time
+                    time_el = item.locator('div.time')
+                    publish_time = ''
+                    if await time_el.count() > 0:
+                        publish_time = await time_el.text_content() or ''
+                        publish_time = publish_time.strip()
+                    
+                    # Generate a temporary work_id (Channels may require the detail page for a real ID)
+                    work_id = f"weixin_{i}_{hash(title)}"
+                    
+                    works.append(WorkItem(
+                        work_id=work_id,
+                        title=title or '无标题',
+                        cover_url=cover_url,
+                        duration=0,
+                        status='published',
+                        publish_time=publish_time,
+                        play_count=play_count,
+                        like_count=like_count,
+                        comment_count=comment_count,
+                    ))
+                except Exception as e:
+                    print(f"[{self.platform_name}] 解析作品 {i} 失败: {e}")
+                    continue
+            
+            total = len(works)
+            has_more = item_count > page_size
+            print(f"[{self.platform_name}] 获取到 {total} 个作品")
+            
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return WorksResult(success=False, platform=self.platform_name, error=str(e))
+        
+        return WorksResult(success=True, platform=self.platform_name, works=works, total=total, has_more=has_more)
+    
+    async def get_comments(self, cookies: str, work_id: str, cursor: str = "") -> CommentsResult:
+        """获取视频号作品评论"""
+        print(f"\n{'='*60}")
+        print(f"[{self.platform_name}] 获取作品评论")
+        print(f"[{self.platform_name}] work_id={work_id}")
+        print(f"{'='*60}")
+        
+        comments: List[CommentItem] = []
+        total = 0
+        has_more = False
+        
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            await self.set_cookies(cookie_list)
+            
+            if not self.page:
+                raise Exception("Page not initialized")
+            
+            # Visit the comment management page
+            await self.page.goto("https://channels.weixin.qq.com/platform/comment/index")
+            await asyncio.sleep(5)
+            
+            # Check login state
+            current_url = self.page.url
+            if "login" in current_url:
+                raise Exception("Cookie 已过期,请重新登录")
+            
+            # Wait for the comment list to load
+            try:
+                await self.page.wait_for_selector('div.comment-list', timeout=10000)
+            except Exception:
+                print(f"[{self.platform_name}] 未找到评论列表")
+                return CommentsResult(success=True, platform=self.platform_name, work_id=work_id, comments=[], total=0, has_more=False)
+            
+            # Collect all comment items
+            comment_items = self.page.locator('div.comment-item')
+            item_count = await comment_items.count()
+            
+            print(f"[{self.platform_name}] 找到 {item_count} 个评论项")
+            
+            for i in range(item_count):
+                try:
+                    item = comment_items.nth(i)
+                    
+                    # Author info
+                    author_name = ''
+                    author_avatar = ''
+                    name_el = item.locator('div.nick-name')
+                    if await name_el.count() > 0:
+                        author_name = await name_el.text_content() or ''
+                        author_name = author_name.strip()
+                    
+                    avatar_el = item.locator('img.avatar')
+                    if await avatar_el.count() > 0:
+                        author_avatar = await avatar_el.get_attribute('src') or ''
+                    
+                    # Comment content
+                    content = ''
+                    content_el = item.locator('div.comment-content')
+                    if await content_el.count() > 0:
+                        content = await content_el.text_content() or ''
+                        content = content.strip()
+                    
+                    # Timestamp
+                    create_time = ''
+                    time_el = item.locator('div.time')
+                    if await time_el.count() > 0:
+                        create_time = await time_el.text_content() or ''
+                        create_time = create_time.strip()
+                    
+                    # Generate a comment ID
+                    comment_id = f"weixin_comment_{i}_{hash(content)}"
+                    
+                    comments.append(CommentItem(
+                        comment_id=comment_id,
+                        work_id=work_id,
+                        content=content,
+                        author_id='',
+                        author_name=author_name,
+                        author_avatar=author_avatar,
+                        like_count=0,
+                        reply_count=0,
+                        create_time=create_time,
+                    ))
+                except Exception as e:
+                    print(f"[{self.platform_name}] 解析评论 {i} 失败: {e}")
+                    continue
+            
+            total = len(comments)
+            print(f"[{self.platform_name}] 获取到 {total} 条评论")
+            
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return CommentsResult(success=False, platform=self.platform_name, work_id=work_id, error=str(e))
+        
+        return CommentsResult(success=True, platform=self.platform_name, work_id=work_id, comments=comments, total=total, has_more=has_more)
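
Because the Channels pages only expose display counts such as `1.2万`, the count parsing in `get_works` is worth keeping as a small standalone helper. A sketch that takes the raw stats text (a variant of the inline `parse_count` above, not existing code):

```python
# Standalone sketch of the display-count parsing used above ("万" = 10,000).
import re


def parse_display_count(text: str) -> int:
    """Parse display strings such as '358' or '1.2万' into integers; 0 on failure."""
    m = re.search(r'([\d.]+)(万?)', text)
    if not m:
        return 0
    value, wan = m.groups()
    try:
        return int(float(value) * (10000 if wan else 1))
    except ValueError:
        return 0


assert parse_display_count("播放 1.2万") == 12000
assert parse_display_count("点赞 358") == 358
assert parse_display_count("评论 -") == 0
```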

+ 463 - 1
server/python/platforms/xiaohongshu.py

@@ -9,7 +9,11 @@ import asyncio
 import os
 import sys
 from pathlib import Path
-from .base import BasePublisher, PublishParams, PublishResult
+from typing import List
+from .base import (
+    BasePublisher, PublishParams, PublishResult,
+    WorkItem, WorksResult, CommentItem, CommentsResult
+)
 
 # Add the matrix project path so the signing script can be imported
 MATRIX_PATH = Path(__file__).parent.parent.parent.parent / "matrix"
@@ -463,3 +467,461 @@ class XiaohongshuPublisher(BasePublisher):
             platform=self.platform_name,
             message="发布完成"
         )
+    
+    async def get_works(self, cookies: str, page: int = 0, page_size: int = 20) -> WorksResult:
+        """获取小红书作品列表 - 通过监听页面网络响应获取数据"""
+        print(f"\n{'='*60}", flush=True)
+        print(f"[{self.platform_name}] 获取作品列表", flush=True)
+        print(f"[{self.platform_name}] page={page}, page_size={page_size}", flush=True)
+        print(f"{'='*60}", flush=True)
+        
+        works: List[WorkItem] = []
+        total = 0
+        has_more = False
+        captured_data = {}
+        
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            
+            # Log cookie info for debugging
+            print(f"[{self.platform_name}] 解析到 {len(cookie_list)} 个 cookies", flush=True)
+            
+            await self.set_cookies(cookie_list)
+            
+            if not self.page:
+                raise Exception("Page not initialized")
+            
+            # Define the response listener - capture the API requests the page fires on its own
+            async def handle_response(response):
+                nonlocal captured_data
+                url = response.url
+                # Watch for the works-list API
+                if 'creator/note/user/posted' in url or 'creator/note_list' in url:
+                    try:
+                        json_data = await response.json()
+                        print(f"[{self.platform_name}] 捕获到 API 响应: {url[:80]}...", flush=True)
+                        if json_data.get('success') or json_data.get('code') == 0:
+                            captured_data = json_data
+                            print(f"[{self.platform_name}] API 响应成功,data keys: {list(json_data.get('data', {}).keys())}", flush=True)
+                    except Exception as e:
+                        print(f"[{self.platform_name}] 解析响应失败: {e}", flush=True)
+            
+            # 注册响应监听器
+            self.page.on('response', handle_response)
+            print(f"[{self.platform_name}] 已注册 API 响应监听器", flush=True)
+            
+            # 访问笔记管理页面 - 页面会自动发起 API 请求
+            print(f"[{self.platform_name}] 访问笔记管理页面...", flush=True)
+            
+            try:
+                await self.page.goto("https://creator.xiaohongshu.com/new/note-manager", wait_until="domcontentloaded", timeout=30000)
+            except Exception as nav_error:
+                print(f"[{self.platform_name}] 导航超时,但继续尝试: {nav_error}", flush=True)
+            
+            # 等待 API 响应被捕获
+            await asyncio.sleep(5)
+            
+            # 检查登录状态
+            current_url = self.page.url
+            print(f"[{self.platform_name}] 当前页面: {current_url}", flush=True)
+            if "login" in current_url:
+                raise Exception("Cookie 已过期,请重新登录")
+            
+            # 如果还没有捕获到数据,等待更长时间
+            if not captured_data:
+                print(f"[{self.platform_name}] 等待 API 响应...", flush=True)
+                await asyncio.sleep(5)
+            
+            # 移除监听器
+            self.page.remove_listener('response', handle_response)
+            
+            # 处理捕获到的数据
+            import json
+            if captured_data:
+                print(f"[{self.platform_name}] 成功捕获到 API 数据", flush=True)
+                data = captured_data.get('data', {})
+                notes = data.get('notes', [])
+                print(f"[{self.platform_name}] notes 数量: {len(notes)}", flush=True)
+                
+                # 从 tags 获取总数
+                tags = data.get('tags', [])
+                for tag in tags:
+                    if tag.get('id') == 'special.note_time_desc':
+                        total = tag.get('notes_count', 0)
+                        break
+                
+                has_more = data.get('page', -1) != -1
+                
+                for note in notes:
+                    note_id = note.get('id', '')
+                    if not note_id:
+                        continue
+                    
+                    # 获取封面
+                    cover_url = ''
+                    images_list = note.get('images_list', [])
+                    if images_list:
+                        cover_url = images_list[0].get('url', '')
+                        if cover_url.startswith('http://'):
+                            cover_url = cover_url.replace('http://', 'https://')
+                    
+                    # 获取时长
+                    duration = note.get('video_info', {}).get('duration', 0)
+                    
+                    # 解析状态
+                    status = 'published'
+                    tab_status = note.get('tab_status', 1)
+                    if tab_status == 0:
+                        status = 'draft'
+                    elif tab_status == 2:
+                        status = 'reviewing'
+                    elif tab_status == 3:
+                        status = 'rejected'
+                    
+                    works.append(WorkItem(
+                        work_id=note_id,
+                        title=note.get('display_title', '') or '无标题',
+                        cover_url=cover_url,
+                        duration=duration,
+                        status=status,
+                        publish_time=note.get('time', ''),
+                        play_count=note.get('view_count', 0),
+                        like_count=note.get('likes', 0),
+                        comment_count=note.get('comments_count', 0),
+                        share_count=note.get('shared_count', 0),
+                        collect_count=note.get('collected_count', 0),
+                    ))
+                
+                print(f"[{self.platform_name}] 解析到 {len(works)} 个作品,总计: {total}", flush=True)
+            else:
+                print(f"[{self.platform_name}] 未能捕获到 API 数据", flush=True)
+            
+        except Exception as e:
+            import traceback
+            print(f"[{self.platform_name}] 发生异常: {e}", flush=True)
+            traceback.print_exc()
+            return WorksResult(
+                success=False,
+                platform=self.platform_name,
+                error=str(e)
+            )
+        finally:
+            # 确保关闭浏览器
+            await self.close_browser()
+        
+        return WorksResult(
+            success=True,
+            platform=self.platform_name,
+            works=works,
+            total=total or len(works),
+            has_more=has_more
+        )
+    
+    async def get_comments(self, cookies: str, work_id: str, cursor: str = "") -> CommentsResult:
+        """获取小红书作品评论 - 通过创作者后台评论管理页面"""
+        print(f"\n{'='*60}")
+        print(f"[{self.platform_name}] 获取作品评论")
+        print(f"[{self.platform_name}] work_id={work_id}, cursor={cursor}")
+        print(f"{'='*60}")
+        
+        comments: List[CommentItem] = []
+        total = 0
+        has_more = False
+        next_cursor = ""
+        captured_data = {}
+        
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            await self.set_cookies(cookie_list)
+            
+            if not self.page:
+                raise Exception("Page not initialized")
+            
+            # 设置 API 响应监听器
+            async def handle_response(response):
+                nonlocal captured_data
+                url = response.url
+                # 监听评论相关 API - 创作者后台和普通页面的 API
+                if '/comment/' in url and ('page' in url or 'list' in url):
+                    try:
+                        json_data = await response.json()
+                        print(f"[{self.platform_name}] 捕获到评论 API: {url[:100]}...", flush=True)
+                        if json_data.get('success') or json_data.get('code') == 0:
+                            data = json_data.get('data', {})
+                            comment_list = data.get('comments') or data.get('list') or []
+                            if comment_list:
+                                captured_data = json_data
+                                print(f"[{self.platform_name}] 评论 API 响应成功,comments={len(comment_list)}", flush=True)
+                            else:
+                                print(f"[{self.platform_name}] 评论 API 响应成功但无评论", flush=True)
+                    except Exception as e:
+                        print(f"[{self.platform_name}] 解析评论响应失败: {e}", flush=True)
+            
+            self.page.on('response', handle_response)
+            print(f"[{self.platform_name}] 已注册评论 API 响应监听器", flush=True)
+            
+            # 访问创作者后台评论管理页面
+            comment_url = "https://creator.xiaohongshu.com/creator/comment"
+            print(f"[{self.platform_name}] 访问评论管理页面: {comment_url}", flush=True)
+            await self.page.goto(comment_url, wait_until="domcontentloaded", timeout=30000)
+            await asyncio.sleep(5)
+            
+            # 检查是否被重定向到登录页
+            current_url = self.page.url
+            print(f"[{self.platform_name}] 当前页面 URL: {current_url}", flush=True)
+            if "login" in current_url:
+                raise Exception("Cookie 已过期,请重新登录")
+            
+            # 等待评论加载
+            if not captured_data:
+                print(f"[{self.platform_name}] 等待评论 API 响应...", flush=True)
+                # 尝试滚动页面触发评论加载
+                await self.page.evaluate('window.scrollBy(0, 500)')
+                await asyncio.sleep(3)
+            
+            if not captured_data:
+                # 再等待一会,可能评论 API 加载较慢
+                print(f"[{self.platform_name}] 继续等待评论加载...", flush=True)
+                await asyncio.sleep(5)
+            
+            # 移除监听器
+            self.page.remove_listener('response', handle_response)
+            
+            # 解析评论数据
+            if captured_data:
+                data = captured_data.get('data', {})
+                comment_list = data.get('comments') or data.get('list') or []
+                has_more = data.get('has_more', False)
+                next_cursor = data.get('cursor', '')
+                
+                print(f"[{self.platform_name}] 解析评论: has_more={has_more}, comments={len(comment_list)}", flush=True)
+                
+                for comment in comment_list:
+                    cid = comment.get('id', '')
+                    if not cid:
+                        continue
+                    
+                    user_info = comment.get('user_info', {})
+                    
+                    # 解析子评论
+                    replies = []
+                    sub_comments = comment.get('sub_comments', []) or []
+                    for sub in sub_comments:
+                        sub_user = sub.get('user_info', {})
+                        replies.append(CommentItem(
+                            comment_id=sub.get('id', ''),
+                            work_id=work_id,
+                            content=sub.get('content', ''),
+                            author_id=sub_user.get('user_id', ''),
+                            author_name=sub_user.get('nickname', ''),
+                            author_avatar=sub_user.get('image', ''),
+                            like_count=sub.get('like_count', 0),
+                            create_time=sub.get('create_time', ''),
+                        ))
+                    
+                    comments.append(CommentItem(
+                        comment_id=cid,
+                        work_id=work_id,
+                        content=comment.get('content', ''),
+                        author_id=user_info.get('user_id', ''),
+                        author_name=user_info.get('nickname', ''),
+                        author_avatar=user_info.get('image', ''),
+                        like_count=comment.get('like_count', 0),
+                        reply_count=comment.get('sub_comment_count', 0),
+                        create_time=comment.get('create_time', ''),
+                        replies=replies,
+                    ))
+                
+                total = len(comments)
+                print(f"[{self.platform_name}] 解析到 {total} 条评论", flush=True)
+            else:
+                print(f"[{self.platform_name}] 未捕获到评论 API 响应", flush=True)
+            
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return CommentsResult(
+                success=False,
+                platform=self.platform_name,
+                work_id=work_id,
+                error=str(e)
+            )
+        finally:
+            await self.close_browser()
+        
+        result = CommentsResult(
+            success=True,
+            platform=self.platform_name,
+            work_id=work_id,
+            comments=comments,
+            total=total,
+            has_more=has_more
+        )
+        result.__dict__['cursor'] = next_cursor
+        return result
+    
+    async def get_all_comments(self, cookies: str) -> dict:
+        """获取所有作品的评论 - 通过评论管理页面"""
+        print(f"\n{'='*60}")
+        print(f"[{self.platform_name}] 获取所有作品评论")
+        print(f"{'='*60}")
+        
+        all_work_comments = []
+        captured_comments = []
+        captured_notes = {}  # note_id -> note_info
+        
+        try:
+            await self.init_browser()
+            cookie_list = self.parse_cookies(cookies)
+            await self.set_cookies(cookie_list)
+            
+            if not self.page:
+                raise Exception("Page not initialized")
+            
+            # 设置 API 响应监听器
+            async def handle_response(response):
+                nonlocal captured_comments, captured_notes
+                url = response.url
+                try:
+                    # 监听评论列表 API - 多种格式
+                    if '/comment/' in url and ('page' in url or 'list' in url):
+                        json_data = await response.json()
+                        print(f"[{self.platform_name}] 捕获到评论 API: {url[:100]}...", flush=True)
+                        
+                        if json_data.get('success') or json_data.get('code') == 0:
+                            data = json_data.get('data', {})
+                            comments = data.get('comments', []) or data.get('list', [])
+                            
+                            # 从 URL 中提取 note_id
+                            import re
+                            note_id_match = re.search(r'note_id=([^&]+)', url)
+                            note_id = note_id_match.group(1) if note_id_match else ''
+                            
+                            if comments:
+                                for comment in comments:
+                                    # 添加 note_id 到评论中
+                                    if note_id and 'note_id' not in comment:
+                                        comment['note_id'] = note_id
+                                    captured_comments.append(comment)
+                                
+                                print(f"[{self.platform_name}] 捕获到 {len(comments)} 条评论 (note_id={note_id}),总计: {len(captured_comments)}", flush=True)
+                    
+                    # 监听笔记列表 API
+                    if '/note/' in url and ('list' in url or 'posted' in url or 'manager' in url):
+                        json_data = await response.json()
+                        if json_data.get('success') or json_data.get('code') == 0:
+                            data = json_data.get('data', {})
+                            notes = data.get('notes', []) or data.get('list', [])
+                            print(f"[{self.platform_name}] 捕获到笔记列表 API: {len(notes)} 个笔记", flush=True)
+                            for note in notes:
+                                note_id = note.get('note_id', '') or note.get('id', '')
+                                if note_id:
+                                    cover_url = ''
+                                    cover = note.get('cover', {})
+                                    if isinstance(cover, dict):
+                                        cover_url = cover.get('url', '') or cover.get('url_default', '')
+                                    elif isinstance(cover, str):
+                                        cover_url = cover
+                                    
+                                    captured_notes[note_id] = {
+                                        'title': note.get('title', '') or note.get('display_title', ''),
+                                        'cover': cover_url,
+                                    }
+                except Exception as e:
+                    print(f"[{self.platform_name}] 解析响应失败: {e}", flush=True)
+            
+            self.page.on('response', handle_response)
+            print(f"[{self.platform_name}] 已注册 API 响应监听器", flush=True)
+            
+            # 访问评论管理页面
+            print(f"[{self.platform_name}] 访问评论管理页面...", flush=True)
+            await self.page.goto("https://creator.xiaohongshu.com/creator/comment", wait_until="domcontentloaded", timeout=30000)
+            await asyncio.sleep(5)
+            
+            # 检查登录状态
+            current_url = self.page.url
+            if "login" in current_url:
+                raise Exception("Cookie 已过期,请重新登录")
+            
+            print(f"[{self.platform_name}] 页面加载完成,当前捕获: {len(captured_comments)} 条评论, {len(captured_notes)} 个笔记", flush=True)
+            
+            # 滚动加载更多评论
+            for i in range(5):
+                await self.page.evaluate('window.scrollBy(0, 500)')
+                await asyncio.sleep(1)
+            
+            await asyncio.sleep(3)
+            
+            # 移除监听器
+            self.page.remove_listener('response', handle_response)
+            
+            print(f"[{self.platform_name}] 最终捕获: {len(captured_comments)} 条评论, {len(captured_notes)} 个笔记", flush=True)
+            
+            # 按作品分组评论
+            work_comments_map = {}  # note_id -> work_comments
+            for comment in captured_comments:
+                # 获取笔记信息
+                note_info = comment.get('note_info', {}) or comment.get('note', {})
+                note_id = comment.get('note_id', '') or note_info.get('note_id', '') or note_info.get('id', '')
+                
+                if not note_id:
+                    continue
+                
+                if note_id not in work_comments_map:
+                    saved_note = captured_notes.get(note_id, {})
+                    cover_url = ''
+                    cover = note_info.get('cover', {})
+                    if isinstance(cover, dict):
+                        cover_url = cover.get('url', '') or cover.get('url_default', '')
+                    elif isinstance(cover, str):
+                        cover_url = cover
+                    if not cover_url:
+                        cover_url = saved_note.get('cover', '')
+                    
+                    work_comments_map[note_id] = {
+                        'work_id': note_id,
+                        'title': note_info.get('title', '') or note_info.get('display_title', '') or saved_note.get('title', ''),
+                        'cover_url': cover_url,
+                        'comments': []
+                    }
+                
+                cid = comment.get('id', '') or comment.get('comment_id', '')
+                if not cid:
+                    continue
+                
+                user_info = comment.get('user_info', {}) or comment.get('user', {})
+                
+                work_comments_map[note_id]['comments'].append({
+                    'comment_id': cid,
+                    'author_id': user_info.get('user_id', '') or user_info.get('id', ''),
+                    'author_name': user_info.get('nickname', '') or user_info.get('name', ''),
+                    'author_avatar': user_info.get('image', '') or user_info.get('avatar', ''),
+                    'content': comment.get('content', ''),
+                    'like_count': comment.get('like_count', 0),
+                    'create_time': comment.get('create_time', ''),
+                })
+            
+            all_work_comments = list(work_comments_map.values())
+            total_comments = sum(len(w['comments']) for w in all_work_comments)
+            print(f"[{self.platform_name}] 获取到 {len(all_work_comments)} 个作品的 {total_comments} 条评论", flush=True)
+            
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return {
+                'success': False,
+                'platform': self.platform_name,
+                'error': str(e),
+                'work_comments': []
+            }
+        finally:
+            await self.close_browser()
+        
+        return {
+            'success': True,
+            'platform': self.platform_name,
+            'work_comments': all_work_comments,
+            'total': len(all_work_comments)
+        }
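
To exercise the new `get_works` path by hand, the `/works` endpoint can be called directly. A minimal sketch, assuming the Flask service is running on its default port 5005 (the same default the Node adapters use) and that `requests` is installed; the cookie value is a placeholder:

```python
import requests

SERVICE_URL = "http://localhost:5005"  # default used by the Node adapters


def fetch_xhs_works(cookie: str, page: int = 0, page_size: int = 20) -> dict:
    """POST /works and return the parsed JSON body."""
    resp = requests.post(
        f"{SERVICE_URL}/works",
        json={
            "platform": "xiaohongshu",
            "cookie": cookie,        # cookie string or JSON-encoded cookie list
            "page": page,
            "page_size": page_size,
        },
        timeout=120,  # the browser-driven capture can take a while
    )
    resp.raise_for_status()
    return resp.json()


if __name__ == "__main__":
    result = fetch_xhs_works(cookie="<paste cookie string here>")
    print(result.get("success"), result.get("total"))
    for work in result.get("works", []):
        print(work["work_id"], work["title"], work["like_count"])
```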

+ 146 - 19
server/src/automation/platforms/douyin.ts

@@ -866,50 +866,89 @@ export class DouyinAdapter extends BasePlatformAdapter {
       onProgress?.(10, '正在选择视频文件...');
       
       // 参考 matrix: 点击上传区域触发文件选择
-      // 选择器: div.container-drag-info-Tl0RGH
+      // 选择器: div.container-drag-info-Tl0RGH (哈希值可能变化)
       const uploadDivSelectors = [
         'div[class*="container-drag-info"]',
+        'div[class*="container-drag"]',
+        'div[class*="upload-drag"]',
+        'div[class*="drag-info"]',
         'div[class*="upload-btn"]',
         'div[class*="drag-area"]',
         '[class*="upload"] [class*="drag"]',
+        'div[class*="upload-area"]',
       ];
       
       let uploadTriggered = false;
+      
+      // 方法1: 使用 file chooser 方式(最可靠)
       for (const selector of uploadDivSelectors) {
+        if (uploadTriggered) break;
         try {
           const uploadDiv = this.page.locator(selector).first();
-          if (await uploadDiv.count() > 0) {
-            logger.info(`[Douyin Publish] Found upload div: ${selector}`);
-            
-            // 使用 expect_file_chooser 方式上传(参考 matrix)
-            const [fileChooser] = await Promise.all([
-              this.page.waitForEvent('filechooser', { timeout: 10000 }),
-              uploadDiv.click(),
-            ]);
+          const count = await uploadDiv.count();
+          logger.info(`[Douyin Publish] Checking selector ${selector}: count=${count}`);
+          
+          if (count > 0) {
+            logger.info(`[Douyin Publish] Trying file chooser with selector: ${selector}`);
             
-            await fileChooser.setFiles(params.videoPath);
-            uploadTriggered = true;
-            logger.info(`[Douyin Publish] File selected via file chooser`);
-            break;
+            try {
+              const [fileChooser] = await Promise.all([
+                this.page.waitForEvent('filechooser', { timeout: 10000 }),
+                uploadDiv.click(),
+              ]);
+              
+              await fileChooser.setFiles(params.videoPath);
+              uploadTriggered = true;
+              logger.info(`[Douyin Publish] File selected via file chooser`);
+            } catch (fcError) {
+              logger.warn(`[Douyin Publish] File chooser failed for ${selector}:`, fcError);
+            }
           }
         } catch (e) {
           logger.warn(`[Douyin Publish] Failed with selector ${selector}:`, e);
         }
       }
       
-      // 如果点击方式失败,尝试直接设置 input
+      // 方法2: 直接设置 file input(备用)
       if (!uploadTriggered) {
         logger.info('[Douyin Publish] Trying direct input method...');
-        const fileInput = await this.page.$('input[type="file"]');
-        if (fileInput) {
-          await fileInput.setInputFiles(params.videoPath);
+        try {
+          const fileInputs = await this.page.$$('input[type="file"]');
+          logger.info(`[Douyin Publish] Found ${fileInputs.length} file inputs`);
+          
+          for (const fileInput of fileInputs) {
+            try {
+              await fileInput.setInputFiles(params.videoPath);
+              uploadTriggered = true;
+              logger.info('[Douyin Publish] File set via input element');
+              break;
+            } catch (inputError) {
+              logger.warn('[Douyin Publish] Failed to set file on input:', inputError);
+            }
+          }
+        } catch (e) {
+          logger.warn('[Douyin Publish] Direct input method failed:', e);
+        }
+      }
+      
+      // 方法3: 使用 locator('input[type="file"]').setInputFiles (最后尝试)
+      if (!uploadTriggered) {
+        logger.info('[Douyin Publish] Trying locator input method...');
+        try {
+          await this.page.locator('input[type="file"]').first().setInputFiles(params.videoPath);
           uploadTriggered = true;
-          logger.info('[Douyin Publish] File set via input element');
+          logger.info('[Douyin Publish] File set via locator input');
+        } catch (e) {
+          logger.warn('[Douyin Publish] Locator input method failed:', e);
         }
       }
       
       if (!uploadTriggered) {
-        throw new Error('无法触发文件上传');
+        // 保存截图以便调试
+        const screenshotPath = `douyin_upload_failed_${Date.now()}.png`;
+        await this.page.screenshot({ path: screenshotPath, fullPage: true });
+        logger.error(`[Douyin Publish] Screenshot saved to ${screenshotPath}`);
+        throw new Error(`无法触发文件上传(截图: ${screenshotPath})`);
       }
       
       onProgress?.(15, '视频上传中,等待跳转到发布页面...');
@@ -1268,9 +1307,97 @@ export class DouyinAdapter extends BasePlatformAdapter {
   }
   
   /**
+   * 通过 Python API 获取评论
+   */
+  private async getCommentsViaPython(cookies: string, videoId: string): Promise<CommentData[]> {
+    logger.info('[Douyin] Getting comments via Python API...');
+    
+    const response = await fetch(`${PYTHON_PUBLISH_SERVICE_URL}/comments`, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+      body: JSON.stringify({
+        platform: 'douyin',
+        cookie: cookies,
+        work_id: videoId,
+      }),
+    });
+    
+    if (!response.ok) {
+      throw new Error(`Python API returned ${response.status}`);
+    }
+    
+    const result = await response.json();
+    
+    if (!result.success) {
+      throw new Error(result.error || 'Failed to get comments');
+    }
+    
+    // 转换数据格式
+    return (result.comments || []).map((comment: {
+      comment_id: string;
+      author_id: string;
+      author_name: string;
+      author_avatar: string;
+      content: string;
+      like_count: number;
+      create_time: string;
+      reply_count: number;
+      replies?: Array<{
+        comment_id: string;
+        author_id: string;
+        author_name: string;
+        author_avatar: string;
+        content: string;
+        like_count: number;
+        create_time: string;
+      }>;
+    }) => ({
+      commentId: comment.comment_id,
+      authorId: comment.author_id,
+      authorName: comment.author_name,
+      authorAvatar: comment.author_avatar,
+      content: comment.content,
+      likeCount: comment.like_count,
+      commentTime: comment.create_time,
+      replyCount: comment.reply_count,
+      replies: comment.replies?.map((reply: {
+        comment_id: string;
+        author_id: string;
+        author_name: string;
+        author_avatar: string;
+        content: string;
+        like_count: number;
+        create_time: string;
+      }) => ({
+        commentId: reply.comment_id,
+        authorId: reply.author_id,
+        authorName: reply.author_name,
+        authorAvatar: reply.author_avatar,
+        content: reply.content,
+        likeCount: reply.like_count,
+        commentTime: reply.create_time,
+      })),
+    }));
+  }
+
+  /**
    * 获取评论列表
    */
   async getComments(cookies: string, videoId: string): Promise<CommentData[]> {
+    // 优先尝试使用 Python API
+    const pythonAvailable = await this.checkPythonServiceAvailable();
+    if (pythonAvailable) {
+      logger.info('[Douyin] Python service available, using Python API for comments');
+      try {
+        return await this.getCommentsViaPython(cookies, videoId);
+      } catch (pythonError) {
+        logger.warn('[Douyin] Python API getComments failed, falling back to Playwright:', pythonError);
+      }
+    }
+    
+    // 回退到 Playwright 方式
     try {
       // 使用无头浏览器后台运行
       await this.initBrowser({ headless: true });
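
The upload flow now tries three strategies in order: catch the native file chooser triggered by clicking the drag area, set any `input[type="file"]` elements directly, then a last-resort locator call, with a debug screenshot if all three fail. The same chooser-then-input idea in Playwright's Python API looks roughly like the sketch below; the URL and selectors are placeholders, not the real Douyin ones:

```python
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout

VIDEO_PATH = "/path/to/video.mp4"  # placeholder

with sync_playwright() as p:
    browser = p.chromium.launch(headless=True)
    page = browser.new_page()
    page.goto("https://example.com/upload")  # placeholder upload page

    triggered = False

    # Strategy 1: click the drop zone and catch the native file chooser.
    try:
        with page.expect_file_chooser(timeout=10_000) as fc_info:
            page.locator('div[class*="drag"]').first.click()
        fc_info.value.set_files(VIDEO_PATH)
        triggered = True
    except PWTimeout:
        pass  # no chooser appeared; fall through to the input-based path

    # Strategy 2: set the hidden <input type="file"> directly.
    if not triggered:
        page.locator('input[type="file"]').first.set_input_files(VIDEO_PATH)

    browser.close()
```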

+ 89 - 1
server/src/automation/platforms/kuaishou.ts

@@ -263,8 +263,96 @@ export class KuaishouAdapter extends BasePlatformAdapter {
     }
   }
   
+  /**
+   * 通过 Python API 获取评论
+   */
+  private async getCommentsViaPython(cookies: string, videoId: string): Promise<CommentData[]> {
+    logger.info('[Kuaishou] Getting comments via Python API...');
+    
+    const response = await fetch(`${PYTHON_PUBLISH_SERVICE_URL}/comments`, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+      body: JSON.stringify({
+        platform: 'kuaishou',
+        cookie: cookies,
+        work_id: videoId,
+      }),
+    });
+    
+    if (!response.ok) {
+      throw new Error(`Python API returned ${response.status}`);
+    }
+    
+    const result = await response.json();
+    
+    if (!result.success) {
+      throw new Error(result.error || 'Failed to get comments');
+    }
+    
+    // 转换数据格式
+    return (result.comments || []).map((comment: {
+      comment_id: string;
+      author_id: string;
+      author_name: string;
+      author_avatar: string;
+      content: string;
+      like_count: number;
+      create_time: string;
+      reply_count: number;
+      replies?: Array<{
+        comment_id: string;
+        author_id: string;
+        author_name: string;
+        author_avatar: string;
+        content: string;
+        like_count: number;
+        create_time: string;
+      }>;
+    }) => ({
+      commentId: comment.comment_id,
+      authorId: comment.author_id,
+      authorName: comment.author_name,
+      authorAvatar: comment.author_avatar,
+      content: comment.content,
+      likeCount: comment.like_count,
+      commentTime: comment.create_time,
+      replyCount: comment.reply_count,
+      replies: comment.replies?.map((reply: {
+        comment_id: string;
+        author_id: string;
+        author_name: string;
+        author_avatar: string;
+        content: string;
+        like_count: number;
+        create_time: string;
+      }) => ({
+        commentId: reply.comment_id,
+        authorId: reply.author_id,
+        authorName: reply.author_name,
+        authorAvatar: reply.author_avatar,
+        content: reply.content,
+        likeCount: reply.like_count,
+        commentTime: reply.create_time,
+      })),
+    }));
+  }
+
   async getComments(cookies: string, videoId: string): Promise<CommentData[]> {
-    logger.info(`Kuaishou getComments for video ${videoId}`);
+    // 优先尝试使用 Python API
+    const pythonAvailable = await this.checkPythonServiceAvailable();
+    if (pythonAvailable) {
+      logger.info('[Kuaishou] Python service available, using Python API for comments');
+      try {
+        return await this.getCommentsViaPython(cookies, videoId);
+      } catch (pythonError) {
+        logger.warn('[Kuaishou] Python API getComments failed:', pythonError);
+      }
+    }
+    
+    // Python API 不可用或失败时返回空
+    logger.info(`Kuaishou getComments for video ${videoId} - Playwright fallback not implemented, returning empty list`);
     return [];
   }
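
All of these adapters post the same `{platform, cookie, work_id}` body to `/comments`. For debugging a single video outside the Node process, the endpoint can be hit directly; a sketch under the same assumptions as before (service on `localhost:5005`, placeholder cookie and work ID):

```python
import requests

SERVICE_URL = "http://localhost:5005"


def fetch_comments(platform: str, cookie: str, work_id: str, cursor: str = "") -> list[dict]:
    """POST /comments and return the list of comment dicts (may be empty)."""
    resp = requests.post(
        f"{SERVICE_URL}/comments",
        json={"platform": platform, "cookie": cookie, "work_id": work_id, "cursor": cursor},
        timeout=120,
    )
    resp.raise_for_status()
    body = resp.json()
    if not body.get("success"):
        raise RuntimeError(body.get("error", "unknown error"))
    return body.get("comments", [])


if __name__ == "__main__":
    for c in fetch_comments("kuaishou", "<cookie>", "<work_id>"):
        print(c["author_name"], c["content"], c["like_count"])
```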
   

+ 62 - 1
server/src/automation/platforms/weixin.ts

@@ -320,8 +320,69 @@ export class WeixinAdapter extends BasePlatformAdapter {
     }
   }
   
+  /**
+   * 通过 Python API 获取评论
+   */
+  private async getCommentsViaPython(cookies: string, videoId: string): Promise<CommentData[]> {
+    logger.info('[Weixin] Getting comments via Python API...');
+    
+    const response = await fetch(`${PYTHON_PUBLISH_SERVICE_URL}/comments`, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+      body: JSON.stringify({
+        platform: 'weixin',
+        cookie: cookies,
+        work_id: videoId,
+      }),
+    });
+    
+    if (!response.ok) {
+      throw new Error(`Python API returned ${response.status}`);
+    }
+    
+    const result = await response.json();
+    
+    if (!result.success) {
+      throw new Error(result.error || 'Failed to get comments');
+    }
+    
+    // 转换数据格式
+    return (result.comments || []).map((comment: {
+      comment_id: string;
+      author_id: string;
+      author_name: string;
+      author_avatar: string;
+      content: string;
+      like_count: number;
+      create_time: string;
+      reply_count: number;
+    }) => ({
+      commentId: comment.comment_id,
+      authorId: comment.author_id,
+      authorName: comment.author_name,
+      authorAvatar: comment.author_avatar,
+      content: comment.content,
+      likeCount: comment.like_count,
+      commentTime: comment.create_time,
+      replyCount: comment.reply_count,
+    }));
+  }
+
   async getComments(cookies: string, videoId: string): Promise<CommentData[]> {
-    logger.warn('Weixin getComments not implemented');
+    // 优先尝试使用 Python API
+    const pythonAvailable = await this.checkPythonServiceAvailable();
+    if (pythonAvailable) {
+      logger.info('[Weixin] Python service available, using Python API for comments');
+      try {
+        return await this.getCommentsViaPython(cookies, videoId);
+      } catch (pythonError) {
+        logger.warn('[Weixin] Python API getComments failed:', pythonError);
+      }
+    }
+    
+    logger.warn('Weixin getComments - Python API unavailable or failed, returning empty list');
     return [];
   }
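
Each adapter repeats the same snake_case to camelCase translation when forwarding the Python payload (`comment_id` to `commentId`, `create_time` to `commentTime`, and so on). The mapping is mechanical; a table-driven helper along these lines is one way to keep the field list in one place (an illustrative Python sketch, not what the TypeScript adapters currently do):

```python
# Maps the Python service's snake_case comment fields to the camelCase
# names the Node adapters expose. Illustrative sketch only.
FIELD_MAP = {
    "comment_id": "commentId",
    "author_id": "authorId",
    "author_name": "authorName",
    "author_avatar": "authorAvatar",
    "content": "content",
    "like_count": "likeCount",
    "create_time": "commentTime",
    "reply_count": "replyCount",
}


def to_camel(comment: dict) -> dict:
    """Translate one comment dict, recursing into nested replies."""
    out = {camel: comment.get(snake) for snake, camel in FIELD_MAP.items() if snake in comment}
    if comment.get("replies"):
        out["replies"] = [to_camel(r) for r in comment["replies"]]
    return out


if __name__ == "__main__":
    sample = {"comment_id": "c1", "content": "hi", "like_count": 3, "create_time": "2024-01-20 12:00:00"}
    print(to_camel(sample))  # {'commentId': 'c1', 'content': 'hi', 'likeCount': 3, 'commentTime': '2024-01-20 12:00:00'}
```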
   

+ 88 - 0
server/src/automation/platforms/xiaohongshu.ts

@@ -990,9 +990,97 @@ export class XiaohongshuAdapter extends BasePlatformAdapter {
   }
 
   /**
+   * 通过 Python API 获取评论
+   */
+  private async getCommentsViaPython(cookies: string, videoId: string): Promise<CommentData[]> {
+    logger.info('[Xiaohongshu] Getting comments via Python API...');
+    
+    const response = await fetch(`${XHS_PYTHON_SERVICE_URL}/comments`, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+      body: JSON.stringify({
+        platform: 'xiaohongshu',
+        cookie: cookies,
+        work_id: videoId,
+      }),
+    });
+    
+    if (!response.ok) {
+      throw new Error(`Python API returned ${response.status}`);
+    }
+    
+    const result = await response.json();
+    
+    if (!result.success) {
+      throw new Error(result.error || 'Failed to get comments');
+    }
+    
+    // 转换数据格式
+    return (result.comments || []).map((comment: {
+      comment_id: string;
+      author_id: string;
+      author_name: string;
+      author_avatar: string;
+      content: string;
+      like_count: number;
+      create_time: string;
+      reply_count: number;
+      replies?: Array<{
+        comment_id: string;
+        author_id: string;
+        author_name: string;
+        author_avatar: string;
+        content: string;
+        like_count: number;
+        create_time: string;
+      }>;
+    }) => ({
+      commentId: comment.comment_id,
+      authorId: comment.author_id,
+      authorName: comment.author_name,
+      authorAvatar: comment.author_avatar,
+      content: comment.content,
+      likeCount: comment.like_count,
+      commentTime: comment.create_time,
+      replyCount: comment.reply_count,
+      replies: comment.replies?.map((reply: {
+        comment_id: string;
+        author_id: string;
+        author_name: string;
+        author_avatar: string;
+        content: string;
+        like_count: number;
+        create_time: string;
+      }) => ({
+        commentId: reply.comment_id,
+        authorId: reply.author_id,
+        authorName: reply.author_name,
+        authorAvatar: reply.author_avatar,
+        content: reply.content,
+        likeCount: reply.like_count,
+        commentTime: reply.create_time,
+      })),
+    }));
+  }
+
+  /**
    * 获取评论列表
    */
   async getComments(cookies: string, videoId: string): Promise<CommentData[]> {
+    // 优先尝试使用 Python API
+    const pythonAvailable = await this.checkPythonServiceAvailable();
+    if (pythonAvailable) {
+      logger.info('[Xiaohongshu] Python service available, using Python API for comments');
+      try {
+        return await this.getCommentsViaPython(cookies, videoId);
+      } catch (pythonError) {
+        logger.warn('[Xiaohongshu] Python API getComments failed, falling back to Playwright:', pythonError);
+      }
+    }
+    
+    // 回退到 Playwright 方式
     try {
       await this.initBrowser({ headless: true });
       await this.setCookies(cookies);
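
Every getComments override gates on a quick `GET /health` probe (3-second timeout) before calling the Python service, and falls back, or returns empty, when the service is down or the call fails. The gating pattern, sketched in Python with a stand-in for the browser fallback:

```python
import requests

SERVICE_URL = "http://localhost:5005"


def python_service_available(timeout: float = 3.0) -> bool:
    """Probe GET /health; any network error counts as unavailable."""
    try:
        return requests.get(f"{SERVICE_URL}/health", timeout=timeout).ok
    except requests.RequestException:
        return False


def playwright_fallback(platform: str, cookie: str, work_id: str) -> list[dict]:
    return []  # placeholder: the real code drives a headless browser here


def get_comments(platform: str, cookie: str, work_id: str) -> list[dict]:
    if python_service_available():
        try:
            resp = requests.post(
                f"{SERVICE_URL}/comments",
                json={"platform": platform, "cookie": cookie, "work_id": work_id},
                timeout=120,
            )
            body = resp.json()
            if resp.ok and body.get("success"):
                return body.get("comments", [])
        except requests.RequestException:
            pass  # fall through to the backup path
    return playwright_fallback(platform, cookie, work_id)
```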

+ 398 - 12
server/src/services/HeadlessBrowserService.ts

@@ -3,6 +3,9 @@ import { chromium, type BrowserContext, type Page } from 'playwright';
 import { logger } from '../utils/logger.js';
 import type { PlatformType } from '@media-manager/shared';
 
+// Python 服务配置
+const PYTHON_SERVICE_URL = process.env.PYTHON_PUBLISH_SERVICE_URL || process.env.XHS_SERVICE_URL || 'http://localhost:5005';
+
 // 抖音 API 接口配置
 const DOUYIN_API = {
   // 检查用户登录状态 - 返回 result: true 表示已登录(需要在浏览器上下文中调用)
@@ -281,9 +284,137 @@ class HeadlessBrowserService {
   }
 
   /**
-   * 获取账号信息(使用无头浏览器)
+   * 检查 Python 服务是否可用
+   */
+  private async checkPythonServiceAvailable(): Promise<boolean> {
+    try {
+      const response = await fetch(`${PYTHON_SERVICE_URL}/health`, {
+        method: 'GET',
+        signal: AbortSignal.timeout(3000),
+      });
+      return response.ok;
+    } catch {
+      return false;
+    }
+  }
+
+  /**
+   * 通过 Python API 获取作品列表
+   */
+  private async fetchWorksViaPython(platform: PlatformType, cookies: CookieData[]): Promise<WorkItem[]> {
+    logger.info(`[Python API] Fetching works for ${platform}...`);
+
+    // 将 cookies 转换为字符串格式
+    const cookieString = JSON.stringify(cookies);
+
+    // Python 服务中视频号的 platform 名称是 weixin
+    const pythonPlatform = platform === 'weixin_video' ? 'weixin' : platform;
+
+    const response = await fetch(`${PYTHON_SERVICE_URL}/works`, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+      body: JSON.stringify({
+        platform: pythonPlatform,
+        cookie: cookieString,
+        page: 0,
+        page_size: 50,
+      }),
+    });
+
+    if (!response.ok) {
+      throw new Error(`Python API returned ${response.status}`);
+    }
+
+    const result = await response.json();
+
+    if (!result.success) {
+      throw new Error(result.error || 'Failed to get works');
+    }
+
+    logger.info(`[Python API] Got ${result.works?.length || 0} works for ${platform}`);
+
+    // 转换数据格式
+    return (result.works || []).map((work: {
+      work_id: string;
+      title: string;
+      cover_url: string;
+      duration: number;
+      publish_time: string;
+      status: string;
+      play_count: number;
+      like_count: number;
+      comment_count: number;
+      share_count: number;
+      collect_count?: number;
+    }) => ({
+      videoId: work.work_id,
+      title: work.title,
+      coverUrl: work.cover_url,
+      duration: String(work.duration || 0),
+      publishTime: work.publish_time,
+      status: work.status || 'published',
+      playCount: work.play_count || 0,
+      likeCount: work.like_count || 0,
+      commentCount: work.comment_count || 0,
+      shareCount: work.share_count || 0,
+    }));
+  }
+
+  /**
+   * 获取账号信息(优先使用 Python API,回退到无头浏览器)
    */
   async fetchAccountInfo(platform: PlatformType, cookies: CookieData[]): Promise<AccountInfo> {
+    // 对于支持的平台,优先尝试使用 Python API 获取作品列表
+    const supportedPlatforms: PlatformType[] = ['douyin', 'xiaohongshu', 'kuaishou', 'weixin_video'];
+
+    if (supportedPlatforms.includes(platform)) {
+      const pythonAvailable = await this.checkPythonServiceAvailable();
+      if (pythonAvailable) {
+        logger.info(`[Python API] Service available, trying to fetch works for ${platform}`);
+        try {
+          const worksList = await this.fetchWorksViaPython(platform, cookies);
+
+          // 如果成功获取到作品,构建基本的账号信息
+          if (worksList.length > 0) {
+            logger.info(`[Python API] Successfully fetched ${worksList.length} works for ${platform}`);
+
+            // [临时注释] 不再使用 Playwright 获取账号基本信息,直接使用默认信息
+            // 原代码:const accountInfo = await this.fetchAccountInfoWithPlaywright(platform, cookies);
+            const accountInfo = this.getDefaultAccountInfo(platform);
+            accountInfo.worksList = worksList;
+            accountInfo.worksCount = worksList.length;
+            return accountInfo;
+          }
+
+          // 即使作品列表为空,也直接返回默认账号信息,不走 Playwright
+          logger.info(`[Python API] Got empty works list for ${platform}, returning default info`);
+          return this.getDefaultAccountInfo(platform);
+        } catch (pythonError) {
+          // [临时注释] 不再回退到 Playwright,直接返回默认信息
+          logger.warn(`[Python API] Failed to fetch works for ${platform}:`, pythonError);
+          return this.getDefaultAccountInfo(platform);
+          // 原代码:logger.warn(`[Python API] Failed to fetch works for ${platform}, falling back to Playwright:`, pythonError);
+        }
+      } else {
+        // [临时注释] Python 服务不可用时,也不走 Playwright,直接返回默认信息
+        logger.info(`[Python API] Service not available for ${platform}, returning default info`);
+        return this.getDefaultAccountInfo(platform);
+        // 原代码:logger.info(`[Python API] Service not available, using Playwright for ${platform}`);
+      }
+    }
+
+    // [临时注释] 不支持的平台也不走 Playwright,直接返回默认信息
+    // 原代码:return this.fetchAccountInfoWithPlaywright(platform, cookies);
+    logger.info(`[API Only] Platform ${platform} not in supported list, returning default info`);
+    return this.getDefaultAccountInfo(platform);
+  }
+
+  /**
+   * 使用 Playwright 获取账号信息(原方法)
+   */
+  private async fetchAccountInfoWithPlaywright(platform: PlatformType, cookies: CookieData[]): Promise<AccountInfo> {
     const browser = await chromium.launch({ headless: true });
 
     try {
@@ -1111,13 +1242,18 @@ class HeadlessBrowserService {
         page.on('response', notesApiHandler);
         logger.info('[Xiaohongshu] API listener registered, navigating to note manager...');
 
-        // 导航到笔记管理页面
-        await page.goto('https://creator.xiaohongshu.com/new/note-manager', {
-          waitUntil: 'networkidle',  // 等待网络空闲,确保 API 已响应
-          timeout: 30000,
-        });
+        // 导航到笔记管理页面 - 使用 domcontentloaded 加快加载,避免 networkidle 超时
+        try {
+          await page.goto('https://creator.xiaohongshu.com/new/note-manager', {
+            waitUntil: 'domcontentloaded',
+            timeout: 30000,
+          });
+        } catch (navError) {
+          // 导航超时不影响已捕获的 API 数据
+          logger.warn('[Xiaohongshu] Navigation timeout, but API data may have been captured:', navError);
+        }
 
-        // 等待初始页面加载和 API 响应
+        // 等待 API 响应
         await page.waitForTimeout(5000);
         logger.info(`[Xiaohongshu] After initial wait: apiResponseReceived=${apiResponseReceived}, notesCount=${allNotesData.length}`);
 
@@ -2456,12 +2592,185 @@ class HeadlessBrowserService {
   }
 
   /**
-   * 获取抖音评论 (旧版 - DOM解析方式)
-   * 模拟点击"选择作品"按钮,依次点击作品获取评论
-   * 作为备用方案
+   * 通过 Python API 获取评论 - 分作品逐个获取
+   */
+  private async fetchCommentsViaPythonApi(platform: 'douyin' | 'xiaohongshu', cookies: CookieData[]): Promise<WorkComments[]> {
+    const allWorkComments: WorkComments[] = [];
+    const cookieString = JSON.stringify(cookies);
+
+    // 1. 先获取作品列表
+    logger.info(`[${platform} Comments Python] Fetching works list...`);
+    const worksResponse = await fetch(`${PYTHON_SERVICE_URL}/works`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({
+        platform,
+        cookie: cookieString,
+        page: 0,
+        page_size: 50,
+      }),
+    });
+
+    if (!worksResponse.ok) {
+      throw new Error(`Python API works returned ${worksResponse.status}`);
+    }
+
+    const worksResult = await worksResponse.json();
+    if (!worksResult.success) {
+      throw new Error(worksResult.error || 'Failed to get works');
+    }
+
+    const works = worksResult.works || [];
+    logger.info(`[${platform} Comments Python] Got ${works.length} works`);
+
+    // 2. 遍历作品获取评论
+    for (const work of works) {
+      const workId = work.work_id;
+      if (!workId) continue;
+
+      try {
+        logger.info(`[${platform} Comments Python] Fetching comments for work ${workId}...`);
+        const commentsResponse = await fetch(`${PYTHON_SERVICE_URL}/comments`, {
+          method: 'POST',
+          headers: { 'Content-Type': 'application/json' },
+          body: JSON.stringify({
+            platform,
+            cookie: cookieString,
+            work_id: workId,
+          }),
+        });
+
+        if (!commentsResponse.ok) {
+          logger.warn(`[${platform} Comments Python] Comments API returned ${commentsResponse.status} for work ${workId}`);
+          continue;
+        }
+
+        const commentsResult = await commentsResponse.json();
+        if (!commentsResult.success) {
+          logger.warn(`[${platform} Comments Python] Failed to get comments for work ${workId}: ${commentsResult.error}`);
+          continue;
+        }
+
+        const comments: CommentItem[] = (commentsResult.comments || []).map((c: {
+          comment_id: string;
+          author_id: string;
+          author_name: string;
+          author_avatar: string;
+          content: string;
+          like_count: number;
+          create_time: string;
+          reply_count?: number;
+        }) => ({
+          commentId: c.comment_id,
+          authorId: c.author_id,
+          authorName: c.author_name,
+          authorAvatar: c.author_avatar,
+          content: c.content,
+          likeCount: c.like_count,
+          commentTime: c.create_time,
+          replyCount: c.reply_count || 0,
+        }));
+
+        if (comments.length > 0) {
+          allWorkComments.push({
+            videoId: workId,
+            videoTitle: work.title || '',
+            videoCoverUrl: work.cover_url || '',
+            comments,
+          });
+          logger.info(`[${platform} Comments Python] Got ${comments.length} comments for work ${workId}`);
+        }
+      } catch (commentError) {
+        logger.warn(`[${platform} Comments Python] Error fetching comments for work ${workId}:`, commentError);
+      }
+    }
+
+    logger.info(`[${platform} Comments Python] Total: ${allWorkComments.length} works with comments`);
+    return allWorkComments;
+  }
+
+  /**
+   * 通过 Python API 获取抖音评论 - 一次性获取所有作品的评论(备用)
+   */
+  private async fetchDouyinCommentsViaPythonApi(cookies: CookieData[]): Promise<WorkComments[]> {
+    const cookieString = JSON.stringify(cookies);
+
+    logger.info('[Douyin Comments Python] Fetching all comments...');
+    const response = await fetch(`${PYTHON_SERVICE_URL}/all_comments`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({
+        platform: 'douyin',
+        cookie: cookieString,
+      }),
+    });
+
+    if (!response.ok) {
+      throw new Error(`Python API all_comments returned ${response.status}`);
+    }
+
+    const result = await response.json();
+    if (!result.success) {
+      throw new Error(result.error || 'Failed to get all comments');
+    }
+
+    const workComments = result.work_comments || [];
+    logger.info(`[Douyin Comments Python] Got ${workComments.length} works with comments`);
+
+    // 转换数据格式
+    const allWorkComments: WorkComments[] = workComments.map((wc: {
+      work_id: string;
+      title: string;
+      cover_url: string;
+      comments: Array<{
+        comment_id: string;
+        author_id: string;
+        author_name: string;
+        author_avatar: string;
+        content: string;
+        like_count: number;
+        create_time: string;
+        is_author?: boolean;
+      }>;
+    }) => ({
+      videoId: wc.work_id,
+      videoTitle: wc.title || '',
+      videoCoverUrl: wc.cover_url || '',
+      comments: (wc.comments || []).map(c => ({
+        commentId: c.comment_id,
+        authorId: c.author_id,
+        authorName: c.author_name,
+        authorAvatar: c.author_avatar,
+        content: c.content,
+        likeCount: c.like_count,
+        commentTime: c.create_time,
+      })),
+    }));
+
+    logger.info(`[Douyin Comments Python] Total: ${allWorkComments.length} works with comments`);
+    return allWorkComments;
+  }
+
+  /**
+   * 获取抖音评论 - 优先使用 Python API
    */
   async fetchDouyinCommentsViaApi(cookies: CookieData[]): Promise<WorkComments[]> {
-    // 优先使用 API 拦截方式
+    // 优先使用 Python API(分作品获取)
+    const pythonAvailable = await this.checkPythonServiceAvailable();
+    if (pythonAvailable) {
+      logger.info('[Douyin Comments] Using Python API...');
+      try {
+        const result = await this.fetchCommentsViaPythonApi('douyin', cookies);
+        if (result.length > 0) {
+          return result;
+        }
+        logger.info('[Douyin Comments] Python API returned empty, falling back to Playwright...');
+      } catch (pythonError) {
+        logger.warn('[Douyin Comments] Python API failed:', pythonError);
+      }
+    }
+
+    // 回退到 Playwright API 拦截方式
     const result = await this.fetchDouyinCommentsByApiInterception(cookies);
     if (result.length > 0) {
       return result;
@@ -2959,9 +3268,86 @@ class HeadlessBrowserService {
   }
 
   /**
-   * 获取小红书评论
+   * 通过 Python API 获取小红书评论 - 一次性获取所有作品的评论
+   */
+  private async fetchXiaohongshuCommentsViaPythonApi(cookies: CookieData[]): Promise<WorkComments[]> {
+    const cookieString = JSON.stringify(cookies);
+
+    logger.info('[Xiaohongshu Comments Python] Fetching all comments...');
+    const response = await fetch(`${PYTHON_SERVICE_URL}/all_comments`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({
+        platform: 'xiaohongshu',
+        cookie: cookieString,
+      }),
+    });
+
+    if (!response.ok) {
+      throw new Error(`Python API all_comments returned ${response.status}`);
+    }
+
+    const result = await response.json();
+    if (!result.success) {
+      throw new Error(result.error || 'Failed to get all comments');
+    }
+
+    const workComments = result.work_comments || [];
+    logger.info(`[Xiaohongshu Comments Python] Got ${workComments.length} works with comments`);
+
+    // 转换数据格式
+    const allWorkComments: WorkComments[] = workComments.map((wc: {
+      work_id: string;
+      title: string;
+      cover_url: string;
+      comments: Array<{
+        comment_id: string;
+        author_id: string;
+        author_name: string;
+        author_avatar: string;
+        content: string;
+        like_count: number;
+        create_time: string;
+      }>;
+    }) => ({
+      videoId: wc.work_id,
+      videoTitle: wc.title || '',
+      videoCoverUrl: wc.cover_url || '',
+      comments: (wc.comments || []).map(c => ({
+        commentId: c.comment_id,
+        authorId: c.author_id,
+        authorName: c.author_name,
+        authorAvatar: c.author_avatar,
+        content: c.content,
+        likeCount: c.like_count,
+        commentTime: c.create_time,
+      })),
+    }));
+
+    logger.info(`[Xiaohongshu Comments Python] Total: ${allWorkComments.length} works with comments`);
+    return allWorkComments;
+  }
+
+  /**
+   * 获取小红书评论 - 优先使用 Python API
    */
   async fetchXiaohongshuCommentsViaApi(cookies: CookieData[]): Promise<WorkComments[]> {
+    // 优先使用 Python API(分作品获取)
+    const pythonAvailable = await this.checkPythonServiceAvailable();
+    if (pythonAvailable) {
+      logger.info('[Xiaohongshu Comments] Using Python API...');
+      try {
+        const result = await this.fetchCommentsViaPythonApi('xiaohongshu', cookies);
+        if (result.length > 0) {
+          return result;
+        }
+        logger.info('[Xiaohongshu Comments] Python API returned empty, falling back to Playwright...');
+      } catch (pythonError) {
+        logger.warn('[Xiaohongshu Comments] Python API failed:', pythonError);
+      }
+    }
+
+    // 回退到 Playwright 方式
     const browser = await chromium.launch({
       headless: true,
       args: ['--no-sandbox', '--disable-setuid-sandbox'],
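
`fetchCommentsViaPythonApi` aggregates per-work results: it pulls the first 50 works, requests comments for each work in turn, skips any work whose request fails, and keeps only works that returned at least one comment. The same loop sketched in Python on top of the two endpoints (placeholder cookie, service assumed on `localhost:5005`):

```python
import requests

SERVICE_URL = "http://localhost:5005"


def collect_work_comments(platform: str, cookie: str, page_size: int = 50) -> list[dict]:
    """Return [{'work_id', 'title', 'comments': [...]}, ...] for works that have comments."""
    works_body = requests.post(
        f"{SERVICE_URL}/works",
        json={"platform": platform, "cookie": cookie, "page": 0, "page_size": page_size},
        timeout=180,
    ).json()
    if not works_body.get("success"):
        raise RuntimeError(works_body.get("error", "failed to list works"))

    collected = []
    for work in works_body.get("works", []):
        work_id = work.get("work_id")
        if not work_id:
            continue
        try:
            comments_body = requests.post(
                f"{SERVICE_URL}/comments",
                json={"platform": platform, "cookie": cookie, "work_id": work_id},
                timeout=180,
            ).json()
        except requests.RequestException:
            continue  # one failing work should not abort the whole sync
        if comments_body.get("success") and comments_body.get("comments"):
            collected.append({
                "work_id": work_id,
                "title": work.get("title", ""),
                "comments": comments_body["comments"],
            })
    return collected
```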

+ 10 - 3
server/src/services/WorkService.ts

@@ -94,28 +94,35 @@ export class WorkService {
     const allAccounts = await this.accountRepository.find({ where: { userId } });
     logger.info(`[SyncWorks] All accounts for user ${userId}: ${allAccounts.map(a => `id=${a.id},status=${a.status},platform=${a.platform}`).join('; ')}`);
     
+    // 同时查询 active 和 expired 状态的账号(expired 的账号 cookie 可能实际上还有效)
     const queryBuilder = this.accountRepository
       .createQueryBuilder('account')
       .where('account.userId = :userId', { userId })
-      .andWhere('account.status = :status', { status: 'active' });
+      .andWhere('account.status IN (:...statuses)', { statuses: ['active', 'expired'] });
 
     if (accountId) {
       queryBuilder.andWhere('account.id = :accountId', { accountId });
     }
 
     const accounts = await queryBuilder.getMany();
-    logger.info(`[SyncWorks] Found ${accounts.length} active accounts`);
+    logger.info(`[SyncWorks] Found ${accounts.length} accounts (active + expired)`);
     
     let totalSynced = 0;
     let accountCount = 0;
 
     for (const account of accounts) {
       try {
-        logger.info(`[SyncWorks] Syncing account ${account.id} (${account.platform})`);
+        logger.info(`[SyncWorks] Syncing account ${account.id} (${account.platform}, status: ${account.status})`);
         const synced = await this.syncAccountWorks(userId, account);
         totalSynced += synced;
         accountCount++;
         logger.info(`[SyncWorks] Account ${account.id} synced ${synced} works`);
+        
+        // 如果同步成功且账号状态是 expired,则恢复为 active
+        if (synced > 0 && account.status === 'expired') {
+          await this.accountRepository.update(account.id, { status: 'active' });
+          logger.info(`[SyncWorks] Account ${account.id} status restored to active`);
+        }
       } catch (error) {
         logger.error(`Failed to sync works for account ${account.id}:`, error);
       }
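
The WorkService change widens the sync query to include accounts marked `expired` (their cookies may still be valid) and promotes an account back to `active` once a sync actually returns works. The policy in a compact Python-flavored sketch (not the TypeORM code):

```python
def sync_all(accounts, sync_account):
    """accounts: objects with .id and .status; sync_account(account) returns the count synced."""
    total = 0
    for account in (a for a in accounts if a.status in ("active", "expired")):
        try:
            synced = sync_account(account)
        except Exception:
            continue  # keep going; one failing account must not stop the run
        total += synced
        if synced > 0 and account.status == "expired":
            account.status = "active"  # the cookie evidently still works, so restore the account
    return total
```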