In today's data-driven business environment, collecting and analyzing e-commerce platform data in real time is critical to business decision-making. This article walks through building an elastic data pipeline that uses the Taobao product APIs for streaming data collection and processing, helping businesses respond quickly to market changes and gain a competitive edge.
Data Pipeline Architecture
An elastic data pipeline consists of the following core components:
- Collection layer: fetches data from the Taobao product APIs
- Processing layer: cleans, transforms, and enriches the raw data
- Storage layer: persists processed data and supports fast queries
- Monitoring and alerting: keeps the whole pipeline running reliably
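Conceptually, each layer hands its output to the next through a bounded queue. The sketch below shows one way the four layers might be wired together; every name in it is an illustrative placeholder, not a fixed API:

import asyncio

async def run_pipeline(collect, process, store, monitor, keywords):
    """Wire the four layers together with bounded queues (placeholder names)."""
    raw_queue = asyncio.Queue(maxsize=1000)    # collection -> processing
    clean_queue = asyncio.Queue(maxsize=1000)  # processing -> storage
    await asyncio.gather(
        collect(keywords, raw_queue),     # collection layer
        process(raw_queue, clean_queue),  # processing layer
        store(clean_queue),               # storage layer
        monitor(raw_queue, clean_queue),  # monitoring and alerting
    )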
Preparing Taobao Product API Access
Before you start, complete the platform's developer onboarding to obtain the App Key and App Secret required for API calls. The main product-related APIs include:
- Item search API: search products by keyword
- Item detail API: fetch detailed information for a product
- Item review API: fetch product review data
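Every call must be signed. For the md5 sign method, the signature is an uppercase MD5 digest of the App Secret concatenated around the sorted key/value pairs. A small sketch of the calculation, with placeholder credentials and timestamp:

import hashlib

def taobao_md5_sign(params: dict, app_secret: str) -> str:
    """md5 sign method: MD5(secret + k1v1k2v2... + secret), uppercased."""
    pairs = ''.join(f"{k}{v}" for k, v in sorted(params.items()))
    return hashlib.md5((app_secret + pairs + app_secret).encode('utf-8')).hexdigest().upper()

# Example: signing the common parameters of a search call (placeholder values)
params = {"app_key": "your_app_key", "method": "taobao.tbk.item.get",
          "format": "json", "v": "2.0", "sign_method": "md5",
          "timestamp": "2024-01-01 00:00:00", "q": "手机"}
print(taobao_md5_sign(params, "your_app_secret"))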
Implementing Streaming Data Collection
The following Python component implements streaming collection. It fetches asynchronously for throughput and includes simple rate limiting and retry handling:
import asyncio
import aiohttp
import time
import hashlib
from datetime import datetime
from typing import List, Dict, Optional
import logging

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger('taobao-collector')


class TaobaoAPIClient:
    """Client for the Taobao Open Platform API."""

    def __init__(self, app_key: str, app_secret: str, timeout: int = 10):
        self.app_key = app_key
        self.app_secret = app_secret
        self.timeout = timeout
        self.base_url = "https://eco.taobao.com/router/rest"
        # Rate limiting
        self.rate_limit = 10  # max requests per second
        self.last_request_time = 0.0
        self.request_interval = 1.0 / self.rate_limit

    def _generate_signature(self, params: Dict[str, str]) -> str:
        """Generate the request signature (md5 sign method)."""
        sorted_params = sorted(params.items())
        sign_str = self.app_secret + ''.join(f"{k}{v}" for k, v in sorted_params) + self.app_secret
        # The "md5" sign method is a plain MD5 digest, not HMAC
        return hashlib.md5(sign_str.encode('utf-8')).hexdigest().upper()

    async def _request(self, method: str, params: Dict[str, str]) -> Optional[Dict]:
        """Send an API request."""
        # Rate limiting
        current_time = time.time()
        elapsed = current_time - self.last_request_time
        if elapsed < self.request_interval:
            await asyncio.sleep(self.request_interval - elapsed)
        # Common request parameters
        common_params = {
            "app_key": self.app_key,
            "format": "json",
            "method": method,
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "v": "2.0",
            "sign_method": "md5"
        }
        # Merge parameters
        all_params = {**common_params, **params}
        # Sign the request
        all_params["sign"] = self._generate_signature(all_params)
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                    self.base_url,
                    params=all_params,
                    timeout=aiohttp.ClientTimeout(total=self.timeout)
                ) as response:
                    self.last_request_time = time.time()
                    if response.status == 200:
                        result = await response.json()
                        # Check for API-level errors
                        if "error_response" in result:
                            logger.error(f"API error: {result['error_response']}")
                            return None
                        return result
                    else:
                        logger.error(f"Request failed with status: {response.status}")
                        return None
        except Exception as e:
            logger.error(f"Request error: {e}")
            return None

    async def search_products(self, keyword: str, page: int = 1, page_size: int = 40) -> Optional[Dict]:
        """Search for products by keyword."""
        params = {
            "q": keyword,
            "page_no": str(page),
            "page_size": str(page_size)
        }
        return await self._request("taobao.tbk.item.get", params)

    async def get_product_details(self, item_id: str) -> Optional[Dict]:
        """Fetch product details."""
        params = {
            "num_iid": item_id
        }
        return await self._request("taobao.tbk.item.info.get", params)


class ProductStreamCollector:
    """Streaming product data collector."""

    def __init__(self, client: TaobaoAPIClient, output_queue: asyncio.Queue):
        self.client = client
        self.output_queue = output_queue
        self.running = False

    async def collect_keyword(self, keyword: str, max_pages: int = 5, max_retries: int = 1):
        """Collect product data for one keyword."""
        logger.info(f"Collecting keyword: {keyword}, max pages: {max_pages}")
        page = 1
        retries = 0
        while page <= max_pages:
            if not self.running:
                break
            try:
                # Search for products
                search_result = await self.client.search_products(keyword, page)
                if not search_result or "tbk_item_get_response" not in search_result:
                    logger.warning(f"Empty search result for page {page}")
                    page += 1
                    continue
                items = search_result["tbk_item_get_response"]["results"]["n_tbk_item"]
                if not items:
                    logger.info(f"No more items for keyword {keyword} at page {page}")
                    break
                # Fetch details for each item and enqueue the result
                for item in items:
                    item_id = item["num_iid"]
                    detail_result = await self.client.get_product_details(str(item_id))
                    if detail_result and "tbk_item_info_get_response" in detail_result:
                        product_data = detail_result["tbk_item_info_get_response"]["results"]["n_tbk_item"][0]
                        # Attach collection metadata
                        product_data["collect_time"] = datetime.now().isoformat()
                        product_data["source_keyword"] = keyword
                        # Push to the output queue
                        await self.output_queue.put(product_data)
                        logger.debug(f"Collected item: {item_id}")
                logger.info(f"Finished page {page} for keyword {keyword}: {len(items)} items")
                page += 1
                retries = 0
            except Exception as e:
                logger.error(f"Error collecting keyword {keyword}, page {page}: {e}", exc_info=True)
                if retries < max_retries:
                    # Retry the current page after a short pause
                    retries += 1
                    await asyncio.sleep(2)
                else:
                    # Give up on this page and move on
                    page += 1
                    retries = 0
        logger.info(f"Keyword {keyword} collection finished")

    async def start(self, keywords: List[str], max_pages: int = 5):
        """Start the collection job."""
        self.running = True
        logger.info(f"Starting collection, number of keywords: {len(keywords)}")
        # Collect different keywords concurrently
        tasks = [self.collect_keyword(keyword, max_pages) for keyword in keywords]
        await asyncio.gather(*tasks)
        logger.info("All collection tasks finished")
        self.running = False

    def stop(self):
        """Stop the collection job."""
        self.running = False
        logger.info("Collection stopped")


async def main():
    # Credentials (read from environment variables or a config file in production)
    APP_KEY = "your_app_key"
    APP_SECRET = "your_app_secret"
    # Output queue
    output_queue = asyncio.Queue(maxsize=1000)
    # API client and collector
    api_client = TaobaoAPIClient(APP_KEY, APP_SECRET)
    collector = ProductStreamCollector(api_client, output_queue)
    # Keywords to collect (kept in Chinese: phones, laptops, sneakers)
    keywords = ["手机", "笔记本电脑", "运动鞋"]

    # Data-processing task (just logs here; write to a database or message queue in practice)
    async def process_data():
        while collector.running or not output_queue.empty():
            try:
                data = await asyncio.wait_for(output_queue.get(), timeout=1.0)
                # Processing logic goes here
                logger.info(f"Processing item: {data.get('title', 'unknown')}, price: {data.get('zk_final_price', 'unknown')}")
                output_queue.task_done()
            except asyncio.TimeoutError:
                continue  # re-check the loop condition instead of blocking forever
            except Exception as e:
                logger.error(f"Processing error: {e}")

    # Run collection and processing
    process_task = asyncio.create_task(process_data())
    await collector.start(keywords, max_pages=3)
    await output_queue.join()  # wait until all items are processed
    process_task.cancel()


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        logger.info("Interrupted by user")
Data Processing and Transformation
Once the raw data has been collected, it needs to be cleaned and transformed before analysis. Here is the processing layer:
import json
import re
from datetime import datetime
from typing import Dict, Any, List
import logging
import pandas as pd

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger('data-processor')


class ProductDataProcessor:
    """Product data processor."""

    def __init__(self):
        # Fields to keep and process
        self.required_fields = [
            'num_iid', 'title', 'pict_url', 'small_images', 'reserve_price',
            'zk_final_price', 'user_type', 'provcity', 'item_url', 'sales',
            'volume', 'collect_time', 'source_keyword'
        ]
        # Price bucket boundaries
        self.price_ranges = {
            '0-50': (0, 50),
            '50-100': (50, 100),
            '100-200': (100, 200),
            '200-500': (200, 500),
            '500-1000': (500, 1000),
            '1000+': (1000, float('inf'))
        }

    def clean_data(self, raw_data: Dict[str, Any]) -> Dict[str, Any]:
        """Clean raw data, dropping fields we do not need."""
        # Keep only the required fields
        cleaned = {k: v for k, v in raw_data.items() if k in self.required_fields}
        # Fill in missing fields
        for field in self.required_fields:
            if field not in cleaned:
                cleaned[field] = None
        return cleaned

    def transform_data(self, cleaned_data: Dict[str, Any]) -> Dict[str, Any]:
        """Transform field formats and derive useful attributes."""
        transformed = cleaned_data.copy()
        try:
            # Convert the discounted price to a number
            if transformed.get('zk_final_price'):
                transformed['zk_final_price'] = float(transformed['zk_final_price'])
                # Assign a price bucket
                for range_name, (min_val, max_val) in self.price_ranges.items():
                    if min_val <= transformed['zk_final_price'] < max_val:
                        transformed['price_range'] = range_name
                        break
            # Convert the list price
            if transformed.get('reserve_price'):
                transformed['reserve_price'] = float(transformed['reserve_price'])
            # Normalize sales volume
            if transformed.get('volume'):
                # Handle volumes quoted in 万 (tens of thousands), e.g. "1.2万"
                volume_str = str(transformed['volume'])
                if '万' in volume_str:
                    transformed['volume'] = int(float(volume_str.replace('万', '')) * 10000)
                else:
                    transformed['volume'] = int(volume_str)
            # Extract a brand from the title (simple heuristic)
            transformed['brand'] = self._extract_brand(transformed.get('title', ''))
            # Parse the collection timestamp
            if transformed.get('collect_time'):
                transformed['collect_time'] = datetime.fromisoformat(transformed['collect_time'])
            # Flag Tmall items
            transformed['is_tmall'] = transformed.get('user_type') == 1
            # Extract the province
            if transformed.get('provcity'):
                province = transformed['provcity'].split(' ')[0]
                transformed['province'] = province
        except Exception as e:
            logger.error(f"Transform error: {e}, item: {cleaned_data.get('num_iid')}")
        return transformed

    def _extract_brand(self, title: str) -> str:
        """Extract a brand name from the title (naive implementation)."""
        # Extend this list as needed (Apple, Huawei, Xiaomi, Samsung, OPPO, vivo,
        # Nike, Adidas, Li-Ning, Anta)
        common_brands = ['苹果', '华为', '小米', '三星', 'OPPO', 'vivo',
                         '耐克', '阿迪达斯', '李宁', '安踏']
        for brand in common_brands:
            if brand in title:
                return brand
        # Fall back to the leading token of the title
        match = re.match(r'^([A-Za-z0-9\u4e00-\u9fa5]+)\s?', title)
        if match:
            return match.group(1)
        return '未知品牌'  # "unknown brand"

    def process_batch(self, raw_data_list: List[Dict[str, Any]]) -> pd.DataFrame:
        """Process a batch of records into a DataFrame."""
        processed_data = []
        for raw_data in raw_data_list:
            try:
                cleaned = self.clean_data(raw_data)
                transformed = self.transform_data(cleaned)
                processed_data.append(transformed)
            except Exception as e:
                logger.error(f"Record error: {e}, item id: {raw_data.get('num_iid')}")
        # Build the DataFrame
        df = pd.DataFrame(processed_data)
        # Handle missing values
        if not df.empty:
            # Numeric fields default to 0
            numeric_cols = ['zk_final_price', 'reserve_price', 'volume', 'sales']
            for col in numeric_cols:
                if col in df.columns:
                    df[col] = df[col].fillna(0)
            # String fields default to '未知' ("unknown")
            str_cols = ['title', 'provcity', 'brand', 'province', 'price_range']
            for col in str_cols:
                if col in df.columns:
                    df[col] = df[col].fillna('未知')
        return df

    def analyze_data(self, df: pd.DataFrame) -> Dict[str, Any]:
        """Run a quick analysis over the processed data."""
        if df.empty:
            return {}
        analysis = {
            'total_items': len(df),
            'unique_brands': df['brand'].nunique(),
            'avg_price': df['zk_final_price'].mean(),
            'price_range_distribution': df['price_range'].value_counts().to_dict(),
            'top_provinces': df['province'].value_counts().head(5).to_dict(),
            'tmall_ratio': df['is_tmall'].mean(),
            'top_selling': df.sort_values('volume', ascending=False).head(5)[
                ['num_iid', 'title', 'zk_final_price', 'volume']
            ].to_dict('records')
        }
        return analysis


# Usage example
def main():
    # Load sample data (read from a queue or messaging system in practice)
    with open('sample_raw_data.json', 'r', encoding='utf-8') as f:
        raw_data_list = json.load(f)
    # Create the processor
    processor = ProductDataProcessor()
    # Process the batch
    processed_df = processor.process_batch(raw_data_list)
    print(f"Processed {len(processed_df)} records")
    # Quick analysis
    analysis = processor.analyze_data(processed_df)
    if analysis:
        print("\nAnalysis results:")
        print(f"Total items: {analysis['total_items']}")
        print(f"Average price: {analysis['avg_price']:.2f}")
        print(f"Tmall ratio: {analysis['tmall_ratio']:.2%}")
        print(f"Price range distribution: {analysis['price_range_distribution']}")
    # Persist the processed data
    processed_df.to_csv('processed_products.csv', index=False, encoding='utf-8-sig')
    print("\nProcessed data saved to processed_products.csv")


if __name__ == "__main__":
    main()
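In a deployed pipeline, the processor consumes the collector's queue rather than a JSON file. A minimal bridging sketch, assuming the two scripts above are saved as hypothetical modules named collector.py and processor.py:

import asyncio
from processor import ProductDataProcessor  # hypothetical module name

async def drain_to_batches(queue: asyncio.Queue,
                           processor: ProductDataProcessor,
                           batch_size: int = 50,
                           flush_interval: float = 5.0):
    """Drain collected items into batches and run them through process_batch."""
    batch = []
    while True:
        timed_out = False
        try:
            item = await asyncio.wait_for(queue.get(), timeout=flush_interval)
            batch.append(item)
            queue.task_done()
        except asyncio.TimeoutError:
            timed_out = True
        # Flush on a full batch, or on idle timeout with items pending
        if batch and (len(batch) >= batch_size or timed_out):
            df = processor.process_batch(batch)
            print(processor.analyze_data(df))
            batch = []

Batching amortizes the DataFrame construction cost while the idle-timeout flush keeps latency bounded when traffic is slow.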
Elastic Scaling and Fault Tolerance
To keep the pipeline stable and elastic, we implement the following mechanisms:
- Auto-scaling: dynamically adjust the number of collection workers based on API response times and queue length
- Circuit breaking: automatically pause collection when the API error rate climbs too high
- Request retries: apply exponential backoff to failed API requests (see the call_with_retry helper inside the listing below)
- Checkpointing: record collection progress so that work can resume where it left off (a minimal sketch follows the listing)
Here is an implementation of the elasticity control component:
import asyncio
import time
import random
from typing import Callable, Awaitable, Optional
import logging
from dataclasses import dataclass
from enum import Enum

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger('elastic-controller')


class CircuitState(Enum):
    """Circuit breaker states."""
    CLOSED = "closed"        # normal: requests allowed
    OPEN = "open"            # tripped: requests rejected
    HALF_OPEN = "half_open"  # probing: limited requests allowed


@dataclass
class CircuitBreakerConfig:
    """Circuit breaker configuration."""
    failure_threshold: int = 5   # consecutive failures before tripping
    recovery_timeout: int = 30   # seconds before probing again
    half_open_attempts: int = 3  # probe requests allowed in half-open state


class CircuitBreaker:
    """Circuit breaker for API requests, protecting against overload."""

    def __init__(self, config: Optional[CircuitBreakerConfig] = None):
        self.config = config or CircuitBreakerConfig()
        self.state = CircuitState.CLOSED
        self.failure_count = 0
        self.success_count = 0
        self.last_failure_time = 0.0
        self.open_time = 0.0

    def _transition_to_closed(self):
        """Switch to CLOSED."""
        self.state = CircuitState.CLOSED
        self.failure_count = 0
        self.success_count = 0
        logger.info("Circuit breaker state: CLOSED")

    def _transition_to_open(self):
        """Switch to OPEN."""
        self.state = CircuitState.OPEN
        self.open_time = time.time()
        logger.warning("Circuit breaker state: OPEN")

    def _transition_to_half_open(self):
        """Switch to HALF_OPEN."""
        self.state = CircuitState.HALF_OPEN
        self.success_count = 0
        logger.info("Circuit breaker state: HALF_OPEN")

    def allow_request(self) -> bool:
        """Decide whether a request may go through."""
        if self.state == CircuitState.CLOSED:
            return True
        if self.state == CircuitState.OPEN:
            # Has the recovery timeout elapsed?
            if time.time() - self.open_time > self.config.recovery_timeout:
                self._transition_to_half_open()
                return True
            return False
        if self.state == CircuitState.HALF_OPEN:
            # Allow only a limited number of probes
            return self.success_count < self.config.half_open_attempts
        return False

    def record_success(self):
        """Record a successful request."""
        if self.state == CircuitState.CLOSED:
            # Reset the failure count on success
            self.failure_count = 0
        elif self.state == CircuitState.HALF_OPEN:
            self.success_count += 1
            # Close the circuit once enough probes succeed
            if self.success_count >= self.config.half_open_attempts:
                self._transition_to_closed()

    def record_failure(self):
        """Record a failed request."""
        if self.state == CircuitState.CLOSED:
            self.failure_count += 1
            # Trip the breaker once the failure threshold is reached
            if self.failure_count >= self.config.failure_threshold:
                self._transition_to_open()
        elif self.state == CircuitState.HALF_OPEN:
            # Any failure in half-open reopens the circuit
            self._transition_to_open()


class BackoffStrategy:
    """Backoff strategies for request retries."""

    @staticmethod
    def exponential_backoff(attempt: int, base_delay: float = 1.0, max_delay: float = 10.0) -> float:
        """
        Exponential backoff.
        :param attempt: retry attempt (starting from 0)
        :param base_delay: base delay in seconds
        :param max_delay: maximum delay in seconds
        :return: delay in seconds
        """
        delay = base_delay * (2 ** attempt)
        # Add random jitter to avoid thundering-herd retries
        jitter = delay * 0.1 * (1 - 2 * random.random())
        return min(delay + jitter, max_delay)
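

# The two pieces above are combined by a retry helper such as the following.
# This is an illustrative sketch added here for completeness, not part of the
# original listing: it wraps an arbitrary async call with the circuit breaker
# and exponential backoff.
async def call_with_retry(breaker: CircuitBreaker,
                          func: Callable[[], Awaitable],
                          max_attempts: int = 3):
    """Run an async callable under circuit-breaker and backoff control (sketch)."""
    for attempt in range(max_attempts):
        if not breaker.allow_request():
            logger.warning("Circuit open, skipping request")
            return None
        try:
            result = await func()
            breaker.record_success()
            return result
        except Exception as e:
            breaker.record_failure()
            delay = BackoffStrategy.exponential_backoff(attempt)
            logger.warning(f"Attempt {attempt + 1} failed ({e}), retrying in {delay:.1f}s")
            await asyncio.sleep(delay)
    return None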
class ElasticController:
    """Elastic controller that scales collection workers up and down."""

    def __init__(self,
                 min_workers: int = 2,
                 max_workers: int = 10,
                 queue_threshold: int = 500,
                 load_check_interval: int = 10):
        self.min_workers = min_workers
        self.max_workers = max_workers
        self.queue_threshold = queue_threshold
        self.load_check_interval = load_check_interval
        self.workers = []
        self.worker_count = min_workers
        self.queue_length = 0
        self.api_response_times = []
        self.is_running = False
        self.controller_task = None

    async def start(self, worker_factory: Callable[[], Awaitable]):
        """Start the elastic controller."""
        self.is_running = True
        logger.info(f"Starting elastic controller with {self.min_workers} workers")
        # Spawn the initial workers
        for _ in range(self.min_workers):
            self.workers.append(asyncio.create_task(worker_factory()))
        # Start the load-monitoring task
        self.controller_task = asyncio.create_task(self._monitor_and_adjust(worker_factory))

    async def stop(self):
        """Stop the elastic controller."""
        self.is_running = False
        if self.controller_task:
            await self.controller_task
        # Cancel all workers
        for worker in self.workers:
            worker.cancel()
        await asyncio.gather(*self.workers, return_exceptions=True)
        logger.info("Elastic controller stopped")

    def update_metrics(self, queue_length: int, response_time: float):
        """Update the monitored metrics."""
        self.queue_length = queue_length
        # Keep the most recent 100 response times
        self.api_response_times.append(response_time)
        if len(self.api_response_times) > 100:
            self.api_response_times.pop(0)

    async def _monitor_and_adjust(self, worker_factory: Callable[[], Awaitable]):
        """Monitor system load and adjust the worker count."""
        while self.is_running:
            try:
                await asyncio.sleep(self.load_check_interval)
                # Average response time
                avg_response_time = sum(self.api_response_times) / len(self.api_response_times) \
                    if self.api_response_times else 0
                # Log current state
                logger.info(
                    f"Status - workers: {len(self.workers)}, "
                    f"queue length: {self.queue_length}, "
                    f"avg response time: {avg_response_time:.2f}s"
                )
                # Scale up or down as needed
                if self.queue_length > self.queue_threshold and len(self.workers) < self.max_workers:
                    # Add workers
                    new_workers = min(self.max_workers - len(self.workers), 2)  # at most 2 at a time
                    for _ in range(new_workers):
                        self.workers.append(asyncio.create_task(worker_factory()))
                    logger.info(f"Added {new_workers} workers, total: {len(self.workers)}")
                elif self.queue_length < self.queue_threshold / 2 and len(self.workers) > self.min_workers:
                    # Remove workers
                    workers_to_remove = min(len(self.workers) - self.min_workers, 1)  # at most 1 at a time
                    for _ in range(workers_to_remove):
                        worker = self.workers.pop()
                        worker.cancel()
                    logger.info(f"Removed {workers_to_remove} workers, total: {len(self.workers)}")
            except Exception as e:
                logger.error(f"Load-monitoring task error: {e}", exc_info=True)


# Usage example
async def example_worker():
    """Example worker coroutine."""
    try:
        while True:
            # Simulate work
            await asyncio.sleep(1)
    except asyncio.CancelledError:
        logger.info("Worker cancelled")


async def example_usage():
    # Circuit breaker (shown for completeness; wire it into your request path)
    circuit_breaker = CircuitBreaker()
    # Elastic controller
    controller = ElasticController(min_workers=2, max_workers=5)
    # Start the controller
    await controller.start(example_worker)
    # Simulate 30 seconds of operation
    for i in range(30):
        # Feed in simulated metrics
        controller.update_metrics(
            queue_length=300 if i < 15 else 100,  # long queue first 15s, short afterwards
            response_time=0.5 + (i % 5) * 0.1
        )
        await asyncio.sleep(1)
    # Stop the controller
    await controller.stop()


if __name__ == "__main__":
    asyncio.run(example_usage())
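The checkpointing mechanism listed above is not covered by the controller itself. A minimal file-based sketch, with an illustrative file name and schema:

import json
import os

class CheckpointStore:
    """Minimal file-based progress store for resumable collection (sketch)."""

    def __init__(self, path: str = "collect_checkpoint.json"):
        self.path = path
        self.progress = {}
        if os.path.exists(path):
            with open(path, 'r', encoding='utf-8') as f:
                self.progress = json.load(f)

    def last_page(self, keyword: str) -> int:
        """Last fully collected page for a keyword (0 if never collected)."""
        return self.progress.get(keyword, 0)

    def mark_page(self, keyword: str, page: int):
        """Record a completed page and persist atomically."""
        self.progress[keyword] = page
        tmp = self.path + ".tmp"
        with open(tmp, 'w', encoding='utf-8') as f:
            json.dump(self.progress, f, ensure_ascii=False)
        os.replace(tmp, self.path)

# In collect_keyword, start from checkpoint.last_page(keyword) + 1 and call
# checkpoint.mark_page(keyword, page) after each successfully collected page.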
Monitoring and Alerting
To keep the pipeline running reliably, it needs thorough monitoring and alerting (a minimal sketch follows this list):
- Key-metric monitoring: API success rate, data throughput, processing latency, and so on
- Anomaly detection: automatically flag abnormal patterns such as sudden drops in data volume or spikes in response time
- Tiered alerting: send notifications at different severity levels according to how serious the anomaly is
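A full deployment would typically export these metrics to a dedicated monitoring system; as a self-contained sketch, the monitor below tracks a sliding-window success rate and throughput in-process and logs an alert when the rate falls below a threshold (all names and thresholds are illustrative):

import time
import logging
from collections import deque

logger = logging.getLogger('pipeline-monitor')

class PipelineMonitor:
    """Sliding-window success-rate and throughput monitor with a threshold alert (sketch)."""

    def __init__(self, window: int = 100, min_success_rate: float = 0.9):
        self.results = deque(maxlen=window)     # recent request outcomes
        self.timestamps = deque(maxlen=window)  # recent item timestamps
        self.min_success_rate = min_success_rate

    def record_request(self, ok: bool):
        """Record one API call outcome and alert if the window rate is too low."""
        self.results.append(ok)
        rate = sum(self.results) / len(self.results)
        if len(self.results) == self.results.maxlen and rate < self.min_success_rate:
            # Severity could be tiered here (warning / critical) and routed
            # to email, SMS, or an on-call system.
            logger.warning(f"ALERT: API success rate {rate:.0%} below threshold")

    def record_item(self):
        """Record one processed item for throughput tracking."""
        self.timestamps.append(time.time())

    def throughput(self) -> float:
        """Items per second over the current window."""
        if len(self.timestamps) < 2:
            return 0.0
        span = self.timestamps[-1] - self.timestamps[0]
        return (len(self.timestamps) - 1) / span if span > 0 else 0.0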
Summary and Extensions
The elastic data pipeline described in this article achieves efficient, stable collection and processing through:
- Elastic scaling: the worker count adjusts automatically with system load
- Fault tolerance: circuit breaking and retry strategies handle API call failures
- Streaming processing: data is handled as it arrives, keeping memory usage low
- Extensibility: the modular design makes new functionality easy to add
The pipeline can be extended in several directions:
- Integrate more data sources, such as the APIs of JD.com, Pinduoduo, and other e-commerce platforms
- Introduce a real-time analytics engine such as Flink or Spark Streaming
- Build a data-visualization dashboard to present collection and analysis results intuitively
- Add machine-learning modules for price forecasting, trend analysis, and other advanced features
With an elastic data pipeline like this, a business can track product activity on e-commerce platforms in real time, ground its market decisions in data, and gain an edge in fierce competition.