# backtrader/data_downloader.py

import pandas as pd
import tushare as ts
import numpy as np
import os
from sqlalchemy import create_engine, text
from datetime import datetime, timedelta
from utils import load_config
def create_engine_from_config(config):
    """Build a SQLAlchemy engine from the `mysql` section of the config dict."""
    db = config['mysql']
    url = (
        f"mysql+pymysql://{db['user']}:{db['password']}"
        f"@{db['host']}:{db['port']}/{db['database']}"
        f"?charset={db['charset']}&use_unicode=1"
    )
    return create_engine(url)
def create_metadata_table(engine):
    """Create the stock metadata table from the DDL in sql/meta.sql."""
    # The CREATE TABLE statement lives in a standalone SQL file.
    with open('sql/meta.sql', 'r', encoding='utf-8') as f:
        ddl = f.read()
    with engine.connect() as conn:
        conn.execute(text(ddl))
        conn.commit()
    print("股票元数据表创建成功")
def create_stock_table(engine, ts_code):
    """Create the per-stock data table for *ts_code* from the SQL template."""
    # Make sure the sql/ directory exists before touching files in it.
    os.makedirs('sql', exist_ok=True)
    with open('sql/stock_table_template.sql', 'r', encoding='utf-8') as f:
        template = f.read()
    # The table is named after the bare symbol (code without exchange suffix).
    ddl = template.format(symbol=ts_code.split('.')[0])
    with engine.connect() as conn:
        conn.execute(text(ddl))
        conn.commit()
    print(f"股票({ts_code})数据表创建成功")
def download_stock_metadata(pro):
    """Fetch basic info for all listed stocks and attach bookkeeping columns.

    Returns a DataFrame ready for insertion into stock_metadata: the raw
    tushare listing plus status/statistics columns initialised to defaults.
    """
    base = pro.stock_basic(
        exchange='', list_status='L',
        fields='ts_code,symbol,name,area,industry,fullname,enname,cnspell,market,exchange,curr_type,list_status,list_date,delist_date,is_hs,act_name,act_ent_type')
    # Bookkeeping columns managed by this tool, in their default state.
    defaults = {
        'status': 0,  # 0 = not initialised yet
        'remark': '自动导入',
        'last_full_update': None,
        'last_incremental_update': None,
        'data_start_date': None,
        'data_end_date': None,
        'record_count': 0,
        'latest_price': None,
        'latest_date': None,
        'latest_pe_ttm': None,
        'latest_pb': None,
        'latest_total_mv': None,
    }
    for column, value in defaults.items():
        base[column] = value
    return base
def save_metadata(df, engine):
    """Persist stock metadata, updating only rows whose monitored fields changed.

    New ts_codes are bulk-inserted; rows already present are compared
    field-by-field (after normalising date and string formats) and only the
    columns that actually changed are written back.

    Returns:
        int: number of inserted + updated rows.
    """
    # Base metadata fields whose changes we monitor.
    metadata_fields = [
        'ts_code', 'symbol', 'name', 'area', 'industry', 'fullname',
        'enname', 'cnspell', 'market', 'exchange', 'curr_type',
        'list_status', 'list_date', 'delist_date', 'is_hs',
        'act_name', 'act_ent_type'
    ]
    # Fields holding dates (need format normalisation before comparison).
    date_fields = ['list_date', 'delist_date']
    # Read the metadata currently in the database.
    try:
        with engine.connect() as conn:
            # Only select the fields we actually compare.
            query = f"SELECT {', '.join(metadata_fields)} FROM stock_metadata"
            existing_df = pd.read_sql(query, conn)
    except Exception as e:
        print(f"读取现有元数据时出错: {str(e)}")
        existing_df = pd.DataFrame()  # table missing -> treat as empty
    if existing_df.empty:
        # Empty (or missing) table: bulk-insert everything as-is.
        result = df.to_sql('stock_metadata', engine, index=False,
                           if_exists='append', chunksize=1000)
        print(f"成功保存{result}条股票元数据(新建)")
        return result
    else:
        # Work on copies so the raw frames keep their original formats.
        df_processed = df.copy()
        existing_processed = existing_df.copy()

        # Normalise a date value to 'YYYYMMDD' (or '' when missing/unparseable)
        # so the same date stored in different formats compares equal.
        def normalize_date(date_str):
            if pd.isna(date_str) or date_str == '' or date_str == 'None' or date_str is None:
                return ''
            # Strip everything that is not a digit.
            date_str = str(date_str).strip()
            digits_only = ''.join(c for c in date_str if c.isdigit())
            # An 8-digit run is taken to be a complete YYYYMMDD date.
            if len(digits_only) == 8:
                return digits_only
            return ''

        # Apply the date normalisation to both frames.
        for field in date_fields:
            if field in df_processed.columns and field in existing_processed.columns:
                df_processed[field] = df_processed[field].apply(normalize_date)
                existing_processed[field] = existing_processed[field].apply(normalize_date)
        # Normalise all remaining (non-date, non-key) fields as stripped strings.
        for col in [f for f in metadata_fields if f not in date_fields and f != 'ts_code']:
            if col in df_processed.columns and col in existing_processed.columns:
                # Force both sides to string dtype so comparison is meaningful.
                df_processed[col] = df_processed[col].astype(str)
                existing_processed[col] = existing_processed[col].astype(str)
                # NaNs become '' and surrounding whitespace is ignored.
                df_processed[col] = df_processed[col].fillna('').str.strip()
                existing_processed[col] = existing_processed[col].fillna('').str.strip()
        # Records whose ts_code is not in the table yet.
        existing_ts_codes = set(existing_processed['ts_code'])
        new_records = df[~df['ts_code'].isin(existing_ts_codes)].copy()
        # Records that need an UPDATE.
        changed_records = []
        unchanged_count = 0
        # Compare every already-present ts_code field by field.
        for ts_code in df_processed[df_processed['ts_code'].isin(existing_ts_codes)]['ts_code']:
            # Normalised new/old rows used only for the comparison...
            new_record = df_processed[df_processed['ts_code'] == ts_code].iloc[0]
            old_record = existing_processed[existing_processed['ts_code'] == ts_code].iloc[0]
            # ...and the raw row that supplies the actual UPDATE values.
            original_record = df[df['ts_code'] == ts_code].iloc[0]
            has_change = False
            changed_fields = []
            for field in metadata_fields:
                if field == 'ts_code':  # skip the primary key
                    continue
                new_val = new_record[field]
                old_val = old_record[field]
                if new_val != old_val:
                    has_change = True
                    changed_fields.append(field)
                    print(f"发现变化 - {ts_code}{field}: '{old_val}' -> '{new_val}'")
            if has_change:
                changed_records.append({
                    'record': original_record,  # raw record keeps original formats
                    'changed_fields': changed_fields
                })
            else:
                unchanged_count += 1
        # Insert the brand-new records.
        new_count = 0
        if not new_records.empty:
            new_count = new_records.to_sql('stock_metadata', engine, index=False,
                                           if_exists='append', chunksize=1000)
        # Update the changed records, touching only the changed columns.
        updated_count = 0
        for change_info in changed_records:
            record = change_info['record']
            fields = change_info['changed_fields']
            # Build "col = :col" fragments and the bound-parameter dict.
            fields_to_update = []
            params = {'ts_code': record['ts_code']}
            for field in fields:
                fields_to_update.append(f"{field} = :{field}")
                params[field] = record[field]
            if fields_to_update:
                update_stmt = text(f"""
                    UPDATE stock_metadata
                    SET {', '.join(fields_to_update)}
                    WHERE ts_code = :ts_code
                """)
                # NOTE(review): opens a fresh connection and commits per record;
                # batching all updates in one transaction would be cheaper — confirm intent.
                with engine.connect() as conn:
                    conn.execute(update_stmt, params)
                    updated_count += 1
                    conn.commit()
        print(f"元数据更新统计:")
        print(f" • 新增记录: {new_count}")
        print(f" • 更新记录: {updated_count}")
        print(f" • 无变化记录: {unchanged_count}")
        print(f" • 总处理记录: {new_count + updated_count + unchanged_count}")
        return new_count + updated_count
def download_stock_data(pro, ts_code, start_date, end_date):
    """Download daily, daily_basic and moneyflow data for one stock and merge them.

    The daily price frame is the backbone; the other two are left-joined onto
    it on (ts_code, trade_date), dropping any columns that would duplicate
    ones already present.  Returns an empty DataFrame when no daily data exists.
    """
    daily_df = pro.daily(ts_code=ts_code, start_date=start_date, end_date=end_date)
    if daily_df.empty:
        print(f"警告:{ts_code}没有找到daily数据")
        return pd.DataFrame()
    daily_basic_df = pro.daily_basic(ts_code=ts_code, start_date=start_date, end_date=end_date)
    moneyflow_df = pro.moneyflow(ts_code=ts_code, start_date=start_date, end_date=end_date)
    # trade_date is the merge key; without it nothing can be joined.
    if 'trade_date' not in daily_df.columns:
        print(f"错误:{ts_code}的daily数据缺少trade_date列")
        return pd.DataFrame()
    # Compare dates as strings while merging.
    daily_df['trade_date'] = daily_df['trade_date'].astype(str)
    merged = daily_df
    # Same treatment for both supplementary frames: normalise the key,
    # drop columns that already exist in the result, then left-join.
    for extra in (daily_basic_df, moneyflow_df):
        if extra.empty:
            continue
        extra['trade_date'] = extra['trade_date'].astype(str)
        duplicate_cols = [c for c in extra.columns
                          if c in merged.columns and c not in ('ts_code', 'trade_date')]
        merged = pd.merge(
            merged,
            extra.drop(columns=duplicate_cols),
            on=['ts_code', 'trade_date'],
            how='left'
        )
    # Hand back real datetimes rather than strings.
    merged['trade_date'] = pd.to_datetime(merged['trade_date'])
    return merged
def save_stock_data(df, engine, ts_code, if_exists='replace'):
    """Write one stock's data to its own table (named after the bare symbol).

    Args:
        df: stock data DataFrame.
        engine: database engine / connection accepted by DataFrame.to_sql.
        ts_code: stock code with exchange suffix, e.g. 000001.SZ.
        if_exists: 'replace' to overwrite the table, 'append' to add rows.

    Returns:
        int: number of saved records (0 on empty input or failure).
    """
    if df.empty:
        print(f"警告:{ts_code}没有数据可保存")
        return 0
    # The per-stock table has no ts_code column, so strip it when present.
    payload = df.drop(columns=['ts_code']) if 'ts_code' in df.columns else df
    try:
        table_name = ts_code.split('.')[0]
        saved = payload.to_sql(f'{table_name}', engine, index=False,
                               if_exists=if_exists, chunksize=1000)
        print(f"成功保存{saved}{ts_code}股票数据 (模式: {if_exists})")
        return saved
    except Exception as e:
        print(f"保存{ts_code}数据时出错: {str(e)}")
        return 0
def update_metadata(engine, ts_code):
    """
    Refresh the statistics columns of stock_metadata straight from the
    per-stock data table.

    Args:
        engine: database engine (connection factory).
        ts_code: stock code with exchange suffix, e.g. 000001.SZ.

    Returns:
        bool: True when the metadata row was updated, False otherwise.
    """
    try:
        # The bare symbol (code without the suffix) is the data table name.
        symbol = ts_code.split('.')[0]
        # Aggregate statistics over the whole per-stock table.
        stats_query = f"""
        SELECT
            MIN(trade_date) as min_date,
            MAX(trade_date) as max_date,
            COUNT(*) as record_count
        FROM `{symbol}`
        """
        # Values from the most recent trading day.
        latest_data_query = f"""
        SELECT
            close,
            pe_ttm,
            pb,
            total_mv
        FROM `{symbol}`
        WHERE trade_date = (SELECT MAX(trade_date) FROM `{symbol}`)
        LIMIT 1
        """
        with engine.connect() as conn:
            # Fetch the aggregate statistics first.
            stats_result = conn.execute(text(stats_query)).fetchone()
            if not stats_result:
                # No data at all: flag the stock as abnormal (status = 2).
                print(f"警告:{ts_code}没有数据,将更新元数据状态为异常")
                update_empty_sql = f"""
                UPDATE stock_metadata
                SET last_full_update = NOW(),
                    record_count = 0,
                    status = 2
                WHERE ts_code = '{ts_code}'
                """
                conn.execute(text(update_empty_sql))
                conn.commit()
                return False
            # Date columns: prefer datetime objects, fall back to raw strings.
            try:
                data_start_date = stats_result[0].strftime('%Y-%m-%d') if stats_result[0] else 'NULL'
                data_end_date = stats_result[1].strftime('%Y-%m-%d') if stats_result[1] else 'NULL'
            except AttributeError:
                # trade_date came back as a string rather than a datetime.
                data_start_date = stats_result[0] if stats_result[0] else 'NULL'
                data_end_date = stats_result[1] if stats_result[1] else 'NULL'
            record_count = stats_result[2] or 0
            # Fetch the latest trading-day snapshot.
            latest_data_result = conn.execute(text(latest_data_query)).fetchone()
            # The literal string 'NULL' doubles as the SQL NULL marker below.
            default_value = 'NULL'
            latest_price = default_value
            latest_pe_ttm = default_value
            latest_pb = default_value
            latest_total_mv = default_value
            # Overwrite the defaults with any non-NULL latest values.
            if latest_data_result:
                latest_price = str(latest_data_result[0]) if latest_data_result[0] is not None else default_value
                latest_pe_ttm = str(latest_data_result[1]) if latest_data_result[1] is not None else default_value
                latest_pb = str(latest_data_result[2]) if latest_data_result[2] is not None else default_value
                latest_total_mv = str(latest_data_result[3]) if latest_data_result[3] is not None else default_value
            # NOTE(review): values are interpolated directly into the SQL text;
            # bound parameters would be safer — confirm inputs here are trusted.
            update_sql = f"""
            UPDATE stock_metadata
            SET
                last_full_update = NOW(),
                data_start_date = CASE WHEN '{data_start_date}' = 'NULL' THEN NULL ELSE '{data_start_date}' END,
                data_end_date = CASE WHEN '{data_end_date}' = 'NULL' THEN NULL ELSE '{data_end_date}' END,
                record_count = {record_count},
                status = CASE WHEN {record_count} > 0 THEN 1 ELSE 2 END,
                latest_price = CASE WHEN '{latest_price}' = 'NULL' THEN NULL ELSE {latest_price} END,
                latest_date = CASE WHEN '{data_end_date}' = 'NULL' THEN NULL ELSE '{data_end_date}' END,
                latest_pe_ttm = CASE WHEN '{latest_pe_ttm}' = 'NULL' THEN NULL ELSE {latest_pe_ttm} END,
                latest_pb = CASE WHEN '{latest_pb}' = 'NULL' THEN NULL ELSE {latest_pb} END,
                latest_total_mv = CASE WHEN '{latest_total_mv}' = 'NULL' THEN NULL ELSE {latest_total_mv} END
            WHERE ts_code = '{ts_code}'
            """
            conn.execute(text(update_sql))
            conn.commit()
            print(f"已更新({ts_code})的元数据信息")
            return True
    except Exception as e:
        print(f"更新({ts_code})元数据时出错: {str(e)}")
        # Best effort: mark the stock as abnormal so later runs can skip it.
        try:
            with engine.connect() as conn:
                error_update_sql = f"""
                UPDATE stock_metadata
                SET status = 2,
                    last_full_update = NOW()
                WHERE ts_code = '{ts_code}'
                """
                conn.execute(text(error_update_sql))
                conn.commit()
        except Exception as inner_e:
            print(f"更新({ts_code})状态为异常时出错: {str(inner_e)}")
        return False
def process_stock(engine, pro, ts_code, start_date, end_date):
    """Full pipeline for one stock: create its table, download, save, refresh metadata."""
    create_stock_table(engine, ts_code)
    data = download_stock_data(pro, ts_code, start_date, end_date)
    if data.empty:
        print(f"警告:({ts_code})没有获取到数据")
        return
    save_stock_data(data, engine, ts_code)
    update_metadata(engine, ts_code)
def perform_full_update(start_year=2020):
    """
    Run a full refresh: re-download every listed stock from *start_year* to today
    and replace each per-stock table, updating metadata as it goes.

    Args:
        start_year: first year of data to fetch (default 2020).
    """
    print(f"开始执行全量更新 (从{start_year}年至今)...")
    start_time = datetime.now()
    # Load configuration and set up DB + tushare API access.
    config = load_config()
    engine = create_engine_from_config(config)
    ts.set_token(config['tushare_token'])
    pro = ts.pro_api()
    # Date range: Jan 1 of start_year through today (YYYYMMDD strings).
    end_date = datetime.now().strftime('%Y%m%d')
    start_date = f"{start_year}0101"
    print(f"数据范围: {start_date}{end_date}")
    # Collect every listed stock code from the metadata table.
    with engine.connect() as conn:
        query = """
        SELECT ts_code
        FROM stock_metadata
        WHERE list_status = 'L' -- only stocks that are currently listed
        """
        result = conn.execute(text(query))
        stock_codes = [row[0] for row in result]
    if not stock_codes:
        print("没有找到需要更新的股票,请先确保元数据表已初始化")
        return
    print(f"共找到 {len(stock_codes)} 只股票需要更新")
    # Outcome counters.
    success_count = 0
    failed_count = 0
    skipped_count = 0  # NOTE(review): declared but never incremented in this function
    # Timestamp written to last_full_update for every processed stock.
    update_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Process stocks one by one.
    total_stocks = len(stock_codes)
    for index, ts_code in enumerate(stock_codes):
        try:
            print(f"[{index + 1}/{total_stocks}] 正在全量更新股票: {ts_code}")
            # Create the per-stock table if it does not exist yet.
            symbol = ts_code.split('.')[0]
            with engine.connect() as conn:
                table_exists_query = f"""
                SELECT COUNT(*)
                FROM information_schema.tables
                WHERE table_schema = '{config['mysql']['database']}'
                AND table_name = '{symbol}'
                """
                table_exists = conn.execute(text(table_exists_query)).scalar() > 0
            if not table_exists:
                create_stock_table(engine, ts_code)
            # Download the full date range for this stock.
            stock_data = download_stock_data(pro, ts_code, start_date, end_date)
            if not stock_data.empty:
                # 'replace' mode inside save_stock_data gives the full overwrite.
                records_saved = save_stock_data(stock_data, engine, ts_code)
                # Refresh the derived statistics in stock_metadata.
                update_metadata(engine, ts_code)
                # Stamp only the full-update timestamp.
                with engine.connect() as conn:
                    update_status_sql = f"""
                    UPDATE stock_metadata
                    SET last_full_update = '{update_time}'
                    WHERE ts_code = '{ts_code}'
                    """
                    conn.execute(text(update_status_sql))
                    conn.commit()
                success_count += 1
                print(f"成功更新 {ts_code},保存了 {records_saved} 条记录")
            else:
                failed_count += 1
                print(f"警告:{ts_code} 没有获取到数据")
            # Brief pause between requests to stay under the API rate limit.
            import time
            time.sleep(0.5)
        except Exception as e:
            failed_count += 1
            print(f"处理股票 {ts_code} 时出错: {str(e)}")
    end_time = datetime.now()
    duration = (end_time - start_time).total_seconds() / 60  # minutes
    print("\n全量更新完成!")
    print(f"总耗时: {duration:.2f} 分钟")
    print(f"成功更新: {success_count} 只股票")
    print(f"更新失败: {failed_count} 只股票")
    print(f"总计: {total_stocks} 只股票")
def perform_incremental_update():
    """
    Run an incremental refresh: fetch data from each stock's most recent update
    (incremental or full, whichever is later) through today.

    - Stocks whose data table does not exist are skipped.
    - Only the last_incremental_update timestamp is stamped on success.
    """
    print("开始执行增量更新...")
    start_time = datetime.now()
    # Load configuration and set up DB + tushare API access.
    config = load_config()
    engine = create_engine_from_config(config)
    ts.set_token(config['tushare_token'])
    pro = ts.pro_api()
    # Today is the end of the fetch window.
    end_date = datetime.now().strftime('%Y%m%d')
    # Work out, per stock, where the incremental window should start.
    stocks_to_update = []
    with engine.connect() as conn:
        query = """
        SELECT
            ts_code,
            last_incremental_update,
            last_full_update
        FROM
            stock_metadata
        WHERE
            list_status = 'L' AND
            status != 2 -- skip stocks flagged as abnormal
        """
        result = conn.execute(text(query))
        for row in result:
            ts_code = row[0]
            last_incr_update = row[1]  # last incremental update timestamp
            last_full_update = row[2]  # last full update timestamp
            # Use the later of the two update timestamps as the baseline.
            latest_update = None
            if last_incr_update:
                latest_update = last_incr_update
            if last_full_update:
                if not latest_update or last_full_update > latest_update:
                    latest_update = last_full_update
            if latest_update:
                # Start the day after the last known update.
                start_date = (latest_update + timedelta(days=1)).strftime('%Y%m%d')
            else:
                # No update history at all: default to the last 30 days.
                start_date = (datetime.now() - timedelta(days=30)).strftime('%Y%m%d')
            # Nothing to fetch when the window is empty.
            if start_date >= end_date:
                print(f"股票 {ts_code} 数据已是最新,无需更新")
                continue
            stocks_to_update.append({
                'ts_code': ts_code,
                'start_date': start_date
            })
    if not stocks_to_update:
        print("没有找到需要增量更新的股票")
        return
    print(f"共找到 {len(stocks_to_update)} 只股票需要增量更新")
    # Outcome counters.
    success_count = 0
    failed_count = 0
    no_new_data_count = 0
    skipped_count = 0
    # Timestamp written to last_incremental_update for each updated stock.
    update_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Process stocks one by one.
    total_stocks = len(stocks_to_update)
    for index, stock in enumerate(stocks_to_update):
        ts_code = stock['ts_code']
        start_date = stock['start_date']
        try:
            print(f"[{index + 1}/{total_stocks}] 正在增量更新股票: {ts_code} (从 {start_date}{end_date})")
            # The per-stock table must already exist for an incremental run.
            symbol = ts_code.split('.')[0]
            with engine.connect() as conn:
                table_exists_query = f"""
                SELECT COUNT(*)
                FROM information_schema.tables
                WHERE table_schema = '{config['mysql']['database']}'
                AND table_name = '{symbol}'
                """
                table_exists = conn.execute(text(table_exists_query)).scalar() > 0
            if not table_exists:
                print(f"股票 {ts_code} 数据表不存在,跳过此股票")
                skipped_count += 1
                continue  # skip stocks without a data table
            # Download just the incremental window.
            new_data = download_stock_data(pro, ts_code, start_date, end_date)
            if not new_data.empty:
                # Load what is already stored for dedup + merge.
                try:
                    with engine.connect() as conn:
                        existing_data = pd.read_sql(f"SELECT * FROM `{symbol}`", conn)
                except Exception as e:
                    print(f"读取现有数据失败: {str(e)},跳过此股票")
                    skipped_count += 1
                    continue
                if not existing_data.empty:
                    # Align date dtypes before comparing/merging.
                    existing_data['trade_date'] = pd.to_datetime(existing_data['trade_date'])
                    new_data['trade_date'] = pd.to_datetime(new_data['trade_date'])
                    # Drop rows whose trade_date is already stored.
                    existing_dates = set(existing_data['trade_date'])
                    new_data = new_data[~new_data['trade_date'].isin(existing_dates)]
                    if new_data.empty:
                        print(f"股票 {ts_code} 没有新数据需要更新")
                        no_new_data_count += 1
                        continue
                    # Append the genuinely new rows and keep the table sorted.
                    combined_data = pd.concat([existing_data, new_data], ignore_index=True)
                    combined_data = combined_data.sort_values('trade_date')
                    # save_stock_data's default 'replace' rewrites the whole table.
                    records_saved = save_stock_data(combined_data, engine, ts_code)
                else:
                    # Table exists but is empty: just store the new rows.
                    records_saved = save_stock_data(new_data, engine, ts_code)
                # Refresh the derived statistics in stock_metadata.
                update_metadata(engine, ts_code)
                # Stamp only the incremental-update timestamp.
                with engine.connect() as conn:
                    update_status_sql = f"""
                    UPDATE stock_metadata
                    SET last_incremental_update = '{update_time}'
                    WHERE ts_code = '{ts_code}'
                    """
                    conn.execute(text(update_status_sql))
                    conn.commit()
                success_count += 1
                print(f"成功增量更新 {ts_code},新增 {len(new_data)} 条记录")
            else:
                print(f"股票 {ts_code} 在指定时间范围内没有新数据")
                no_new_data_count += 1
            # Brief pause between requests to stay under the API rate limit.
            import time
            time.sleep(0.5)
        except Exception as e:
            failed_count += 1
            print(f"增量更新股票 {ts_code} 时出错: {str(e)}")
    end_time = datetime.now()
    duration = (end_time - start_time).total_seconds() / 60  # minutes
    print("\n增量更新完成!")
    print(f"总耗时: {duration:.2f} 分钟")
    print(f"成功更新: {success_count} 只股票")
    print(f"无新数据: {no_new_data_count} 只股票")
    print(f"跳过股票: {skipped_count} 只股票")
    print(f"更新失败: {failed_count} 只股票")
    print(f"总计: {total_stocks} 只股票")
def main():
    """CLI entry point: optionally initialise metadata, then run the chosen update mode."""
    import argparse
    parser = argparse.ArgumentParser(description='股票数据更新工具')
    parser.add_argument('--init', action='store_true', help='初始化元数据表')
    parser.add_argument('--mode', choices=['full', 'incremental', 'both'],
                        default='full', help='更新模式: full=全量更新, incremental=增量更新, both=两者都执行')
    parser.add_argument('--year', type=int, default=2020, help='全量更新的起始年份')
    args = parser.parse_args()
    # One-off bootstrap of the metadata table when --init is passed.
    if args.init:
        config = load_config()
        engine = create_engine_from_config(config)
        ts.set_token(config['tushare_token'])
        pro = ts.pro_api()
        create_metadata_table(engine)
        save_metadata(download_stock_metadata(pro), engine)
        print("元数据初始化完成")
    # Run the requested update mode(s).
    if args.mode in ('full', 'both'):
        perform_full_update(args.year)
    if args.mode in ('incremental', 'both'):
        perform_incremental_update()

if __name__ == '__main__':
    main()