店播爬取Python脚本

dy_barrage_data_rt.py 2.9KB

  1. import time
  2. import threading
  3. import json
  4. from rds_model.rds_dy_barrage_list_rt import RdsDyBarrageListRt
  5. from libs.mysql_dy_live_market_record import MysqlDyLiveMarketRecord
  6. from log.print_log import PrintLog
  7. from libs.dy_barrage_info import DyBarrageInfo
  8. from libs.mysql_dy_live import MysqlDyLive
  9. start_time = time.time()
  10. def scrape():
  11. while True:
  12. try:
  13. # 从 待爬取直播间ID 列表中获取一个 直播间ID
  14. room_info = rds.get_request_param()
  15. # 如果没有待爬取的直播,则等一秒,循环
  16. if room_info is None:
  17. time.sleep(1)
  18. continue
  19. # 判断是否到达爬取时间以确定是否需要爬取弹幕,并直接塞回队列尾部
  20. room_dict = json.loads(room_info)
  21. stream_id = str(room_dict['room_id']) # 直播间ID
  22. scrape_time = room_dict['scrape_time'] # 上次抓取时间
  23. uid = room_dict['uid'] # 直播网红ID
  24. if stream_id is None or scrape_time is None:
  25. time.sleep(1)
  26. continue
  27. room_dict.setdefault('times', 0)
  28. room_dict['times'] = (room_dict['times'] + 1) % 10
  29. if room_dict['times'] == 0: # 每爬取十次判断一次是否需要继续监测
  30. live_info = MysqlDyLiveMarketRecord().is_monitor(stream_id, uid)
  31. if live_info is None: # 直播信息不存在,不再回塞数据到爬取队列
  32. continue
  33. # 直播未结束爬取完成塞回队列
  34. time_diff = int(time.time()) - int(scrape_time)
  35. if time_diff > 1:
  36. # 爬取前更新爬取时间塞回队列
  37. room_dict['scrape_time'] = int(time.time())
  38. rds.push_request_id(json.dumps(room_dict))
  39. response_json = DyBarrageInfo.get_data(stream_id)
  40. if not bool(response_json):
  41. time.sleep(0.1)
  42. continue
  43. else:
  44. data = json.dumps(response_json)
  45. rds.push_data_list(data)
  46. else:
  47. PrintLog.print('直播ID%s %d秒前曾爬取过,暂无需继续抓取' % (stream_id, time_diff))
  48. rds.push_request_id(json.dumps(room_dict))
  49. except Exception as e:
  50. PrintLog.print(time.strftime("%H:%M:%S", time.localtime()) + ' ' + stream_id + '数据异常:' + str(e))
  51. time.sleep(0.1)
  52. if __name__ == "__main__":
  53. print("主方法开始执行")
  54. rds = RdsDyBarrageListRt()
  55. print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' ' + ' 开始执行,待爬取弹幕直播队列长度:' + str(rds.get_len()))
  56. # 获取弹幕爬取允许最大线程数
  57. scrape_max_threading = rds.max_threading()
  58. for i in range(1, 50):
  59. task = threading.Thread(target=scrape, name=i)
  60. task.start() # 准备就绪,等待cpu执行