example01.py 3.1 KB

from urllib.error import URLError
from urllib.request import urlopen

import re
import ssl

import pymysql
from pymysql import Error


# Decode the page bytes with the given character sets
# (not every site serves its pages as UTF-8).
def decode_page(page_bytes, charsets=('utf-8',)):
    page_html = None
    for charset in charsets:
        try:
            page_html = page_bytes.decode(charset)
            break
        except UnicodeDecodeError:
            pass
            # logging.error('Decode:', error)
    return page_html


# Fetch the HTML of a page, retrying a fixed number of times via recursion.
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
    page_html = None
    try:
        page_html = decode_page(urlopen(seed_url).read(), charsets)
    except URLError:
        # logging.error('URL:', error)
        if retry_times > 0:
            return get_page_html(seed_url, retry_times=retry_times - 1,
                                 charsets=charsets)
    return page_html


# Extract the needed parts from the page (usually links; the regular
# expression decides exactly what gets captured).
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
    pattern_regex = re.compile(pattern_str, pattern_ignore_case)
    return pattern_regex.findall(page_html) if page_html else []


# Run the crawler and persist the extracted data to MySQL.
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
    conn = pymysql.connect(host='localhost', port=3306,
                           database='crawler', user='root',
                           password='123456', charset='utf8')
    try:
        with conn.cursor() as cursor:
            url_list = [seed_url]
            visited_url_list = {seed_url: 0}
            while url_list:
                current_url = url_list.pop(0)
                depth = visited_url_list[current_url]
                if depth != max_depth:
                    page_html = get_page_html(current_url,
                                              charsets=('utf-8', 'gbk', 'gb2312'))
                    links_list = get_matched_parts(page_html, match_pattern)
                    param_list = []
                    for link in links_list:
                        if link not in visited_url_list:
                            visited_url_list[link] = depth + 1
                            # Queue the link so deeper levels are crawled too;
                            # without this, max_depth never takes effect.
                            url_list.append(link)
                            page_html = get_page_html(link,
                                                      charsets=('utf-8', 'gbk', 'gb2312'))
                            headings = get_matched_parts(page_html, r'<h1>(.*)<span')
                            if headings:
                                param_list.append((headings[0], link))
                    cursor.executemany('insert into tb_result values (default, %s, %s)',
                                       param_list)
                    conn.commit()
    except Error:
        pass
        # logging.error('SQL:', error)
    finally:
        conn.close()
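

# Note: the script assumes a MySQL table named tb_result already exists in the
# 'crawler' database. Its exact definition is not shown here; a minimal sketch
# that matches the INSERT above (auto-generated id, then title and URL) might be:
#
#   CREATE TABLE tb_result (
#       id INT AUTO_INCREMENT PRIMARY KEY,
#       title VARCHAR(255),
#       url VARCHAR(1024)
#   ) DEFAULT CHARSET=utf8;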


def main():
    # Skip certificate verification so HTTPS pages load without a valid cert chain.
    ssl._create_default_https_context = ssl._create_unverified_context
    start_crawl('http://sports.sohu.com/nba_a.shtml',
                r'<a[^>]+test=a\s[^>]*href=["\'](.*?)["\']',
                max_depth=2)


if __name__ == '__main__':
    main()