example01.py 2.4 KB

from urllib.error import URLError
from urllib.request import urlopen

import re
import ssl

import pymysql
from pymysql import Error


def decode_page(page_bytes, charsets=('utf-8',)):
    """Decode raw page bytes, trying each candidate charset in turn."""
    page_html = None
    for charset in charsets:
        try:
            page_html = page_bytes.decode(charset)
            break
        except UnicodeDecodeError:
            # Decoding failed with this charset; try the next one.
            pass
    return page_html


def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
    """Fetch a page and return its decoded HTML, retrying on URL errors."""
    page_html = None
    try:
        page_html = decode_page(urlopen(seed_url).read(), charsets)
    except URLError:
        # The request failed; retry until retry_times is exhausted.
        if retry_times > 0:
            return get_page_html(seed_url, retry_times=retry_times - 1,
                                 charsets=charsets)
    return page_html


def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
    """Return every substring of page_html matched by pattern_str."""
    pattern_regex = re.compile(pattern_str, pattern_ignore_case)
    return pattern_regex.findall(page_html) if page_html else []


def start_crawl(seed_url, match_pattern):
    """Crawl pages starting from seed_url and store (title, link) rows in MySQL."""
    conn = pymysql.connect(host='localhost', port=3306,
                           database='crawler', user='root',
                           password='123456', charset='utf8')
    try:
        with conn.cursor() as cursor:
            url_list = [seed_url]
            while url_list:
                current_url = url_list.pop(0)
                page_html = get_page_html(current_url,
                                          charsets=('utf-8', 'gbk', 'gb2312'))
                # Collect the links on the current page and queue them for crawling.
                links_list = get_matched_parts(page_html, match_pattern)
                url_list += links_list
                param_list = []
                for link in links_list:
                    page_html = get_page_html(link,
                                              charsets=('utf-8', 'gbk', 'gb2312'))
                    headings = get_matched_parts(page_html, r'<h1>(.*)<span')
                    if headings:
                        param_list.append((headings[0], link))
                cursor.executemany('insert into tb_result values (default, %s, %s)',
                                   param_list)
                conn.commit()
    except Error:
        # Ignore database errors so a single bad insert does not stop the crawl.
        pass
    finally:
        conn.close()


def main():
    # Disable HTTPS certificate verification (acceptable only for a demo crawler).
    ssl._create_default_https_context = ssl._create_unverified_context
    start_crawl('http://sports.sohu.com/nba_a.shtml',
                r'<a[^>]+test=a\s[^>]*href=["\'](.*?)["\']')


if __name__ == '__main__':
    main()
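
The INSERT statement in start_crawl assumes a tb_result table already exists in the crawler database, with an auto-generated first column (matched by DEFAULT) followed by a title column and a link column. A minimal setup sketch under that assumption follows; the column names and sizes are illustrative, not taken from the original file.

import pymysql


def create_result_table():
    """Create the tb_result table assumed by start_crawl (schema is an assumption)."""
    conn = pymysql.connect(host='localhost', port=3306, database='crawler',
                           user='root', password='123456', charset='utf8')
    try:
        with conn.cursor() as cursor:
            cursor.execute(
                'create table if not exists tb_result ('
                'id int auto_increment primary key, '
                'title varchar(200) not null, '
                'url varchar(1024) not null)')
        conn.commit()
    finally:
        conn.close()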