# -*- coding: utf-8 -*-
import urllib2
from bs4 import BeautifulSoup
import logging
import re
import threading
import traceback
import time
import datetime
import sys
reload(sys)
sys.setdefaultencoding('gb18030')
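
# Blog root, the contents page of the target CSDN account, the local output
# directory, and the maximum number of download retries.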
blog = "http://blog.csdn.net"
url = "http://blog.csdn.net/gugugujiawei?viewmode=contents"
outputDir = 'F:\\linux\\Share\\github\\article\\'
gRetryCount = 4
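
# Map common HTML character entities (&lt;, &gt;, &amp;, ...) back to plain
# characters; whitespace entities are simply dropped.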
def decodeHtmlSpecialCharacter(htmlStr):
    specChars = {"&nbsp;" : "",
                 "&ensp;" : "",
                 "&emsp;" : "",
                 "&lt;" : "<",
                 "&gt;" : ">",
                 "&amp;" : "&",
                 "&quot;" : "\"",
                 "&copy;" : "©",
                 "&times;" : "×",
                 "&divide;" : "÷",
                 }
    for key in specChars.keys():
        htmlStr = htmlStr.replace(key, specChars[key])
    return htmlStr
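
# Strip or rewrite characters that are not allowed in Windows filenames so the
# article title can be used directly as the output file name.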
def replaceInvalidCharInFilename(filename):
    specChars = {"\\" : "",
                 "/" : "",
                 ":" : "",
                 "*" : "",
                 "?" : "",
                 "\"" : "",
                 "<" : "less than",
                 ">" : "greater than",
                 "|" : " and ",
                 "&" : " or ",
                 }
    for key in specChars.keys():
        filename = filename.replace(key, specChars[key])
    return filename
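
# Fetch the contents page and collect every article link under #article_list,
# returning a dict that maps the absolute article URL to its title.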
def getPageUrlList(url):
    global blog
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    header = { 'User-Agent' : user_agent }
    request = urllib2.Request(url, None, header)
    response = urllib2.urlopen(request)
    data = response.read()

    soup = BeautifulSoup(data, 'html.parser')
    pageListDocs = soup.find_all(id="article_list")

    articleUrlTitle = {}
    for pageList in pageListDocs:
        h1List = pageList.find_all('h1')
        for articleList in h1List:
            hrefDocs = articleList.find_all("a")
            if len(hrefDocs) > 0:
                articleHrefDoc = hrefDocs[0]
                articleUrl = blog + articleHrefDoc["href"]
                articleTitle = articleHrefDoc.text
                articleUrlTitle[articleUrl] = articleTitle

    print 'the count of articles is', len(articleUrlTitle)
    '''
    for s in articleUrlTitle:
        print s, '--', articleUrlTitle[s]
    '''
    return articleUrlTitle
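
# Download a single article, extract its category, post date and body, and
# hand the result to exportToMarkdown.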
def download(url, title):
    logging.info(" >> download: " + url)
    print 'downloading the article', title
    data = None
    title = '"' + title + '"'
    categories = ""
    content = ""
    # fall back to the current time if no post date is found on the page
    postDate = datetime.datetime.now()

    # retry the page download a few times before giving up
    global gRetryCount
    count = 0
    while True:
        if count >= gRetryCount:
            break
        count = count + 1
        try:
            time.sleep(2.0)
            user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
            header = { 'User-Agent' : user_agent }
            request = urllib2.Request(url, None, header)
            response = urllib2.urlopen(request)
            data = response.read()
            break
        except Exception, e:
            exstr = traceback.format_exc()
            logging.info(" >> failed to download " + url + ", retry: " + str(count) + ", error:" + exstr)

    if data is None:
        logging.info(" >> failed to download " + url)
        return

    soup = BeautifulSoup(data, 'html.parser')

    # the category and post date live in the article_manage block
    manageDocs = soup.find_all("div", "article_manage")
    for managerDoc in manageDocs:
        categoryDoc = managerDoc.find_all("span", "link_categories")
        if len(categoryDoc) > 0:
            categories = categoryDoc[0].a.get_text().encode('UTF-8').strip()
            categories = categories.decode('utf-8').encode('gb2312')
        postDateDoc = managerDoc.find_all("span", "link_postdate")
        if len(postDateDoc) > 0:
            postDateStr = postDateDoc[0].string.encode('UTF-8').strip()
            postDate = datetime.datetime.strptime(postDateStr, '%Y-%m-%d %H:%M')
            print 'date', postDate

    # the article body lives in the #article_content element
    contentDocs = soup.find_all(id="article_content")
    for contentDoc in contentDocs:
        htmlContent = contentDoc.prettify().encode('UTF-8')
        content = htmlContent2String(htmlContent)

    exportToMarkdown(outputDir, postDate, categories, title, content)
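
# Convert the article HTML to Markdown-ish plain text: rewrite <img> and <a>
# tags to Markdown syntax, strip the remaining tags, and decode HTML entities.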
def htmlContent2String(contentStr):
    patternImg = re.compile(r'(<img[\s\S]+?src=")([\s\S]+?)("[\s\S]+?>)')
    patternHref = re.compile(r'(<a[\s\S]+?href=")([\s\S]*?)("[\s\S]*?>)([\s\S]+?)(</a>)')
    patternRemoveHtml = re.compile(r'</?[^>]+>')

    resultContent = patternImg.sub(r'![image_mark](\2)', contentStr)
    resultContent = patternHref.sub(r'[\4](\2)', resultContent)
    resultContent = re.sub(patternRemoveHtml, r'', resultContent)
    resultContent = decodeHtmlSpecialCharacter(resultContent)
    return resultContent
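
# Write one article to <exportDir>/<title>.txt with a front-matter style
# header (title, date, categories, tags) followed by the converted content.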
def exportToMarkdown(exportDir, postdate, categories, title, content):
    titleDate = postdate.strftime('%Y-%m')
    contentDate = postdate.strftime('%Y-%m-%d %H:%M:%S %z')
    filename = title
    filename = replaceInvalidCharInFilename(filename)
    filepath = exportDir + filename + '.txt'

    newFile = open(filepath, 'a+')
    newFile.write('title: ' + title + '\n')
    newFile.write('date: ' + contentDate + '\n')
    newFile.write('categories: [' + categories + ']' + '\n')
    newFile.write('tags: [' + categories + ']' + '\n')
    newFile.write('---' + '\n\n')
    content = content.decode('utf-8').encode('gb18030')
    newFile.write(content)
    newFile.write('\n')
    newFile.close()
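
# Entry point: list all articles on the contents page, then download each one
# in its own thread.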
if __name__ == "__main__":
    articleUrlTitle = getPageUrlList(url)
    '''
    for s in articleUrlTitle:
        print s, '--', articleUrlTitle[s]
    '''
    threads = []
    patternTitle = re.compile('\r\n *(.+) *\r\n')
    for url in articleUrlTitle:
        title = patternTitle.sub(r'\1', articleUrlTitle[url])
        t = threading.Thread(target=download, args=(url, title))
        t.start()
        threads.append(t)
    for i in threads:
        i.join()
    print "success"