I am trying to download documents (mostly in PDF format) from a large number of web links like the ones below:
https://projects.worldbank.org/en/projects-operations/document-detail/P167897?type=projects
https://projects.worldbank.org/en/projects-operations/document-detail/P173997?type=projects
https://projects.worldbank.org/en/projects-operations/document-detail/P166309?type=projects
However, the PDF files cannot be accessed directly from these links; you have to click through a sub-URL to reach each PDF. Is there a way to crawl those sub-URLs and download all the associated files from them? I am trying the code below, but so far I have had no success with these particular URLs.
Please let me know if you need any further clarification; I'd be happy to provide it. Thank you.
from simplified_scrapy import Spider, SimplifiedDoc, SimplifiedMain, utils

class MySpider(Spider):
    name = 'download_pdf'
    allowed_domains = ["www.worldbank.org"]
    start_urls = [
        "https://projects.worldbank.org/en/projects-operations/document-detail/P167897?type=projects",
        "https://projects.worldbank.org/en/projects-operations/document-detail/P173997?type=projects",
        "https://projects.worldbank.org/en/projects-operations/document-detail/P166309?type=projects"
    ]  # Entry pages

    def afterResponse(self, response, url, error=None, extra=None):
        if not extra:
            print("The version of the simplified_scrapy library is too old, please update.")
            SimplifiedMain.setRunFlag(False)
            return
        try:
            path = './pdfs'
            # If the source URL contains "year/", save into a per-year subfolder
            srcUrl = extra.get('srcUrl')
            if srcUrl:
                index = srcUrl.find('year/')
                year = ''
                if index > 0:
                    year = srcUrl[index + 5:]
                    index = year.find('?')
                    if index > 0:
                        path = path + year[:index]
                        utils.createDir(path)
            # Name the file after the last URL segment, dropping any query string
            path = path + url[url.rindex('/'):]
            index = path.find('?')
            if index > 0:
                path = path[:index]
            flag = utils.saveResponseAsFile(response, path, fileType="pdf")
            if flag:
                return None
            else:  # If it's not a PDF, leave the response to the framework
                return Spider.afterResponse(self, response, url, error, extra)
        except Exception as err:
            print(err)

    def extract(self, url, html, models, modelNames):
        doc = SimplifiedDoc(html)
        lst = doc.selects('div.list >a').contains("documents/", attr="href")
        if not lst:
            lst = doc.selects('div.hidden-md hidden-lg >a')
        urls = []
        for a in lst:
            a["url"] = utils.absoluteUrl(url.url, a["href"])
            # Carry the root URL along so afterResponse can use it
            a["srcUrl"] = url.get('srcUrl')
            if not a['srcUrl']:
                a["srcUrl"] = url.url
            urls.append(a)
        return {"Urls": urls}

    # Reset the stored URLs. Call this when you want to download everything again.
    def resetUrl(self):
        Spider.clearUrl(self)
        Spider.resetUrlsTest(self)

SimplifiedMain.startThread(MySpider())  # Start the download
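
For comparison, here is the same idea as a plain requests + BeautifulSoup sketch. It assumes the document links (hrefs containing "documents/") are present in the page's static HTML; if these pages build their document list client-side with JavaScript, this will find nothing and a browser-automation tool such as Selenium would be needed instead.

import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

start_urls = [
    "https://projects.worldbank.org/en/projects-operations/document-detail/P167897?type=projects",
    "https://projects.worldbank.org/en/projects-operations/document-detail/P173997?type=projects",
    "https://projects.worldbank.org/en/projects-operations/document-detail/P166309?type=projects",
]

os.makedirs("pdfs", exist_ok=True)
headers = {"User-Agent": "Mozilla/5.0"}  # some servers reject the default requests user agent

for page_url in start_urls:
    html = requests.get(page_url, headers=headers, timeout=30).text
    soup = BeautifulSoup(html, "html.parser")
    # Mirror the spider's filter: keep only links whose href contains "documents/"
    for a in soup.select("a[href]"):
        href = a["href"]
        if "documents/" not in href:
            continue
        pdf_url = urljoin(page_url, href)
        resp = requests.get(pdf_url, headers=headers, timeout=60)
        # Skip sub-pages that are not the PDF itself
        if not resp.headers.get("Content-Type", "").startswith("application/pdf"):
            continue
        name = pdf_url.rsplit("/", 1)[-1].split("?")[0] or "document.pdf"
        with open(os.path.join("pdfs", name), "wb") as f:
            f.write(resp.content)

The Content-Type check plays the same role as saveResponseAsFile's fileType="pdf" flag: only responses that really are PDFs get written to disk, and everything else is skipped.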