下面是我用来抓取 BSE 网站的代码。一切正常,除了一个小故障。内部(第二个)for 循环不会迭代并且执行结束。任何帮助都会有用。
browser=webdriver.Chrome()
browser.get('http://www.bseindia.com/markets/keystatics/Keystat_index.aspx')
for i in range(1,48):
browser.find_element_by_xpath("//*[@id='ctl00_ContentPlaceHolder1_ddltype']/option["+str(i)+"]").click()
browser.find_element_by_xpath('//*[@id="ctl00_ContentPlaceHolder1_btnSubmit"]').click()
data = []
for j in range(2,21):
browser.find_element_by_xpath("//*[@id='ctl00_ContentPlaceHolder1_gvReport_ctl"+str(j).zfill(2)+"_Linkbtn']").click()
for tr in browser.find_elements_by_xpath('//*[@id="ctl00_ContentPlaceHolder1_gvYearwise"]'):
ths = tr.find_elements_by_tag_name('th')
tds = tr.find_elements_by_tag_name('td')
if ths:
data.append([th.text for th in ths])
if tds:
data.append([td.text for td in tds])
f.write(str(data) + "\n")
很多时候点击会导致 500,所以我运行递归 try catch 块。
这是整个代码:
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import time
base_url="http://www.bseindia.com/markets/keystatics/Keystat_index.aspx"
#browser = webdriver.Chrome('/Users/qriyoinfolabs/ahlat/chromedriver')
browser=webdriver.Chrome()
browser.get(base_url)
data = []
def fetch_this_erroful_page_for_me(id):
try:
print "Trying "+str(id)+"..."
browser.find_element_by_xpath("//*[@id='ctl00_ContentPlaceHolder1_ddltype']/option["+str(id)+"]").click()
browser.find_element_by_xpath('//*[@id="ctl00_ContentPlaceHolder1_btnSubmit"]').click()
except:
print "Retrying "+str(id)+"..."
time.sleep(2)
browser.get(base_url)
fetch_this_erroful_page_for_me(id)
def click_on_this_link_for_me(year_id,option_id):
try:
print "Trying year"+str(year_id)+"..."
zfilled_id=str(year_id).zfill(2)
browser.find_element_by_xpath("//*[@id='ctl00_ContentPlaceHolder1_gvReport_ctl"+zfilled_id+"_Linkbtn']").click()
return 1
except NoSuchElementException:
return 0
else:
time.sleep(2)
fetch_this_erroful_page_for_me(option_id)
click_on_this_link_for_me(year_id,option_id)
for i in range(1,48):
fetch_this_erroful_page_for_me(i)
for j in range(2,21):
valid=click_on_this_link_for_me(j,i)
if(valid==0):
print "valid0"
break
for tr in browser.find_elements_by_xpath('//*[@id="ctl00_ContentPlaceHolder1_gvYearwise"]'):
ths = tr.find_elements_by_tag_name('th')
tds = tr.find_elements_by_tag_name('td')
if ths:
data.append([th.text for th in ths])
if tds:
data.append([td.text for td in tds])
with open('str.txt','w') as file:
file.write(str(data))
本文收集自互联网,转载请注明来源。
如有侵权,请联系 [email protected] 删除。
我来说两句