Error: stale element reference: element is not attached
I know this is a duplicate question, but as in other posts I added a delay (10 seconds) and it is not working. I don't understand why, because each element is extracted first and only then used to scroll inside the container. The code is:
from time import sleep

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

options = Options()
driver = webdriver.Chrome(options=options)
driver.maximize_window()
driver.get('https://app.wealthcharts.com/')

email = '[email protected]'
passw = 'March12th'

# wait for the login form, then sign in
WebDriverWait(driver, 20).until(
    EC.element_to_be_clickable((By.XPATH, '//button[@type="submit"]')))
driver.find_element(By.XPATH, '//input[@type="email"]').send_keys(email)
driver.find_element(By.XPATH, '//input[@type="password"]').send_keys(passw)
driver.find_element(By.XPATH, '//button[@type="submit"]').click()

# wait until the grid header has rendered
WebDriverWait(driver, 40).until(
    EC.element_to_be_clickable((By.XPATH, '//span[contains(text(),"Symbol")]')))
sleep(1)
check = []
links = driver.find_elements(By.XPATH, '//div[@class="ag-center-cols-container"]/div')
yym = driver.find_elements(By.XPATH, '//div[@style="display: flex; flex-direction: column;"]/span')

f = open('data.csv', 'a')
f.write('symbol,name,Time_Announced,Day_Announced,Analyst_Firm,Analyst_Name,Action_on_Security,Prior_Rating,Rating,Action_Price_Target,Prior_Price_Target,Price_Target\n')
# child <div> index of each cell, in CSV column order after "symbol":
# name, Time_Announced, Day_Announced, Analyst_Firm, Analyst_Name,
# Action_on_Security, Prior_Rating, Rating, Action_Price_Target,
# Prior_Price_Target, Price_Target
cell_indices = [3, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11]

for i in links:
    if i in check:
        continue
    try:
        symbol = yym[links.index(i)].text
    except Exception:
        symbol = 'none'
    values = [symbol]
    for idx in cell_indices:
        try:
            values.append(i.find_element(By.XPATH, f'.//div[{idx}]').text)
        except Exception:  # missing or stale cell: fall back as before
            values.append('none')
    # note: plain concatenation breaks if a cell's text contains a comma;
    # see the csv sketch after this block
    f.write(','.join(values) + '\n')
    try:
        driver.execute_script("arguments[0].scrollIntoView();", i)  # error shown on this line
    except Exception:
        sleep(5)  # time delay to handle the error
        driver.execute_script("arguments[0].scrollIntoView();", i)
    # queue any newly rendered rows for extraction
    linksd = driver.find_elements(By.XPATH, '//div[@class="ag-center-cols-container"]/div')
    for j in linksd:
        if j not in links and j not in check:
            links.append(j)
    check.append(i)
    print(links.index(i))

f.close()
print('Done Extracting')
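As an aside, separate from the stale-element error: building CSV lines by string concatenation corrupts the file whenever a cell's text contains a comma. A minimal sketch of the same write using the standard csv module, which quotes such fields automatically (the values here are placeholders, not real output):

import csv

# csv.writer quotes any field containing commas, quotes, or newlines
with open('data.csv', 'a', newline='') as out:
    writer = csv.writer(out)
    writer.writerow(['symbol', 'name', 'Time_Announced'])  # header, shortened here
    writer.writerow(['AAPL', 'Apple, Inc.', '9:30 AM'])    # the embedded comma gets quoted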
Sometimes 20 rows are extracted and sometimes it goes to 46. The links are extracted first and the same element is then used to scroll, yet the error is still shown. The same code reached 1000 rows one time. I don't know why this is happening.
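For context on why a delay cannot fix this: the ag-center-cols-container class suggests the table is an AG Grid, which virtualizes rows, so a row scrolled out of the viewport is removed from the DOM and any stored reference to it becomes stale no matter how long the script waits. Below is a minimal sketch of the usual workaround, re-locating the rows on every pass instead of holding references across scrolls; the row-index attribute and the loop structure are assumptions based on standard AG Grid markup, not taken from the code above:

from selenium.common.exceptions import StaleElementReferenceException

ROW_XPATH = '//div[@class="ag-center-cols-container"]/div'
seen = set()  # row-index values already extracted

while True:
    rows = driver.find_elements(By.XPATH, ROW_XPATH)  # fresh references every pass
    new_rows = [r for r in rows if r.get_attribute('row-index') not in seen]
    if not new_rows:
        break  # no unseen rows rendered: extraction is finished
    for row in new_rows:
        try:
            idx = row.get_attribute('row-index')
            cells = [c.text for c in row.find_elements(By.XPATH, './/div')]
            driver.execute_script("arguments[0].scrollIntoView();", row)
            seen.add(idx)  # mark the row done only after a successful read
            # ... write `cells` to the CSV here ...
        except StaleElementReferenceException:
            break  # the grid re-rendered mid-pass; restart with fresh references

The key difference from the loop above is that no WebElement is reused after the grid re-renders, so there is no reference left to go stale.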