Python traceback occurs when trying to located element by xpath - python-3.x

Currently I have a script which logs me into our company website and clicks an element which loads a table of data I'm trying to collect. The code that produces the traceback is:
selenium.common.exceptions.NoSuchElementException: Message: no such element: Unable to locate element: {"method":"xpath","selector":"//tr[position()=8]"}
In the browser's Inspect Element view, that XPath matches 2 elements (I'm looking for the 1st one):
xpathBeingShownInDOM
and here is the traceback that python gives me after running script.
traceback error
Here goes the full code minus my login credentials:
# Log in to the company portal, open the OPD09 section, and locate the 8th
# table row. Fixes to the pasted code: the `webdriver` import was missing,
# XPath attribute tests need '@' (the '#' was a copy/paste artifact), and a
# stray markdown fence followed the last line.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time

options = webdriver.ChromeOptions()
# Suppress chromedriver's "DevTools listening" console noise.
options.add_experimental_option('excludeSwitches', ['enable-logging'])
browser = webdriver.Chrome(options=options)
browser.get('https://portal.o2pdelta.processwaregroup.com/O2PIntranet/O2PIntranet.aspx')

# Username / password fields ('@' introduces an attribute test in XPath).
elem = browser.find_elements_by_xpath("//input[@name='tbUsername' and @id='tbUsername']")[0]
elem.send_keys('MyLogin')
elem2 = browser.find_elements_by_xpath("//input[@name='tbPassword' and @id='tbPassword']")[0]
elem2.send_keys('MyPassword' + Keys.RETURN)

time.sleep(3)
elem3 = browser.find_element_by_xpath("//div[@class='col-md-4' and @id='TrIcon_OPD09']")
elem3.click()
time.sleep(3)

# Wait for the row instead of failing immediately with NoSuchElementException.
# NOTE(review): if the table lives inside an <iframe>, switch to that frame
# first — confirm against the page's DOM.
elem4 = WebDriverWait(browser, 10).until(
    EC.presence_of_element_located((By.XPATH, "//tr[position()=8]"))
)

Related

Cannot get Selenium to locate an element

I am trying to use Selenium to automatically click the next_frame button, but I do not know why the element cannot be stored into the next_frame variable. How do I get the next_frame variable to locate the element?
# Click through activity frames until the status reads "Complete".
# Fixes to the pasted code: block indentation was lost in the paste, and the
# XPath attribute test needs '@' rather than '#'.
from webdriver_manager.microsoft import EdgeChromiumDriverManager
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as conditions
from selenium.webdriver.common.by import By

driver = webdriver.Edge(EdgeChromiumDriverManager().install())
driver.implicitly_wait(15)

# Wait until the activity title is visible before reading its text.
# NOTE(review): no driver.get(...) appears in the paste — the page must
# already be loaded before this runs.
activity_name = WebDriverWait(driver, 10).until(
    conditions.visibility_of_element_located((By.ID, "activity-title"))
)
if activity_name.text in ('Instruction', 'Warm-Up', 'Summary'):
    activity_status = driver.find_element_by_id('activity-status')
    while activity_status.text != "Complete":
        next_frame = driver.find_element_by_xpath('//*[@id="bottom-area"]/div[3]/ol/li[5]/a')
        next_frame.click()
        # Re-locate after each click; a previously found element can go stale.
        activity_status = driver.find_element_by_id('activity-status')
    driver.find_element_by_class_name('nav-icon').click()
else:
    pass
Here is the message it showed me
selenium.common.exceptions.NoSuchElementException: Message: no such element: Unable to locate element: {"method":"xpath","selector":"//*[#id="bottom-area"]/div[3]/ol/li[5]/a"}

Unable to click on link using python and selenium

# Open ClinicalKey and click the "login" link in the header.
# Fixes: XPath attribute tests use '@' (the '#' was a paste artifact), and
# the element is looked up behind an explicit wait so the page has time to
# render before we click.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

# go to website
chrome_options = Options()
# Keep the browser open after the script exits.
chrome_options.add_experimental_option("detach", True)
driver = webdriver.Chrome(executable_path="chromedriver.exe", chrome_options=chrome_options)
action = webdriver.ActionChains(driver)
driver.get('https://www.clinicalkey.com/#!/browse/book/3-s2.0-C2016100010X')
# look for "login" and click once it is actually clickable
loginclick = WebDriverWait(driver, 30).until(
    EC.element_to_be_clickable((By.XPATH, "//*[@id='header']/div[3]/ol/li[3]/a/span"))
).click()
How come I'm not able to click on the login section on top right after navigating to the website given? I get an error:
Exception has occurred: NoSuchElementException
Message: no such element: Unable to locate element: {"method":"xpath","selector":"//*[#id='header']/div[3]/ol/li[3]/a/span"}
(Session info: chrome=85.0.4183.83)
File "C:\python\download.py", line 18, in <module>
loginclick = driver.find_element_by_xpath("//*[#id='header']/div[3]/ol/li[3]/a/span").click()
Thank you !
Two reasons:
1: Wait for element to load
2: As the element is not visible on the page, use JavaScript to click it
driver.get('https://www.clinicalkey.com/#!/browse/book/3-s2.0-C2016100010X')
# look for "login" and click; '@' restored in the XPath attribute test
# ('#' was a paste artifact and never matches).
loginclick = WebDriverWait(driver, 30).until(
    EC.presence_of_element_located((By.XPATH, "//*[@id='header']/div[3]/ol/li[3]/a/span"))
)
# Element is present but not visible, so click it via JavaScript.
driver.execute_script("arguments[0].click();", loginclick)
Output:

Python - Selenium XPATH query mismatch

I've tested an XPATH query using Chrome SelAssist extension and it works pretty fine.
The syntax is "//*[#id="fbPhotoSnowliftTimestamp"]/a/abbr":
I've started to write a python code to detect such element:
# Locate the photo timestamp element using the logged-in Chrome profile.
# Fix: '@id' restored in the XPath — '#' is not valid XPath attribute syntax
# and was introduced by the paste.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

# Reuse the existing Chrome profile so the authenticated session carries over.
chrome_profile = r"C:\Users\XXX\AppData\Local\Google\Chrome\User Data"
options = webdriver.ChromeOptions()
options.add_argument('user-data-dir=' + chrome_profile)
w = webdriver.Chrome(executable_path="C:\\Projects\\selenium\\chromedriver.exe", chrome_options=options)
w.get('https://website.com')
test = w.find_element(By.XPATH, "//*[@id=\"fbPhotoSnowliftTimestamp\"]/a/abbr")
Page is loaded fine, and it's exactly the same one I've manually tested.
Unfortunately I continue to retrieve this error:
raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.NoSuchElementException: Message: no such element: Unable to locate element: {"method":"xpath","selector":"//*[#id="fbPhotoSnowliftTimestamp"]/a/abbr"}
(Session info: chrome=80.0.3987.149)
I cannot figure out what I'm doing wrong.
Thx for any suggestion.
The page might not be loading as quickly as expected. A timeout exception will be thrown if the xpath is not found using below. Try:
# Same lookup, but behind an explicit wait: raises TimeoutException after 10s
# instead of failing immediately if the page renders slowly.
# ('@id' restored in the XPath attribute test.)
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

chrome_profile = r"C:\Users\XXX\AppData\Local\Google\Chrome\User Data"
options = webdriver.ChromeOptions()
options.add_argument('user-data-dir=' + chrome_profile)
w = webdriver.Chrome(executable_path="C:\\Projects\\selenium\\chromedriver.exe", chrome_options=options)
w.get('https://website.com')
test = WebDriverWait(w, 10).until(
    EC.visibility_of_element_located((By.XPATH, "//*[@id=\"fbPhotoSnowliftTimestamp\"]/a/abbr"))
)
#w.find_element(By.XPATH, "//*[@id=\"fbPhotoSnowliftTimestamp\"]/a/abbr")

How to locate elements through Selenium and Xpath

So I am trying to scrape some information from a website, and when I try to get an element by XPath I am getting an "Unable to locate element" error, even though the path I provide is copied directly from the inspection tool. I tried a couple of things but they did not work, so I told myself I was going to try an easier path (TEST), but that still doesn't work. Is it possible that the website does not show all the HTML code when inspecting?
Here is the code, with the website and the xpath that I tried.
# Target page and the XPaths tried. '@' restored in every attribute test —
# the '#' form was a paste artifact and is not valid XPath.
URL_TRADER = 'https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly'
TEST = 'html/body/div[@id="app"]/div[@class="logged-out free"]/div[@class="client-components-app-app__wrapper undefined undefined"]'#/div/div[1]/div/div[2]/div/section/main/table/tbody/tr[3]/td[3]/div/div/div/div[1]/span'
X_PATH = '//*[@id="app"]/div/div/div[2]/div/div[1]/div/div[2]/div/section/main/table/tbody/tr[1]/td[3]/div/div/div/div[1]/span'
The main function is:
def trader_table():
    """Open the analyst page and return the target cell's inner HTML.

    Body indentation was lost in the paste and is restored here.
    """
    # Loading Chrome and getting to the website
    driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
    driver.get(URL_TRADER)
    driver.implicitly_wait(10)
    text = driver.find_element_by_xpath(X_PATH).get_attribute('innerHTML')
    return text
I added a wait condition and used a css selector combination instead but this is the same as your xpath I think
# Wait for the third column of the expert table, then print its inner HTML.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

url = 'https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly'
driver = webdriver.Chrome()
driver.get(url)

# Same CSS combination as the XPath in the question, spelled out for clarity.
selector = (
    ".client-components-experts-infoTable-expertTable__table "
    ".client-components-experts-infoTable-expertTable__dataRow td:nth-child(3)"
)
cell = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, selector))
)
data = cell.get_attribute('innerHTML')
print(data)
You have provided all the necessary details required to construct an answer but you didn't explicitly mention which element you were trying to get.
However, the commented out xpath within TEST gives us a hint you were after the Price Target and to extract the text within those elements as the elements are JavaScript enabled elements, you need to induce WebDriverWait for the visibility_of_all_elements_located() and you can use the following solution:
Code Block:
# Collect every price-target <span> once all of them are visible.
# Fix: '@class' restored in the XPath ('#class' was a paste artifact).
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

options = webdriver.ChromeOptions()
options.add_argument('start-maximized')
options.add_argument('disable-infobars')
options.add_argument('--disable-extensions')
driver = webdriver.Chrome(chrome_options=options, executable_path=r'C:\WebDrivers\chromedriver.exe')
driver.get("https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly")
# The cells are JavaScript-rendered, so wait for visibility of all of them.
print([element.get_attribute('innerHTML') for element in WebDriverWait(driver, 10).until(
    EC.visibility_of_all_elements_located(
        (By.XPATH, "//div[@class='client-components-experts-infoTable-expertTable__isBuy']//span")))])
Console Output:
['$14.00', '$110.00', '$237.00', '$36.00', '$150.00', '$71.00', '$188.00', '$91.00', '$101.00', '$110.00']
I guess you are looking for the price. Here you go:
# Return (and print) the first price-target cell's inner HTML.
# Fixes: function-body indentation restored; '@' restored in the XPath
# attribute tests ('#' was a paste artifact).
from selenium import webdriver

URL_TRADER = 'https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly'
TEST = 'html/body/div[@id="app"]/div[@class="logged-out free"]/div[@class="client-components-app-app__wrapper undefined undefined"]'#/div/div[1]/div/div[2]/div/section/main/table/tbody/tr[3]/td[3]/div/div/div/div[1]/span'
X_PATH = "//div[@class='client-components-experts-infoTable-expertTable__isBuy']/div/span"

def trader_table():
    """Open the analyst page and return the first price target's HTML."""
    driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
    driver.get(URL_TRADER)
    driver.implicitly_wait(10)
    text = driver.find_element_by_xpath(X_PATH).get_attribute('innerHTML')
    print(text)
    return text
Edited for All rows
# Collect the text of every price-target cell on the page.
# Fixes: function-body indentation restored; '@class' restored in the XPath;
# the result variable renamed so it no longer shadows the builtin `list`.
from selenium import webdriver

URL_TRADER = 'https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly'
X_PATH = "//div[@class='client-components-experts-infoTable-expertTable__isBuy']/div/span"

def trader_table():
    """Return a list with the text of every matching price cell."""
    driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
    driver.get(URL_TRADER)
    driver.implicitly_wait(10)
    list_ele = driver.find_elements_by_xpath(X_PATH)
    price_list = []
    for ele in list_ele:
        print(ele.text)
        price_list.append(ele.text)
    return price_list

prices = trader_table()
print(prices)
# Dump the full inner HTML of the app container after the page loads.
from selenium import webdriver
import time

driver = webdriver.Chrome("your webdriver location")
driver.get("https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly")

# Crude fixed wait for the JavaScript-rendered content to appear.
time.sleep(10)

app_html = driver.find_element_by_id('app').get_attribute('innerHTML')
print(app_html)
prints full inner html

Stale Element Reference Exception in Selenium in Python

I am trying to scrape the links of different poems from https://www.poets.org/poetsorg/poems but I am getting a Stale Element Reference Exception error. I have tried increasing sleep time and WebDriverWait as well with no success. Any help will be greatly appreciated. My code is below.
# Scrape poem links across 10 result pages, clicking "next" between pages.
# Fixes to the pasted code: `By` was used (By.LINK_TEXT) but never imported,
# and the function-body indentation was lost in the paste.
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def poem_scraper(url):
    """Return absolute links to every poem found across 10 pages."""
    driver = webdriver.Chrome("\chromedriver.exe")
    driver.get(url)
    all_links = []
    for _ in range(10):
        soup = BeautifulSoup(driver.page_source, "html.parser")
        total_poems = soup.find_all('td', attrs={'class': "views-field views-field-title"})
        for div in total_poems:
            links = div.find_all('a')
            for a in links:
                all_links.append('https://www.poets.org' + a['href'])
        timeout = 15
        #time.sleep(6)
        try:
            element_present = EC.presence_of_element_located((By.LINK_TEXT, 'next'))
            WebDriverWait(driver, timeout).until(element_present)
        except TimeoutException:
            print("Timed out waiting for page to load")
        # Re-find 'next' on every iteration: reusing an element found before a
        # page refresh is what raises StaleElementReferenceException.
        test = driver.find_element_by_link_text('next')
        time.sleep(6)
        test.click()
    return(all_links)
StaleElementReferenceException: Message: stale element reference: element is not attached to the page document

Resources