Unable to extract URL names from table using Selenium webdriver - python-3.x

I have a table like below:
The goal is to extract the names using selenium webdriver.
I tried using the below code to fetch the names using xpath:
# Question's code. NOTE(review): the '#' inside the XPath is a transcription
# artifact — XPath attribute tests use '@' (e.g. @id); with '#' Selenium would
# raise InvalidSelectorException rather than return ''.
wd = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
wd.get("https://www.deakin.edu.au/information-technology/staff-listing")
names = wd.find_element_by_xpath('//*[@id="table09355"]/tbody/tr[1]/td/a').text
The output appears as empty i.e ''. How can I extract the names using xpath in selenium webdriver ? The names are URL hyper-links.
Thanks,

You may want to use below xpath :
//a[contains(@href,'https://')]
and use find_elements to store all anchor tag in a list like this :
# Iterate every anchor whose href contains 'https://' and print its link text.
# Fixed scrape artifacts: '@href' (not '#href'), the missing ':' after the
# for-clause, and the print body's indentation.
for names in wd.find_elements(By.XPATH, "//a[contains(@href,'https://')]"):
    print(names.text)
Update 1 :
driver.maximize_window()
wait = WebDriverWait(driver, 10)
driver.get('https://www.deakin.edu.au/information-technology/staff-listing')
# Dismiss the cookie/privacy pop-up first, otherwise it intercepts later clicks.
wait.until(EC.element_to_be_clickable((By.ID, "popup-accept"))).click()
# Scroll the 'Emeritus Professors' accordion heading into view, then expand it.
ActionChains(driver).move_to_element(wait.until(EC.element_to_be_clickable((By.XPATH, "//span[text()='Emeritus Professors']")))).perform()
wait.until(EC.element_to_be_clickable((By.XPATH, "//span[text()='Emeritus Professors']"))).click()
# Wait for the expanded panel's links to become visible before reading them.
ActionChains(driver).move_to_element(wait.until(EC.visibility_of_element_located((By.XPATH, "//span[contains(text(), 'Emeritus Professors')]/ancestor::h3/following-sibling::div/descendant::a")))).perform()
for names in driver.find_elements(By.XPATH, "//span[contains(text(), 'Emeritus Professors')]/ancestor::h3/following-sibling::div/descendant::a"):
    print(names.text)  # fixed: body must be indented under the for-loop
O/P :
Emeritus Professor Lynn Batten
Emeritus Professor Andrzej Goscinski
Process finished with exit code 0
Imports :
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
If you want to run on Google colab, try the below code :
!pip install selenium
!apt-get update
!apt install chromium-chromedriver
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
wd = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
driver =webdriver.Chrome('chromedriver',chrome_options=chrome_options)
wait = WebDriverWait(driver, 10)
driver.get("https://www.deakin.edu.au/information-technology/staff-listing")
wait.until(EC.element_to_be_clickable((By.ID, "popup-accept"))).click()
ActionChains(driver).move_to_element(wait.until(EC.element_to_be_clickable((By.XPATH, "//span[text()='Emeritus Professors']")))).perform()
wait.until(EC.element_to_be_clickable((By.XPATH, "//span[text()='Emeritus Professors']"))).click()
ActionChains(driver).move_to_element(wait.until(EC.visibility_of_element_located((By.XPATH, "//span[contains(text(), 'Emeritus Professors')]/ancestor::h3/following-sibling::div/descendant::a")))).perform()
for names in driver.find_elements(By.XPATH, "//span[contains(text(), 'Emeritus Professors')]/ancestor::h3/following-sibling::div/descendant::a"):
print(names.text)

Related

selenium webdriver unable to find element from its xpath even though the xpath is correct

I'm trying to get the attributes of a tag using selenium webdriver and using the xpath as a locator. I gave the xpath to the driver and it returned NoSuchElementException, but when I enter the xpath in the "Inspect element" window, it showed that particular tag, which means the locator does exist. So what's wrong with selenium? It's still the same even if I give the full XPath.
# Question's code. Fixed scrape artifact: attribute test is '@type', not '#type'.
from selenium import webdriver
driver = webdriver.Chrome('D:\\chromedriver.exe')
driver.get('https://cq-portal.webomates.com/#/login')
element=driver.find_element_by_xpath("//button[@type='button']")
print(element.get_attribute('class'))
driver.quit()
selenium version = 3.141.0
You just need to give the page time to load. Your code is perfectly fine. Either add a hard-coded wait like sleep, or wait for the presence of the element. Both will work.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
driver = webdriver.Chrome(PATH)
driver.maximize_window()
wait = WebDriverWait(driver, 20)
driver.get('https://cq-portal.webomates.com/#/login')
# The login page renders after load, so wait for the button to be present
# before locating it (immediate lookup raises NoSuchElementException).
# Fixed scrape artifact: '@type', not '#type'.
wait.until(EC.presence_of_element_located((By.XPATH, "//button[@type='button']")))
element = driver.find_element(By.XPATH, "//button[@type='button']")
print(element.get_attribute('class'))
driver.quit()
Output:
btn btn-md btn-primary btn-block
Looks like you are missing a delay.
Please try this:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
driver = webdriver.Chrome('D:\\chromedriver.exe')
wait = WebDriverWait(driver, 20)
driver.get('https://cq-portal.webomates.com/#/login')
# Wait for the button to be visible before reading it.
# Fixed scrape artifact: '@type', not '#type'.
wait.until(EC.visibility_of_element_located((By.XPATH, "//button[@type='button']")))
element=driver.find_element_by_xpath("//button[@type='button']")
print(element.get_attribute('class'))
driver.quit()

Selenium Python - Verify either of the title with EC.title_contains() method

I want to verify either of the page title "Apple" or "Mango" using EC.title_contains() method.
I have tried the below but it doesn't work for both of the pages.
WebDriverWait(driver, 10).until(EC.title_contains("Apple") or EC.title_contains("Mango"))
WebDriverWait(driver, 10).until(EC.title_contains("Apple" or "Mango"))
Hi Check if below lines can help you to know the page by page title, change the driver path..
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
opt = webdriver.ChromeOptions()
opt.add_argument("--start-maximized")
driver = webdriver.Chrome(executable_path="C:\\chrome driver\\chromedriver.exe", options=opt)
driver.get("http://rera.rajasthan.gov.in/ProjectSearch")
# A plain lambda lets you OR two title checks; EC.title_contains cannot be
# combined with Python's 'or' (that would evaluate the ECs eagerly).
WebDriverWait(driver,15).until(lambda driver: 'Mango' in driver.title or 'RER' in driver.title)
# Fixed scrape artifact: '@id', not '#id'.
search_btn = driver.find_element_by_xpath('//*[@id="btn_SearchProjectSubmit"]')
# invoke the click() action
search_btn.click()
For Element below lines need to import By
from selenium.webdriver.common.by import By
WebDriverWait(driver,15).until(lambda driver: driver.find_element(By.XPATH,'xpath') or driver.find_element(By.XPATH,'xpath2nd'))

Selenium unable to find element of icon

My code so far, result returned to be not be found.
import time
from selenium import webdriver
driver = webdriver.Firefox(executable_path="C:/geckodriver")
dominos_pg = "https://www.dominos.ca/pages/order/#!/locations/search/"
driver.get(dominos_pg)
time.sleep(5)
elem_class = driver.find_element_by_class_name("Carryout c-carryout circ-icons__icon circ-icons__icon--carryout")
Any advice/suggestions appreciated.
Try This:
import time
from selenium import webdriver
driver = webdriver.Firefox(executable_path="C:/geckodriver")
dominos_pg = "https://www.dominos.ca/pages/order/#!/locations/search/"
driver.get(dominos_pg)
time.sleep(5)
elem_class = driver.find_element_by_class_name("Carryout c-carryout circ-icons__icon circ-icons__icon--carryout")
print(elem_class)
If that doesn't work, it means your URL is not correct, because I see there is no element "Carryout".
Your url is incorrect . Please find below working solution:
Solution 1:
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait as Wait
driver = webdriver.Chrome(executable_path=r"C:\New folder\chromedriver.exe")
driver.maximize_window()
driver.get("https://www.dominosaruba.com/en/pages/order/#!/locations/search/?type=Carryout")
# The Carryout widget lives inside the third iframe; switch into it first.
driver.switch_to.frame(2)
CarryoutElement=WebDriverWait(driver, 20).until(
    EC.presence_of_element_located((By.XPATH, "//span[contains(text(),'Carryout')]")))
CarryoutElement.click()
Solution 2:
# Fixed scrape artifact: '@class', not '#class'.
CarryoutElement=WebDriverWait(driver, 20).until(
    EC.presence_of_element_located((By.XPATH, "//span[@class='Carryout c-carryout circ-icons__icon circ-icons__icon--carryout']")))
CarryoutElement.click()
solution 3:
# Fixed scrape artifact: '@data-quid', not '#data-quid'.
CarryoutElement=WebDriverWait(driver, 20).until(
    EC.presence_of_element_located((By.XPATH, "//span[@data-quid='easy-order-locator-carryout']")))
CarryoutElement.click()
After much browsing, I ended up locating it by XPath through the label.

How to locate and click "speed test" link on netflix using selenium in python?

I'm a complete beginner with selenium, sorry if the question is dumb (it is dumb):)
I need to find Speed test link on https://www.netflix.com/ and then click it.
i've tried searching by text and some other options. But nothing seems to work, I don't know why.
# Question's code. Fixed a transcription artifact: the Keys import line was
# duplicated and fused onto a single line.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome()
driver.get("https://www.netflix.com/")
driver.implicitly_wait(10)
elem = driver.find_element_by_link_text("Speed test")
elem.click()
NoSuchElementException: Message: no such element: Unable to locate element: {"method":"link text","selector":"Sign in"}
(Session info: chrome=75.0.3770.142)
The element with text as Speed Test is out of the viewport, so you need to induce WebDriverWait for the desired element to be clickable() and you can use the following Locator Strategy:
Using XPATH:
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//a[@class='footer-link']/span[text()='Speed Test']"))).click()
Note : You have to add the following imports :
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
Use WebDriverWait and element_to_be_clickable with following xpath.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("https://www.netflix.com/")
# Fixed scrape artifact: '@data-uia', not '#data-uia'.
elem = WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.XPATH,"//span[@data-uia='data-uia-footer-label'][contains(.,'Speed Test')]")))
elem.click()
Browser snapshot:
To add to this answer, you need to use WebDriverWait and then click on the element Show more info
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("https://www.netflix.com/")
# Fixed scrape artifact: '@data-uia', not '#data-uia'.
elem = WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.XPATH,"//span[@data-uia='data-uia-footer-label'][contains(.,'Speed Test')]")))
elem.click()
# After the speed-test page opens, click 'Show more info'.
WebDriverWait(driver,60).until(EC.element_to_be_clickable((By.XPATH,"//a[contains(.,'Show more info' )]"))).click()

Python Selenium Vanguard: NoSuchElementException: no such element: Unable to locate element with Selenium and Python

I am trying to download history data and click on the link to historical data. However, even though the XPath is correct, I get this error:
NoSuchElementException: no such element: Unable to locate element.
Code trials:
# Question's code. Fixed scrape artifact: '@id', not '#id'.
from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver
driver = webdriver.Chrome(executable_path='/Users/Documents/Coding/chromedriver')
url = "https://www.vanguardinvestor.co.uk/investments/vanguard-lifestrategy-100-equity-fund-accumulation-shares/price-performance?intcmpgn=blendedlifestrategy_lifestrategy100equityfund_fund_link"
driver.get(url)
wait = WebDriverWait(driver, 10)
elem = driver.find_element_by_xpath("//*[@id='prices-and-performance-tab']/div/div[4]/div[3]/div[1]/div[1]/div[3]/div/div/div[2]/div/table/tfoot/tr/td/a")
webdriver.ActionChains(driver).move_to_element(elem).click(elem).perform()
To click on the element wait for the page to load and element to be clickable and then click.Try entire snippet.
from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
driver = webdriver.Chrome(executable_path='/Users/Documents/Coding/chromedriver')
url = "https://www.vanguardinvestor.co.uk/investments/vanguard-lifestrategy-100-equity-fund-accumulation-shares/price-performance?intcmpgn=blendedlifestrategy_lifestrategy100equityfund_fund_link"
driver.get(url)
element = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//span[text()[contains(.,'Price & Performance')]]")))
# Fixed: 'element.click' without parentheses only references the bound method
# and never performs the click.
element.click()
To click on the element with text as Search for more historical prices you need to induce WebDriverWait for the element to be clickable and you can use the following solution:
Code Block:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(chrome_options=options, executable_path = r'C:\Utility\BrowserDrivers\chromedriver.exe' )
driver.get("https://www.vanguardinvestor.co.uk/investments/vanguard-lifestrategy-100-equity-fund-accumulation-shares/price-performance?intcmpgn=blendedlifestrategy_lifestrategy100equityfund_fund_link")
# Dismiss the banner first; fixed scrape artifact '@id' (was '#id').
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//button[@id='bannerButton']"))).click()
more_historical_prices = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.LINK_TEXT, "Search for more historical prices")))
driver.execute_script("arguments[0].scrollIntoView(true);", more_historical_prices)
# Fixed: the final wait resolved the link but never clicked it, yet the stated
# goal is to click 'Search for more historical prices'.
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.LINK_TEXT, "Search for more historical prices"))).click()
Browser Snapshot:

Resources