How to locate elements through Selenium and XPath - python-3.x

So I am trying to scrape some information from a website, and when I try to get an element by XPath I get an "Unable to locate element" error, even though the path I provide is copied directly from the inspection tool. I tried a couple of things but they did not work, so I told myself I would try a simpler path (TEST), but it still doesn't work. Is it possible that the website does not show all of the HTML code when inspecting?
Here is the code, with the website and the XPath expressions that I tried.
URL_TRADER = 'https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly'
TEST = 'html/body/div[@id="app"]/div[@class="logged-out free"]/div[@class="client-components-app-app__wrapper undefined undefined"]'#/div/div[1]/div/div[2]/div/section/main/table/tbody/tr[3]/td[3]/div/div/div/div[1]/span'
X_PATH = '//*[@id="app"]/div/div/div[2]/div/div[1]/div/div[2]/div/section/main/table/tbody/tr[1]/td[3]/div/div/div/div[1]/span'
The main function is:
def trader_table():
    # Loading Chrome and getting to the website
    driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
    driver.get(URL_TRADER)
    driver.implicitly_wait(10)
    text = driver.find_element_by_xpath(X_PATH).get_attribute('innerHTML')
    return text
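A quick way to check whether the markup shown in DevTools is present in the HTML Selenium actually receives is to search driver.page_source; a minimal sketch (the substring checked is an assumption, borrowed from the class names used in the answers below):

import time
from selenium import webdriver

driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
driver.get(URL_TRADER)
time.sleep(10)  # crude wait so JavaScript-rendered content can appear
# If this prints False, the table is built by JavaScript after page load,
# which is why a copied absolute XPath can fail at find-time.
print('expertTable' in driver.page_source)
driver.quit()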

I added a wait condition and used a CSS selector combination instead, but I think this targets the same element as your XPath:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
url = 'https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly'
driver = webdriver.Chrome()
driver.get(url)
data = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,
    ".client-components-experts-infoTable-expertTable__table .client-components-experts-infoTable-expertTable__dataRow td:nth-child(3)"))).get_attribute('innerHTML')
print(data)

You have provided all the necessary details to construct an answer, but you didn't explicitly mention which element you were trying to get.
However, the commented-out XPath within TEST hints that you were after the Price Target. As those elements are rendered by JavaScript, to extract their text you need to induce WebDriverWait for visibility_of_all_elements_located(), and you can use the following solution:
Code Block:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
options = webdriver.ChromeOptions()
options.add_argument('start-maximized')
options.add_argument('disable-infobars')
options.add_argument('--disable-extensions')
driver = webdriver.Chrome(chrome_options=options, executable_path=r'C:\WebDrivers\chromedriver.exe')
driver.get("https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly")
print([element.get_attribute('innerHTML') for element in WebDriverWait(driver, 10).until(
    EC.visibility_of_all_elements_located((By.XPATH,
        "//div[@class='client-components-experts-infoTable-expertTable__isBuy']//span")))])
Console Output:
['$14.00', '$110.00', '$237.00', '$36.00', '$150.00', '$71.00', '$188.00', '$91.00', '$101.00', '$110.00']

I guess you are looking for the price. Here you go.
from selenium import webdriver
URL_TRADER = 'https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly'
TEST = 'html/body/div[@id="app"]/div[@class="logged-out free"]/div[@class="client-components-app-app__wrapper undefined undefined"]'#/div/div[1]/div/div[2]/div/section/main/table/tbody/tr[3]/td[3]/div/div/div/div[1]/span'
X_PATH = "//div[#class='client-components-experts-infoTable-expertTable__isBuy']/div/span"
def trader_table():
    driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
    driver.get(URL_TRADER)
    driver.implicitly_wait(10)
    text = driver.find_element_by_xpath(X_PATH).get_attribute('innerHTML')
    print(text)
    return text
Edited for all rows:
from selenium import webdriver
URL_TRADER = 'https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly'
X_PATH = "//div[#class='client-components-experts-infoTable-expertTable__isBuy']/div/span"
def trader_table():
    driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
    driver.get(URL_TRADER)
    driver.implicitly_wait(10)
    list_ele = driver.find_elements_by_xpath(X_PATH)
    price_list = []
    for ele in list_ele:
        print(ele.text)
        price_list.append(ele.text)
    return price_list

prices = trader_table()  # avoid shadowing the built-in name "list"
print(prices)

from selenium import webdriver
import time
driver = webdriver.Chrome("your webdriver location")
driver.get("https://www.tipranks.com/analysts/joseph-foresi?benchmark=none&period=yearly")
time.sleep(10)
y = driver.find_element_by_id('app').get_attribute('innerHTML')
print(y)
This prints the full inner HTML.
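If you want to pull the prices out of that dump rather than reading through it, a sketch using BeautifulSoup (which also appears later on this page); the class name is borrowed from the answers above and may change if the site is redesigned:

from bs4 import BeautifulSoup

soup = BeautifulSoup(y, 'html.parser')
# Collect the span text inside each price-target cell (class name assumed).
prices = [span.get_text(strip=True)
          for div in soup.find_all('div', class_='client-components-experts-infoTable-expertTable__isBuy')
          for span in div.find_all('span')]
print(prices)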

Related

Unable to scrape texts from URLs

I have been struggling to scrape the contents/text of news articles from each URL. The extraction of URLs works fine, but scraping the text from each URL has been challenging. Below is my code:
from selenium.webdriver import ActionChains, Keys
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
import sys
from bs4 import BeautifulSoup
import requests
import pandas as pd
# Initialize driver and navigate
driver = webdriver.Chrome()
driver.maximize_window()
url = 'https://www.iol.co.za/news/south-africa/eastern-cape'
wait = WebDriverWait(driver, 5)
driver.get(url)
time.sleep(3)
# take the articles
articles = wait.until(EC.presence_of_all_elements_located((By.XPATH,
    "//article//*[(name()='h1' or name()='h2' or name()='h3' or name()='h4' or name()='h5' or name()='h6' or name()='h7') and string-length(text()) > 0]/ancestor::article")))
article_link = []
full_text = []
# For every article we take what we want
for article in articles:
    link = article.find_element(By.XPATH, ".//a")
    news_link = link.get_attribute('href')
    article_link.append(news_link)
    for j in article_link:
        news_response = requests.get(j)
        news_data = news_response.content
        news_soup = BeautifulSoup(news_data, 'html.parser')
        art_cont = news_soup.find('div', 'Article__StyledArticleContent-sc-uw4nkg-0')
        full_text.append(art_cont.text)
print(article_link)
print(full_text)
I tried to use beautifulsoup, but it doesn't seem to work. I will be grateful for any help.
First off, you should unindent the second for loop; it shouldn't run inside the first loop (otherwise you re-fetch the accumulated links on every iteration and collect the same information many extra times).
Second, the requests you send return a page with its content blocked (I could not find a way around this by inserting headers into the request). What you can do instead is use the driver to load each of the links and grab the text from there. Here is how you could do that:
for link in article_link:
    driver.get(link)
    news_data = wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'Article__StyledArticleContent-sc-uw4nkg-0')))
    full_text.append(news_data[0].get_attribute('textContent'))
The full script would look like this:
from selenium.webdriver import ActionChains, Keys
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
import sys
from bs4 import BeautifulSoup
import requests
import pandas as pd
# Initialize driver and navigate
driver = webdriver.Chrome()
driver.maximize_window()
url = 'https://www.iol.co.za/news/south-africa/eastern-cape'
wait = WebDriverWait(driver, 5)
driver.get(url)
time.sleep(3)
# take the articles
articles = wait.until(EC.presence_of_all_elements_located((By.XPATH,
    "//article//*[(name()='h1' or name()='h2' or name()='h3' or name()='h4' or name()='h5' or name()='h6' or name()='h7') and string-length(text()) > 0]/ancestor::article")))
article_link = []
full_text = []
# For every article we take what we want
for article in articles:
    link = article.find_element(By.XPATH, ".//a")
    news_link = link.get_attribute('href')
    article_link.append(news_link)

for link in article_link[:5]:
    driver.get(link)
    news_data = wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'Article__StyledArticleContent-sc-uw4nkg-0')))
    full_text.append(news_data[0].get_attribute('textContent'))

print(article_link)
print(full_text)
The best course of action is to use Selenium throughout, as the site's content is Cloudflare-protected. Although @Andrew Ryan has already addressed the issue, I thought I'd offer a shorter version, since this answer was already halfway written at the time of his posting.
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium import webdriver
link = 'https://www.iol.co.za/news/south-africa/eastern-cape'
def get_links_and_texts(driver, url):
    driver.get(url)
    for article_link in [i.get_attribute('href') for i in WebDriverWait(driver, 10).until(
            EC.presence_of_all_elements_located((By.XPATH, "//article/a[starts-with(@class,'Link__StyledLink')]")))]:
        driver.get(article_link)
        art_content = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, ".article-content"))).text
        yield {"Link": article_link, "article_content": art_content}

if __name__ == '__main__':
    with webdriver.Chrome() as driver:
        for item in get_links_and_texts(driver, link):
            print(item)

selenium webdriver unable to find element from its xpath even though the xpath is correct

I'm trying to get the attributes of a tag using Selenium WebDriver, using the XPath as a locator. I gave the XPath to the driver and it returned NoSuchElementException, but when I entered the XPath in the "Inspect element" window, it showed that particular tag, which means the locator does exist. So what's wrong with Selenium? It's still the same even if I give the full XPath.
from selenium import webdriver
driver = webdriver.Chrome('D:\\chromedriver.exe')
driver.get('https://cq-portal.webomates.com/#/login')
element = driver.find_element_by_xpath("//button[@type='button']")
print(element.get_attribute('class'))
driver.quit()
selenium version = 3.141.0
You just need to wait for the page to load; your code is otherwise fine. Either add a hard-coded wait like sleep or wait for presence of the element. Both will work.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
driver = webdriver.Chrome(PATH)
driver.maximize_window()
wait = WebDriverWait(driver, 20)
driver.get('https://cq-portal.webomates.com/#/login')
wait.until(EC.presence_of_element_located((By.XPATH, "//button[@type='button']")))
element = driver.find_element(By.XPATH, "//button[#type='button']")
print(element.get_attribute('class'))
driver.quit()
Output:
btn btn-md btn-primary btn-block
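For completeness, the hard-coded wait mentioned above would look something like the sketch below (the explicit wait is generally more reliable, since sleep always pauses for the full duration):

import time
from selenium import webdriver

driver = webdriver.Chrome(PATH)  # PATH is your chromedriver path, as above
driver.get('https://cq-portal.webomates.com/#/login')
time.sleep(10)  # crude: always waits the full 10 seconds
element = driver.find_element_by_xpath("//button[@type='button']")
print(element.get_attribute('class'))
driver.quit()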
Looks like you are missing a delay.
Please try this:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
driver = webdriver.Chrome('D:\\chromedriver.exe')
wait = WebDriverWait(driver, 20)
driver.get('https://cq-portal.webomates.com/#/login')
wait.until(EC.visibility_of_element_located((By.XPATH, "//button[@type='button']")))
element = driver.find_element_by_xpath("//button[@type='button']")
print(element.get_attribute('class'))
driver.quit()

Selenium unable to find element of icon

My code so far; the result returned is that the element could not be found.
import time
from selenium import webdriver
driver = webdriver.Firefox(executable_path="C:/geckodriver")
dominos_pg = "https://www.dominos.ca/pages/order/#!/locations/search/"
driver.get(dominos_pg)
time.sleep(5)
elem_class = driver.find_element_by_class_name("Carryout c-carryout circ-icons__icon circ-icons__icon--carryout")
Any advice/suggestions appreciated.
Try this:
import time
from selenium import webdriver
driver = webdriver.Firefox(executable_path="C:/geckodriver")
dominos_pg = "https://www.dominos.ca/pages/order/#!/locations/search/"
driver.get(dominos_pg)
time.sleep(5)
elem_class = driver.find_element_by_class_name("Carryout c-carryout circ-icons__icon circ-icons__icon--carryout")
print(elem_class)
If that doesn't work, it means your URL is not correct, because I see there is no element Carryout.
Your URL is incorrect. Please find below a working solution:
Solution 1:
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome(executable_path=r"C:\New folder\chromedriver.exe")
driver.maximize_window()
driver.get("https://www.dominosaruba.com/en/pages/order/#!/locations/search/?type=Carryout")
driver.switch_to.frame(2)
CarryoutElement = WebDriverWait(driver, 20).until(
    EC.presence_of_element_located((By.XPATH, "//span[contains(text(),'Carryout')]")))
CarryoutElement.click()
Solution 2:
CarryoutElement = WebDriverWait(driver, 20).until(
    EC.presence_of_element_located((By.XPATH, "//span[@class='Carryout c-carryout circ-icons__icon circ-icons__icon--carryout']")))
CarryoutElement.click()
Solution 3:
CarryoutElement = WebDriverWait(driver, 20).until(
    EC.presence_of_element_located((By.XPATH, "//span[@data-quid='easy-order-locator-carryout']")))
CarryoutElement.click()
After much browsing, I ended up locating the element via an XPath that goes through its label.
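That might look something like the sketch below; the label text and markup are assumptions, since the original poster didn't share the final locator:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Hypothetical label-based locator; the actual label text on the live
# page may differ.
carryout = WebDriverWait(driver, 20).until(
    EC.element_to_be_clickable((By.XPATH, "//label[contains(., 'Carryout')]")))
carryout.click()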

How to find text from div class Selenium Python

I am trying to get the text out of a div with a given class using Selenium.
from selenium import webdriver
import time
chrome_path = r"C:\Users\User1\Downloads\chromedriver_win32\chromedriver.exe"
driver = webdriver.Chrome(chrome_path)
driver.get("https://www.gigantti.fi/cms/gigantti-outlet/gigantti-outlet/")
time.sleep(10)
posts1 = driver.find_elements_by_class_name('description')
print(posts1)
posts2 = driver.find_elements_by_css_selector('description')
print(posts2)
posts3 = driver.find_elements_by_tag_name('description')
print(posts3)
posts4 = driver.find_elements_by_id('description')
print(posts4)
posts5 = driver.find_elements_by_name('description')
print(posts5)
posts6 = driver.find_elements_by_xpath("//div[#class='description']")
driver.close()
The output looks like this:
DevTools listening on ws://127.0.0.1:58551/devtools/browser/4ebf2909-a977-43b8-b0bc-2824c2d371d7
[]
[]
[]
[]
[<selenium.webdriver.remote.webelement.WebElement (session="5a662510a9af8bec45752c91b4397d06", element="6e54f7d0-c586-4f97-a974-1af8d19610ce")>]
This it what Chrome shows when inspecting site.
<div class="description">Samsug HD39J1230GW wifi sovitin, erä</div>
I am trying to extract Samsug HD39J1230GW wifi sovitin, erä
The elements are inside an iframe whose name is Gigantti Outlet. You need to switch to the iframe first. Try the code below:
Induce WebDriverWait and frame_to_be_available_and_switch_to_it()
Induce WebDriverWait and visibility_of_all_elements_located()
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
chrome_path = r"C:\Users\User1\Downloads\chromedriver_win32\chromedriver.exe"
driver = webdriver.Chrome(chrome_path)
driver.get("https://www.gigantti.fi/cms/gigantti-outlet/gigantti-outlet/")
WebDriverWait(driver,10).until(EC.frame_to_be_available_and_switch_to_it((By.NAME,"Gigantti Outlet")))
posts1=WebDriverWait(driver,10).until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR,".description")))
for post in posts1:
    print(post.text)
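If you need to interact with elements outside the iframe afterwards, switch back to the top-level document first:

# Return to the top-level document once done inside the iframe.
driver.switch_to.default_content()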

I want to extract 3rd line from a text input

I have to extract the third line of a text.
import re
import time
from bs4 import BeautifulSoup
from selenium import webdriver

URL = 'https://simonsmith.github.io/github-user-search/#/search?q=benjamn'
browser = webdriver.Chrome()
browser.get(URL)
time.sleep(20)
content = browser.page_source
soup = BeautifulSoup(content, 'html.parser')
for link in soup.find_all('a'):
    n = link.get('href')
    n = re.sub(r"\#", '', n)
    print(n)
In this example I would only collect benjamn from the output and discard the rest.
OUTPUT:
/
https://developer.github.com/v3/
/benjamn
/BenjamNathan
/benjamni
/benjamnnzz
/BenjamnTal
/benjamncresnik
/benjamn1012990
/benjamnsmith
/benjamn77
/BENJAMNDO4FO
/benjamnzzzz
/benjamn25
/benjamnn
/benjamn2
/benjamnwilliams
https://github.com/simonsmith/github-user-search
You wrote:
for link in soup.find_all('a'):
Suppose you instead had:
links = list(soup.find_all('a'))
for link in links:
Then links[2] would contain the desired link.
Equivalently, you could use:
for i, link in enumerate(soup.find_all('a')):
and focus on the particular link where i == 2.
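Putting that together with the original loop, a minimal sketch (it relies on the link order shown in the output above staying stable):

# Collect every href, then keep only the third one (index 2),
# which is '/benjamn' in the sample output.
links = [link.get('href') for link in soup.find_all('a')]
print(re.sub(r"\#", '', links[2]))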
You can instead refine your criteria and use a CSS selector. If you use either
li .u-flex
or
[class^=User].u-flex
(the first being faster), you will get only the 15 links for people. If you then use find_element_by_css_selector, you will return only the first match.
That is:
browser.find_element_by_css_selector("li .u-flex").get_attribute("href")
No need for BeautifulSoup but the equivalent is:
soup.select_one('li .u-flex')['href']
To fetch the value benjamn, use WebDriverWait and element_to_be_clickable with the following XPath.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
URL = 'https://simonsmith.github.io/github-user-search/#/search?q=benjamn'
browser = webdriver.Chrome()
browser.get(URL)
element = WebDriverWait(browser, 15).until(EC.element_to_be_clickable(
    (By.XPATH, "(//a[starts-with(@class,'User_')]//p[starts-with(@class,'User_')])[1]")))
print(element.text)
Output printed on console:
benjamn
To print all the text values, use the following code.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
URL = 'https://simonsmith.github.io/github-user-search/#/search?q=benjamn'
browser = webdriver.Chrome()
browser.get(URL)
elements = WebDriverWait(browser, 15).until(EC.visibility_of_all_elements_located(
    (By.XPATH, "//a[starts-with(@class,'User_')]//p[starts-with(@class,'User_')]")))
for element in elements:
    print(element.text)
Output:
benjamn
BenjamNathan
benjamni
benjamnnzz
BenjamnTal
benjamncresnik
benjamn1012990
benjamnsmith
benjamn77
BENJAMNDO4FO
benjamnzzzz
benjamn25
benjamnn
benjamn2
benjamnwilliams
You can grab that link using Selenium, making use of XPath and without hardcoding the index, like the following:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
LINK = 'https://simonsmith.github.io/github-user-search/#/search?q=benjamn'
with webdriver.Chrome() as driver:
    wait = WebDriverWait(driver, 10)
    driver.get(LINK)
    expected_link = wait.until(EC.presence_of_element_located(
        (By.XPATH, "//a[./*[contains(@class,'username')]]")))
    print(expected_link.get_attribute("href"))
Output:
https://simonsmith.github.io/github-user-search/#/benjamn
