How to check if auto suggestion exists? - python-3.x

I need to check whether the window with search suggestions exists. When you type something into the search field, a list of suggested searches appears. I need to check whether this pop-up window exists.
That window
Code trials:
import time
import unittest

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException


class YandexSearchRu(unittest.TestCase):
    """End-to-end check that searching on yandex.ru works."""

    def setUp(self):
        self.driver = webdriver.Chrome()

    def test_search(self):
        driver = self.driver
        driver.get("http://www.yandex.ru")
        try:
            # '@id', not '#id' — '#' is not valid inside an XPath predicate.
            search_input = driver.find_element_by_xpath("//*[@id='text']")
        except NoSuchElementException:
            # Fail explicitly: the original closed the driver and then used an
            # unbound variable, which raised NameError instead of a clear error.
            self.fail("Search input //*[@id='text'] not found")
        search_input.send_keys("Тензор")
        search_input.send_keys(Keys.RETURN)
        time.sleep(5)

    def tearDown(self):
        self.driver.close()


if __name__ == "__main__":
    unittest.main()

Try to do this:
driver.get("http://www.yandex.ru")
try:
    # '@id', not '#id', in the XPath predicate.
    search_box = driver.find_element_by_xpath("//*[@id='text']")
    search_box.send_keys("adfadf")
    # CSS selector had a stray space ("div. i-bem"); "div.i-bem.popup" is intended.
    popup = WebDriverWait(driver, 20).until(EC.element_to_be_clickable(
        (By.CSS_SELECTOR, "body > div.i-bem.popup > div.popup__content")))
    if popup.is_displayed():
        print("popup displayed")
    else:
        print("popup not visible")
except NoSuchElementException:
    # The original except clause had an empty body, which is a SyntaxError.
    print("search input not found")

The element isn't a pop up window but Auto Suggestions and to extract the Auto Suggestions you can use the following solution:
Code Block:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
# Configure Chrome: start maximized, hide infobars, no extensions.
options = webdriver.ChromeOptions()
options.add_argument('start-maximized')
options.add_argument('disable-infobars')
options.add_argument('--disable-extensions')
driver = webdriver.Chrome(chrome_options=options, executable_path=r'C:\WebDrivers\chromedriver.exe')
driver.get('http://www.yandex.ru')
# Type the query into the search input once it is clickable.
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "input.input__control.input__input"))).send_keys("Тензор")
# Wait for the auto-suggestion items to become visible and print their texts.
print([auto_suggestion.text for auto_suggestion in WebDriverWait(driver, 5).until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, "div.popup__content>div.suggest2__content.suggest2__content_theme_normal li>span.suggest2-item__text")))])
Console Output:
['тензор', 'тензор официальный сайт', 'тензор техподдержка', 'тензорное исчисление', 'тензор спб', 'тензорные ядра', 'тензорный анализ', 'тензор эцп', 'тензорезистор', 'тензор инерции']

Related

How to close Shopee pop up in selenium

I want to close the popup for the site https://shopee.com.my/ in selenium
Please check the below image.
I tried below, but getting errors like
NoSuchElementException: no such element: Unable to locate element: {"method":"css selector","selector":".shopee-popup__close-btn"}
Code:
# Fails with NoSuchElementException: the close button lives inside a shadow
# root, so a plain DOM lookup cannot see it — it must be queried through the
# element's shadowRoot via execute_script instead.
driver.find_element_by_class_name("shopee-popup__close-btn").click()
It's in shadow root.
Code:
driver_path = r'C:\\Users\\***\\***\\chromedriver.exe'
driver = webdriver.Chrome(driver_path)
driver.maximize_window()
wait = WebDriverWait(driver, 30)
driver.get("https://shopee.com.my/")
wait.until(EC.element_to_be_clickable((By.XPATH, "//button[text()='English']"))).click()
try:
    time.sleep(3)
    # The popup's close button is inside a shadow root, so it has to be
    # resolved with JavaScript via the host element's shadowRoot.
    close_btn = driver.execute_script(
        'return document.querySelector("#main shopee-banner-popup-stateful")'
        '.shadowRoot.querySelector("div.home-popup__close-area div.shopee-popup__close-btn")')
    close_btn.click()
    print('Clicked successfully')
except Exception:
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit still propagate.
    print('Could not clicked')
Imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

Python Selenium Element not found and Not Intractable

I'm trying to scrape from the moneycontrol.com. When I tried to send value in the search box I keep getting the same error in except block as "Element not Found".
I tried using XPath id as well as using the full XPath but in both cases, it doesn't work.
WITHOUT MAXIMIZING THE WINDOW
XPath id - //*[@id="search_str"]
Full XPath - /html/body/div[1]/header/div[1]/div[1]/div/div/div[2]/div/div/form/input[5]
Attaching the full code below:
import time

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys


def search_stock():
    """Open moneycontrol.com and search for a stock via the header search box."""
    driver = webdriver.Chrome(r'./chromedriver')
    driver.get('https://www.moneycontrol.com/')
    time.sleep(5)
    # '@id', not '#id' — '#' is not valid inside an XPath predicate.
    search_icon = driver.find_element_by_xpath('//*[@id="fixedheader"]/div[4]/span')
    search_icon.click()
    time.sleep(2)
    try:
        search_box = driver.find_element_by_xpath('//*[@id="search_str"]')
        print("Element is visible? " + str(search_box.is_displayed()))
        time.sleep(10)
        if search_box.is_displayed():
            search_box.send_keys('Zomato')
            search_box.send_keys(Keys.RETURN)
    except NoSuchElementException:
        print("Element not found")
    driver.close()


search_stock()
Sometimes it starts working, but most of the time it throws exceptions and errors. I have been struggling for 3 days, but none of the solutions work.
Web scraping like that seems quite inefficient; it is probably better to use requests and bs4. However, if you want to do it like this, you could try using action chains (found here). Or you can do driver.get('https://www.moneycontrol.com/india/stockpricequote/consumer-food/zomato/Z') from the start instead of typing it in.
You may wanna try the below code :
def search_stock():
    """Search for 'Zomato' on moneycontrol.com using explicit waits."""
    driver = webdriver.Chrome(r'./chromedriver')
    driver.maximize_window()
    driver.implicitly_wait(30)
    driver.get('https://www.moneycontrol.com/')
    wait = WebDriverWait(driver, 10)
    time.sleep(5)
    try:
        box_locator = (By.CSS_SELECTOR, "input[id='search_str']")
        # Hover over the input first so it becomes interactable.
        hovered = wait.until(EC.element_to_be_clickable(box_locator))
        ActionChains(driver).move_to_element(hovered).perform()
        search_box = wait.until(EC.element_to_be_clickable(box_locator))
        print("Element is visible? ", search_box.is_displayed())
        time.sleep(10)
        if search_box.is_displayed():
            search_box.send_keys('Zomato')
            search_box.send_keys(Keys.RETURN)
    except NoSuchElementException:
        print("Element not found")
Imports :
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
Try clicking on search_box and only after that sending text there.
# '@id', not '#id', inside the XPath predicates.
search_box = driver.find_element_by_xpath('//form[@id="form_topsearch"]//input[@id="search_str"]')
# Click first to give the field focus, then type.
search_box.click()
time.sleep(0.1)
search_box.send_keys('Zomato')
search_box.send_keys(Keys.RETURN)
Additionally I would advise you using explicit waits of expected conditions instead of hardcoded sleeps.
With it your code will be faster and more reliable.
import time

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


def search_stock():
    """Search for 'Zomato' using explicit waits instead of hard-coded sleeps."""
    driver = webdriver.Chrome(r'./chromedriver')
    wait = WebDriverWait(driver, 20)
    driver.get('https://www.moneycontrol.com/')
    # The original line was missing a closing parenthesis; XPaths also need
    # '@id' rather than '#id'.
    wait.until(EC.element_to_be_clickable(
        (By.XPATH, '//*[@id="fixedheader"]/div[4]/span'))).click()
    search_box = wait.until(EC.element_to_be_clickable(
        (By.XPATH, '//form[@id="form_topsearch"]//input[@id="search_str"]')))
    search_box.send_keys('Zomato')
    search_box.send_keys(Keys.RETURN)
    # I'm not sure you should close the driver immediately after starting the search...
    # driver.close()


search_stock()
UPD
Let's try this
import time

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains


def search_stock():
    """Scroll to and click the search icon via JavaScript, then search 'Zomato'."""
    driver = webdriver.Chrome(r'./chromedriver')
    wait = WebDriverWait(driver, 20)
    actions = ActionChains(driver)
    driver.get('https://www.moneycontrol.com/')
    # Keep the element reference: the original chained .click() onto the wait,
    # which stored None and then passed None to scrollIntoView below. The line
    # was also missing a closing parenthesis, and XPaths need '@id', not '#id'.
    search_icon = wait.until(EC.presence_of_element_located(
        (By.XPATH, '//*[@id="fixedheader"]/div[4]/span')))
    time.sleep(0.5)
    driver.execute_script("arguments[0].scrollIntoView();", search_icon)
    driver.execute_script("arguments[0].click();", search_icon)
    search_box = wait.until(EC.presence_of_element_located(
        (By.XPATH, '//form[@id="form_topsearch"]//input[@id="search_str"]')))
    # The second scroll/click targets the search box (the original repeated
    # search_icon here, which looks like a copy-paste slip).
    driver.execute_script("arguments[0].scrollIntoView();", search_box)
    driver.execute_script("arguments[0].click();", search_box)
    time.sleep(0.5)
    search_box.send_keys('Zomato')
    search_box.send_keys(Keys.RETURN)
    # I'm not sure you should close the driver immediately after starting the search...
    # driver.close()


search_stock()
If the above solution is still not working instead of
actions.move_to_element(search_box).click().perform()
try
driver.execute_script("arguments[0].click();", search_box)

Click on show more button; selenium scrape with python

I'm trying to scrape a website with show more button; and I'm not able to click on it.
The website is: https://www.wtatennis.com/rankings/singles
And my code is:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver import ActionChains
from tqdm import tqdm
import time

options = Options()
options.add_argument("--headless")
browser = webdriver.Chrome(ChromeDriverManager().install(), options=options)
browser.get('https://www.wtatennis.com/rankings/singles')
action = ActionChains(browser)
# find_element (singular): find_elements returns a list, which cannot be moved
# to or clicked. The XPath also needs '@class', not '#class', and should start
# at the root ('//'), not relative to a current node ('.//').
showmore = browser.find_element_by_xpath(
    "//button[contains(@class, 'btn widget-footer__more-button rankings__show-more js-show-more-button')]")
action.move_to_element(showmore).perform()
showmore.click()
time.sleep(5)
Has anyone any idea? Thanks!
Don't use './/' in your locator when you are starting the search from the root: since there is no current element, your locator won't find any element. Also, you can use any attribute to find elements uniquely. See the code below:
browser = webdriver.Chrome(options=options)
browser.get('https://www.wtatennis.com/rankings/singles')
# '@data-text', not '#data-text', inside the XPath predicates.
WebDriverWait(browser, 10).until(EC.element_to_be_clickable(
    (By.XPATH, '//*[@data-text="Accept Cookies"]'))).click()
WebDriverWait(browser, 10).until(EC.element_to_be_clickable(
    (By.XPATH, '//*[@data-text = "Show More"]'))).click()
use webdriver wait and data attributes
To use the wait, import:
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
To wait till all elements are loaded, you have to make sure the last element is not changing; if it is changing, keep scrolling.
browser.get('https://www.wtatennis.com/rankings/singles')
# '@data-text' / '@class', not '#...' — '#' is invalid in XPath predicates.
WebDriverWait(browser, 10).until(EC.element_to_be_clickable(
    (By.XPATH, '//*[@data-text="Accept Cookies"]'))).click()
value = "start"
WebDriverWait(browser, 10).until(EC.element_to_be_clickable(
    (By.XPATH, '//*[@data-text = "Show More"]'))).click()
# Keep scrolling to the "Loading" marker until the text of the last ranking
# row stops changing, i.e. no more rows are being lazy-loaded.
while browser.find_element_by_xpath("(//tr[@class='rankings__row'])[last()]").text != value:
    elem = browser.find_element_by_xpath('(//*[contains(text(),"Loading")])[2]')
    value = browser.find_element_by_xpath("(//tr[@class='rankings__row'])[last()]").text
    browser.execute_script("arguments[0].scrollIntoView()", elem)
    WebDriverWait(browser, 10).until(EC.presence_of_all_elements_located(
        (By.XPATH, "//tr[@class='rankings__row']")))
    try:
        WebDriverWait(browser, 10).until_not(EC.text_to_be_present_in_element(
            (By.XPATH, "(//tr[@class='rankings__row'])[last()]"), value))
    except Exception:
        # Narrowed from a bare 'except: None' — a timeout here just means the
        # last row did not change, so the outer loop condition will end the loop.
        pass

Python Code to add songs to a Spotify playlist using Selenium

After logging in, my program starts looping over a list of songs to add them to my Spotify Playlist. But after the first loop, it raises the "stale element reference: element is not attached to the page document" exception.
Link I'm working on
driver = webdriver.Chrome()
driver.maximize_window()
actionChain = ActionChains(driver)
driver.get('https://open.spotify.com/browse/featured')

# Login procedure ('@', not '#', in the e-mail address).
psw = ''
driver.find_element_by_xpath("//button[2]").click()
sleep(1)
driver.find_element_by_id('login-username').send_keys('abc@yahoo.com')
driver.find_element_by_id('login-password').send_keys(psw)
driver.find_element_by_id('login-button').click()

ignored_exceptions = (StaleElementReferenceException, NoSuchElementException)


def wdwfind(path):
    """Wait for the element at *path* to be present and return it."""
    return WebDriverWait(driver, 15, ignored_exceptions=ignored_exceptions).until(
        EC.presence_of_element_located((By.XPATH, path)))


def wdwclick(path):
    """Wait for the element at *path* to be clickable and return it."""
    return WebDriverWait(driver, 15, ignored_exceptions=ignored_exceptions).until(
        EC.element_to_be_clickable((By.XPATH, path)))


for song in songs:
    wdwfind("//li[2]/div/a/div/span").click()  # go to the search tab
    wdwfind("//input").send_keys(song)  # type the song name into the search bar
    # Right-click the song name; XPaths use '@class', not '#class'.
    gotosong = wdwclick("//a[@class='d9eb38f5d59f5fabd8ed07639aa3ab77-scss _59c935afb8f0130a69a7b07e50bac04b-scss']")
    actionChain.context_click(gotosong).perform()
    wdwfind("//nav/div[4]").click()  # select the "add to playlist" option
    wdwfind("//div[@class='mo-coverArt-hoverContainer']").click()  # pick the target playlist
    sleep(2)
    # Clear the search box before the next iteration.
    wdwclick("//input[@class='_2f8ed265fb69fb70c0c9afef329ae0b6-scss']").send_keys(
        Keys.SHIFT, Keys.ARROW_UP)
    driver.refresh()
    sleep(1)
I have investigated the reason behind your issue. Please find a solution below if you are trying to add songs to a playlist. You are facing the above issue due to dynamic elements in the DOM. Also, after running the code below I get a membership window, so I can't proceed further.
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
from selenium.webdriver.support.ui import WebDriverWait as Wait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import ActionChains

# Open Chrome
driver = webdriver.Chrome(executable_path=r"C:\New folder\chromedriver.exe")
driver.get("https://open.spotify.com/browse/featured")

# Go to the search tab and type a query. XPaths use '@class'/'@id', not '#...'.
element = WebDriverWait(driver, 10).until(
    EC.visibility_of_element_located((By.XPATH, "//li[2]/div/a/div/span")))
element.click()
WebDriverWait(driver, 20).until(
    EC.visibility_of_element_located(
        (By.XPATH, "//input[@class='SearchInputBox__input']"))).send_keys("songs")

# Log in (credentials intentionally left blank here).
driver.find_element_by_xpath("//*[text()='Log in']").click()
WebDriverWait(driver, 20).until(
    EC.visibility_of_element_located((By.XPATH, "//input[@id='login-username']"))).send_keys("")
WebDriverWait(driver, 20).until(
    EC.visibility_of_element_located((By.XPATH, "//input[@id='login-password']"))).send_keys("")
WebDriverWait(driver, 10).until(
    EC.visibility_of_element_located((By.XPATH, "//button[@id='login-button']"))).click()

items = WebDriverWait(driver, 20).until(EC.presence_of_all_elements_located(
    (By.XPATH, "//div[@class='react-contextmenu-wrapper']/div/div/a")))
print(len(items))
for song in items:
    print(song.text)  # Python 3 print(); the original used Python 2 'print x' syntax
    actionChains = ActionChains(driver)
    actionChains.context_click(song).perform()
    element = WebDriverWait(driver, 10).until(
        EC.visibility_of_element_located((By.XPATH, "//*[text()='Add to Playlist']")))
    element.click()
    element12 = WebDriverWait(driver, 10).until(
        EC.visibility_of_element_located(
            (By.XPATH, "//button[@class='btn asideButton-button btn-green btn-small']")))
    actionChains.move_to_element(element12).click().perform()
    actionChains.context_click(song).perform()
    element00 = WebDriverWait(driver, 20).until(
        EC.visibility_of_element_located(
            (By.XPATH, "//input[@class='inputBox-input']"))).send_keys("testPlayList")
    element11 = WebDriverWait(driver, 10).until(
        EC.visibility_of_element_located(
            (By.XPATH, "//div[@class='button-group button-group--horizontal']//div[2]/button")))
    actionChains.move_to_element(element11).click().perform()
    elem = WebDriverWait(driver, 20).until(
        EC.visibility_of_element_located(
            (By.XPATH, "//div[@class='TrackListHeader__entity-name']//span")))
    print(elem.text)  # Python 3 print()
    break

The web scraping issue in python, web page not loading in time

I am making a program for scrapping the Amazon websites mobile phones but my program is giving me timeout exception even after the page is loaded on time.
Here is my code
from selenium import webdriver  # was missing: webdriver.Firefox() below needs it
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup
import urllib.request


class Amazon_all_mobile_scraper:
    """Scrapes the mobile-phones category page of Amazon India."""

    def __init__(self):
        self.driver = webdriver.Firefox()
        self.delay = 60  # seconds to wait for the page to become ready
        self.url = "https://www.amazon.in/mobile-phones/b/ref=sd_allcat_sbc_mobcomp_all_mobiles?ie=UTF8&node=1389401031"

    def load_amazon(self):
        """Load the page and wait until the category links are present."""
        self.driver.get(self.url)
        try:
            wait = WebDriverWait(self.driver, self.delay)
            # The class is 'acs-ln-links' (plural); 'acs-ln-link' never matches,
            # which is why the wait timed out even though the page had loaded.
            wait.until(EC.presence_of_element_located((By.CLASS_NAME, "acs-ln-links")))
            print("Page is ready.")
        except TimeoutException:
            print("Took too much time to load!")
        except Exception:  # narrowed from a bare 'except'
            print("Something went wrong in loading part!!")

    def extract_list_of_mobiles(self):
        """Find and print the element that holds the mobile links."""
        try:
            # '@class', not '#class', inside the XPath predicate.
            mobile_list = self.driver.find_element_by_xpath('//div[@class = "acs-ln-links"]')
            print(mobile_list)
        except NoSuchElementException:
            print("Sorry, Unable to get the requested element")


scraper = Amazon_all_mobile_scraper()
scraper.load_amazon()
scraper.extract_list_of_mobiles()
Please help me figure out what's wrong with this code.
Only changing acs-ln-link to acs-ln-links will not do the trick. Your XPath should look more like '//div[contains(@class,"acs-ln-nav-expanded")]//*[@class="acs-ln-links"]//a'. This, however, is something you can use to get the required output:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By


class Amazon_all_mobile_scraper:
    """Collects the mobile-phone links from the Amazon India category page."""

    url = "https://www.amazon.in/mobile-phones/b/ref=sd_allcat_sbc_mobcomp_all_mobiles?ie=UTF8&node=1389401031"

    def __init__(self):
        self.driver = webdriver.Chrome()
        self.wait = WebDriverWait(self.driver, 15)

    def load_n_get_from_amazon(self):
        """Open the page and return all link elements once they are present."""
        self.driver.get(self.url)
        # '@class', not '#class', inside the XPath predicates.
        mobile_list = self.wait.until(EC.presence_of_all_elements_located(
            (By.XPATH, '//div[contains(@class,"acs-ln-nav-expanded")]//*[@class="acs-ln-links"]//a')))
        return mobile_list

    def __del__(self):
        self.driver.close()


if __name__ == '__main__':
    scraper = Amazon_all_mobile_scraper()
    for item in scraper.load_n_get_from_amazon():
        print(f'{item.text}\n{item.get_attribute("href")}\n')
The class wasn't matching "acs-ln-link" should be "acs-ln-links".

Resources