Why is selenium not applying "click" to a popup? - python-3.x

I know the XPath of this popup tab element is correct, but when I call filters_language_dropdown.click() followed by .send_keys(Keys.ENTER), nothing happens.
However, the same popup (press the 'Filters' button on this page to view it) does respond when I use the XPath of the initial button element instead (see code + images below), i.e. with filters_button.send_keys(...). What's going on?
# Imports used below
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager

# Initialize the browser and navigate to the page
browser = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
browser.get("https://www.facebook.com/ads/library/?active_status=all&ad_type=all&country=ALL&q=%22%20%22&sort_data[direction]=desc&sort_data[mode]=relevancy_monthly_grouped&search_type=keyword_exact_phrase&media_type=all")

# (In working order): Look for keyword, make it clickable, clear existing data in box, enter new info, keep page open for 10 seconds
search_box = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, "//input[@placeholder='Search by keyword or advertiser']")))
search_box.click()
search_box.clear()
search_box.send_keys("" "" + Keys.ENTER)
time.sleep(3)

# Activating the filters (English, active ads, date from (last 2 days) to today)
filters_button = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, "//body[1]/div[1]/div[1]/div[1]/div[1]/div[1]/div[1]/div[5]/div[2]/div[1]/div[1]/div[3]/div[1]/div[1]/div[1]/div[1]/div[1]/div[1]/div[1]")))
filters_button.click()
filters_button.send_keys(Keys.ENTER)
time.sleep(3)

filters_language_dropdown = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, "//div[@id='js_knd']//div[@class='x6s0dn4 x78zum5 x13fuv20 xu3j5b3 x1q0q8m5 x26u7qi x178xt8z xm81vs4 xso031l xy80clv xb9moi8 xfth1om x21b0me xmls85d xhk9q7s x1otrzb0 x1i1ezom x1o6z2jb x1gzqxud x108nfp6 xm7lytj x1ykpatu xlu9dua x1w2lkzu']")))
filters_language_dropdown.click()
filters_language_dropdown.send_keys(Keys.ENTER)
time.sleep(3)

Use the following XPath to click on the Filters button, then click All languages, and then click English. If you want another language, say 'French', pass that text instead of 'English'.
Code:
browser.get("https://www.facebook.com/ads/library/?active_status=all&ad_type=all&country=ALL&q=%22%20%22&sort_data[direction]=desc&sort_data[mode]=relevancy_monthly_grouped&search_type=keyword_exact_phrase&media_type=all")
search=WebDriverWait(browser,10).until(EC.visibility_of_element_located((By.XPATH,"//input[@placeholder='Search by keyword or advertiser']")))
search.click()
search.clear()
search.send_keys("" "" + Keys.ENTER)
WebDriverWait(browser,10).until(EC.element_to_be_clickable((By.XPATH,"//div[text()='Filters']"))).click()
WebDriverWait(browser,10).until(EC.element_to_be_clickable((By.XPATH,"//div[text()='All languages']"))).click()
WebDriverWait(browser,10).until(EC.element_to_be_clickable((By.XPATH,"//div[text()='English']"))).click()
time.sleep(10) # to check the operation
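If you want to parameterize the language rather than hard-coding 'English', the same text-based locator can be reused. A minimal sketch (the label must match the text shown in the filter list exactly; 'French' is just a placeholder):
language = "French"  # placeholder; any label visible in the language filter list
WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, "//div[text()='" + language + "']"))).click()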

Related

Refresh a page if an element is not visible and click, else click python selenium

I'm dealing with a very unresponsive scroll bar in a window on a Tableau dashboard. I cannot scroll to the element with any code, whether Python or JavaScript execution, so I need to write a condition inside my for loop that refreshes the page and clicks a location (the scroll bar) if the element isn't present, then checks again for the element's presence. I can't seem to get it working.
for key, value in series_dict.items():
    xpath = driver.find_element_by_xpath('//*[@id="' + value + '"]') #how do i make this a boolean
    while True:
        if xpath == False:
            driver.refresh()
            #click the scroll bar here then break and start the whole for loop over
        else:
            try:
                series_click = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="' + value + '"]')))
                series_click.click()
                time.sleep(5)
                download_click = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="download"]/span[3]')))
                download_click.click()
                time.sleep(1)
            except TimeoutError as e:
                print('Error: {}'.format(str(e)))
                continue
            break
I've never nested so many conditionals before and I think I've gotten my continues, breaks, and order mixed up. I also don't know how to make my xpath object a boolean so that I can test it.
Any help with this would be appreciated.
Thank you!
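Not the poster's code, but a minimal sketch of one way to get that boolean: find_elements (plural) returns a list, which is empty (and therefore falsy) when nothing matches, so it can drive the refresh branch without raising an exception. It assumes driver, wait, series_dict, By and EC are set up as in the question, and notes that Selenium waits raise TimeoutException rather than the built-in TimeoutError.
from selenium.common.exceptions import TimeoutException

for key, value in series_dict.items():
    while True:
        # find_elements returns [] instead of raising, so the list doubles as a boolean
        if not driver.find_elements_by_xpath('//*[@id="' + value + '"]'):
            driver.refresh()
            # click the scroll bar here, then re-check for the element on the next pass
            continue
        try:
            series_click = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="' + value + '"]')))
            series_click.click()
            download_click = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="download"]/span[3]')))
            download_click.click()
        except TimeoutException as e:
            print('Error: {}'.format(str(e)))
            continue
        break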

How to send a tab key input into web-browser with selenium and python

I am trying to get this script to send a tab key input after inserting data into entry fields.
The reason is so the next set of data will be entered into the next row.
I've tried using the send_keys method to send the Tab key event to the browser, but I get an error. Is there a correct way to send a Tab keyboard input based on this code sample?
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select

def GetQuote():
    for i in range(len(orders)):
        #description
        driver.find_element(By.XPATH, "/html[1]/body[1]/app-root[1]/div[1]/div[1]/app-record[1]/div[1]/div[2]/div[1]/app-record-quoting[1]/div[1]/app-record-product-list-panel[1]/form[1]/div[3]/div[1]/div[1]/div[1]/input[1]").send_keys(orders[i]['description'])
        #dropdown menu for Handling Unit
        select_element = Select(driver.find_element_by_xpath('/html[1]/body[1]/app-root[1]/div[1]/div[1]/app-record[1]/div[1]/div[2]/div[1]/app-record-quoting[1]/div[1]/app-record-product-list-panel[1]/form[1]/div[3]/div[1]/div[3]/div[1]/select[1]'))
        select_element.select_by_value('1')
        driver.implicitly_wait(1)
        driver.find_element(By.CSS_SELECTOR, "input[formControlName=handlingQty]").send_keys(orders[i]['handling unit'])
        driver.find_element(By.CSS_SELECTOR, "input[formControlName=packageQty]").send_keys(orders[i]['pieces'])
        driver.find_element(By.CSS_SELECTOR, "input[formControlName=length]").send_keys(orders[i]['length'])
        driver.find_element(By.CSS_SELECTOR, "input[formControlName=width]").send_keys(orders[i]['width'])
        driver.implicitly_wait(1)
        driver.find_element(By.CSS_SELECTOR, "input[formControlName=height]").send_keys(orders[i]['height'])
        element = driver.find_element(By.CSS_SELECTOR, "input[formControlName=weight]")
        element.send_keys(orders[i]['weight'])
        driver.implicitly_wait(1)
        if i < len(orders):
            element.send_keys(Keys.TAB)
        driver.find_element(By.XPATH, "//button[@class='btn-filled clickable']").click() #generate quote button
Try to send the Tab key to the element:
element = driver.find_element(By.CSS_SELECTOR, "input[formControlName=weight]")
element.send_keys(orders[i]['weight'])
if i < len(orders):
    element.send_keys(Keys.TAB)
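If send_keys on the element still raises an error, an alternative sketch (not from the answer above) is to dispatch the Tab key through ActionChains, which sends it to whatever element currently has focus:
from selenium.webdriver import ActionChains

ActionChains(driver).send_keys(Keys.TAB).perform()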

python3 More button clickable in the 1st page but NOT clickable in the 2nd page

This is an extended question about how to click a 'More' button on a webpage.
Below is my previous question, which one person kindly answered.
Since I'm not that familiar with the 'find element by class name' function, I just added that person's revised code to my existing code, so my revised code may not be efficient (my apologies).
Python click 'More' button is not working
The situation is that there are two types of 'More' button: the first is in the property description section and the second is in the text reviews section. If you click any single 'More' button in the reviews, the reviews are expanded so that you can see their full text.
The issue I run into is that I can click the 'More' button for the reviews on the 1st page, but it is not clickable for the reviews on the 2nd page.
Below is the error message I get, but my code still runs (it does not stop when it sees an error).
Message:
no such element: Unable to locate element: {"method":"tag name","selector":"span"}
Based on my understanding, there is an entry class and a corresponding span for every review, so I don't understand why Python says it can't find it.
import time
from selenium import webdriver
from selenium.webdriver import ActionChains
from bs4 import BeautifulSoup

review_list=[]
review_appended_list=[]
review_list_v2=[]
review_appended_list_v2=[]
listed_reviews=[]
listed_reviews_v2=[]
listed_reviews_total=[]
listed_reviews_total_v2=[]
final_list=[]

#Incognito Mode
option = webdriver.ChromeOptions()
option.add_argument("--incognito")

#Open Chrome
driver = webdriver.Chrome(executable_path="C:/Users/chromedriver.exe", options=option)

#url I want to visit (I'm going to loop over multiple listings but for simplicity, I just added one listing url).
lists = ['https://www.tripadvisor.com/VacationRentalReview-g30196-d6386734-Hot_51st_St_Walk_to_Mueller_2BDR_Modern_sleeps_7-Austin_Texas.html']

for k in lists:
    driver.get(k)
    time.sleep(3)

    #click 'More' on description part.
    link = driver.find_element_by_link_text('More')
    try:
        ActionChains(driver).move_to_element(link)
        time.sleep(1) # time to move to link
        link.click()
        time.sleep(1) # time to update HTML
    except Exception as ex:
        print(ex)
    time.sleep(3)

    # first "More" shows text in all reviews - there is no need to search other "More"
    try:
        first_entry = driver.find_element_by_class_name('entry')
        more = first_entry.find_element_by_tag_name('span')
        #more = first_entry.find_element_by_link_text('More')
    except Exception as ex:
        print(ex)
    try:
        ActionChains(driver).move_to_element(more)
        time.sleep(1) # time to move to link
        more.click()
        time.sleep(1) # time to update HTML
    except Exception as ex:
        print(ex)

    #begin parsing html and scraping data.
    html = driver.page_source
    soup = BeautifulSoup(html, "html.parser")
    listing = soup.find_all("div", class_="review-container")
    all_reviews = driver.find_elements_by_class_name('wrap')
    for review in all_reviews:
        all_entries = review.find_elements_by_class_name('partial_entry')
        if all_entries:
            review_list = [all_entries[0].text]
            review_appended_list.extend([review_list])
    for i in range(len(listing)):
        review_id = listing[i]["data-reviewid"]
        listing_v1 = soup.find_all("div", class_="rating reviewItemInline")
        rating = listing_v1[i].span["class"][1]
        review_date = listing_v1[i].find("span", class_="ratingDate relativeDate")
        review_date_detail = review_date["title"]
        listed_reviews = [review_id, review_date_detail, rating[7:8]]
        listed_reviews.extend([k])
        listed_reviews_total.append(listed_reviews)
    for a, b in zip(listed_reviews_total, review_appended_list):
        final_list.append(a + b)

    #loop over from the 2nd page of the reviews for the same listing.
    for j in range(5, 20, 5):
        url_1 = '-'.join(k.split('-', 3)[:3])
        url_2 = '-'.join(k.split('-', 3)[3:4])
        middle = "-or%d-" % j
        final_k = url_1 + middle + url_2
        driver.get(final_k)
        time.sleep(3)
        link = driver.find_element_by_link_text('More')
        try:
            ActionChains(driver).move_to_element(link)
            time.sleep(1) # time to move to link
            link.click()
            time.sleep(1) # time to update HTML
        except Exception as ex:
            print(ex)

        # first "More" shows text in all reviews - there is no need to search other "More"
        try:
            first_entry = driver.find_element_by_class_name('entry')
            more = first_entry.find_element_by_tag_name('span')
        except Exception as ex:
            print(ex)
        try:
            ActionChains(driver).move_to_element(more)
            time.sleep(2) # time to move to link
            more.click()
            time.sleep(2) # time to update HTML
        except Exception as ex:
            print(ex)

        html = driver.page_source
        soup = BeautifulSoup(html, "html.parser")
        listing = soup.find_all("div", class_="review-container")
        all_reviews = driver.find_elements_by_class_name('wrap')
        for review in all_reviews:
            all_entries = review.find_elements_by_class_name('partial_entry')
            if all_entries:
                #print('--- review ---')
                #print(all_entries[0].text)
                #print('--- end ---')
                review_list_v2 = [all_entries[0].text]
                #print(review_list)
                review_appended_list_v2.extend([review_list_v2])
                #print(review_appended_list)
        for i in range(len(listing)):
            review_id = listing[i]["data-reviewid"]
            #print(review_id)
            listing_v1 = soup.find_all("div", class_="rating reviewItemInline")
            rating = listing_v1[i].span["class"][1]
            review_date = listing_v1[i].find("span", class_="ratingDate relativeDate")
            review_date_detail = review_date["title"]
            listed_reviews_v2 = [review_id, review_date_detail, rating[7:8]]
            listed_reviews_v2.extend([k])
            listed_reviews_total_v2.append(listed_reviews_v2)
        for a, b in zip(listed_reviews_total_v2, review_appended_list_v2):
            final_list.append(a + b)
        print(final_list)
        if len(listing) != 5:
            break
How can I enable clicking the 'More' button on the 2nd and subsequent pages so that I can scrape the full text of the reviews?
Edited below:
The error messages I get are these two lines:
Message: no such element: Unable to locate element: {"method":"tag name","selector":"span"}
Message: stale element reference: element is not attached to the page document
I guess my whole script still runs because I used try/except blocks? Usually, when Python runs into an error, it stops running.
Try it like this; clicking through execute_script dispatches the click directly in the DOM, so it is not rejected when the element is scrolled out of view or covered by another element:
driver.execute_script("""
arguments[0].click()
""", link)

Python Selenium DOM Click event does not work as intended

I am trying to click the pagination links (the next button). However, the click navigates to the site homepage. I am targeting the element by class. What could be wrong?
driver.get('https://www.marinetraffic.com/en/data/?asset_type=vessels&columns=flag,shipname,photo,recognized_next_port,reported_eta,reported_destination,current_port,imo,ship_type,show_on_live_map,time_of_latest_position,lat_of_latest_position,lon_of_latest_position&current_port_in|begins|FUJAIRAH%20ANCH|current_port_in=20585')
# Wait 30 seconds for page to load
timeout = 30
try:
    WebDriverWait(driver, timeout).until(EC.presence_of_element_located((By.CLASS_NAME, 'MuiButtonBase-root-60')))
    element = driver.find_element_by_class_name('MuiButtonBase-root-60')
    driver.execute_script("arguments[0].click();", element)
except TimeoutException:
    print("Timed out waiting for page to load")
    driver.quit()
Use the following code:
driver.get('https://www.marinetraffic.com/en/data/?asset_type=vessels&columns=flag,shipname,photo,recognized_next_port,reported_eta,reported_destination,current_port,imo,ship_type,show_on_live_map,time_of_latest_position,lat_of_latest_position,lon_of_latest_position&current_port_in|begins|FUJAIRAH%20ANCH|current_port_in=20585')
# Wait 30 seconds for page to load
timeout = 30
try:
    element = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//p[text()='Page']//..//following-sibling::button")))
    driver.execute_script("arguments[0].click();", element)
except TimeoutException:
    print("Timed out waiting for page to load")
    driver.quit()
There are 33 elements with this class; find_element_by_class_name returns the first one, which is located in the header. You can use the footer as a starting point to narrow down the options and select the second button by index (there is no difference between the next and previous buttons when both of them are available):
element = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.r-mtGridFooter-302 button:nth-of-type(2)')))
element.click()
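If the generated suffix in the class name (the -302 part) changes between builds, a more tolerant variant of the same idea is to match only the stable part of the footer class and index into its buttons. A sketch under that assumption, reusing driver and timeout from the question:
footer = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "[class*='mtGridFooter']")))
# the second button in the footer is "next" when both previous and next are rendered
footer.find_elements_by_tag_name('button')[1].click()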

Selenium execute_script window.scrollTo not scrolling window

I am trying to click on the "Next" button at the bottom of a BusinessWire web page. I have some code that goes from the BusinessWire homepage to my desired search results page. I want to be able to click on that page's "Next" button, but I get an error message telling me that the "Next" element is not clickable at point (X, Y). The "Next" button is at the bottom of the window. For some reason the
browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
command is not scrolling as I would expect. Because the window is not scrolling as expected, the element is not visible to be clicked (at least I believe that is the problem). I use this same command twice earlier in the code and it works fine in both of those instances. Any help getting the window to scroll would be greatly appreciated. Here is my code:
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

def BusinessWire():
    browser = webdriver.Chrome()
    browser.get('http://www.businesswire.com/portal/site/home/')
    search_box_element = browser.find_element_by_id('bw-search-input')
    search_box_element.clear()
    search_box_element.send_keys('biotechnology')
    search_box_element.send_keys(Keys.ENTER)
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    search_box_element_two = browser.find_element_by_id('more-news-results')
    search_box_element_two.click()
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(5)
    next_page_click_element = WebDriverWait(browser, 10).until(
        EC.presence_of_element_located((By.XPATH, '//*[@id="more-news-pagination"]/div/div[1]/div/a'))
    )
    next_page_click_element.click()
Try using scrollIntoView on the element you want to scroll to. Sorry, this is in Java, but it should work the same in Python:
driver.manage().window().maximize();
driver.get("http://www.businesswire.com/portal/site/home/");
WebDriverWait wait = new WebDriverWait(driver, 10);
driver.findElement(By.id("bw-search-input")).clear();
driver.findElement(By.id("bw-search-input")).sendKeys("biotechnology");
driver.findElement(By.id("bw-search-input")).sendKeys(Keys.ENTER);
WebElement clicklink = driver.findElement(By.id("more-news-results"));
((JavascriptExecutor) driver).executeScript("arguments[0].scrollIntoView(true);", clicklink);
clicklink.click();
Thread.sleep(1000);
WebElement clicknext = wait.until(ExpectedConditions.presenceOfElementLocated(By.xpath("//*[@id=\"more-news-pagination\"]/div/div[1]/div/a")));
((JavascriptExecutor) driver).executeScript("arguments[0].scrollIntoView(true);", clicknext);
clicknext.click();
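Since the question is in Python, a rough translation of the same idea (untested, reusing the locators and imports from the question's code) would be:
browser.maximize_window()
browser.get("http://www.businesswire.com/portal/site/home/")
wait = WebDriverWait(browser, 10)

search_box = browser.find_element_by_id("bw-search-input")
search_box.clear()
search_box.send_keys("biotechnology")
search_box.send_keys(Keys.ENTER)

click_link = browser.find_element_by_id("more-news-results")
# scroll the element itself into view instead of scrolling the window to the bottom
browser.execute_script("arguments[0].scrollIntoView(true);", click_link)
click_link.click()
time.sleep(1)

click_next = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id="more-news-pagination"]/div/div[1]/div/a')))
browser.execute_script("arguments[0].scrollIntoView(true);", click_next)
click_next.click()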
Stack Overflow won't let me post any more comments. The URL is in the code, but the homepage URL is not the problem page. The problem page is the search results page reached after the homepage; it can only be reached through a search on the homepage. My code does all this.
