How to modify a field in a URL using Python 3 - python-3.x

Hi all, I need to modify a URL using a Python 3 script. For example, here is my script output: "http://10.10.10.5/dvwa/vulnerabilities/fi/?page=include.php". I would like to remove "include.php" from the output, so that it becomes "http://10.10.10.5/dvwa/vulnerabilities/fi/?page=". How can I do this?
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urljoin  # these live in urllib.parse, not urllib.request

url = "http://10.10.10.5/dvwa/index.php"
cookies = {'security': 'low', 'PHPSESSID': 'ev5mlspqdiklrgaqfqbh00act0'}
r = requests.post(url, cookies=cookies)
soup = BeautifulSoup(r.text, 'lxml')
for link in soup.findAll('a', href=True):
    href_csrf = link['href']
    if "fi" in href_csrf:
        csrf_url = urljoin(url, href_csrf)
        print("Found fi : ", csrf_url)

url = "http://10.10.10.5/dvwa/vulnerabilities/fi/?page=include.php"
url = url.replace("include.php", "")
print(url)
You could do something like that, where you replace the part you don't want with an empty string.
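A plain string replace works, but it is tied to that exact filename. If you want something that drops whatever value the page parameter happens to carry, the standard library's urllib.parse can rebuild the URL instead (a minimal sketch using only stdlib calls):
from urllib.parse import urlsplit, urlunsplit

url = "http://10.10.10.5/dvwa/vulnerabilities/fi/?page=include.php"
parts = urlsplit(url)
# keep the parameter name up to "=", drop whatever value follows it
query = parts.query.split("=", 1)[0] + "="
print(urlunsplit((parts.scheme, parts.netloc, parts.path, query, parts.fragment)))
# -> http://10.10.10.5/dvwa/vulnerabilities/fi/?page=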

Related

web scraping from news articles

I have been trying to access the links from a given news website. I've found code which works really well, but the only issue is that it outputs "javascript:void();" along with all the other links. Please let me know what changes I can make so that I don't encounter "javascript:void();" in the output with all the other links.
The following is the code:
from bs4 import BeautifulSoup
from bs4.dammit import EncodingDetector
import requests
parser = 'html.parser' # or 'lxml' (preferred) or 'html5lib', if installed
resp = requests.get("https://www.ndtv.com/coronavirus?pfrom=home-mainnavgation")
http_encoding = resp.encoding if 'charset' in resp.headers.get('content-type', '').lower() else None
html_encoding = EncodingDetector.find_declared_encoding(resp.content, is_html=True)
encoding = html_encoding or http_encoding
soup = BeautifulSoup(resp.content, parser, from_encoding=encoding)
for link in soup.find_all('a', href=True):
    print(link['href'])
If you don't want them, just filter them out.
Here's how:
import requests
from bs4 import BeautifulSoup
from bs4.dammit import EncodingDetector
resp = requests.get("https://www.ndtv.com/coronavirus?pfrom=home-mainnavgation")
http_encoding = resp.encoding if 'charset' in resp.headers.get('content-type', '').lower() else None
html_encoding = EncodingDetector.find_declared_encoding(resp.content, is_html=True)
encoding = html_encoding or http_encoding
soup = BeautifulSoup(resp.content, 'html.parser', from_encoding=encoding)
for link in soup.find_all('a', href=True):
    if link["href"] != "javascript:void();":
        print(link['href'])
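If other javascript: variants show up as well, a startswith check is a little more general (same loop, just a broader condition):
for link in soup.find_all('a', href=True):
    # skip any javascript: pseudo-link, not just the exact "javascript:void();" string
    if not link['href'].strip().startswith("javascript:"):
        print(link['href'])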

How to get the values from this with beautifulsoup?

I am trying to learn BeautifulSoup and am scraping this website.
My python code looks like this:
import requests
from bs4 import BeautifulSoup
print("Enter the last 3 characters from the share link")
share_link = input()
link = "https://website.com" + share_link
print(link)
r = requests.get(link)
raw = r.text
soup = BeautifulSoup(raw, features="html.parser")
print(soup.prettify())
inputTag = soup.find("input", {"id": "hiddenInput"})
output = inputTag["value"]
print(output)
It gives me this output:
{"broadcastId":"BroadcastID: 252940","rtmp_url":"rtmp://live.gchao.cn/live/23331_9wx2w0c9","sex":0,"accountType":"26073","hls_url":"http://live.gchao.cn/live/23331_9wx2w0c9.m3u8","onlineNum":99,"likeNum":67,"live_id":282878,"flv_url":"http://live.gchao.cn/live/23331_9wx2w0c9.flv?txSecret=40d318efbbbca6afb8be2450b8d1f8fa&txTime=5D6086D1","user_id":252940,"stream_id":"23331_9wx2w0c9","nick_name":"Princess","sdkAppID":"1400088004","info_id":33189,"info_name":"Hi","IM_ID":"#TGS#aXMZYZ7FB","earning":424}
How do I get inside this and with beautifulsoup get the values?
If it is JSON, you can load it with the json library and then parse it, e.g.:
import json
s = '{"broadcastId":"BroadcastID: 252940","rtmp_url":"rtmp://live.gchao.cn/live/23331_9wx2w0c9","sex":0,"accountType":"26073","hls_url":"http://live.gchao.cn/live/23331_9wx2w0c9.m3u8","onlineNum":99,"likeNum":67,"live_id":282878,"flv_url":"http://live.gchao.cn/live/23331_9wx2w0c9.flv?txSecret=40d318efbbbca6afb8be2450b8d1f8fa&txTime=5D6086D1","user_id":252940,"stream_id":"23331_9wx2w0c9","nick_name":"Princess","sdkAppID":"1400088004","info_id":33189,"info_name":"Hi","IM_ID":"#TGS#aXMZYZ7FB","earning":424}'
data = json.loads(s)
print(data['broadcastId'])
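Tying this back to your scrape, you can feed the hidden input's value straight into json.loads (a sketch assuming the inputTag from your code above):
import json

# inputTag comes from: soup.find("input", {"id": "hiddenInput"})
data = json.loads(inputTag["value"])
print(data["broadcastId"])  # BroadcastID: 252940
print(data["hls_url"])      # http://live.gchao.cn/live/23331_9wx2w0c9.m3u8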

How can I extract the links from HTML?

I'm trying to get a link to every article in this category on the SF Chronicle, but I'm not sure where to begin extracting the URLs. Here is my progress so far:
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
my_url = 'https://www.sfchronicle.com/local/'
# opening up connection, grabbing the page
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()
# html parsing
page_soup = soup(page_html, "html.parser")
zone2_container = page_soup.findAll("div",{"class":"zone zone-2"})
zone3_container = page_soup.findAll("div",{"class":"zone zone-3"})
zone4_container = page_soup.findAll("div",{"class":"zone zone-4"})
right_rail_container = page_soup.findAll("div",{"class":"right-rail"})
All of the links I want are located in zone2-4_container and right_rail_container.
You can use the following code to get all links:
all_zones = [zone2_container, zone3_container, zone4_container, right_rail_container]
urls = []
for i in all_zones:
    links = i[0].findAll('a')
    for link in links:
        urls.append(link['href'])
I have merged all the containers into one list, but you can also define a function to achieve the same:
def get_urls(zone):
    urls = []
    for i in zone:
        links = i.findAll('a')
        for link in links:
            urls.append(link['href'])
    return urls

get_urls(zone2_container)
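With the helper in place, you can apply it to each container and combine the results (reusing the variables defined above):
all_urls = []
for zone in (zone2_container, zone3_container, zone4_container, right_rail_container):
    all_urls.extend(get_urls(zone))
print(len(all_urls))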
It now sounds like you basically want all the article links, in which case you can use an attribute-equals CSS selector with the contains (*=) operator to target href attributes whose value contains the substring 'article'.
import requests
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin
base = 'https://www.sfchronicle.com/'
url = 'https://www.sfchronicle.com/local/'
res = requests.get(url)
soup = bs(res.content, 'lxml')
links = [urljoin(base,link['href']) for link in soup.select('[href*=article]')]
print(links)
print(len(links))
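Category pages often list the same article in several zones, so you may want to deduplicate while preserving order (one way, using a dict as an ordered set):
links = list(dict.fromkeys(links))
print(len(links))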

Not able to use BeautifulSoup to get span content of Nasdaq100 future

from bs4 import BeautifulSoup
import re
import requests

url = 'www.barchart.com/futures/quotes/NQU18'
r = requests.get("https://" + url)
data = r.text
soup = BeautifulSoup(data)
price = soup.find('span', {'class': 'last-change',
                           'data-ng-class': "highlightValue('priceChange')"}).text
print(price)
Result:
[[ item.priceChange ]]
That is not the span content; the result should be the price. Where am I going wrong?
The following is the span tag of the page (screenshot not reproduced here).
2nd screenshot (also not reproduced): how can I get the time?
Use price = soup.find('span', {'class': 'up'}).text instead to get the +X.XX value:
from bs4 import BeautifulSoup
import requests
url = 'www.barchart.com/futures/quotes/NQU18'
r = requests.get("https://" +url)
data = r.text
soup = BeautifulSoup(data, "lxml")
price = soup.find('span', {'class': 'up'}).text
print(price)
Output currently is:
+74.75
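Class names on a site like this can change without notice, so it is worth guarding against find returning None (a small defensive variant of the lookup above):
span = soup.find('span', {'class': 'up'})
print(span.text if span else "price span not found; the markup may have changed")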
The tradeTime you seek seems to not be present in the page_source, since it's dynamically generated through JavaScript. You can, however, find it elsewhere if you're a little clever, and use the json library to parse the JSON data from a certain script element:
import json

# re-using the `soup` object from the requests/BeautifulSoup snippet above
trade_time = soup.find('script', {"id": 'barchart-www-inline-data'}).text
json_data = json.loads(trade_time)
print(json_data["NQU18"]["quote"]["tradeTime"])
This outputs:
2018-06-14T18:14:05
If these don't solve your problem then you will have to resort to something like Selenium that can run JavaScript to get what you're looking for:
from selenium import webdriver
driver = webdriver.Chrome()
url = ("https://www.barchart.com/futures/quotes/NQU18")
driver.get(url)
result = driver.find_element_by_xpath('//*[@id="main-content-column"]/div/div[1]/div[2]/span[2]/span[1]')
print(result.text)
Currently the output is:
-13.00
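Note that recent Selenium releases (4+) removed the find_element_by_* helpers; the equivalent lookup uses the By locator API (same XPath as above):
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get("https://www.barchart.com/futures/quotes/NQU18")
result = driver.find_element(By.XPATH, '//*[@id="main-content-column"]/div/div[1]/div[2]/span[2]/span[1]')
print(result.text)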

Not able to parse webpage contents using beautiful soup

I have been using Beautiful Soup for parsing webpages for some data extraction. It has worked perfectly well for me so far, for other webpages. However, I'm trying to count the number of <a> tags on this page:
from bs4 import BeautifulSoup
import requests
catsection = "cricket"
url_base = "http://www.dnaindia.com/"
i = 89
url = url_base + catsection + "?page=" + str(i)
print(url)
#This is the page I'm trying to parse and also the one in the hyperlink
#I get the correct url i'm looking for at this stage
r = requests.get(url)
data = r.text
soup = BeautifulSoup(data, 'html.parser')
j = 0
for num in soup.find_all('a'):
    j = j + 1
print(j)
I'm getting the output as 0. This makes me think that the two lines after r = requests.get(url) are probably not working (there's obviously no chance that there are zero <a> tags on the page), and I'm not sure what alternative solution I can use here. Has anybody got a solution or faced a similar kind of problem before?
Thanks in advance.
You need to pass some information along with the request so that the server treats it as coming from a browser.
The following code should work... you can play with the other header parameters as well:
from bs4 import BeautifulSoup
import requests
catsection = "cricket"
url_base = "http://www.dnaindia.com/"
i = 89
url = url_base + catsection + "?page=" + str(i)
print(url)
headers = {
    'User-agent': 'Mozilla/5.0'
}
#This is the page I'm trying to parse and also the one in the hyperlink
#I get the correct url i'm looking for at this stage
r = requests.get(url, headers=headers)
data = r.text
soup = BeautifulSoup(data, 'html.parser')
j = 0
for num in soup.find_all('a'):
    j = j + 1
print(j)
Put any URL in the parser and check the number of <a> tags available on that page:
from bs4 import BeautifulSoup
import requests
url_base = "http://www.dnaindia.com/cricket?page=1"
res = requests.get(url_base, headers={'User-agent': 'Existed'})
soup = BeautifulSoup(res.text, 'html.parser')
a_tag = soup.select('a')
print(len(a_tag))
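A quick way to confirm that the User-agent header is what makes the difference is to compare the two responses directly (a sketch; the exact contents of the blocked response may vary):
import requests

url = "http://www.dnaindia.com/cricket?page=1"
plain = requests.get(url)
with_ua = requests.get(url, headers={'User-agent': 'Mozilla/5.0'})
# a blocked response is typically much shorter and contains no article links
print(len(plain.text), len(with_ua.text))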
