How to send HTTP/2 pseudo-headers in Python 3 - python-3.x

Basically I tried sending HTTP/2 headers with hyper for Python:
https://hyper.readthedocs.io/en/latest/
https://github.com/python-hyper/hyper
I mounted an HTTP20Adapter on my requests.Session, but it didn't work as expected.
First, let me explain that the "from tls_version import MyAdapter" used later in the main code refers to this tls_version.py file:
from requests.adapters import HTTPAdapter
from urllib3.poolmanager import PoolManager
import ssl

class MyAdapter(HTTPAdapter):
    def init_poolmanager(self, connections, maxsize, block=False):
        self.poolmanager = PoolManager(num_pools=connections,
                                       maxsize=maxsize,
                                       block=block,
                                       ssl_version=ssl.PROTOCOL_TLSv1_2)
It just forces TLS 1.2, nothing more.
The main code is below. Basically I'm trying to send a GET call with HTTP/2 pseudo-headers by mounting the hyper adapter on a requests.Session, while keeping control over header order with collections.OrderedDict:
import requests
from tls_version import MyAdapter
import json
import collections
from userdata import UserData  # ip, server, host, sid, charlesproxy are defined elsewhere (presumably via userdata)
from hyper.contrib import HTTP20Adapter

headers2 = [('Upgrade-Insecure-Requests', '1'),
            ('User-Agent', 'Mozilla/5.0 (Linux; Android 5.1.1; google Pixel 2 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36'),
            ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'),
            ('Accept-Encoding', 'gzip, deflate'),
            ('Accept-Language', 'es-ES,es;q=0.9,en-US;q=0.8,en;q=0.7'),
            ]

class post_calls():
    def start(self, headers_pass, body_pass, params, url, method):
        proxies = {
            'http': ip,
            'https': ip
        }
        body = str(body_pass)
        # send the request
        session = requests.session()
        session.mount('https://', MyAdapter())
        session.headers = collections.OrderedDict(headers_pass)
        if method == 'get':
            q = 'https://' + server + '.' + host
            q = q.replace('.www.', '.')
            session.mount('https://', HTTP20Adapter())
            print('q=' + q)
            response = session.get(url, proxies=proxies, params=params, verify=charlesproxy)

def login_world2(sid):
    a = post_calls()
    q = 'https://' + server + '.' + host + '/login.php?mobile&sid=' + sid + '&2'
    q = q.replace('.www.', '.')
    params = {}
    url = q
    body = '0'
    login = a.start(headers2, body, params, url, 'get')
    return login

if __name__ == "__main__":
    login_get = login_world2(sid)
    print(login_get)
These are the headers this file sends:
:method: GET
:scheme: https
:authority: server.url.com
:path: /login.php?mobile&sid=577f0967545d6acec94716d265dd4867fa4db4a446326ecde7486a97feede14702f4911438f4a4cd097677f0dd962786ef14b3f16f1184ee63a506155d522f53&2
upgrade-insecure-requests: 1
user-agent: Mozilla/5.0 (Linux; Android 5.1.1; google Pixel 2 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML
user-agent: like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36
accept: text/html
accept: application/xhtml+xml
accept: application/xml;q=0.9
accept: image/webp
accept: image/apng
accept: */*;q=0.8
accept: application/signed-exchange;v=b3
accept-encoding: gzip
accept-encoding: deflate
accept-language: es-ES
accept-language: es;q=0.9
accept-language: en-US;q=0.8
accept-language: en;q=0.7
And this is what I need to send, because if I send them the way shown above, like this script does, the server rejects my GET requests:
:method: GET
:authority: server.url.com
:scheme: https
:path: /login.php?mobile&sid=2ea530a62cb63af6c14be116b7df86ad85cd77c9a11aa3c881b3a460e6c14fbd1fd8b79bd66c9782073705cdff25e890e65b5aeb852fde24c2d54a6e4ee49890&2
upgrade-insecure-requests: 1
user-agent: Mozilla/5.0 (Linux; Android 5.1.1; google Pixel 2 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36
accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
accept-encoding: gzip, deflate
accept-language: es-ES,es;q=0.9,en-US;q=0.8,en;q=0.7
It seems that for each "," I put in a header value in the dict, hyper creates a new header instead of sending them as one value. Without hyper it works normally, but I need to send those HTTP/2 pseudo-headers, and without hyper or some other alternative I can't: requests doesn't support HTTP/2.

Set the headers on the HTTP20Adapter instead of on the session, and it should work:
adapter = HTTP20Adapter(headers=headers)
session.mount(prefix='https://', adapter=adapter)
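For what it's worth, a minimal sketch of how that would slot into the question's start() method, assuming HTTP20Adapter accepts a headers mapping as this answer states (treat it as illustrative rather than hyper's documented API):

import collections
import requests
from hyper.contrib import HTTP20Adapter

session = requests.session()
# Hand the ordered headers to the adapter rather than to session.headers,
# per the answer above, so hyper serializes each value as a single header.
adapter = HTTP20Adapter(headers=collections.OrderedDict(headers2))
session.mount('https://', adapter)
response = session.get(url, proxies=proxies, params=params)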

Related

Login Request with CSRF Token in HTML HTTP Response using Python

I need to create a login request using Python, but the roadblock is this: when I load the URL with a GET request, the server sets a specific CSRF token value in the HTML response, and that same CSRF token is validated in the login request; without the CSRF token value I won't be able to log in to the web application.
Let's say I am loading the URL with a GET request and printing the value of the CSRF token, which is generated anew with each refresh.
Using the following Script 1, the value of the CSRF token is retrieved:
import requests
from bs4 import BeautifulSoup

response = requests.get("https://myloginpage.com", verify=False)
print(response.status_code)
soup = BeautifulSoup(response.text, "html.parser")
csrf_token = soup.find("input", {"name": "csrf_token"})["value"]
print(csrf_token)
And the login request is given below, where the CSRF token value is the one set by the server in the response to the GET request above (i.e. the one retrieved by Script 1).
POST /auth/login HTTP/1.1
Host: myloginpage.com
Cookie: cookiesession1=0A1256CFAXDBEO4P8WONHSGEAPSP5B4F; csrf_token=<token value of csrf>; ci_session=r90o6pi8not5barjnhhtnu8atobmgbae
Content-Length: 131
Sec-Ch-Ua: "Not?A_Brand";v="8", "Chromium";v="108"
Accept: */*
Content-Type: application/x-www-form-urlencoded; charset=UTF-8
X-Requested-With: XMLHttpRequest
Sec-Ch-Ua-Mobile: ?0
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.5359.72 Safari/537.36
Sec-Ch-Ua-Platform: "Windows"
Origin: https://myloginpage.com
Sec-Fetch-Site: same-origin
Sec-Fetch-Mode: cors
Sec-Fetch-Dest: empty
Referer: https://myloginpage.com
Accept-Encoding: gzip, deflate
Accept-Language: en-US,en;q=0.9
Connection: close
user_id=0&csrf_token=<token value of csrf>&iusername=<username>&ipassword=<password>&is_ajax=1&form=login
Hence, I created a script as given below:
import requests
from bs4 import BeautifulSoup

headers = {
    'Accept': '*/*',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.5359.72 Safari/537.36',
    'Referer': 'https://myloginpage.com/',
}
response = requests.get("https://myloginpage.com", headers=headers, verify=False)
print(response.status_code)
soup = BeautifulSoup(response.text, "html.parser")
csrf_token = soup.find("input", {"name": "csrf_token"})["value"]
print(csrf_token)

login = 'https://myloginpage.com/auth/login'
payload = {
    'user_id': '<user_id>',
    'csrf_token': csrf_token,
    'iusername': '<username>',
    'ipassword': '<password>',
    'is_ajax': '1',
    'form': 'login'
}
response = requests.post(login, data=payload, verify=False)
print(response.status_code)
But still, the login is unsuccessful with the 403 HTTP Response code.
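One likely missing piece (an assumption, since the site isn't reproducible here): the server validates the CSRF token against the session cookies it set in the GET response, and separate requests.get/requests.post calls do not share cookies. A minimal sketch using requests.Session to carry them across both requests:

import requests
from bs4 import BeautifulSoup

session = requests.Session()  # keeps cookiesession1/csrf_token/ci_session for the POST
response = session.get("https://myloginpage.com", verify=False)
soup = BeautifulSoup(response.text, "html.parser")
csrf_token = soup.find("input", {"name": "csrf_token"})["value"]

payload = {
    'user_id': '<user_id>',
    'csrf_token': csrf_token,
    'iusername': '<username>',
    'ipassword': '<password>',
    'is_ajax': '1',
    'form': 'login'
}
# X-Requested-With mirrors the captured request's AJAX login flow
response = session.post("https://myloginpage.com/auth/login", data=payload,
                        headers={'X-Requested-With': 'XMLHttpRequest'}, verify=False)
print(response.status_code)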

Socket can't detect anything in a PRAW implicit authentication URL

I'm opening a socket server that hosts the Reddit confirmation page, so that you can log in to the application without compromising the bot, using a single string called an access token that is in the URL of the socket server. What I want to do is take that access token from the URL and display it on the socket server page, but the server can't detect any variables inside the URL. Here is my code if you need it. It works when implicit is not part of the URL generation, but as soon as I add implicit=True (which I need to, since I plan on this being an app), it no longer works.
What I want (roughly; this is for the non-implicit flow): GET /?state=state&code=code HTTP/1.1.
What I get: GET / HTTP/1.1.
That is what happens when I do data = client.recv(1024).decode("utf-8").
It returns proper data when the URL is generated with url = reddit.auth.url(scopes, state),
but when I generate the URL with url = reddit.auth.url(scopes, state, implicit=True),
the data returns GET / HTTP/1.1.
I need the access token to be recognized after the /.
I am using praw 7.1.0.
GET / HTTP/1.1
Host: localhost:8080
Connection: keep-alive
Cache-Control: max-age=0
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) QtWebEngine/5.15.2 Chrome/83.0.4103.122 Safari/537.36
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
Accept-Language: en-US,en;q=0.9
DNT: 1
Sec-Fetch-Site: cross-site
Sec-Fetch-Mode: navigate
Sec-Fetch-User: ?1
Sec-Fetch-Dest: document
Accept-Encoding: gzip, deflate, br
/
/
Traceback (most recent call last):
  File "path/prawTest.py", line 81, in <module>
    sys.exit(main())
  File "path/prawTest.py", line 61, in main
    params = {
  File "path/prawTest.py", line 62, in <dictcomp>
    key: value for (key, value) in [token.split("=") for token in param_tokens]
ValueError: not enough values to unpack (expected 2, got 1)
It expects to see variables after the /.
Without implicit=True I get the following:
GET /?state=50470&code=code HTTP/1.1
Host: localhost:8080
Connection: keep-alive
Cache-Control: max-age=0
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) QtWebEngine/5.15.2 Chrome/83.0.4103.122 Safari/537.36
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
Accept-Language: en-US,en;q=0.9
DNT: 1
Sec-Fetch-Site: cross-site
Sec-Fetch-Mode: navigate
Sec-Fetch-User: ?1
Sec-Fetch-Dest: document
Accept-Encoding: gzip, deflate, br
The implicit=True URL looks like this:
http://localhost:8080/#access_token=token&token_type=bearer&state=39771&expires_in=3600&scope=%2A
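Everything after the # in that URL is a fragment, and browsers never transmit the fragment to the server, which is why the socket only ever sees GET / HTTP/1.1 in the implicit flow. A sketch of the usual workaround (illustrative only, reusing the client socket from the question's code): respond to the first request with a page whose JavaScript copies the fragment into the query string, so the token arrives in a second, parseable request:

# Serve a tiny page that re-sends the fragment data as a query string.
REDIRECT_PAGE = (
    "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n"
    "<script>"
    "if (window.location.hash) {"
    "  window.location.search = window.location.hash.substring(1);"
    "}"
    "</script>"
)
client.send(REDIRECT_PAGE.encode("utf-8"))
# The browser then requests GET /?access_token=...&token_type=bearer...,
# which can be parsed the same way as the non-implicit state/code case.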

How can I specify the exact http method with python requests?

In Burp Suite the first line of a captured request is usually GET / HTTP/1.1. However, I am currently practicing Host-header injection using the method of supplying an absolute URL, in order to send something like this:
GET https://vulnerable-website.com/ HTTP/1.1
Host: bad-stuff-here
In python I am using the requests library and am unable to specify the exact GET request I need.
import requests
burp0_url = "https://vulnerable-website.com:443/"
burp0_cookies = {[redacted]}
burp0_headers = {"Host": "bad-stuff-here", "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip, deflate", "Referer": "https://vulnerable-website.com/", "Connection": "close", "Upgrade-Insecure-Requests": "1"}
output = requests.get(burp0_url, headers=burp0_headers, cookies=burp0_cookies)
print(output, output.text)
I have tried specifying the GET request in the header dictionary (header = {"GET": " / HTTP/1.1", ...}), however this only results in a GET header, not a request line, being sent on the 6th line:
GET / HTTP/1.1
User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0
Accept-Encoding: gzip, deflate
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Connection: close
GET: /
Host: bad-stuff-here
Accept-Language: en-US,en;q=0.5
Referer: https://vulnerable-website.com/
Upgrade-Insecure-Requests: 1
Cookie: [redacted]
This is a very specific problem and I'm not sure if anyone has had the same issues but any help is appreciated. Maybe a workaround with urllib or something I'm missing. Thanks.
requests uses urllib3 under the hood.
You have to craft the request yourself, because none of the clients (urllib, requests, http.client) will allow you to insert a control character, by design.
You can use a plain socket for this:

import socket
from contextlib import closing
from functools import partial

# Request line with the absolute URL plus the injected Host header
msg = b'GET https://vulnerable-website.com/ HTTP/1.1\r\nHost: bad-stuff-here\r\n\r\n'
s = socket.create_connection(("vulnerable-website.com", 80))
with closing(s):
    s.send(msg)
    buf = b''.join(iter(partial(s.recv, 4096), b''))

Process raw HTTP request to python requests

I'd like to pass a raw HTTP request like:
GET /foo/bar HTTP/1.1
Host: example.org
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; fr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8
Accept: */*
Accept-Language: fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3
Accept-Encoding: gzip,deflate
Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7
Keep-Alive: 115
Connection: keep-alive
Content-Type: application/x-www-form-urlencoded
X-Requested-With: XMLHttpRequest
Referer: http://example.org/test
Cookie: foo=bar; lorem=ipsum;
And generate the python request such as:
import requests
burp0_url = "http://example.org:80/foo/bar"
burp0_cookies = {"foo": "bar", "lorem": "ipsum"}
burp0_headers = {"User-Agent": "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; fr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8", "Accept": "*/*", "Accept-Language": "fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3", "Accept-Encoding": "gzip,deflate", "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7", "Keep-Alive": "115", "Connection": "keep-alive", "Content-Type": "application/x-www-form-urlencoded", "X-Requested-With": "XMLHttpRequest", "Referer": "http://example.org/test"}
requests.get(burp0_url, headers=burp0_headers, cookies=burp0_cookies)
Is there a library for that?
I could not find an existing library that does this conversion, but there is a Python library to convert curl commands to python requests code.
https://github.com/spulec/uncurl
e.g.
import uncurl
print(uncurl.parse('curl --header "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7" --compressed --header "Accept-Language: fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3" --header "Connection: keep-alive" --header "Content-Type: application/x-www-form-urlencoded" --cookie "foo=bar; lorem=ipsum;" --header "Keep-Alive: 115" --header "Referer: http://example.org/test" --user-agent "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; fr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8" --header "X-Requested-With: XMLHttpRequest" https://example.org/foo/bar '))
I haven't found a Python library to transform raw HTTP into such a curl command. However, this Perl program does it.
Like this:
$ cat basic
GET /index.html HTTP/2
Host: example.com
Authorization: Basic aGVsbG86eW91Zm9vbA==
Accept: */*
$ ./h2c < basic
curl --http2 --header User-Agent: --user "hello:youfool" https://example.com/index.html
You could either call it from your python script, use a Python-Perl bridge or try to port it.
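If you call it from a Python script, a minimal sketch with subprocess (assuming the h2c script sits executable in the working directory, as in the transcript above):

import subprocess

raw_request = (
    "GET /index.html HTTP/2\r\n"
    "Host: example.com\r\n"
    "Authorization: Basic aGVsbG86eW91Zm9vbA==\r\n"
    "Accept: */*\r\n"
)
# h2c reads the raw request on stdin and prints the equivalent curl command
result = subprocess.run(["./h2c"], input=raw_request,
                        capture_output=True, text=True)
print(result.stdout)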
Postman also allows you to convert raw HTTP requests directly to python requests code using its code snippet generator, although it seems this can only be done via the GUI. It's also not open source, so you can't access the code that does the transformation.
I needed something that can generate a request and couldn't find it, so I ended up writing it in a gist:
import requests

CRLF = '\r\n'
DEFAULT_HTTP_VERSION = 'HTTP/1.1'

class RequestParser(object):
    def __parse_request_line(self, request_line):
        request_parts = request_line.split(' ')
        self.method = request_parts[0]
        self.url = request_parts[1]
        self.protocol = request_parts[2] if len(request_parts) > 2 else DEFAULT_HTTP_VERSION

    def __init__(self, req_text):
        req_lines = req_text.split(CRLF)
        self.__parse_request_line(req_lines[0])
        ind = 1
        self.headers = dict()
        # headers run until the first empty line
        while ind < len(req_lines) and len(req_lines[ind]) > 0:
            colon_ind = req_lines[ind].find(':')
            header_key = req_lines[ind][:colon_ind]
            # strip the space after ':' so requests accepts the value
            header_value = req_lines[ind][colon_ind + 1:].strip()
            self.headers[header_key] = header_value
            ind += 1
        ind += 1
        # everything after the blank line is the body
        self.data = req_lines[ind:] if ind < len(req_lines) else None
        self.body = CRLF.join(self.data) if self.data else ''

    def __str__(self):
        headers = CRLF.join(f'{key}: {self.headers[key]}' for key in self.headers)
        return f'{self.method} {self.url} {self.protocol}{CRLF}' \
               f'{headers}{CRLF}{CRLF}{self.body}'

    def to_request(self):
        req = requests.Request(method=self.method,
                               url=self.url,
                               headers=self.headers,
                               data=self.body)
        return req
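A possible way to use it end to end (hypothetical; the gist leaves sending to the caller, and since the request line carries only a path, the Host header has to be joined into a full URL first):

raw = 'GET /foo/bar HTTP/1.1\r\nHost: example.org\r\nAccept: */*'
req = RequestParser(raw).to_request()
# Build an absolute URL from the Host header before preparing the request
req.url = 'http://' + req.headers['Host'] + req.url
response = requests.Session().send(req.prepare())
print(response.status_code)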

LinkedIn HTTP Error 999 - Request denied

I am writing a simple script to get a public profile that is visible without logging in on LinkedIn.
Below is my code to get the page for BeautifulSoup. I am using public proxies as well.
import urllib.request, urllib.error
from bs4 import BeautifulSoup

url = "https://www.linkedin.com/company/amazon"
proxy = urllib.request.ProxyHandler({'https': proxy, })  # proxy string defined elsewhere
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener)
hdr = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3218.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en-US,en;q=0.9,hi;q=0.8',
    'Connection': 'keep-alive'}
req = urllib.request.Request(url, headers=hdr)
page = urllib.request.urlopen(req, timeout=20)
soup = BeautifulSoup(page.read(), "lxml")
But it is raising an "HTTPError 999 - Request Denied" error. This is only for testing purposes until I get access via the partnership program.
What am I doing wrong? Please help.
You did not do anything wrong. LinkedIn blacklists cloud server IP addresses to prevent "stealing" of their data. A questionable practice, but this is how it is.
