How to generate a new access token using a refresh token in Flask? - python-3.x

How do I generate a new access token from a refresh token in Python when using the Google Fit API?

Update: I have found my answer.
from urllib.request import Request, urlopen
from urllib.error import URLError
import json
import uuid

# mimetools is gone in Python 3; any unique string works as a multipart boundary
BOUNDARY = uuid.uuid4().hex
CRLF = '\r\n'

def EncodeMultiPart(fields, files, file_type='application/xml'):
    """Encodes list of parameters and files for HTTP multipart format.

    Args:
        fields: list of tuples containing name and value of parameters.
        files: list of tuples containing param name, filename, and file contents.
        file_type: string if file type different than application/xml.

    Returns:
        A string to be sent as data for the HTTP POST request.
    """
    lines = []
    for (key, value) in fields:
        lines.append('--' + BOUNDARY)
        lines.append('Content-Disposition: form-data; name="%s"' % key)
        lines.append('')  # blank line
        lines.append(value)
    for (key, filename, value) in files:
        lines.append('--' + BOUNDARY)
        lines.append(
            'Content-Disposition: form-data; name="%s"; filename="%s"'
            % (key, filename))
        lines.append('Content-Type: %s' % file_type)
        lines.append('')  # blank line
        lines.append(value)
    lines.append('--' + BOUNDARY + '--')
    lines.append('')  # blank line
    return CRLF.join(lines)

def refresh_token():
    url = "https://oauth2.googleapis.com/token"
    fields = [
        ("grant_type", "refresh_token"),
        ("client_id", "xxxxxx"),
        ("client_secret", "xxxxxx"),
        ("refresh_token", "xxxxx"),
    ]
    files = []
    edata = EncodeMultiPart(fields, files, file_type='text/plain')
    # print(EncodeMultiPart(fields, files, file_type='text/plain'))
    # Request.add_data() no longer exists in Python 3; pass the body as bytes instead
    body = edata.encode('utf-8')
    request = Request(url, data=body)
    request.add_header('Content-Length', str(len(body)))
    request.add_header('Content-Type', 'multipart/form-data; boundary=%s' % BOUNDARY)
    response = urlopen(request).read()
    print(response)

refresh_token()
# response = json.loads(response)  # json.decode() does not exist; loads() parses the JSON body
# print(refresh_token())
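For what it's worth, the token endpoint also accepts an ordinary form-encoded POST, so the multipart encoding above isn't strictly required. A minimal sketch using the requests library (the client_id, client_secret and refresh_token values are placeholders you would fill in yourself):

import requests

def refresh_access_token(client_id, client_secret, refresh_token):
    # requests sends a dict passed via data= as application/x-www-form-urlencoded,
    # which the OAuth 2.0 token endpoint accepts
    resp = requests.post(
        "https://oauth2.googleapis.com/token",
        data={
            "grant_type": "refresh_token",
            "client_id": client_id,
            "client_secret": client_secret,
            "refresh_token": refresh_token,
        },
    )
    resp.raise_for_status()
    return resp.json()["access_token"]

new_token = refresh_access_token("xxxxxx", "xxxxxx", "xxxxx")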

Related

Merge a string to URL in python

I have to concatenate a hardcoded path (a string) to a URL so that the result is itself a valid URL:
url (which doesn't end with "/") + "/path/to/file/" = new_url
I tried urljoin and also plain string concatenation, but the result is not a URL that can be reached (it is not that the address itself is invalid).
mirror_url = "http://amazonlinux.us-east-
2.amazonaws.com/2/core/latest/x86_64/mirror.list"
response = requests.get(mirror_url)
contents_in_url = response.content
## returns a URL as shown below but of string type which cannot be
##concatenated to another string type which could be requested as a valid
##URL.
'http://amazonlinux.us-east- 2.amazonaws.com/2/core/2.0/x86_64/8cf736cd3252ada92b21e91b8c2a324d05b12ad6ca293a14a6ab7a82326aec43'
path_to_add_to_url = "/repodata/primary.sqlite.gz"
final_url = contents_in_url + path_to_add_to_url
Desired Result:
Without omitting any path to that file.
final_url = "http://amazonlinux.us-west-2.amazonaws.com/2/core/2.0/x86_64/8cf736cd3252ada92b21e91b8c2a324d05b12ad6ca293a14a6ab7a82326aec43/repodata/primary.sqlite.gz"
You need to get the contents of the first response via the response.text attribute, not response.content:
import requests
mirror_url = "http://amazonlinux.us-east-2.amazonaws.com/2/core/latest/x86_64/mirror.list"
response = requests.get(mirror_url)
contents_in_url = response.text.strip()
path_to_add_to_url = "/repodata/primary.sqlite.gz"
response = requests.get(contents_in_url + path_to_add_to_url)
with open('primary.sqlite.gz', 'wb') as f_out:
    f_out.write(response.content)
print('Downloading done.')
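The root cause is a type mismatch: response.content is bytes, and in Python 3 concatenating bytes with a str raises a TypeError, while response.text is already a str (and .strip() removes the trailing newline from the mirror list). A tiny illustration with a made-up value:

content = b'http://example.com/base\n'      # what response.content gives you (bytes)
path = "/repodata/primary.sqlite.gz"        # a plain str

# content + path                            # TypeError in Python 3: bytes + str
final_url = content.decode('utf-8').strip() + path   # decode (or use response.text) first
print(final_url)                            # http://example.com/base/repodata/primary.sqlite.gz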

Selective download and extraction of data (CAB)

So I have a specific need to download and extract a CAB file, but each CAB file is huge (> 200 MB), and I want to selectively download files from it since the rest of the data is useless to me.
What I have done so far:
Request 1% of the file from the server. Get the headers and parse them.
Get the file list and their offsets according to This CAB Link.
Send a GET request to the server with the Range header set to the file offset and offset+size.
I am able to get the response, but it is "unreadable" because it is compressed (LZX:21, according to 7-Zip).
I am unable to decompress it using zlib; it throws "invalid header".
Also, I could not quite understand or trace the CFFOLDER or CFDATA structures as shown in the example, because it is uncompressed there.
import os
import struct
import requests

totalByteArray = b''
eofiles = 0

def GetCabMetaData(stream):
    global eofiles
    cabMetaData = {}
    try:
        cabMetaData["CabFormat"] = stream[0:4].decode('ANSI')
        cabMetaData["CabSize"] = struct.unpack("<L", stream[8:12])[0]
        cabMetaData["FilesOffset"] = struct.unpack("<L", stream[16:20])[0]
        cabMetaData["NoOfFolders"] = struct.unpack("<H", stream[26:28])[0]
        cabMetaData["NoOfFiles"] = struct.unpack("<H", stream[28:30])[0]
        # skip 30,32,34,35
        cabMetaData["Files"] = {}
        cabMetaData["Folders"] = {}
        baseOffset = cabMetaData["FilesOffset"]
        internalOffset = 0
        for i in range(0, cabMetaData["NoOfFiles"]):
            fileDetails = {}
            fileDetails["Size"] = struct.unpack("<L", stream[baseOffset+internalOffset:][:4])[0]
            fileDetails["UnpackedStartOffset"] = struct.unpack("<L", stream[baseOffset+internalOffset+4:][:4])[0]
            fileDetails["FolderIndex"] = struct.unpack("<H", stream[baseOffset+internalOffset+8:][:2])[0]
            fileDetails["Date"] = struct.unpack("<H", stream[baseOffset+internalOffset+10:][:2])[0]
            fileDetails["Time"] = struct.unpack("<H", stream[baseOffset+internalOffset+12:][:2])[0]
            fileDetails["Attrib"] = struct.unpack("<H", stream[baseOffset+internalOffset+14:][:2])[0]
            fileName = ''
            for j in range(0, len(stream)):
                if chr(stream[baseOffset+internalOffset+16+j]) != '\x00':
                    fileName += chr(stream[baseOffset+internalOffset+16+j])
                else:
                    break
            internalOffset += 16 + j + 1
            cabMetaData["Files"][fileName] = fileDetails.copy()
        eofiles = baseOffset + internalOffset
    except Exception as e:
        print(e)
    print(cabMetaData["CabSize"])
    return cabMetaData

def GetFileSize(url):
    resp = requests.head(url)
    return int(resp.headers["Content-Length"])

def GetCABHeader(url):
    global totalByteArray
    size = GetFileSize(url)
    newSize = "bytes=0-" + str(int(0.01*size))
    totalByteArray = b''
    cabHeader = requests.get(url, headers={"Range": newSize}, stream=True)
    for chunk in cabHeader.iter_content(chunk_size=1024):
        totalByteArray += chunk

def DownloadInfFile(baseUrl, InfFileData, InfFileName):
    global totalByteArray, eofiles
    if not os.path.exists("infs"):
        os.mkdir("infs")
    baseCabName = baseUrl[baseUrl.rfind("/"):]
    baseCabName = baseCabName.replace(".", "_")
    if not os.path.exists("infs\\" + baseCabName):
        os.mkdir("infs\\" + baseCabName)
    fileBytes = b''
    newRange = "bytes=" + str(eofiles + InfFileData["UnpackedStartOffset"]) + "-" + str(eofiles + InfFileData["UnpackedStartOffset"] + InfFileData["Size"])
    data = requests.get(baseUrl, headers={"Range": newRange}, stream=True)
    with open("infs\\" + baseCabName + "\\" + InfFileName, "wb") as f:
        for chunk in data.iter_content(chunk_size=1024):
            fileBytes += chunk
        f.write(fileBytes)
        f.flush()
    print("Saved File " + InfFileName)

def main(url):
    GetCABHeader(url)
    cabMetaData = GetCabMetaData(totalByteArray)
    for fileName, data in cabMetaData["Files"].items():
        if fileName.endswith(".txt"):
            DownloadInfFile(url, data, fileName)

main("http://path-to-some-cabinet.cab")
All the file details are correct. I have verified them.
Any guidance will be appreciated. Am I doing it wrong? Another way perhaps?
P.S.: I have already looked into This Post.
First, the data in the CAB is raw deflate, not zlib-wrapped deflate. So you need to ask zlib's inflate() to decode raw deflate with a negative windowBits value on initialization.
Second, the CAB format does not exactly use standard deflate, in that the 32K sliding window dictionary carries from one block to the next. You'd need to use inflateSetDictionary() to set the dictionary at the start of each block using the last 32K decompressed from the last block.
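In Python terms, that maps onto zlib.decompressobj: a negative wbits value selects raw deflate, and the zdict argument plays the role of inflateSetDictionary() (for raw streams the dictionary is applied immediately). A rough sketch of the idea, assuming you have already extracted the compressed payload of each block from the CFDATA records; that extraction, and the helper name, are placeholders of mine, not part of the answer above:

import zlib

def inflate_cab_blocks(compressed_blocks):
    """Inflate a sequence of raw-deflate blocks whose 32K window carries over."""
    history = b''      # last 32K of decompressed output so far
    output = b''
    for block in compressed_blocks:
        # wbits=-15 -> raw deflate (no zlib header/trailer); zdict preloads the
        # sliding-window dictionary with the previous blocks' output
        d = zlib.decompressobj(-15, zdict=history)
        chunk = d.decompress(block) + d.flush()
        output += chunk
        history = (history + chunk)[-32768:]   # keep only the last 32K
    return output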

Azure Put Blob API returns with a non-matching size of file in canonicalized Header

I am trying to upload a blob (PDF) file from my laptop to a container in an Azure storage account. I got it working, but with one glitch.
I am calculating the file size using:
f_info = os.stat(file_path)
file_size = (f_info.st_size) # returns - 19337
Then I insert this value into the canonicalized header below:
ch = "PUT\n\n\n"+str(file_size)+"\n\napplication/pdf\n\n\n\n\n\n\nx-ms-blob-type:BlockBlob" + "\nx-ms-date:" + date + "\nx-ms-version:" + version + "\n"
and send the PUT request to the Put Blob API; however, it returns an error saying "Authentication failed because the server used the below string to calculate the signature":
\'PUT\n\n\n19497\n\napplication/pdf\n\n\n\n\n\n\nx-ms-blob-type:BlockBlob\nx-ms-date:[date]\nx-ms-version:[API version]
Looking at this string, it is obvious that authentication failed because the file size Azure calculated is a different value! I don't understand how it arrives at this file size.
FYI: if I replace 19337 with 19497 in the canonicalized string and rerun, it works!
Any suggestions on where I am making a mistake?
Below is the code:
import os
import base64
import datetime
import hashlib
import hmac
import requests

storage_AccountName = '<storage account name>'
storage_ContainerName = "<container_name>"
storageKey = '<key>'
fd = "C:\\<path>\\<to>\\<file_to_upload>.pdf"
URI = 'https://' + storage_AccountName + '.blob.core.windows.net/<storage_ContainerName>/<blob_file_name.pdf>'
version = '2017-07-29'
date = datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT")

if os.path.isfile(fd):
    file_info = os.stat(fd)
    file_size = file_info.st_size
    ch = "PUT\n\n\n" + str(file_size) + "\n\napplication/pdf\n\n\n\n\n\n\nx-ms-blob-type:BlockBlob" + "\nx-ms-date:" + date + "\nx-ms-version:" + version + "\n"
    cr = "/<storage_AccountName>/<storage_Containername>/<blob_file_name.pdf>"
    canonicalizedString = ch + cr
    storage_account_key = base64.b64decode(storageKey)
    byte_canonicalizedString = canonicalizedString.encode('utf-8')
    signature = base64.b64encode(hmac.new(key=storage_account_key, msg=byte_canonicalizedString, digestmod=hashlib.sha256).digest())
    header = {
        'x-ms-blob-type': "BlockBlob",
        'x-ms-date': date,
        'x-ms-version': version,
        'Authorization': 'SharedKey ' + storage_AccountName + ':' + signature.decode('utf-8'),
        # 'Content-Length': str(19497),  # works
        'Content-Length': str(file_size),  # doesn't work
        'Content-Type': "application/pdf"}
    files = {'file': open(fd, 'rb')}
    result = requests.put(url=URI, headers=header, files=files)
    print(result.content)
As mentioned in the comments, the reason you're getting the mismatched content length is that instead of uploading just the file's bytes, you're uploading a multipart form object that wraps the file contents, and that wrapper is what increases the content length.
Please change the following line of codes:
files = {'file': open(fd, 'rb')}
result = requests.put(url = URI, headers = header, files = files)
to something like:
with open(fd, 'rb') as stream:
    result = requests.put(url=URI, headers=header, data=stream)
And now you're only uploading the file contents.
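If you want to see the mismatch for yourself, requests can show the body it would actually send for a files= upload. A small check along these lines (fd, URI and header are the variables from the question's code; the exact numbers are only illustrative):

import os
import requests

with open(fd, 'rb') as f:
    prepared = requests.Request('PUT', URI, headers=header, files={'file': f}).prepare()

# The multipart wrapper (boundary lines plus per-part headers) is what pushes the
# body size above the raw file size, e.g. 19497 vs 19337 in the question.
print(len(prepared.body), os.stat(fd).st_size)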

Python: How to get HTTP header using RAW_Sockets

I'm a beginner in Python and I would like to build a simple port sniffer.
For this purpose I'm using code from this site as an example: Simple packege snffer using python
And I would like to unpack bytes from the socket to extract the HTTP header, using struct.unpack().
What format string should I use to unpack the HTTP header (e.g. '!BBH', "!BBHHHBBH4s4s", '!HHLLBBHHH')?
The HTTP header is not fixed-length, so you'll need to parse it another way, for example:
import logging

log = logging.getLogger(__name__)

def parse_http(data):
    lines = data.split(b'\r\n')
    if len(lines) < 1:
        log.error('Invalid http header: %s', lines)
        return
    request = lines[0]
    header = {}
    rest = []
    in_header = True
    for line in lines[1:]:
        if line == b'':
            in_header = False
            continue
        if in_header:
            try:
                key, val = line.split(b': ')
            except ValueError:
                log.error('Invalid header line: %s', line)
                continue
            header[key] = val
        else:
            rest.append(line)
    return request, header, b'\r\n'.join(rest)
In order to detect an HTTP packet, you could check whether the payload starts with POST, GET, HTTP, etc.
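For example, here is how parse_http behaves on a hand-written request payload (the payload below is just an illustration, not real sniffer output):

payload = (b"GET /index.html HTTP/1.1\r\n"
           b"Host: example.com\r\n"
           b"User-Agent: test\r\n"
           b"\r\n"
           b"optional body")

# crude HTTP detection as suggested above: look at the first token
if payload.split(b' ', 1)[0] in (b'GET', b'POST', b'PUT', b'HTTP/1.1'):
    request_line, headers, body = parse_http(payload)
    print(request_line)        # b'GET /index.html HTTP/1.1'
    print(headers[b'Host'])    # b'example.com'
    print(body)                # b'optional body'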

SOAP UI - How to Capture REST raw Response in a file

I am trying to capture the raw response of a REST (POST) API call into a file using a Groovy script.
I can see the response as below in the RAW view, but the file that is produced is blank.
REST Response:
HTTP/1.1 401 Unauthorized
content-length: 0
Date: Tue 12 jul 2016 12:12:12gmt
WWW-Autheticate: OAuth
Server: Jetty (8.1.17V20150415)
I am using SOAP UI version 5.2.
Any help appreciated.
Groovy Script:
def Date startTime = new Date()
File it=new File("Result")
def cur_Time = startTime.getMonth()+1 + "_" + startTime.getDate()
cur_Time = cur_Time + "_" + startTime.getHours() + startTime.getMinutes() +startTime.getSeconds()
def fileName = it.name + "_" + cur_Time
//Request File
def myXmlRequest="C:\\ConnectivityResults\\"+ "Rest_Request" + fileName+".xml"
def request=context.expand('${Testcasename#Request}')
def req = new File (myXmlRequest)
req.write(request,"UTF-8")
//Response File
def myXmlResponse="C:\\ConnectivityResults\\"+ "Rest_Response" + fileName+".xml"
def response=context.expand('${Testcasename#Response}')
def res = new File (myXmlResponse)
res.write(response,"UTF-8")
The problem probably isn't in your Groovy script; the problem is simply that your request is incorrect and nothing is returned as the response. Based on the http-headers you show in the question:
HTTP/1.1 401 Unauthorized
content-length: 0
Date: Tue 12 jul 2016 12:12:12gmt
WWW-Autheticate: OAuth
Server: Jetty (8.1.17V20150415)
You're receiving a 401 Unauthorized response instead of 200 OK, and the Content-Length is 0, so it's normal that your response is blank: there is no content to save in the file.
EDIT BASED ON COMMENT
If you also want to save the http-headers in a file, you can add the following snippet to your Groovy script:
def fileName = ...
// http-headers file
def httpHeadersFilePath ="C:/ConnectivityResults/Rest_Request${fileName}.txt"
def ts = testRunner.testCase.getTestStepByName('Testcasename')
def headers = ts.getTestRequest().response.responseHeaders
def httpHeaderFile = new File(httpHeadersFilePath)
httpHeaderFile.text = ''
headers.each { key, value ->
    httpHeaderFile.append("${key}:${value}\n", 'UTF-8')
}
Hope it helps,
Sorry about the late reply...
There's a simple way to capture it and record it in a file, using a Groovy script in your SoapUI:
// Take the raw request into a variable "request":
def request = context.expand( '${Request 1#RawRequest}' )
// Take the raw response into a variable "response":
def response = context.expand( '${Request 1#Response}' )
// Create and fill a file "MyFile.json" with the variables' values:
new File( "C:/foo/bar/MyFile.json" ).write( request + response, "UTF-8" )
Hope that's useful.
