Locust load testing For API calls - python-3.x

I have written code for Locust load testing for my case, where I first make a token call and then make feature calls, as per the code below.
This is working fine with single token and 'n' number of users mentioned in master
I made a token call outside class and sending it as parameter to ENV. The userclass is reading the single token and using the same for all users.
I do not want to make token call inside the class, as it generates new token in every execution.
I'm looking if there is anyway like making token calls based on number of users mentioned on master -u and using only those tokens in User class.
Please point me to any documentation I can refer to for this use case.
#! /usr/bin/python3.6
import json
from locust import HttpUser, task, constant, tag, events
from locust.log import setup_logging
import os
from datetime import datetime
import requests
setup_logging("INFO", None)


# NOTE(review): the "@" of this decorator was lost in the paste ("#events...");
# it must be a real decorator for Locust to register the extra CLI options.
@events.init_command_line_parser.add_listener
def init_parser(parser):
    """Register custom command-line options (each also readable from an env var)."""
    parser.add_argument("--resp-delay", type=str, env_var="LOCUST_RESP_DELAY", default="", help="It's working")
    parser.add_argument("--resp-size", type=str, env_var="LOCUST_RESP_SIZE", default="", help="It's working")
    parser.add_argument("--env-endpoint", type=str, env_var="LOCUST_ENV_ENDPOINT", default="", help="It's working")
# NOTE(review): the "@" of this decorator was lost in the paste; it must be
# "@events.init.add_listener" so Locust runs this once at startup on the master.
@events.init.add_listener
def _(environment, **kw):
    """On Locust init: stash parsed options in the process env, fetch a single
    OAuth token with the configured client credentials, and share it (via env
    vars) with every spawned user.
    """
    os.environ['resp-delay'] = environment.parsed_options.resp_delay
    os.environ['resp-size'] = environment.parsed_options.resp_size
    os.environ['env-endpoint'] = environment.parsed_options.env_endpoint

    # Per-environment test data (client credentials) lives in the repo.
    with open("resources/data/" + environment.parsed_options.env_endpoint + '/data.json') as f:
        data = json.load(f)

    # Timestamp used as a correlation/transaction id for this whole run.
    cal_transaction_id = datetime.now().strftime('%Y%m%dT%H%M%S')

    # retrieve client id and client secret from bitbucket repo
    dict_car_app_all = data["data"]
    print("env-endpoint:" + os.environ.get("env-endpoint"))

    token_url = "https://ENDPOINT/auth/token"
    token_payload = "client_id=" + dict_car_app_all[0]["client_id"] + "&client_secret=" + dict_car_app_all[0]["client_secret"]
    token_headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    # NOTE(review): key file is 'MYCERT.key' here but 'MyCERT.key' in User_0 —
    # confirm the actual filename casing on disk.
    response = requests.post(token_url, data=token_payload, headers=token_headers,
                             cert=('resources/certs/' + environment.parsed_options.env_endpoint + '/MyCERT.pem',
                                   'resources/certs/' + environment.parsed_options.env_endpoint + '/MYCERT.key'))
    # Fail fast with a clear error instead of a KeyError below if the call failed.
    response.raise_for_status()
    token = response.json()["access_token"]

    os.environ['access_token'] = token
    os.environ['cal_transaction_id'] = cal_transaction_id
class User_0(HttpUser):
    """Simulated user that calls the feature endpoint using the shared token
    published to the environment by the init listener."""

    wait_time = constant(1)
    host = "host.com"

    # NOTE(review): the "@" of these decorators was lost in the paste; without
    # "@tag"/"@task" Locust never schedules this method.
    @tag('tag1')
    @task
    def load_test_api_tag1(self):
        # All shared state comes from env vars set at init time.
        token_0 = os.environ.get('access_token')
        cal_transaction_id = os.environ.get('cal_transaction_id')
        env_endpoint = os.environ.get('env-endpoint')
        resp_delay = os.environ.get("resp-delay")
        resp_size = os.environ.get("resp-size")

        feature_headers = {
            'Authorization': "Bearer " + str(token_0),
            'sm_transactionID': cal_transaction_id
        }
        url = "https://ENDPOINT/SERVICE/mytestservice/first_test"
        querystring = {"response_delay": resp_delay, "data_size": resp_size}

        self.client.request("GET", url, headers=feature_headers, params=querystring,
                            cert=('resources/certs/' + env_endpoint + '/MyCERT.pem',
                                  'resources/certs/' + env_endpoint + '/MyCERT.key'))

You can generate tokens in User class's on_start method so each user generates a new token when spawning.
class MyUser(User):
    def on_start(self):
        # Generate a per-user token here and keep it on the instance,
        # e.g. self.token = fetch_token(); each user then authenticates itself.
        super().on_start()
There is a drawback to this, though: if your user count is higher than your token-generating service can handle, some users will not be able to spawn. The way I do it in my tests: if the token-generating part is not part of the system I am testing, I generate tokens beforehand, write them to a file or an external DB, and read them from there.

Related

Single setup step in Locust 1.6.0?

I have been trying out how to setup a resource only once and share with user that gets created with locust
This is what I have been trying:
from locust import HttpUser
from locust.user.task import task
class WebsiteUser(HttpUser):
    # NOTE(review): Locust 1.x has no "setup" hook — one-time work belongs in
    # on_start or a test_start event listener; `clients` is also undefined here.
    def setup(self):
        client = clients.HttpSession(base_url=self.host)
        # NOTE(review): post() returns a Response object, not the token string —
        # presumably the token must be extracted from the response body first.
        token = client.post("get-token/", {'email': 'someemail', 'password': 'somepassword'})
        client.headers.update(
            {
                'Authorization': 'Bearer ' + token
            }
        )

    tasks = UserBehavior
    min_wait = 500
    max_wait = 900
As you see I wanted to fetch the token for once and share this with all other user instances that are created while load testing.
I can't figure out a way to achieve this.
Any sort of help would be really appreciated :)
You'll want to save the token in a variable accessible outside of the User to make it accessible by all users, otherwise each user would create its own.
from locust import HttpUser
from locust.user.task import task
# Module-level so every spawned user shares the same token.
token = ''


class WebsiteUser(HttpUser):
    # NOTE(review): Locust 1.x has no "setup" hook — one-time work belongs in
    # on_start or a test_start event listener.
    def setup(self):
        global token
        client = clients.HttpSession(base_url=self.host)
        # NOTE(review): post() returns a Response object; the token string
        # presumably needs to be read from the response body before use.
        token = client.post("get-token/", {'email': 'someemail', 'password': 'somepassword'})
        client.headers.update(
            {
                'Authorization': 'Bearer ' + token
            }
        )

    tasks = UserBehavior
    min_wait = 500
    max_wait = 900
I'm not positive but you might also need to set the headers before each request instead of setting them once.

How to refresh the boto3 credentials when a python script is running indefinitely

I am trying to write a python script that uses watchdog to look for file creation and upload that to s3 using boto3. However, my boto3 credentials expire after every 12hrs, So I need to renew them. I am storing my boto3 credentials in ~/.aws/credentials. So right now I am trying to catch the S3UploadFailedError, renew the credentials, and write them to ~/.aws/credentials. But though the credentials are getting renewed and I am calling boto3.client('s3') again its throwing exception.
What am I doing wrong? Or how can I resolve it?
Below is the code snippet
try:
    s3 = boto3.client('s3')
    s3.upload_file(event.src_path, 'bucket-name', event.src_path)
except boto3.exceptions.S3UploadFailedError as e:
    print(e)
    get_aws_credentials()
    # NOTE(review): a client built after rewriting ~/.aws/credentials can still
    # fail if the default session cached the old credentials — pass the new
    # keys explicitly or build a fresh boto3.Session first.
    s3 = boto3.client('s3')
I have found a good example to refresh the credentials within this link:
https://pritul95.github.io/blogs/boto3/2020/08/01/refreshable-boto3-session/
but there is a little bug inside it. Be careful about that.
Here is the corrected code:
from uuid import uuid4
from datetime import datetime
from time import time
from boto3 import Session
from botocore.credentials import RefreshableCredentials
from botocore.session import get_session
class RefreshableBotoSession:
    """
    Boto helper class which lets us create a refreshable session, so that we can
    cache the client or resource.

    Usage
    -----
    session = RefreshableBotoSession().refreshable_session()

    # we can now cache this client object without worrying about expiring credentials
    client = session.client("s3")
    """

    def __init__(
        self,
        region_name: str = None,
        profile_name: str = None,
        sts_arn: str = None,
        session_name: str = None,
        session_ttl: int = 3000
    ):
        """
        Initialize `RefreshableBotoSession`

        Parameters
        ----------
        region_name : str (optional)
            Default region when creating new connection.
        profile_name : str (optional)
            The name of a profile to use.
        sts_arn : str (optional)
            The role arn to sts before creating session.
        session_name : str (optional)
            An identifier for the assumed role session. (required when `sts_arn` is given)
        session_ttl : int (optional)
            An integer number to set the TTL for each session. Beyond this session, it will renew the token.
            50 minutes by default which is before the default role expiration of 1 hour
        """
        self.region_name = region_name
        self.profile_name = profile_name
        self.sts_arn = sts_arn
        self.session_name = session_name or uuid4().hex
        self.session_ttl = session_ttl

    def __get_session_credentials(self):
        """
        Return a credentials dict in the shape RefreshableCredentials expects
        (access_key / secret_key / token / expiry_time).
        """
        session = Session(region_name=self.region_name, profile_name=self.profile_name)

        # if sts_arn is given, get credentials by assuming the given role
        if self.sts_arn:
            sts_client = session.client(service_name="sts", region_name=self.region_name)
            response = sts_client.assume_role(
                RoleArn=self.sts_arn,
                RoleSessionName=self.session_name,
                DurationSeconds=self.session_ttl,
            ).get("Credentials")

            credentials = {
                "access_key": response.get("AccessKeyId"),
                "secret_key": response.get("SecretAccessKey"),
                "token": response.get("SessionToken"),
                "expiry_time": response.get("Expiration").isoformat(),
            }
        else:
            # NOTE(review): reading credentials via __dict__ relies on botocore
            # internals — confirm against the botocore version in use.
            session_credentials = session.get_credentials().__dict__
            credentials = {
                "access_key": session_credentials.get("access_key"),
                "secret_key": session_credentials.get("secret_key"),
                "token": session_credentials.get("token"),
                "expiry_time": datetime.fromtimestamp(time() + self.session_ttl).isoformat(),
            }

        return credentials

    def refreshable_session(self) -> Session:
        """
        Get a refreshable boto3 session.
        """
        # get refreshable credentials
        refreshable_credentials = RefreshableCredentials.create_from_metadata(
            metadata=self.__get_session_credentials(),
            refresh_using=self.__get_session_credentials,
            method="sts-assume-role",
        )

        # attach refreshable credentials to the current session
        session = get_session()
        session._credentials = refreshable_credentials
        session.set_config_variable("region", self.region_name)
        autorefresh_session = Session(botocore_session=session)

        return autorefresh_session
According to the documentation, the client looks in several locations for credentials and there are other options that are also more programmatic-friendly that you might want to consider instead of the .aws/credentials file.
Quoting the docs:
The order in which Boto3 searches for credentials is:
Passing credentials as parameters in the boto.client() method
Passing credentials as parameters when creating a Session object
Environment variables
Shared credential file (~/.aws/credentials)
AWS config file (~/.aws/config)
Assume Role provider
In your case, since you are already catching the exception and renewing the credentials, I would simply pass the new ones to a new instance of the client like so:
client = boto3.client(
's3',
aws_access_key_id=NEW_ACCESS_KEY,
aws_secret_access_key=NEW_SECRET_KEY,
aws_session_token=NEW_SESSION_TOKEN
)
If instead you are using these same credentials elsewhere in the code to create other clients, I'd consider setting them as environment variables:
import os
os.environ['AWS_ACCESS_KEY_ID'] = NEW_ACCESS_KEY
os.environ['AWS_SECRET_ACCESS_KEY'] = NEW_SECRET_KEY
os.environ['AWS_SESSION_TOKEN'] = NEW_SESSION_TOKEN
Again, quoting the docs:
The session key for your AWS account [...] is only needed when you are using temporary credentials.
Here is my implementation which only generates new credentials if existing credentials expire using a singleton design pattern
import boto3
from datetime import datetime
from dateutil.tz import tzutc
import os
import binascii
class AssumeRoleProd:
    """Static holder for STS-assumed credentials; regenerates them only when
    they are missing or about to expire (singleton-style class-level cache)."""

    __credentials = None

    def __init__(self):
        # Not instantiable: all state is class-level.
        # (was `assert True == False`, which is silently stripped under -O)
        raise TypeError("AssumeRoleProd is not meant to be instantiated")

    # NOTE(review): the "@" of these decorators was lost in the paste.
    @staticmethod
    def __setCredentials():
        print("\n\n ======= GENERATING NEW SESSION TOKEN ======= \n\n")

        # create an STS client object that represents a live connection to the
        # STS service
        sts_client = boto3.client('sts')

        # Call the assume_role method of the STSConnection object and pass the role
        # ARN and a role session name.
        assumed_role_object = sts_client.assume_role(
            RoleArn=your_role_here,
            RoleSessionName=f"AssumeRoleSession{binascii.b2a_hex(os.urandom(15)).decode('UTF-8')}"
        )

        # From the response that contains the assumed role, get the temporary
        # credentials that can be used to make subsequent API calls
        AssumeRoleProd.__credentials = assumed_role_object['Credentials']

    @staticmethod
    def getTempCredentials():
        credsExpired = False

        # Generate credentials the first time through
        if AssumeRoleProd.__credentials is None:
            AssumeRoleProd.__setCredentials()
            credsExpired = True
        # Regenerate if only 5 minutes are left before expiry. You may set up for
        # the entire 60 minutes by catching botocore's ClientException instead.
        elif (AssumeRoleProd.__credentials['Expiration'] - datetime.now(tzutc())).seconds // 60 <= 5:
            AssumeRoleProd.__setCredentials()
            credsExpired = True

        # BUG FIX: callers unpack two values —
        # `credsExpired, credentials = AssumeRoleProd.getTempCredentials()` —
        # but the original returned only the credentials dict.
        return credsExpired, AssumeRoleProd.__credentials
And then I am using singleton design pattern for client as well which would generate a new client only if new session is generated. You can add region as well if required.
class lambdaClient:
    """Static holder for a cached boto3 lambda client; rebuilt only when a new
    STS session was generated."""

    __prodClient = None

    def __init__(self):
        # Not instantiable: all state is class-level.
        # (was `assert True == False`, which is silently stripped under -O)
        raise TypeError("lambdaClient is not meant to be instantiated")

    # NOTE(review): the "@" of these decorators was lost in the paste.
    @staticmethod
    def __initProdClient():
        credsExpired, credentials = AssumeRoleProd.getTempCredentials()

        # Rebuild the client only on first use or when credentials rotated.
        if lambdaClient.__prodClient is None or credsExpired:
            lambdaClient.__prodClient = boto3.client('lambda',
                                                     aws_access_key_id=credentials['AccessKeyId'],
                                                     aws_secret_access_key=credentials['SecretAccessKey'],
                                                     aws_session_token=credentials['SessionToken'])
        return lambdaClient.__prodClient

    @staticmethod
    def getProdClient():
        return lambdaClient.__initProdClient()

How to send a GraphQL query to AppSync from python?

How do we post a GraphQL request through AWS AppSync using boto?
Ultimately I'm trying to mimic a mobile app accessing our stackless/cloudformation stack on AWS, but with python. Not javascript or amplify.
The primary pain point is authentication; I've tried a dozen different ways already. This the current one, which generates a "401" response with "UnauthorizedException" and "Permission denied", which is actually pretty good considering some of the other messages I've had. I'm now using the 'aws_requests_auth' library to do the signing part. I assume it authenticates me using the stored /.aws/credentials from my local environment, or does it?
I'm a little confused as to where and how cognito identities and pools will come into it. eg: say I wanted to mimic the sign-up sequence?
Anyways the code looks pretty straightforward; I just don't grok the authentication.
from aws_requests_auth.boto_utils import BotoAWSRequestsAuth

APPSYNC_API_KEY = 'inAppsyncSettings'
APPSYNC_API_ENDPOINT_URL = 'https://aaaaaaaaaaaavzbke.appsync-api.ap-southeast-2.amazonaws.com/graphql'

headers = {
    'Content-Type': "application/graphql",
    'x-api-key': APPSYNC_API_KEY,
    'cache-control': "no-cache",
}

# NOTE(review): "john#washere" looks like a mangled email address
# ("@" is often lost in pastes) — confirm the intended value.
query = """{
    GetUserSettingsByEmail(email: "john#washere"){
        items {name, identity_id, invite_code}
    }
}"""


def test_stuff():
    # Use the library to generate auth headers.
    auth = BotoAWSRequestsAuth(
        aws_host='aaaaaaaaaaaavzbke.appsync-api.ap-southeast-2.amazonaws.com',
        aws_region='ap-southeast-2',
        aws_service='appsync')

    # Create an http graphql request.
    response = requests.post(
        APPSYNC_API_ENDPOINT_URL,
        json={'query': query},
        auth=auth,
        headers=headers)

    print(response)
    # this didn't work:
    # response = requests.post(APPSYNC_API_ENDPOINT_URL, data=json.dumps({'query': query}), auth=auth, headers=headers)
Yields
{
"errors" : [ {
"errorType" : "UnauthorizedException",
"message" : "Permission denied"
} ]
}
It's quite simple--once you know. There are some things I didn't appreciate:
I've assumed IAM authentication (OpenID appended way below)
There are a number of ways for appsync to handle authentication. We're using IAM so that's what I need to deal with, yours might be different.
Boto doesn't come into it.
We want to issue a request like any regular punter, they don't use boto, and neither do we. Trawling the AWS boto docs was a waste of time.
Use the AWS4Auth library
We are going to send a regular http request to aws, so whilst we can use python requests they need to be authenticated--by attaching headers.
And, of course, AWS auth headers are special and different from all others.
You can try to work out how to do it
yourself, or you can go looking for someone else who has already done it: Aws_requests_auth, the one I started with, probably works just fine, but I have ended up with AWS4Auth. There are many others of dubious value; none endorsed or provided by Amazon (that I could find).
Specify appsync as the "service"
What service are we calling? I didn't find any examples of anyone doing this anywhere. All the examples are trivial S3 or EC2 or even EB which left uncertainty. Should we be talking to api-gateway service? Whatsmore, you feed this detail into the AWS4Auth routine, or authentication data. Obviously, in hindsight, the request is hitting Appsync, so it will be authenticated by Appsync, so specify "appsync" as the service when putting together the auth headers.
It comes together as:
import requests
from requests_aws4auth import AWS4Auth

# Use AWS4Auth to sign a requests session
session = requests.Session()
session.auth = AWS4Auth(
    # An AWS 'ACCESS KEY' associated with an IAM user.
    'AKxxxxxxxxxxxxxxx2A',
    # The 'secret' that goes with the above access key.
    'kwWxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgEm',
    # The region you want to access.
    'ap-southeast-2',
    # The service you want to access.
    'appsync'
)

# As found in AWS Appsync under Settings for your endpoint.
# (parentheses needed for the implicit string concatenation across lines)
APPSYNC_API_ENDPOINT_URL = ('https://nqxxxxxxxxxxxxxxxxxxxke'
                            '.appsync-api.ap-southeast-2.amazonaws.com/graphql')

# Use JSON format string for the query. It does not need reformatting.
query = """
query foo {
    GetUserSettings (
        identity_id: "ap-southeast-2:8xxxxxxb-7xx4-4xx4-8xx0-exxxxxxx2"
    ){
        user_name, email, whatever
    }}"""

# Now we can simply post the request...
response = session.request(
    url=APPSYNC_API_ENDPOINT_URL,
    method='POST',
    json={'query': query}
)
print(response.text)
Which yields
# Your answer comes as a JSON formatted string in the text attribute, under data.
{"data":{"GetUserSettings":{"user_name":"0xxxxxxx3-9102-42f0-9874-1xxxxx7dxxx5"}}}
Getting credentials
To get rid of the hardcoded key/secret you can consume the local AWS ~/.aws/config and ~/.aws/credentials, and it is done this way...
# Use AWS4Auth to sign a requests session, reading keys from the local
# AWS config/credentials files via boto3 instead of hardcoding them.
session = requests.Session()
credentials = boto3.session.Session().get_credentials()
session.auth = AWS4Auth(
    credentials.access_key,
    credentials.secret_key,
    boto3.session.Session().region_name,
    'appsync',
    session_token=credentials.token
)
...<as above>
This does seem to respect the environment variable AWS_PROFILE for assuming different roles.
Note that STS.get_session_token is not the way to do it, as it may try to assume a role from a role, depending where it keyword matched the AWS_PROFILE value. Labels in the credentials file will work because the keys are right there, but names found in the config file do not work, as that assumes a role already.
OpenID
In this scenario, all the complexity is transferred to the conversation with the openid connect provider. The hard stuff is all the auth hoops you jump through to get an access token, and thence using the refresh token to keep it alive. That is where all the real work lies.
Once you finally have an access token, assuming you have configured the "OpenID Connect" Authorization Mode in appsync, then you can, very simply, drop the access token into the header:
# With AppSync's "OpenID Connect" auth mode, the access token goes straight
# into the Authorization header.
response = requests.post(
    url="https://nc3xxxxxxxxxx123456zwjka.appsync-api.ap-southeast-2.amazonaws.com/graphql",
    headers={"Authorization": ACCESS_TOKEN},
    json={'query': "query foo{GetStuff{cat, dog, tree}}"}
)
You can set up an API key on the AppSync end and use the code below. This works for my case.
import requests

# establish a session with requests session
session = requests.Session()

# As found in AWS Appsync under Settings for your endpoint.
APPSYNC_API_ENDPOINT_URL = 'https://vxxxxxxxxxxxxxxxxxxy.appsync-api.ap-southeast-2.amazonaws.com/graphql'

# setup the query string (optional)
query = """query listItemsQuery {listItemsQuery {items {correlation_id, id, etc}}}"""

# Now we can simply post the request...
response = session.request(
    url=APPSYNC_API_ENDPOINT_URL,
    method='POST',
    headers={'x-api-key': '<APIKEYFOUNDINAPPSYNCSETTINGS>'},
    json={'query': query}
)
print(response.json()['data'])
Building off Joseph Warda's answer you can use the class below to send AppSync commands.
# fileName: AppSyncLibrary
import requests


class AppSync():
    """Thin wrapper around an AppSync GraphQL HTTP endpoint using API-key auth."""

    def __init__(self, data):
        # data: dict with "endpoint" (GraphQL URL) and "api_key" keys.
        endpoint = data["endpoint"]
        self.APPSYNC_API_ENDPOINT_URL = endpoint
        self.api_key = data["api_key"]
        self.session = requests.Session()

    def graphql_operation(self, query, input_params):
        """POST a GraphQL operation; input_params becomes the $input variable."""
        response = self.session.request(
            url=self.APPSYNC_API_ENDPOINT_URL,
            method='POST',
            headers={'x-api-key': self.api_key},
            json={'query': query, 'variables': {"input": input_params}}
        )
        return response.json()
For example in another file within the same directory:
from AppSyncLibrary import AppSync

# Placeholders — replace with your endpoint URL string and API key string.
APPSYNC_API_ENDPOINT_URL = {YOUR_APPSYNC_API_ENDPOINT}
APPSYNC_API_KEY = {YOUR_API_KEY}

init_params = {"endpoint": APPSYNC_API_ENDPOINT_URL, "api_key": APPSYNC_API_KEY}
app_sync = AppSync(init_params)

mutation = """mutation CreatePost($input: CreatePostInput!) {
    createPost(input: $input) {
        id
        content
    }
}
"""

input_params = {
    "content": "My first post"
}

response = app_sync.graphql_operation(mutation, input_params)
print(response)
Note: This requires you to activate API access for your AppSync API. Check this AWS post for more details.
graphql-python/gql supports AWS AppSync since version 3.0.0rc0.
It supports queries, mutation and even subscriptions on the realtime endpoint.
The documentation is available here
Here is an example of a mutation using the API Key authentication:
import asyncio
import os
import sys
from urllib.parse import urlparse

from gql import Client, gql
from gql.transport.aiohttp import AIOHTTPTransport
from gql.transport.appsync_auth import AppSyncApiKeyAuthentication

# Uncomment the following lines to enable debug output
# import logging
# logging.basicConfig(level=logging.DEBUG)


async def main():
    # Should look like:
    # https://XXXXXXXXXXXXXXXXXXXXXXXXXX.appsync-api.REGION.amazonaws.com/graphql
    url = os.environ.get("AWS_GRAPHQL_API_ENDPOINT")
    api_key = os.environ.get("AWS_GRAPHQL_API_KEY")

    if url is None or api_key is None:
        print("Missing environment variables")
        sys.exit()

    # Extract host from url
    host = str(urlparse(url).netloc)

    auth = AppSyncApiKeyAuthentication(host=host, api_key=api_key)

    transport = AIOHTTPTransport(url=url, auth=auth)

    async with Client(
        transport=transport, fetch_schema_from_transport=False,
    ) as session:
        query = gql(
            """
mutation createMessage($message: String!) {
    createMessage(input: {message: $message}) {
        id
        message
        createdAt
    }
}"""
        )
        variable_values = {"message": "Hello world!"}

        result = await session.execute(query, variable_values=variable_values)
        print(result)


asyncio.run(main())
I am unable to add a comment due to low rep, but I just want to add that I tried the accepted answer and it didn't work. I was getting an error saying my session_token is invalid. Probably because I was using AWS Lambda.
I got it to work pretty much exactly, but by adding to the session token parameter of the aws4auth object. Here's the full piece:
import requests
import os
from requests_aws4auth import AWS4Auth


def AppsyncHandler(event, context):
    """Lambda handler: signs an AppSync request with the Lambda's own
    temporary credentials (including the session token)."""
    # These are env vars that are always present in an AWS Lambda function.
    # If not using AWS Lambda, you'll need to add them manually to your env.
    access_id = os.environ.get("AWS_ACCESS_KEY_ID")
    secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY")
    session_token = os.environ.get("AWS_SESSION_TOKEN")
    region = os.environ.get("AWS_REGION")

    # Your AppSync Endpoint
    api_endpoint = os.environ.get("AppsyncConnectionString")
    resource = "appsync"

    session = requests.Session()
    # session_token is required for temporary (Lambda role) credentials.
    session.auth = AWS4Auth(access_id,
                            secret_key,
                            region,
                            resource,
                            session_token=session_token)
Hope this Helps Everyone
import requests
import json
import os
from dotenv import load_dotenv

load_dotenv(".env")


class AppSync(object):
    """Thin wrapper around an AppSync GraphQL HTTP endpoint using API-key auth."""

    def __init__(self, data):
        # data: dict with "endpoint" (GraphQL URL) and "api_key" keys.
        endpoint = data["endpoint"]
        self.APPSYNC_API_ENDPOINT_URL = endpoint
        self.api_key = data["api_key"]
        self.session = requests.Session()

    def graphql_operation(self, query, input_params):
        """POST a GraphQL operation; input_params becomes the $input variable."""
        response = self.session.request(
            url=self.APPSYNC_API_ENDPOINT_URL,
            method='POST',
            headers={'x-api-key': self.api_key},
            json={'query': query, 'variables': {"input": input_params}}
        )
        return response.json()


def main():
    APPSYNC_API_ENDPOINT_URL = os.getenv("APPSYNC_API_ENDPOINT_URL")
    APPSYNC_API_KEY = os.getenv("APPSYNC_API_KEY")
    init_params = {"endpoint": APPSYNC_API_ENDPOINT_URL, "api_key": APPSYNC_API_KEY}
    app_sync = AppSync(init_params)

    # NOTE(review): despite the variable name, this is a GraphQL *query*,
    # not a mutation.
    mutation = """
query MyQuery {
    getAccountId(id: "5ca4bbc7a2dd94ee58162393") {
        _id
        account_id
        limit
        products
    }
}
"""
    input_params = {}

    response = app_sync.graphql_operation(mutation, input_params)
    print(json.dumps(response, indent=3))


main()

Bad Request using python_HTTP:400

I'm using Python 3 to scrape the Facebook page of "nytimes".
I tried to create my own Api first so i can get my app_id && app_secret.
when i tried to ping NYT's Facebook page to verify that the access_token works and the page_id is valid i got this ERROR.
HTTPError: HTTP Error 400: Bad Request
My code is Below
First connect to my API via id and secret code.
import urllib
import datetime
import json
import time
import urllib.request

# NOTE(review): `id` shadows the builtin; kept for fidelity with the question.
id = "111111111111"
secret = "123123123123123113"
token = id + "|" + secret
Second To ping the page i did:
page_id = 'nytimes'
Then
def testFacebookPageData(page_id, token):
# construct the URL string
base = "https://graph.facebook.com/v2.4"
node = "/" + page_id
parameters = "/?token=%s" % token
url = base + node + parameters
# retrieve data
req = urllib.request.Request(url)
response = urllib.request.urlopen(req)
data = json.loads(response.read())
print (json.dumps(data, indent=4, sort_keys=True))
testFacebookPageData(page_id, token)
Hope that I'm clear. I need your help — thank you!
According to Facebooks docs for Graph API, the parameter should be named access_token, and not token.
parameters = "/?access_token=%s" % token

Khan Academy API doesn't return data

I'm using the test_client2 included in the git repo. Making requests returns <Response [200]>, so apparently the system is working, but it doesn't print any data. What could cause this?
Full Code added here. The only changes I have made from the original code as pulled from GIT were to add CONSUMER_SECRET and CONSUMER_KEY values for the auth_token (which seems to work) and change one of the print functions to show explicitly where results start.
""" An interactive script for testing Khan Academy API Authentication.
This is an example of how to use the /api/auth2 authentication flow.
See https://github.com/Khan/khan-api/wiki/Khan-Academy-API-Authentication for
documentation.
"""
import cgi
import rauth
import SimpleHTTPServer
import SocketServer
import time
import webbrowser
# You can get a CONSUMER_KEY and CONSUMER_SECRET for your app here:
# http://www.khanacademy.org/api-apps/register
CONSUMER_SECRET = 'Redacted but auth-token works'
CONSUMER_KEY = 'Redacted'
CALLBACK_BASE = '127.0.0.1'
SERVER_URL = 'http://www.khanacademy.org'
DEFAULT_API_RESOURCE = '/api/v1/playlists'
# Set by the callback handler once the user authorizes the request token.
VERIFIER = None


# Create the callback server that's used to set the oauth verifier after the
# request token is authorized.
def create_callback_server():
    class CallbackHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        def do_GET(self):
            global VERIFIER
            params = cgi.parse_qs(self.path.split('?', 1)[1],
                                  keep_blank_values=False)
            VERIFIER = params['oauth_verifier'][0]

            self.send_response(200)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            self.wfile.write('OAuth request token fetched and authorized;' +
                             ' you can close this window.')

        def log_request(self, code='-', size='-'):
            # Silence per-request logging.
            pass

    # Port 0 lets the OS pick a free ephemeral port.
    server = SocketServer.TCPServer((CALLBACK_BASE, 0), CallbackHandler)
    return server
# Make an authenticated API call using the given rauth session.
def get_api_resource(session):
resource_url = raw_input("Resource relative url (e.g. %s): " %
DEFAULT_API_RESOURCE) or DEFAULT_API_RESOURCE
url = SERVER_URL + resource_url
split_url = url.split('?', 1)
params = {}
# Separate out the URL's parameters, if applicable.
if len(split_url) == 2:
url = split_url[0]
params = cgi.parse_qs(split_url[1], keep_blank_values=False)
start = time.time()
response = session.get(url, params=params)
end = time.time()
print "Result\n"
print response
print "\nTime: %ss\n" % (end - start)
def run_tests():
    """Walk the full OAuth flow (request token -> authorize -> access token),
    then repeatedly prompt for resources to fetch."""
    global CONSUMER_KEY, CONSUMER_SECRET, SERVER_URL

    # Set consumer key, consumer secret, and server base URL from user input or
    # use default values.
    CONSUMER_KEY = raw_input("consumer key: ") or CONSUMER_KEY
    CONSUMER_SECRET = raw_input("consumer secret: ") or CONSUMER_SECRET
    SERVER_URL = raw_input("server base url: ") or SERVER_URL

    # Create an OAuth1Service using rauth.
    service = rauth.OAuth1Service(
        name='test',
        consumer_key=CONSUMER_KEY,
        consumer_secret=CONSUMER_SECRET,
        request_token_url=SERVER_URL + '/api/auth2/request_token',
        access_token_url=SERVER_URL + '/api/auth2/access_token',
        authorize_url=SERVER_URL + '/api/auth2/authorize',
        base_url=SERVER_URL + '/api/auth2')

    callback_server = create_callback_server()

    # 1. Get a request token.
    request_token, secret_request_token = service.get_request_token(
        params={'oauth_callback': 'http://%s:%d/' %
                (CALLBACK_BASE, callback_server.server_address[1])})

    # 2. Authorize your request token.
    authorize_url = service.get_authorize_url(request_token)
    webbrowser.open(authorize_url)

    # Blocks until the browser hits the callback and sets VERIFIER.
    callback_server.handle_request()
    callback_server.server_close()

    # 3. Get an access token.
    session = service.get_auth_session(request_token, secret_request_token,
                                       params={'oauth_verifier': VERIFIER})

    # Repeatedly prompt user for a resource and make authenticated API calls.
    print
    while(True):
        get_api_resource(session)
def main():
    run_tests()


if __name__ == "__main__":
    main()

Resources