OS: Ubuntu 18
Python: Python 3.6
MLflow: 1.4
I'm trying to get MLflow Projects to run. Here is my project:
MLflow/
    conda.yaml
    main.py
    prep_data.py
    learn.py
The project is heavily based upon this repo: https://github.com/mlflow/mlflow/tree/master/examples/multistep_workflow
I'm trying to run both the prep_data and learn scripts using MLflow Projects and the main.py script as an entry point.
For execution I use the following command: mlflow run . -P experiment_name=testproject
But I get the following error:
Traceback (most recent call last):
File "prep_data.py", line 126, in <module>
prep_data()
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "prep_data.py", line 65, in prep_data
with mlflow.start_run() as active_run:
File "/home/ubuntu/venv/lib/python3.6/site-packages/mlflow/tracking/fluent.py", line 129, in start_run
"arguments".format(existing_run_id))
mlflow.exceptions.MlflowException: Cannot start run with ID 405b83bbb61046afa83b8dcd71b4db14 because active run ID does not match environment run ID. Make sure --experiment-name or --experiment-id matches experiment set with set_experiment(), or just use command-line arguments
Traceback (most recent call last):
File "main.py", line 75, in <module>
workflow()
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "main.py", line 61, in workflow
}, experiment_name)
File "main.py", line 40, in _get_or_run
submitted_run = mlflow.run('.', entry_point=entry_point, parameters=params)
File "/home/ubuntu/venv/lib/python3.6/site-packages/mlflow/projects/__init__.py", line 287, in run
_wait_for(submitted_run_obj)
File "/home/ubuntu/venv/lib/python3.6/site-packages/mlflow/projects/__init__.py", line 304, in _wait_for
raise ExecutionException("Run (ID '%s') failed" % run_id)
mlflow.exceptions.ExecutionException: Run (ID '405b83bbb61046afa83b8dcd71b4db14') failed
2019/11/22 18:51:59 ERROR mlflow.cli: === Run (ID '62c229b2d9194b569a7b2bfc14338800') failed ===
I'm not sure if I understand the error correctly, but it seems to be saying that I am using multiple experiments. However, I'm fairly certain I am only using one (testproject).
Browsing SO and GitHub issues suggested I should set the environment variable MLFLOW_TRACKING_URI, but it wasn't stated how to set it, so I tried two different ways:
1) exporting it before running the MLflow project: $ export MLFLOW_TRACKING_URI='http://127.0.0.1:5099'
2) setting it at the beginning of my main.py script using python: os.environ['MLFLOW_TRACKING_URI'] = 'http://127.0.0.1:5099'
Neither had any effect.
Here you can see my project:
main.py
import os
import click
import mlflow
from mlflow.entities import RunStatus


def _already_ran(entry_point, params, experiment_name):
    # experiment = mlflow.get_experiment_by_name('{}_{}'.format(experiment_name, entry_point))
    experiment = mlflow.get_experiment_by_name(experiment_name)
    if experiment is None:
        return None
    experiment_id = experiment.experiment_id

    client = mlflow.tracking.MlflowClient()
    all_run_infos = reversed(client.list_run_infos(experiment_id))
    match_failed = False
    for run_info in all_run_infos:
        full_run = client.get_run(run_info.run_id)
        for p_key, p_val in params.items():
            run_value = full_run.data.params.get(p_key)
            if run_value != p_val:
                match_failed = True
                break
        if match_failed:
            continue
        if run_info.to_proto().status != RunStatus.FINISHED:
            continue
        return client.get_run(run_info.run_id)
    return None


def _get_or_run(entry_point, params, experiment_name, use_cache=True):
    existing_run = _already_ran(entry_point, params, experiment_name)
    if use_cache and existing_run:
        return existing_run
    submitted_run = mlflow.run('.', entry_point=entry_point, parameters=params)
    return mlflow.tracking.MlflowClient().get_run(submitted_run.run_id)


@click.command()
@click.option("--experiment-name")
@click.option('--prep-data-time-avg', default='placeholder')
@click.option('--prep-data-sensor-id', default='placeholder')
@click.option('--learn-epochs', default=100, type=int)
@click.option('--learn-neurons', default=5, type=int)
@click.option('--learn-layers', default=2, type=int)
def workflow(experiment_name, prep_data_time_avg, prep_data_sensor_id, learn_epochs, learn_neurons, learn_layers):
    # mlflow.set_tracking_uri('http://127.0.0.1:5099')
    # mlflow.set_experiment(experiment_name)
    # with mlflow.start_run() as active_run:
    data_run = _get_or_run('prep_data', {
        'time_avg': prep_data_time_avg,
        'sensor_id': prep_data_sensor_id,
        'experiment_name': experiment_name
    }, experiment_name)
    learn_run = _get_or_run('learn', {
        'epochs': learn_epochs,
        'neurons': learn_neurons,
        'layers': learn_layers,
        'prep_data_run_id': data_run.run_id,
        'experiment_name': experiment_name,
    }, experiment_name)


if __name__ == '__main__':
    # os.environ['MLFLOW_TRACKING_URI'] = 'http://127.0.0.1:5099'
    workflow()
prep_data.py
import click
import mlflow


@click.command()
@click.option("--experiment-name")
@click.option('--time-avg', default='placeholder')
@click.option('--sensor-id', default='placeholder')
def prep_data(experiment_name, time_avg, sensor_id):
    mlflow.set_experiment(experiment_name)
    with mlflow.start_run() as active_run:
        # logic code of prep_data
        pass


if __name__ == '__main__':
    prep_data()
I'd appreciate any ideas on how to fix this issue.
Thank you very much!
Cheers,
Raphael
You need to provide the same experiment name to the mlflow CLI:
mlflow run . -P experiment_name=testproject --experiment-name testproject
For more details:
https://www.mlflow.org/docs/latest/cli.html#mlflow-run
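As far as I can tell, mlflow run creates the run up front and hands its ID to the step process through the MLFLOW_RUN_ID environment variable; if the step then calls set_experiment() with an experiment other than the one the run was created in, start_run() refuses to attach to it, which is the error you see. An alternative sketch (untested against your exact project, and the log_param() calls are purely illustrative) is to drop set_experiment() from the step scripts and let start_run() attach to the run that mlflow run already created, as the multistep_workflow example does:

    # prep_data.py -- sketch: let the step attach to the run created by `mlflow run`
    import click
    import mlflow


    @click.command()
    @click.option("--experiment-name")
    @click.option('--time-avg', default='placeholder')
    @click.option('--sensor-id', default='placeholder')
    def prep_data(experiment_name, time_avg, sensor_id):
        # No mlflow.set_experiment() call here: the experiment comes from the
        # --experiment-name flag given to `mlflow run`, and MLFLOW_RUN_ID
        # (set by the project backend) tells start_run() which run to join.
        with mlflow.start_run() as active_run:
            mlflow.log_param('time_avg', time_avg)    # illustrative only
            mlflow.log_param('sensor_id', sensor_id)  # illustrative only


    if __name__ == '__main__':
        prep_data()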
Related
Hi, I am receiving an error when starting a bot with nothing on it yet.
I am using Pyrogram (2.0.97) and Python (3.9.6).
I haven't developed more code for this bot yet, but I want to use this library (Pyrogram) and not another one, because of the decorators this library provides.
Here is the simple code:
from pyrogram import Client, filters
from pyrogram.types import Message
from pyrogram.types import (
    InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardMarkup
)

TELEGRAM_API_ID = "22284blablabla"
TELEGRAM_API_HASH = "186a3759fc29f394bad2700cc29ecblablabla"
TELEGRAM_BOT_TOKEN = "blablablablabla"

app = Client(
    'SegBot',
    api_id=TELEGRAM_API_ID,
    api_hash=TELEGRAM_API_HASH,
    bot_token=TELEGRAM_BOT_TOKEN
)


@app.on_message(filters.private)
async def hello(client, message):
    await message.reply("Hello from Pyrogram!")


print('running!!!')
app.run()
And the error is:
running!!!
Traceback (most recent call last):
File "G:\Drives compartilhados\SEG FIS\API\TELEGRAM BOT TEST\bot_test.py", line 151, in <module>
app.run()
File "C:\Users\roberto.lins\AppData\Roaming\Python\Python39\site-packages\pyrogram\methods\utilities\run.py", line 84, in run
self.start()
return loop.run_until_complete(coroutine)
File "C:\Program Files\Python39\lib\asyncio\base_events.py", line 642, in run_until_complete
return future.result()
File "C:\Users\roberto.lins\AppData\Roaming\Python\Python39\site-packages\pyrogram\methods\utilities\start.py", line 58, in start
is_authorized = await self.connect()
File "C:\Users\roberto.lins\AppData\Roaming\Python\Python39\site-packages\pyrogram\methods\auth\connect.py", line 40, in connect
await self.load_session()
File "C:\Users\roberto.lins\AppData\Roaming\Python\Python39\site-packages\pyrogram\client.py", line 586, in load_session
await Auth(
File "C:\Users\roberto.lins\AppData\Roaming\Python\Python39\site-packages\pyrogram\session\auth.py", line 254, in create
raise e
File "C:\Users\roberto.lins\AppData\Roaming\Python\Python39\site-packages\pyrogram\session\auth.py", line 89, in create
res_pq = await self.invoke(raw.functions.ReqPqMulti(nonce=nonce))
File "C:\Users\roberto.lins\AppData\Roaming\Python\Python39\site-packages\pyrogram\session\auth.py", line 67, in invoke
return self.unpack(response)
File "C:\Users\roberto.lins\AppData\Roaming\Python\Python39\site-packages\pyrogram\session\auth.py", line 60, in unpack
return TLObject.read(b)
File "C:\Users\roberto.lins\AppData\Roaming\Python\Python39\site-packages\pyrogram\raw\core\tl_object.py", line 33, in read
return cast(TLObject, objects[int.from_bytes(b.read(4), "little")]).read(b, *args)
KeyError: 0
Any help, please?
I'm trying to get MLflow Projects to run using the MLflow CLI, but following the tutorial leads to an error. For any project I try to run from the CLI, I get the following error:
Traceback (most recent call last):
File "/home/rbc/.local/bin/mlflow", line 11, in <module>
sys.exit(cli())
File "/home/rbc/.local/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/rbc/.local/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/rbc/.local/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/rbc/.local/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/rbc/.local/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/rbc/.local/lib/python3.6/site-packages/mlflow/cli.py", line 139, in run
run_id=run_id,
File "/home/rbc/.local/lib/python3.6/site-packages/mlflow/projects/__init__.py", line 230, in run
storage_dir=storage_dir, block=block, run_id=run_id)
File "/home/rbc/.local/lib/python3.6/site-packages/mlflow/projects/__init__.py", line 88, in _run
active_run = _create_run(uri, experiment_id, work_dir, entry_point)
File "/home/rbc/.local/lib/python3.6/site-packages/mlflow/projects/__init__.py", line 579, in _create_run
active_run = tracking.MlflowClient().create_run(experiment_id=experiment_id, tags=tags)
File "/home/rbc/.local/lib/python3.6/site-packages/mlflow/tracking/client.py", line 101, in create_run
source_version=source_version
File "/home/rbc/.local/lib/python3.6/site-packages/mlflow/store/rest_store.py", line 156, in create_run
response_proto = self._call_endpoint(CreateRun, req_body)
File "/home/rbc/.local/lib/python3.6/site-packages/mlflow/store/rest_store.py", line 66, in _call_endpoint
js_dict = json.loads(response.text)
File "/usr/lib/python3.6/json/__init__.py", line 354, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.6/json/decoder.py", line 339, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.6/json/decoder.py", line 357, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
Here's an example of the type of command I'm using to start the run, which comes directly from the tutorial
mlflow run https://github.com/mlflow/mlflow#examples/sklearn_elasticnet_wine -m databricks -c cluster-spec.json --experiment-id 72647065958042 -P alpha=2.0 -P l1_ratio=0.5
I've traced the error to something involving MLflow returning empty when it tries to start a run, but I can successfully run MLflow experiments using the Databricks environment I'm connecting to, so I'm not sure where the problem is. I'm running MLflow 0.9.1 on Ubuntu 18.04.
Not sure if you have solved your issue, but here is how I fixed it.
The databricks-cli works without problems with the following config:

    host = https://xxx.databricks.net/?o=<org_id>
    token = dapixxx

but MLflow is not quite happy about that; change it to:

    host = https://xxx.databricks.net
    username = token
    password = dapixxx
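For completeness, this is roughly what the whole file looks like after the change (a sketch; I'm assuming the default profile in ~/.databrickscfg, adjust the section name if you use a named profile):

    # ~/.databrickscfg -- sketch of the full file after the change
    [DEFAULT]
    host = https://xxx.databricks.net
    username = token
    password = dapixxx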
I've recently configured celery to run some dummy tasks and ran the workers through Terminal on my Mac. It all seems to run as expected; it took a while, since some of the literature out there advises different configuration scenarios, but I got there anyway. Now the next step is to trigger the tasks via my view in Django. I'm using celery 1.2.26.post2.
My project structure:
/MyApp
celery_tasks.py
celeryconfig.py
__init__.py
I've been following several tutorials and found this video and this video and this video very helpful to obtain an overall view of celery.
My scripts are:
celery_tasks.py
from celery import Celery
from celery.task import task

app = Celery()  # Initialise the app
app.config_from_object('celeryconfig')  # Tell Celery instance to use celeryconfig module

suf = lambda n: "%d%s" % (n, {1: "st", 2: "nd", 3: "rd"}.get(n if n < 20 else n % 10, "th"))


@task
def fav_doctor():
    """Reads doctor.txt file and prints out fav doctor, then adds a new
    number to the file"""
    with open('doctor.txt', 'r+') as f:
        for line in f:
            nums = line.rstrip().split()
            print('The {} doctor is my favorite'.format(suf(int(nums[0]))))
            for num in nums[1:]:
                print('Wait! The {} doctor is my favorite'.format(suf(int(num))))
        last_num = int(nums[-1])
        new_last_num = last_num + 1
        f.write(str(new_last_num) + ' ')


@task
def reverse(string):
    return string[::-1]


@task
def add(x, y):
    return x + y
celeryconfig.py
from datetime import timedelta
## List of modules to import when celery starts.
CELERY_IMPORTS = ('celery_tasks',)
## Message Broker (RabbitMQ) settings.
BROKER_URL = 'amqp://'
BROKER_PORT = 5672
#BROKER_TRANSPORT = 'sqlalchemy'
#BROKER_HOST = 'sqlite:///tasks.db'
#BROKER_VHOST = '/'
#BROKER_USER = 'guest'
#BROKER_PASSWORD = 'guest'
## Result store settings.
CELERY_RESULT_BACKEND = 'rpc://'
#CELERY_RESULT_DBURI = 'sqlite:///mydatabase.db'
## Worker settings
#CELERYD_CONCURRENCY = 1
#CELERYD_TASK_TIME_LIMIT = 20
#CELERYD_LOG_FILE = 'celeryd.log'
#CELERYD_LOG_LEVEL = 'INFO'
## Misc
CELERY_IGNORE_RESULT = False
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT=['json']
CELERY_TIMEZONE = 'Europe/Berlin'
CELERY_ENABLE_UTC = True
CELERYBEAT_SCHEDULE = {
    'doctor-every-10-seconds': {
        'task': 'celery_tasks.fav_doctor',
        'schedule': timedelta(seconds=3),
    },
}
__init__.py
from .celery_tasks import app as celery_app # Ensures app is always imported when Django starts so that shared_task will use this app.
__all__ = ['celery_app']
In settings.py
INSTALLED_APPS = [
    ...
    'djcelery',
]
In my views folder, I have a specific view module, admin_scripts.py
import subprocess
from pathlib import Path

from django.contrib.auth.decorators import login_required
from django.shortcuts import render

from MyApp.celery_tasks import fav_doctor, reverse, send_email, add


@login_required
def admin_script_dashboard(request):
    if request.method == 'POST':
        form = Admin_Script(request.POST)
        if form.is_valid():
            backup_script_select = form.cleaned_data['backup_script_select']
            dummy_script_select = form.cleaned_data['dummy_script_select']
            print("backup_script_select: {0}".format(backup_script_select))
            print("dummy_script_select: {0}".format(dummy_script_select))
            if backup_script_select:
                print("Backup script executing. Please wait...")
                dbackup_script_dir = str(Path.home()) + '/Software/MyOtherApp/cli-tools/dbbackup_DRAFT.py'
                subprocess.call(" python {} ".format(dbackup_script_dir), shell=True)
                async_result = reverse.delay('Using Celery')
                print("async_result: {0}".format(async_result))
                result = reverse.AsyncResult(async_result.id)
                print("result: {0}".format(result))
                print("Something occurred...")
            if dummy_script_select:
                print("Dummy script executing. Please wait...")
                dummy_script_dir = str(Path.home()) + '/Software/MyOtherApp/cli-tools/dummy.py'
                subprocess.call(" python {} ".format(dummy_script_dir), shell=True)
                async_result = add.delay(2, 5)
                print("async_result: {0}".format(async_result))
                result = add.AsyncResult(async_result.id)
                print("result: {0}".format(result))
                print("Something occurred...")
    return render(request, 'MyApp/admin_scripts_db.html')
The problem occurs at the line in my admin_scripts.py file where async_result = add.delay(2, 5) is called. Below is the traceback:
[12/Jul/2018 09:23:19] ERROR [django.request:135] Internal Server Error: /MyProject/adminscripts/
Traceback (most recent call last):
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/local.py", line 309, in _get_current_object
return object.__getattribute__(self, '__thing')
AttributeError: 'PromiseProxy' object has no attribute '__thing'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/kombu/utils/__init__.py", line 323, in __get__
return obj.__dict__[self.__name__]
KeyError: 'conf'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 158, in _smart_import
return imp(path)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 112, in import_from_cwd
package=package,
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/utils/imports.py", line 101, in import_from_cwd
return imp(module, package=package)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 106, in import_module
return importlib.import_module(module, package=package)
File "/Users/MyMBP/anaconda3/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 978, in _gcd_import
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 948, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'celeryconfig'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/django/contrib/auth/decorators.py", line 23, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/Users/MyMBP/Software/MyProject/MyProjectsite/MyProject/views/admin_scripts.py", line 44, in admin_script_dashboard
async_result = add.delay(2, 5)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/local.py", line 143, in __getattr__
return getattr(self._get_current_object(), name)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/local.py", line 311, in _get_current_object
return self.__evaluate__()
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/local.py", line 341, in __evaluate__
thing = Proxy._get_current_object(self)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/local.py", line 101, in _get_current_object
return loc(*self.__args, **self.__kwargs)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/app/base.py", line 270, in _task_from_fun
'__wrapped__': fun}, **options))()
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/app/task.py", line 201, in __new__
instance.bind(app)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/app/task.py", line 365, in bind
conf = app.conf
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/kombu/utils/__init__.py", line 325, in __get__
value = obj.__dict__[self.__name__] = self.__get(obj)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/app/base.py", line 638, in conf
return self._get_config()
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/app/base.py", line 454, in _get_config
self.loader.config_from_object(self._config_source)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 140, in config_from_object
obj = self._smart_import(obj, imp=self.import_from_cwd)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 161, in _smart_import
return symbol_by_name(path, imp=imp)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/kombu/utils/__init__.py", line 96, in symbol_by_name
module = imp(module_name, package=package, **kwargs)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 112, in import_from_cwd
package=package,
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/utils/imports.py", line 101, in import_from_cwd
return imp(module, package=package)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 106, in import_module
return importlib.import_module(module, package=package)
File "/Users/MyMBP/anaconda3/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 978, in _gcd_import
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 948, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'celeryconfig'
Numerous errors get thrown, and the traceback is very large, about 9000 lines long in total; this is just a snippet. I'm new to celery and task queueing in general, so perhaps some of the experts out there can pick out some very obvious mistakes in my code.
As I said, the configuration of celery is successful, and when triggering the tasks in Terminal, the tasks do what they are supposed to do. I'm building this up piece by piece, so this next step is to trigger the tasks using my view in Django (instead of being called using Terminal). Once I have figured that out, then the ultimate aim is to track the progress of a task, and report the output to the user in a separate window (.js, AJAX etc.) that shows for example the line output that you see in Console.
I read that the tasks.py (in my case celery_tasks.py) file needs to be in a django app that's registered in settings.py. Is this true?
This is not a full answer, but it may partly help others who encounter a similar issue.
Basically, in celery_tasks.py there is the following line:

    app.config_from_object('celeryconfig')

When I trigger the workers through Terminal, this works. When I do it via my view, the error message above appears. Changing the line as follows makes it work via the view:

    app.config_from_object('MyApp.celeryconfig')

I still need to figure out why there is this discrepancy and how to resolve it so that it doesn't matter whether the tasks are called via my view or via Terminal.
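One workaround that I think sidesteps the import-path question entirely (a sketch; the import line is an assumption about your package layout): config_from_object() also accepts the imported module object instead of a dotted string, so Celery doesn't have to re-import the config by name from whatever the current working directory happens to be:

    # celery_tasks.py -- sketch: configure from the module object instead of a name string
    from celery import Celery

    from MyApp import celeryconfig  # assumes MyApp is importable; adjust to your layout

    app = Celery()
    app.config_from_object(celeryconfig)  # no string lookup, so the working directory no longer matters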
I have a problem that I can't work out. My code worked on localhost; however, now that I have a proper domain set up, I'm getting some strange problems trying to log in with Facebook. I have since moved to Python 3.6.
I have the following tornado setup code:
handlers = [
    (r"/facebookAuth", FBAuth),
    # other handlers...
]

# Settings dict for Application
settings = {
    # static handler
    # Set specific HTTP404 errors to Error404 Class
    "default_handler_class": Error404,
    "cookie_secret": "xxx",
    "facebook_redirect_uri": "https://www.example.com/facebookAuth",
    "facebook_secret": "xxx",
    "facebook_app_id": "xxx",
}
class FBAuth(BaseHandler, tornado.auth.FacebookGraphMixin):
    async def get(self):
        if self.get_argument("code", False):
            print("not code")
            user = await self.get_authenticated_user(
                redirect_uri=self.settings["facebook_redirect_uri"],
                client_id=self.settings["facebook_app_id"],
                client_secret=self.settings["facebook_secret"],
                code=self.get_argument("code"))
            print("******")
            print(user)
            firstName = user["first_name"]
            lastName = user["last_name"]
            # set cookie and start up code
        else:
            print("code")
            await self.authorize_redirect(
                redirect_uri=self.settings["facebook_redirect_uri"],
                client_id=self.settings["facebook_app_id"],
                scope=["email", "public_profile"])
I can't work out the result of the code. It shows:
code
not code
and crashes with the following:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/tornado-4.5.dev1-py3.6-linux-x86_64.egg/tornado/web.py", line 1474, in _execute
result = yield result
File "/usr/local/lib/python3.6/site-packages/tornado-4.5.dev1-py3.6-linux-x86_64.egg/tornado/gen.py", line 1045, in run
value = future.result()
File "/usr/local/lib/python3.6/site-packages/tornado-4.5.dev1-py3.6-linux-x86_64.egg/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/usr/local/lib/python3.6/site-packages/tornado-4.5.dev1-py3.6-linux-x86_64.egg/tornado/gen.py", line 1051, in run
yielded = self.gen.throw(*exc_info)
File "<string>", line 6, in _wrap_awaitable
File "/home/cs/charliesays/authHandlers.py", line 13, in get
code=self.get_argument("code"))
File "<string>", line 3, in __await__
File "/usr/local/lib/python3.6/site-packages/tornado-4.5.dev1-py3.6-linux-x86_64.egg/tornado/gen.py", line 1045, in run
value = future.result()
File "/usr/local/lib/python3.6/site-packages/tornado-4.5.dev1-py3.6-linux-x86_64.egg/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/usr/local/lib/python3.6/site-packages/tornado-4.5.dev1-py3.6-linux-x86_64.egg/tornado/stack_context.py", line 314, in wrapped
ret = fn(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/tornado-4.5.dev1-py3.6-linux-x86_64.egg/tornado/auth.py", line 983, in _on_access_token
"access_token": args["access_token"][-1],
KeyError: 'access_token'
It seems there is a problem with code=self.get_argument("code") in the call to get_authenticated_user().
I'm working on a little project with unittest.
I can run my tests, but if I add an argument to my command-line entry (e.g. ./myprogramm.py test instead of just ./myprogramm.py) in order to add an argparse layer (argparse itself doesn't seem to be the cause of the problem), a RecursionError occurs.
Sample file to reproduce:
#!/usr/bin/env python3
import argparse
import unittest


def foo():
    return True


class FooTestCase(unittest.TestCase):
    def test_foo(self):
        self.assertTrue(foo())


def test():
    unittest.main()


def main():
    foo()


def make_parser():
    parser = argparse.ArgumentParser(description='test or foo.')
    subparsers = parser.add_subparsers(dest='subcommand')
    subparsers.required = True
    subparsers.add_parser('test')
    subparsers.add_parser('foo')
    return parser


if __name__ == '__main__':
    parser = make_parser()
    args = parser.parse_args()
    if args.subcommand == 'test':
        test()
    elif args.subcommand == 'foo':
        main()
Launching this program with ./myprogramm.py foo works, but not if I launch it with ./myprogramm.py test.
Output with RecursionError:
File "./myprogramm.py", line 14, in test
unittest.main()
File "/usr/lib/python3.5/unittest/main.py", line 93, in __init__
self.parseArgs(argv)
File "/usr/lib/python3.5/unittest/main.py", line 140, in parseArgs
self.createTests()
File "/usr/lib/python3.5/unittest/main.py", line 147, in createTests
self.module)
File "/usr/lib/python3.5/unittest/loader.py", line 219, in loadTestsFromNames
suites = [self.loadTestsFromName(name, module) for name in names]
File "/usr/lib/python3.5/unittest/loader.py", line 219, in <listcomp>
suites = [self.loadTestsFromName(name, module) for name in names]
File "/usr/lib/python3.5/unittest/loader.py", line 204, in loadTestsFromName
test = obj()
File "./myprogramm.py", line 14, in test
unittest.main()
File "/usr/lib/python3.5/unittest/main.py", line 93, in __init__
self.parseArgs(argv)
File "/usr/lib/python3.5/unittest/main.py", line 114, in parseArgs
self._initArgParsers()
File "/usr/lib/python3.5/unittest/main.py", line 150, in _initArgParsers
parent_parser = self._getParentArgParser()
File "/usr/lib/python3.5/unittest/main.py", line 155, in _getParentArgParser
parser = argparse.ArgumentParser(add_help=False)
File "/usr/lib/python3.5/argparse.py", line 1629, in __init__
self._positionals = add_group(_('positional arguments'))
File "/usr/lib/python3.5/gettext.py", line 514, in gettext
return dgettext(_current_domain, message)
File "/usr/lib/python3.5/gettext.py", line 478, in dgettext
codeset=_localecodesets.get(domain))
File "/usr/lib/python3.5/gettext.py", line 413, in translation
mofiles = find(domain, localedir, languages, all=True)
File "/usr/lib/python3.5/gettext.py", line 376, in find
val = os.environ.get(envar)
File "/usr/lib/python3.5/_collections_abc.py", line 595, in get
return self[key]
RecursionError: maximum recursion depth exceeded
How could I (should I?) handle this?
Thus far, to avoid this problem, I run del sys.argv[1]. But is there a better way? (See the sketch after the output below for what I'm currently considering.)
I guess that unittest.main() uses sys.argv, and test as the first argument triggers a particular behaviour in this function.
If I use another keyword than test, I get something like this (using bar here):
E
======================================================================
ERROR: bar (unittest.loader._FailedTest)
----------------------------------------------------------------------
AttributeError: module '__main__' has no attribute 'bar'
----------------------------------------------------------------------
Ran 1 test in 0.000s
FAILED (errors=1)
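For reference, the workaround I'm currently leaning towards instead of del sys.argv[1] is to pass an explicit argv to unittest.main(); a minimal sketch (as far as I can tell, unittest.main() only falls back to sys.argv when its argv argument is None):

    import sys
    import unittest


    def test():
        # Pass only the program name so unittest doesn't see the 'test'
        # subcommand and try to load a test called 'test' (which would
        # re-enter unittest.main() and recurse).
        unittest.main(argv=sys.argv[:1])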