Unable to run python file - python-3.x

I have been trying to run a Python script, but I keep getting the following error.
Error:
Traceback (most recent call last):
File "cloud_copasi/background_daemon/cloud_copasi_daemon.py", line 18, in <module>
django.setup()
File "/Users/cloudcopasi/cloud-copasi/venv/lib/python3.8/site-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/Users/cloudcopasi/cloud-copasi/venv/lib/python3.8/site-packages/django/apps/registry.py", line 91, in populate
app_config = AppConfig.create(entry)
File "/Users/cloudcopasi/cloud-copasi/venv/lib/python3.8/site-packages/django/apps/config.py", line 90, in create
module = import_module(entry)
File "/Applications/Xcode.app/Contents/Developer/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1014, in _gcd_import
File "<frozen importlib._bootstrap>", line 991, in _find_and_load
File "<frozen importlib._bootstrap>", line 973, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'web_interface'
The script file I am trying to run (cloud_copasi_daemon.py) is:
import sys, time
import django
django.setup()
from tools.daemon import Daemon
import tools.background_script
from tools.response import RemoteLoggingResponse
from cloud_copasi import settings
import logging

log = logging.getLogger(__name__)

class MyDaemon(Daemon):
    # Set the level we wish to log at. Logs are sent back to the central server
    # Choices are all, debug, info, error, none

    def __init__(self, *args, **kwargs):
        return super(MyDaemon, self).__init__(*args, **kwargs)

    def stop(self, *args, **kwargs):
        return super(MyDaemon, self).stop(*args, **kwargs)

    def run(self):
        log.debug('Daemon running')
        while True:
            min_repeat_time = settings.DAEMON_POLL_TYME  # Seconds
            start_time = time.time()
            try:
                tools.background_script.run()
                log.debug('Background script finished')
            except Exception as e:
                log.exception(e)
            finish_time = time.time()
            difference = finish_time - start_time
            if difference < min_repeat_time:
                time.sleep(min_repeat_time - difference)

if __name__ == "__main__":
    daemon = MyDaemon('/tmp/Cloud-COPASI.pid')
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            daemon.start()
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart()
        else:
            print("Unknown command")
            sys.exit(2)
        sys.exit(0)
    else:
        print("usage: %s start|stop|restart" % sys.argv[0])
        sys.exit(2)
"web_interface" is the Django App and I have verified that the path referring to it is correct. I don't know where else do I need to fix the file path to get this python script to work.
I am having the same problem on Mac OS Big Sur (running Python 3.8) as well as on Linux CentOS (running Python 3.6).
Any help is much appreciated.

When you bootstrap Django like this, you need to make sure the PYTHONPATH environment variable includes the folder where web_interface lives. I'm guessing that your cloud_copasi_daemon.py is in a different folder than web_interface, so when you run python cloud_copasi_daemon.py it only looks in the folder from which you invoked the script and can't find it.
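For example, a minimal sketch of that fix applied inside the daemon script itself (the project root path and the settings module name here are guesses based on the traceback, not confirmed by the question):
import os
import sys
import django

# Hypothetical layout: the project root contains both web_interface/ and cloud_copasi/
PROJECT_ROOT = "/Users/cloudcopasi/cloud-copasi"

sys.path.insert(0, PROJECT_ROOT)  # same effect as: export PYTHONPATH=/Users/cloudcopasi/cloud-copasi
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloud_copasi.settings")

django.setup()  # apps listed in INSTALLED_APPS (including web_interface) can now be imported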

Related

How to split my subcommands in several files in Python 3.6 and Click 7.1.2

I would like to implement each subcommand in a different file for better clarity.
Right now I have only one, but the idea is to add more over time.
I tried two approaches, and both ended in failure...
Basically I try to have this result:
$ test_cli
Usage: test_cli [OPTIONS] COMMAND [ARGS]...

  Cli command

Options:
  --help  Show this message and exit.

Commands:
  test  Test command.
$ test_cli test hello
Hello !
Here are files
$ tree
.
├── cli.py
├── setup.py
└── test.py
I'm using virtualenv and I use the following command to test my application:
$ pip install --editable .
The code of setup.py is the same for both attempts:
from setuptools import setup

setup(
    name = 'test_cli',
    version = '1.0',
    py_modules = [ 'cli', 'test' ],
    install_requires = [
        'Click',
    ],
    entry_points = '''
        [console_scripts]
        test_cli=cli:cli
    ''',
)
Try 1 - FAILURE
The code is based on this link, but it did not work for me...
Here is the code of each file:
cli.py
import click
import test

@click.group()
def cli():
    ''' Cli command '''
    pass

cli.add_command(test)
test.py
import click

@click.group()
def test():
    ''' Test command. '''
    pass

@test.command()
def hello():
    click.echo('Hello !')
Here is the error I have :
Traceback (most recent call last):
File "/tmp/myenv/bin/test_cli", line 33, in <module>
sys.exit(load_entry_point('test-cli', 'console_scripts', 'test_cli')())
File "/tmp/myenv/bin/test_cli", line 25, in importlib_load_entry_point
return next(matches).load()
File "/tmp/myenv/lib/python3.6/site-packages/importlib_metadata/__init__.py", line 105, in load
module = import_module(match.group('module'))
File "/usr/lib64/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/tmp/toto/cli.py", line 9, in <module>
cli.add_command(test)
File "/tmp/myenv/lib/python3.6/site-packages/click/core.py", line 1347, in add_command
name = name or cmd.name
AttributeError: module 'test' has no attribute 'name'
Try 2 - FAILED
This time I found the code here. I don't get any error, but I cannot execute any subcommand :-/
cli.py
import click
import test

@click.group()
def cli():
    ''' Cli command '''
    pass
test.py
import click
import cli

@cli.group()
def test():
    ''' Test command. '''
    pass

@test.command()
def hello():
    click.echo('Hello !')
When I try to execute the subcommand I get this issue:
$ test_cli test hello
Usage: test_cli [OPTIONS] COMMAND [ARGS]...
Try 'test_cli --help' for help.
Error: No such command 'test'.
Any idea what the issue is?
Thank you.
The easiest way to do this is to implement a separate .py file for your CLI and import the functions into it. I've implemented this in my own program with:
console.py
import click
# import the real implementations under different names so the CLI wrappers
# below don't shadow them (calling store() inside store() would recurse)
from .main import store as store_impl, retrieve as retrieve_impl

@click.group()
def cli():
    pass

@cli.command(no_args_is_help=True)
# (series of @click.argument decorators)
def store(application, key, expiration, userdata):
    # sends the arguments directly to the "store" function in main.py
    store_impl(application, key, expiration, userdata)

# (other commands)

if __name__ == '__main__':
    cli()
and in main.py
def store(app, key, expiration, userdata):
    """Program for a user to submit a user-generated API key to their database."""
    # The actual function doing its work, in a separate file
This method separates your CLI setup from the actual code while allowing full access to all of Click's features, including groups, subcommands, etc. It also allows you to call commands from many different modules, or even call external commands within your CLI (for example, if you want a CLI that conglomerates multiple functions from various packages, setting aside the discussion over whether that is appropriate behavior or not).
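For reference, the entry point for such a console.py could be declared in setup.py along these lines (a sketch only; the package name "mytool" is hypothetical, and the relative import in console.py assumes the modules live inside a package):
# setup.py -- sketch with a hypothetical "mytool" package containing console.py and main.py
from setuptools import setup

setup(
    name='mytool',
    version='1.0',
    packages=['mytool'],
    install_requires=['Click'],
    entry_points='''
        [console_scripts]
        mytool=mytool.console:cli
    ''',
)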
In your first example you need to import the test group into your module.
cli.py
import click
from test import test

@click.group()
def cli():
    ''' Cli command '''
    pass

cli.add_command(test)

if __name__ == '__main__':
    cli()
test.py
import click

@click.group()
def test():
    ''' Test command. '''
    pass

@test.command()
def hello():
    click.echo('Hello !')

MLflow: active run ID does not match environment run ID

OS: Ubuntu 18
Python: Python 3.6
MLflow: 1.4
I'm trying to get MLflow Projects to run. Here is my project:
MLflow
conda.yaml
main.py
prep_data.py
learn.py
The project is heavily based upon this repo: https://github.com/mlflow/mlflow/tree/master/examples/multistep_workflow
I'm trying to run both the prep_data and learn scripts using MLflow Projects and the main.py script as an entry point.
For execution I use the following command: mlflow run . -P experiment_name=testproject
But I get the following Error:
Traceback (most recent call last):
File "prep_data.py", line 126, in <module>
prep_data()
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "prep_data.py", line 65, in prep_data
with mlflow.start_run() as active_run:
File "/home/ubuntu/venv/lib/python3.6/site-packages/mlflow/tracking/fluent.py", line 129, in start_run
"arguments".format(existing_run_id))
mlflow.exceptions.MlflowException: Cannot start run with ID 405b83bbb61046afa83b8dcd71b4db14 because active run ID does not match environment run ID. Make sure --experiment-name or --experiment-id matches experiment set with set_experiment(), or just use command-line arguments
Traceback (most recent call last):
File "main.py", line 75, in <module>
workflow()
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/venv/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "main.py", line 61, in workflow
}, experiment_name)
File "main.py", line 40, in _get_or_run
submitted_run = mlflow.run('.', entry_point=entry_point, parameters=params)
File "/home/ubuntu/venv/lib/python3.6/site-packages/mlflow/projects/__init__.py", line 287, in run
_wait_for(submitted_run_obj)
File "/home/ubuntu/venv/lib/python3.6/site-packages/mlflow/projects/__init__.py", line 304, in _wait_for
raise ExecutionException("Run (ID '%s') failed" % run_id)
mlflow.exceptions.ExecutionException: Run (ID '405b83bbb61046afa83b8dcd71b4db14') failed
2019/11/22 18:51:59 ERROR mlflow.cli: === Run (ID '62c229b2d9194b569a7b2bfc14338800') failed ===
I'm not sure if I understand the error correctly, but it seems to be saying that I am using multiple experiments. However, I'm fairly certain I am only using one (testproject).
Browsing SO and GitHub issues suggested I should set the environment variable MLFLOW_TRACKING_URI, but it wasn't stated how to set it. Thus I tried two different ways:
1) exporting it before running the MLflow project: $ export MLFLOW_TRACKING_URI='http://127.0.0.1:5099'
2) setting it at the beginning of my main.py script using python: os.environ['MLFLOW_TRACKING_URI'] = 'http://127.0.0.1:5099'
Neither had any effect.
Here you can see my project:
main.py
import os
import click
import mlflow
from mlflow.entities import RunStatus

def _already_ran(entry_point, params, experiment_name):
    # experiment = mlflow.get_experiment_by_name('{}_{}'.format(experiment_name, entry_point))
    experiment = mlflow.get_experiment_by_name(experiment_name)
    if experiment == None:
        return None
    experiment_id = experiment.experiment_id
    client = mlflow.tracking.MlflowClient()
    all_run_infos = reversed(client.list_run_infos(experiment_id))
    match_failed = False
    for run_info in all_run_infos:
        full_run = client.get_run(run_info.run_id)
        for p_key, p_val in params:
            run_value = full_run.data.params.get(p_key)
            if run_value != p_val:
                match_failed = True
                break
        if match_failed:
            continue
        if run_info.to_proto().status != RunStatus.FINISHED:
            continue
        return client.get_run(run_info.run_id)
    return None

def _get_or_run(entry_point, params, experiment_name, use_cache=True):
    existing_run = _already_ran(entry_point, params, experiment_name)
    if use_cache and existing_run:
        return existing_run
    submitted_run = mlflow.run('.', entry_point=entry_point, parameters=params)
    return mlflow.tracking.MlflowClient().get_run(submitted_run.run_id)

@click.command()
@click.option("--experiment-name")
@click.option('--prep-data-time-avg', default='placeholder')
@click.option('--prep-data-sensor-id', default='placeholder')
@click.option('--learn-epochs', default=100, type=int)
@click.option('--learn-neurons', default=5, type=int)
@click.option('--learn-layers', default=2, type=int)
def workflow(experiment_name, prep_data_time_avg, prep_data_sensor_id, learn_epochs, learn_neurons, learn_layers):
    # mlflow.set_tracking_uri('http://127.0.0.1:5099')
    # mlflow.set_experiment(experiment_name)
    # with mlflow.start_run() as active_run:
    data_run = _get_or_run('prep_data', {
        'time_avg': prep_data_time_avg,
        'sensor_id': prep_data_sensor_id,
        'experiment_name': experiment_name
    }, experiment_name)
    learn_run = _get_or_run('learn', {
        'epochs': learn_epochs,
        'neurons': learn_neurons,
        'layers': learn_layers,
        'prep_data_run_id': data_run.run_id,
        'experiment_name': experiment_name,
    }, experiment_name)

if __name__ == '__main__':
    # os.environ['MLFLOW_TRACKING_URI'] = 'http://127.0.0.1:5099'
    workflow()
prep_data.py
import click
import mlflow

@click.command()
@click.option("--experiment-name")
@click.option('--time-avg', default='placeholder')
@click.option('--sensor-id', default='placeholder')
def prep_data(experiment_name, time_avg, sensor_id):
    mlflow.set_experiment(experiment_name)
    with mlflow.start_run() as active_run:
        # logic code of prep_data
        ...

if __name__ == '__main__':
    prep_data()
I'm happy to hear any ideas on how to fix this issue.
Thank you very much!
Cheers,
Raphael
You need to provide the same experiment name to the mlflow CLI:
mlflow run . -P experiment_name=testproject --experiment-name testproject
For more details:
https://www.mlflow.org/docs/latest/cli.html#mlflow-run
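If you prefer to keep everything inside main.py rather than the CLI, a sketch of the same idea (assuming your MLflow release's mlflow.run accepts an experiment_name argument, which recent 1.x versions do) is to pass the experiment name down to each nested run so parent and children land in the same experiment:
import mlflow

def _get_or_run(entry_point, params, experiment_name, use_cache=True):
    existing_run = _already_ran(entry_point, params, experiment_name)
    if use_cache and existing_run:
        return existing_run
    # forward the experiment name so the child run is created in the
    # same experiment as the parent
    submitted_run = mlflow.run('.', entry_point=entry_point,
                               parameters=params,
                               experiment_name=experiment_name)
    return mlflow.tracking.MlflowClient().get_run(submitted_run.run_id)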

python nose --with-doctest adding extra test

I need to defer the import of some libraries unless they are needed, so I have created an object to defer the import.
import importlib

class Importer(object):
    def __init__(self, name):
        self.name = name

    def __bool__(self):
        try:
            mod = importlib.import_module(self.name)
            return True
        except ImportError:
            return False

    def __getattr__(self, name):
        try:
            mod = importlib.import_module(self.name)
            attr = getattr(mod, name)
            setattr(self, name, attr)
            return attr
        except ImportError as e:
            msg = self.name + " must be installed"
            raise ImportError(msg)
I have run nose with my tests and everything passes. However, when I run --with-doctest it runs an extra test (that I have not defined) that generates an error in any environment where a given library isn't installed.
Testing the code below recreates the issue I am experiencing. In an environment without numpy, nosetests --with-doctest is running a second test which I have not defined.
numpy = Importer('numpy')

def mean(array):
    """
    Mean of an array.

    Examples
    --------
    >>> mean([1, 2, 3])
    2.0
    >>> mean([1, -1])
    0.0
    """
    if numpy:
        return numpy.mean(array)
    return float(sum(array) / len(array))
Clearly, there is only one doctest available to test. Also, if numpy is installed, only one test is run. So why, when numpy is not installed, am I getting a second test? Here is the output.
.E
======================================================================
ERROR: Failure: ImportError (numpy must be installed)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/devin/Dropbox/Devin_S_+_Spark_Wave/nimble/zissue/issue.py", line 16, in __getattr__
mod = importlib.import_module(self.name)
File "/Users/devin/anaconda3/envs/issueEnv/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 953, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'numpy'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/devin/anaconda3/envs/issueEnv/lib/python3.6/site-packages/nose/failure.py", line 39, in runTest
raise self.exc_val.with_traceback(self.tb)
File "/Users/devin/anaconda3/envs/issueEnv/lib/python3.6/site-packages/nose/plugins/manager.py", line 154, in generate
for r in result:
File "/Users/devin/anaconda3/envs/issueEnv/lib/python3.6/site-packages/nose/plugins/doctests.py", line 228, in loadTestsFromModule
tests = self.finder.find(module)
File "/Users/devin/anaconda3/envs/issueEnv/lib/python3.6/doctest.py", line 933, in find
self._find(tests, obj, name, module, source_lines, globs, {})
File "/Users/devin/anaconda3/envs/issueEnv/lib/python3.6/doctest.py", line 992, in _find
if ((inspect.isroutine(inspect.unwrap(val))
File "/Users/devin/anaconda3/envs/issueEnv/lib/python3.6/inspect.py", line 512, in unwrap
while _is_wrapper(func):
File "/Users/devin/anaconda3/envs/issueEnv/lib/python3.6/inspect.py", line 503, in _is_wrapper
return hasattr(f, '__wrapped__')
File "/Users/devin/Dropbox/testing/issue/issue.py", line 22, in __getattr__
raise ImportError(msg)
ImportError: numpy must be installed
----------------------------------------------------------------------
Ran 2 tests in 0.003s
Well, this is fun!
The core difficulty is that the doctest module "tricked" you into attempting a numpy import. That is, it probed for numpy.__wrapped__, and you took the bait. You need to distinguish between a probe call, for which returning a dummy value is acceptable, and a real call, for which you must import or raise.
Perhaps a heuristic of testing for __wrapped__ suffices. But you may find it necessary to inspect the call stack to find the caller, notice that it is e.g. autodoc, and change behavior based on that.
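For instance, a minimal sketch of the __wrapped__ heuristic applied to the Importer class above (one possible approach, not the only one): treat dunder lookups as introspection probes and raise AttributeError instead of attempting the import, so that hasattr(f, '__wrapped__') simply returns False.
import importlib

class Importer(object):
    def __init__(self, name):
        self.name = name

    # __bool__ from the original class is unchanged and omitted here

    def __getattr__(self, name):
        # Introspection probes (doctest/inspect checking for __wrapped__,
        # __name__, etc.) should look like an ordinary missing attribute,
        # not a failed import.
        if name.startswith('__') and name.endswith('__'):
            raise AttributeError(name)
        try:
            mod = importlib.import_module(self.name)
            attr = getattr(mod, name)
            setattr(self, name, attr)
            return attr
        except ImportError:
            raise ImportError(self.name + " must be installed")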

Celery configuration in Django, connecting tasks to the view

I've recently configured Celery to run some dummy tasks and ran the workers through Terminal on my Mac. It all seems to run accordingly; it took a while, since some of the literature out there advises different configuration scenarios, but I got there anyway. Now the next step is to trigger the tasks via my view in Django. I'm using celery 1.2.26.post2.
My project structure:
/MyApp
celery_tasks.py
celeryconfig.py
__init__.py
I've been following several tutorials and found this video, this video, and this video very helpful for obtaining an overall view of Celery.
My scripts are:
celery_tasks.py
from celery import Celery
from celery.task import task

app = Celery()  # Initialise the app
app.config_from_object('celeryconfig')  # Tell Celery instance to use celeryconfig module

suf = lambda n: "%d%s" % (n, {1: "st", 2: "nd", 3: "rd"}.get(n if n < 20 else n % 10, "th"))

@task
def fav_doctor():
    """Reads doctor.txt file and prints out fav doctor, then adds a new
    number to the file"""
    with open('doctor.txt', 'r+') as f:
        for line in f:
            nums = line.rstrip().split()
            print ('The {} doctor is my favorite'.format(suf(int(nums[0]))))
            for num in nums[1:]:
                print ('Wait! The {} doctor is my favorite'.format(suf(int(num))))
        last_num = int(nums[-1])
        new_last_num = last_num + 1
        f.write(str(new_last_num) + ' ')

@task
def reverse(string):
    return string[::-1]

@task
def add(x, y):
    return x + y
celeryconfig.py
from datetime import timedelta
## List of modules to import when celery starts.
CELERY_IMPORTS = ('celery_tasks',)
## Message Broker (RabbitMQ) settings.
BROKER_URL = 'amqp://'
BROKER_PORT = 5672
#BROKER_TRANSPORT = 'sqlalchemy'
#BROKER_HOST = 'sqlite:///tasks.db'
#BROKER_VHOST = '/'
#BROKER_USER = 'guest'
#BROKER_PASSWORD = 'guest'
## Result store settings.
CELERY_RESULT_BACKEND = 'rpc://'
#CELERY_RESULT_DBURI = 'sqlite:///mydatabase.db'
## Worker settings
#CELERYD_CONCURRENCY = 1
#CELERYD_TASK_TIME_LIMIT = 20
#CELERYD_LOG_FILE = 'celeryd.log'
#CELERYD_LOG_LEVEL = 'INFO'
## Misc
CELERY_IGNORE_RESULT = False
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT=['json']
CELERY_TIMEZONE = 'Europe/Berlin'
CELERY_ENABLE_UTC = True
CELERYBEAT_SCHEDULE = {
    'doctor-every-10-seconds': {
        'task': 'celery_tasks.fav_doctor',
        'schedule': timedelta(seconds=3),
    },
}
__init__.py
from .celery_tasks import app as celery_app # Ensures app is always imported when Django starts so that shared_task will use this app.
__all__ = ['celery_app']
In settings.py
INSTALLED_APPS = [
    ...
    'djcelery',
]
In my views folder, I have a specific view module, admin_scripts.py
from MyApp.celery_tasks import fav_doctor, reverse, send_email, add

@login_required
def admin_script_dashboard(request):
    if request.method == 'POST':
        form = Admin_Script(request.POST)
        if form.is_valid():
            backup_script_select = form.cleaned_data['backup_script_select']
            dummy_script_select = form.cleaned_data['dummy_script_select']
            print ("backup_script_select: {0}".format(backup_script_select))
            print ("dummy_script_select: {0}".format(dummy_script_select))
            if backup_script_select:
                print ("Backup script executing. Please wait...")
                dbackup_script_dir = str(Path.home()) + '/Software/MyOtherApp/cli-tools/dbbackup_DRAFT.py'
                subprocess.call(" python {} ".format(dbackup_script_dir), shell=True)
                async_result = reverse.delay('Using Celery')
                print ("async_result: {0}".format(async_result))
                result = reverse.AsyncResult(async_result.id)
                print ("result: {0}".format(result))
                print ("Something occurred...")
            if dummy_script_select:
                print ("Dummy script executing. Please wait...")
                dummy_script_dir = str(Path.home()) + '/Software/MyOtherApp/cli-tools/dummy.py'
                subprocess.call(" python {} ".format(dummy_script_dir), shell=True)
                async_result = add.delay(2, 5)
                print ("async_result: {0}".format(async_result))
                result = add.AsyncResult(async_result.id)
                print ("result: {0}".format(result))
                print ("Something occurred...")
    return render(request, 'MyApp/admin_scripts_db.html')
The problem occurs at the line in my admin_scripts.py file where async_result = add.delay(2, 5) is called. Below is the traceback:
[12/Jul/2018 09:23:19] ERROR [django.request:135] Internal Server Error: /MyProject/adminscripts/
Traceback (most recent call last):
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/local.py", line 309, in _get_current_object
return object.__getattribute__(self, '__thing')
AttributeError: 'PromiseProxy' object has no attribute '__thing'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/kombu/utils/__init__.py", line 323, in __get__
return obj.__dict__[self.__name__]
KeyError: 'conf'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 158, in _smart_import
return imp(path)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 112, in import_from_cwd
package=package,
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/utils/imports.py", line 101, in import_from_cwd
return imp(module, package=package)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 106, in import_module
return importlib.import_module(module, package=package)
File "/Users/MyMBP/anaconda3/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 978, in _gcd_import
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 948, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'celeryconfig'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/django/contrib/auth/decorators.py", line 23, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/Users/MyMBP/Software/MyProject/MyProjectsite/MyProject/views/admin_scripts.py", line 44, in admin_script_dashboard
async_result = add.delay(2, 5)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/local.py", line 143, in __getattr__
return getattr(self._get_current_object(), name)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/local.py", line 311, in _get_current_object
return self.__evaluate__()
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/local.py", line 341, in __evaluate__
thing = Proxy._get_current_object(self)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/local.py", line 101, in _get_current_object
return loc(*self.__args, **self.__kwargs)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/app/base.py", line 270, in _task_from_fun
'__wrapped__': fun}, **options))()
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/app/task.py", line 201, in __new__
instance.bind(app)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/app/task.py", line 365, in bind
conf = app.conf
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/kombu/utils/__init__.py", line 325, in __get__
value = obj.__dict__[self.__name__] = self.__get(obj)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/app/base.py", line 638, in conf
return self._get_config()
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/app/base.py", line 454, in _get_config
self.loader.config_from_object(self._config_source)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 140, in config_from_object
obj = self._smart_import(obj, imp=self.import_from_cwd)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 161, in _smart_import
return symbol_by_name(path, imp=imp)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/kombu/utils/__init__.py", line 96, in symbol_by_name
module = imp(module_name, package=package, **kwargs)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 112, in import_from_cwd
package=package,
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/utils/imports.py", line 101, in import_from_cwd
return imp(module, package=package)
File "/Users/MyMBP/anaconda3/lib/python3.6/site-packages/celery/loaders/base.py", line 106, in import_module
return importlib.import_module(module, package=package)
File "/Users/MyMBP/anaconda3/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 978, in _gcd_import
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 948, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'celeryconfig'
Numerous errors get thrown, and the traceback is very large, about 9000 lines long in total. This is just a snippet. I'm new to Celery and task queueing in general, so perhaps some of the experts out there can pick out some very obvious mistakes in my code.
As I said, the configuration of Celery is successful, and when triggering the tasks in Terminal, the tasks do what they are supposed to do. I'm building this up piece by piece, so the next step is to trigger the tasks from my Django view (instead of calling them from Terminal). Once I have figured that out, the ultimate aim is to track the progress of a task and report the output to the user in a separate window (.js, AJAX, etc.) that shows, for example, the line output that you see in Console.
I read that the tasks.py (in my case celery_tasks.py) file needs to be in a django app that's registered in settings.py. Is this true?
This is not a full answer, but it may partly help others who encounter a similar issue:
Basically, celery_tasks.py contains the following:
app.config_from_object('celeryconfig')
When I trigger the workers through Terminal, this works. When I go through my view, I get the error message above. Changing this line makes it work via the view:
app.config_from_object('MyApp.celeryconfig')
I still need to figure out why there is this discrepancy and how to resolve it, so that it doesn't matter whether the tasks are called via my view or via Terminal.
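One way to make the lookup independent of the working directory (a sketch, assuming celeryconfig.py lives inside the MyApp package as shown in the project structure above) is to import the config module and hand the module object to config_from_object instead of a bare string:
# celery_tasks.py
from celery import Celery
from MyApp import celeryconfig  # assumes celeryconfig.py sits inside the MyApp package

app = Celery()
app.config_from_object(celeryconfig)  # pass the module object, not a string resolved from the current directory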

Python Module attribute error

I know this topic has come up many times, but I am totally stuck and need help. Please tell me what I have done wrong here and how to fix it. Thanks in advance.
# testcase1.py
import unittest
import sys

class Global:
    b = 0

class Util_Case_ID(unittest.TestCase):
    def setUp(self):
        sys.path.insert(0, 'C:/**/views')
        sys.path.insert(0, 'C:/**/app')
        sys.path.insert(0, 'C:/**/tests')
        from app.views.utility import method_a
        Global.b = len(method_a())

    def test1(self):
        self.assertEqual(Global.b, 11)

    def tearDown(self):
        sys.path.remove('C:/***/app/views')
        sys.path.remove('C:/***/app')
        sys.path.remove('C:/*/tests')

if __name__ == "__main__":
    unittest.main()
AttributeError: module 'UnitTests' has no attribute 'testcase1'
The Traceback is as follows:
Traceback (most recent call last):
File "C:\Users\*****\AppData\Roaming\JetBrains\PyCharm Edu 3.5.1\helpers\pycharm\utrunner.py", line 167, in <module>
all.addTests(testLoader.loadTestsFromTestClass(getattr(module, a[1])),
File "C:\Users\*****\AppData\Roaming\JetBrains\PyCharm Edu 3.5.1\helpers\pycharm\nose_helper\loader.py", line 108, in loadTestsFromTestClass
return self.suiteClass(ContextList(cases, context=cls))
File "C:\Users\*****\AppData\Roaming\JetBrains\PyCharm Edu 3.5.1\helpers\pycharm\nose_helper\suite.py", line 253, in __call__
return self.makeSuite(tests, context, **kw)
File "C:\Users\*****\AppData\Roaming\JetBrains\PyCharm Edu 3.5.1\helpers\pycharm\nose_helper\suite.py", line 291, in makeSuite
for ancestor in self.ancestry(context):
File "C:\Users\******\AppData\Roaming\JetBrains\PyCharm Edu 3.5.1\helpers\pycharm\nose_helper\suite.py", line 269, in ancestry
yield resolve_name('.'.join(ancestors))
File "C:\Users\*****\AppData\Roaming\JetBrains\PyCharm Edu 3.5.1\helpers\pycharm\nose_helper\util.py", line 70, in resolve_name
obj = getattr(obj, part)
AttributeError: module 'UnitTests' has no attribute 'testcase1'
Process finished with exit code 1
