Adding Python logging to FastAPI endpoints hosted on Docker doesn't display API endpoint logs - python-3.x

I have a FastAPI app to which I want to add Python logging. I followed the basic tutorial and added the code below; however, it only produces gunicorn logging, not logging from my API endpoints.
The server is built locally with docker build, run with docker-compose up, and I test the endpoints with an API client (Insomnia, similar to Postman).
Below is the code; no log file is created and no log statements appear.
My project structure is as follows:
project/
    src/
        api/
            models/
                users.py
            routers/
                users.py
        main.py
        logging.conf
"""
main.py Main is the starting point for the app.
"""
import logging
import logging.config
from fastapi import FastAPI
from msgpack_asgi import MessagePackMiddleware
import uvicorn
from api.routers import users
logger = logging.getLogger(__name__)
app = FastAPI(debug=True)
app.include_router(users.router)
#app.get("/check")
async def check():
"""Simple health check endpoint."""
logger.info("logging from the root logger")
return {"success": True}
Also, I am using a gunicorn.conf (a supervisor program section) that looks like this:
[program:gunicorn]
command=poetry run gunicorn -c /etc/gunicorn/gunicorn.conf.py foodgame_api.main:app
directory=/var/www/
autostart=true
autorestart=true
redirect_stderr=true
And gunicorn.conf.py as
import multiprocessing
bind = "unix:/tmp/gunicorn.sock"
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = "uvicorn.workers.UvicornWorker"
loglevel = "debug"
errorlog = "-"
capture_output = True
chdir = "/var/www"
reload = True
reload_engine = "auto"
accesslog = "-"
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
This is my terminal output on Docker when hitting the above endpoint (screenshot omitted); it shows only gunicorn output and none of my application log statements.
Could anyone please guide me here? I am new to FastAPI, so any help would be appreciated.

Inspired by @JPG's answer (below), but using a pydantic model looked cleaner.
You might want to expose more variables. This config worked well for me.
from pydantic import BaseModel


class LogConfig(BaseModel):
    """Logging configuration to be set for the server"""

    LOGGER_NAME: str = "mycoolapp"
    LOG_FORMAT: str = "%(levelprefix)s | %(asctime)s | %(message)s"
    LOG_LEVEL: str = "DEBUG"

    # Logging config
    version: int = 1
    disable_existing_loggers: bool = False
    formatters: dict = {
        "default": {
            "()": "uvicorn.logging.DefaultFormatter",
            "fmt": LOG_FORMAT,
            "datefmt": "%Y-%m-%d %H:%M:%S",
        },
    }
    handlers: dict = {
        "default": {
            "formatter": "default",
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stderr",
        },
    }
    loggers: dict = {
        LOGGER_NAME: {"handlers": ["default"], "level": LOG_LEVEL},
    }
Then import it into your main.py file as:
from logging.config import dictConfig
import logging
from .config import LogConfig
dictConfig(LogConfig().dict())
logger = logging.getLogger("mycoolapp")
logger.info("Dummy Info")
logger.error("Dummy Error")
logger.debug("Dummy Debug")
logger.warning("Dummy Warning")
Which gives formatted, colored log output in the terminal (screenshot omitted).
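Note: if you are on pydantic v2, the .dict() method is deprecated in favour of .model_dump(). A minimal sketch of the equivalent call, assuming the same LogConfig model as above:

from logging.config import dictConfig

# pydantic v2: model_dump() replaces the deprecated dict()
dictConfig(LogConfig().model_dump())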

I would use a dict log config.
Create a logger config as below:
# my_log_conf.py

log_config = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "default": {
            "()": "uvicorn.logging.DefaultFormatter",
            "fmt": "%(levelprefix)s %(asctime)s %(message)s",
            "datefmt": "%Y-%m-%d %H:%M:%S",
        },
    },
    "handlers": {
        "default": {
            "formatter": "default",
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stderr",
        },
    },
    "loggers": {
        "foo-logger": {"handlers": ["default"], "level": "DEBUG"},
    },
}
Then, load the config using the dictConfig function as:
from logging.config import dictConfig
from fastapi import FastAPI
from some.where.my_log_conf import log_config
dictConfig(log_config)
app = FastAPI(debug=True)
Note: It is recommended to call the dictConfig(...) function before the FastAPI initialization.
After the initialization, you can use the logger named foo-logger anywhere in your code as:
import logging
logger = logging.getLogger('foo-logger')
logger.debug('This is test')
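For instance, a minimal sketch (the route and the message are illustrative) of using that logger inside an endpoint, so the line is emitted when the endpoint is hit:

import logging

from fastapi import FastAPI

logger = logging.getLogger('foo-logger')
app = FastAPI(debug=True)


@app.get("/check")
async def check():
    # emitted through the "default" StreamHandler configured in log_config
    logger.debug("check endpoint was called")
    return {"success": True}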

Related

Python - Request and Response logging using HTTPS Connection - Swagger Codegen - OpenAPI

I have a Python FastAPI application which I run through Uvicorn.
I have a dependent service, and I generate a Python client using the swagger-codegen provided with the openapi.json of the dependent service.
My logger dictConfig is:
def get_logging_config():
    return {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                '()': ColoredFormatter
            },
        },
        'handlers': {
            'console': {
                'level': logging.getLevelName(logging.DEBUG),
                'formatter': 'standard',
                'class': 'logging.StreamHandler',
                'stream': 'ext://sys.stdout'
            }
        },
        'loggers': {
            '': {  # root logger
                'handlers': ['console'],
                'level': logging.getLevelName(get_logging_level()),
                'propagate': False
            },
            'uvicorn': {  # uvicorn logger
                'handlers': ['console'],
                'level': logging.getLevelName(logging.WARN),
                'propagate': False
            },
            'urllib3': {  # urllib3 logger
                'handlers': ['console'],
                'level': logging.getLevelName(logging.DEBUG),
                'propagate': False
            },
            'urllib3.connectionpool': {  # urllib3 logger for HTTP calls
                'handlers': ['console'],
                'level': logging.getLevelName(logging.DEBUG),
                'propagate': False
            },
            'requests.packages.urllib3': {
                'handlers': ['console'],
                'level': logging.getLevelName(logging.DEBUG),
                'propagate': True
            }
        }
    }
My logging module __init__.py contains:
import logging
import logging.config
from .http_client_logger import httpclient_logging_patch
from .logging_config import get_logging_config
httpclient_logging_patch()
logging.config.dictConfig(get_logging_config())
My http_client_logger.py file looks like:
import logging
import http.client

httpclient_logger = logging.getLogger("http.client")


def httpclient_logging_patch(level=logging.DEBUG):
    """Enable HTTPConnection debug logging to the logging framework"""

    def httpclient_log(*args):
        httpclient_logger.log(level, "test-" + "".join(args))

    # mask the print() built-in in the http.client module to use
    # logging instead
    http.client.print = httpclient_log

    # enable debugging
    http.client.HTTPConnection.debuglevel = 1
    http.client.HTTPSConnection.debuglevel = 1
The logs which I could see are
2023-01-14 20:18:02.384 - DEBUG - https://test-dependent-service-url.com:443 "POST /test-endpoint HTTP/1.1" 200 None (urllib3.connectionpool:_make_request:456)
2023-01-14 20:18:02.385 - DEBUG - response body: b'{"data":[],"id":"93d0f81a-3d4e-4ae6-9c47-07adde47400a","timestamp":"2023-01-14T19:18:02.489899239Z"}' (dependent_service.client.swagger_client.rest:request:219)
I would like to log the requests sent (POST, GET, PUT, DELETE) to the dependent service and the responses received from it, including the request and response bodies. Note that I am using an HTTPS connection. But in the logs, the request body is not being logged, and in none of the cases are the headers being logged.
Could anyone please help here?
I have tried the following:
Log all requests from the python-requests module
I have also tried setting the debuglevel to 2, but nothing helped:
http.client.HTTPConnection.debuglevel = 2
http.client.HTTPSConnection.debuglevel = 2
https://bhoey.com/blog/better-debug-logging-for-the-python-requests-library/ - it talks about a file handler, but it is otherwise the same as the previous approach, and I am using a StreamHandler.
This works for GET but not for POST.

unable to initialize snowflake data source

I am trying to access a Snowflake data source using the "great_expectations" library.
The following is what I have tried so far:
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import BatchRequest, RuntimeBatchRequest

context = ge.get_context()

datasource_config = {
    "name": "my_snowflake_datasource",
    "class_name": "Datasource",
    "execution_engine": {
        "class_name": "SqlAlchemyExecutionEngine",
        "connection_string": "snowflake://myusername:mypass@myaccount/myDB/myschema?warehouse=mywh&role=myadmin",
    },
    "data_connectors": {
        "default_runtime_data_connector_name": {
            "class_name": "RuntimeDataConnector",
            "batch_identifiers": ["default_identifier_name"],
        },
        "default_inferred_data_connector_name": {
            "class_name": "InferredAssetSqlDataConnector",
            "include_schema_name": True,
        },
    },
}

print(context.test_yaml_config(yaml.dump(datasource_config)))
I initialized great_expectations before executing the above code:
great_expectations init
but I am getting the error below:
great_expectations.exceptions.exceptions.DatasourceInitializationError: Cannot initialize datasource my_snowflake_datasource, error: 'NoneType' object has no attribute 'create_engine'
What am I doing wrong?
Your configuration seems to be ok, corresponding to the example here.
If you look at the traceback you should notice that the error propagates starting at the file great_expectations/execution_engine/sqlalchemy_execution_engine.py in your virtual environment.
The actual line where the error occurs is:
self.engine = sa.create_engine(connection_string, **kwargs)
And if you search for that sa at the top of that file:
try:
    import sqlalchemy as sa

    make_url = import_make_url()
except ImportError:
    sa = None
So sqlalchemy is not installed (sa ends up as None, which is why you get 'NoneType' object has no attribute 'create_engine'), and it is not installed automatically in your environment when you install great_expectations. The thing to do is to install snowflake-sqlalchemy, since you want to use sqlalchemy's Snowflake plugin (an assumption based on your connection_string):
/your/virtualenv/bin/python -m pip install snowflake-sqlalchemy
After that you should no longer get that error; it then looks like test_yaml_config simply waits for the connection to time out.
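As a quick sanity check that the plugin is importable in the same virtualenv (the module names are the real ones; the snippet itself is just illustrative):

# run with the interpreter from the virtualenv used by great_expectations
import sqlalchemy
import snowflake.sqlalchemy  # installed by the snowflake-sqlalchemy package

print(sqlalchemy.__version__)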
What worries me greatly is the documented use of a deprecated API of ruamel.yaml.
The function ruamel.yaml.dump is going to be removed in the near future, and you
should use the .dump() method of a ruamel.yaml.YAML() instance.
You should use the following code instead:
import sys

from ruamel.yaml import YAML
import great_expectations as ge

context = ge.get_context()

datasource_config = {
    "name": "my_snowflake_datasource",
    "class_name": "Datasource",
    "execution_engine": {
        "class_name": "SqlAlchemyExecutionEngine",
        "connection_string": "snowflake://myusername:mypass@myaccount/myDB/myschema?warehouse=mywh&role=myadmin",
    },
    "data_connectors": {
        "default_runtime_data_connector_name": {
            "class_name": "RuntimeDataConnector",
            "batch_identifiers": ["default_identifier_name"],
        },
        "default_inferred_data_connector_name": {
            "class_name": "InferredAssetSqlDataConnector",
            "include_schema_name": True,
        },
    },
}

yaml = YAML()
yaml.dump(datasource_config, sys.stdout, transform=context.test_yaml_config)
I'll make a PR for great_expectations to update their documentation/use of ruamel.yaml.

Azure Functions Logging - Log info at module level

I'm trying to log info to Application Insights at the module level of the code, as opposed to the function level.
I can successfully log INFO, WARNING etc. when the logger is called from within a function (in any module of my project), but not when it is called outside a function (e.g. when initializing a module, where I want to log some settings).
For example, when running my HttpTrigger app in Azure Functions, this works and logs info to App Insights:
import logging

def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('this message is logged successfully')
    do_something()
while this does not work:
import logging

logging.info('this message isnt logged anywhere')

def main(req: func.HttpRequest) -> func.HttpResponse:
    do_something()
I've tried using named loggers and changing logging settings, e.g.:
import logging

log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.info('This still isnt logged')

def main(req: func.HttpRequest) -> func.HttpResponse:
    do_something()
and changing config in host.json:
{
    "version": "2.0",
    "extensionBundle": {
        "id": "Microsoft.Azure.Functions.ExtensionBundle",
        "version": "[1.*, 2.0.0)"
    },
    "logging": {
        "fileLoggingMode": "always",
        "logLevel": {
            "default": "Information",
            "Host.Results": "Information",
            "Function": "Information",
            "Host.Aggregator": "Information"
        }
    }
}
When running the code locally, info is printed to stdout correctly everywhere.
I assume I must be misunderstanding exactly how logging works in Azure. If anyone can fill in my gaps here as to why the logging isn't working as I expect, that would be appreciated!
This is the cause: application logging is not enabled by default on Azure.
This is the solution:
Go to Platform features -> All Settings.
Search for App Service logs -> Application logging -> set the retention time for saved logs -> save the edit.
Then everything will be OK.
This is my code:
import logging

import azure.functions as func

logger = logging.getLogger('name')
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
logger.addHandler(sh)

logger.info('This will work.')


def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')

    name = req.params.get('name')
    if not name:
        try:
            req_body = req.get_json()
        except ValueError:
            pass
        else:
            name = req_body.get('name')

    if name:
        return func.HttpResponse(f"Hello {name}!")
    else:
        return func.HttpResponse(
            "Please pass a name on the query string or in the request body",
            status_code=400
        )
Before changing the setting, I faced the same problem as you. After changing it, I can see the INFO entries (screenshot omitted).
This is the official doc:
https://learn.microsoft.com/en-us/azure/app-service/troubleshoot-diagnostic-logs#enable-application-logging-linuxcontainer

Logging from multiple modules in package not working from __main__

I am trying to implement logging in my project and want all modules to log to the same file. I managed to get this to work if I set up the logger in __init__.py, but not if I set it up in __main__.py. When I set it up in __main__.py it only logs the statements from __main__ and not those from other modules.
I want to set it up in __main__.py so that I can place the logger configuration (the dictConfig dictionary) in a config.py file. When I do this from __init__.py something goes wrong with the import statements.
Here is what I have in __main__.py:
import logging
from logging.config import dictConfig

import config as cfg

if __name__ == '__main__':
    dictConfig(cfg.logging)
    logger = logging.getLogger()
    logger.info('Completed configuring logger()!')
    main()
In config.py:
logging = dict(
    version = 1,
    formatters = {
        'f': {'format':
              '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'}
    },
    handlers = {
        'h': {'class': 'logging.StreamHandler',
              'formatter': 'f',
              'level': logging.INFO},
        'r': {'class': 'logging.handlers.RotatingFileHandler',
              'filename': 'data/logger.log',
              'formatter': 'f',
              'maxBytes': 10000,
              'backupCount': 5},
    },
    root = {
        'handlers': ['h', 'r'],
        'level': logging.INFO,
    },
)
In backend.py:
import logging

logger = logging.getLogger(__name__)


class Backend(object):
    def __init__(self, dbi):
        self._dbi = dbi

    def getDimensionTableColumns(self, table_name):
        logger.warning('still not working')
The output in my logger.log file and terminal is:
2018-03-07 09:48:00,858 root INFO Completed configuring logger()!
And I know that getDimensionTableColumns is running, because if I put a print statement in it, the output appears in the terminal.
Could someone please explain what is going wrong and why?
You are using two different loggers: the root logger (which is configured) in your __main__ module (the one you get with logger = logging.getLogger()), and a logger called __name__ == 'backend' (which is not configured).
You can either use the default (root) logger in backend.py using
logger = logging.getLogger()  # no __name__ !
or you can configure a named logger and use that in both modules.
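A minimal sketch of both options (the shared logger name 'myapp' is just illustrative):

# Option 1: backend.py uses the already-configured root logger
import logging

logger = logging.getLogger()        # no name -> root logger configured in __main__
logger.warning('now reaches the h and r handlers')

# Option 2: add a named logger to the dict in config.py, e.g.
#     loggers = {'myapp': {'handlers': ['h', 'r'], 'level': logging.INFO}},
# and then use logging.getLogger('myapp') in __main__ and backend.py alike.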

Python ValueError while attempting a dictionary based logging configuration

I'm trying to code a dictionary based logging configuration and have been stumped by a ValueError that occurs when I run the program. I've stripped it down to the essentials and the problem remains. I've read the 3.5 docs, logging HOWTO, Logging Cookbook, etc. but unfortunately, the solution has not presented itself. Any help would be appreciated.
Also, I'm only 3 weeks into python so I may just be out of my depth at this point. Here's the code...
import logging.config

log_config = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose_formatter': {
            'format': '%(levelname)s: %(name)s: %(asctime)s.%(msecs).03d : '
                      '%(message)s: %(process)s: %(processName)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
        'precise_formatter': {
            'format': '%(levelname)s: %(name)s: %(asctime)s.%(msecs).03d : '
                      '%(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
        'brief_formatter': {
            'format': '%(levelname)s: %(message)s'
        }
    },
    'handlers': {
        'con_handler': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': 'precise_formatter',
            'stream': 'ext://sys.stdout'
        },
        'file_handler': {
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logger.log',
            'maxBytes': 1048576,
            'backupCount': 4,
            'level': 'DEBUG',
            'formatter': 'precise_formatter',
            'encoding': 'utf8'
        }
    },
    'loggers': {
        'level': 'DEBUG',
        'handlers': ['con_handler', 'file_handler']
    }
}

logging.config.dictConfig(log_config)
logger = logging.getLogger(__name__)
logger.critical('This should always be seen!')
When run, I receive the following:
ValueError was unhandled by user code
Message: Unable to configure logger 'handlers': 'ConvertingList' object has no attribute 'get'
or sometimes this...
ValueError was unhandled by user code
Message: Unable to configure logger 'level': 'str' object has no attribute 'get'
I suspect that the different errors may have to do with the sometimes changing order of the dictionary?
Change the loggers section to
'loggers': {
    '': {
        'level': 'DEBUG',
        'handlers': ['con_handler', 'file_handler']
    }
}
The '' (empty string) refers to the root logger. You can add more loggers for different components:
'loggers': {
    '': {
        'level': 'DEBUG',
        'handlers': ['con_handler', 'file_handler']
    },
    'bottle': {  # I only want error level from bottle :)
        'level': 'ERROR',
        'handlers': ['con_handler', 'file_handler']
    }
}
To configure the root logger, use a root key in your log_config dictionary.
root - this will be the configuration for the root logger.
Source: Dictionary Schema Details
Following this description your config should look something like this:
log_config = {
    ...
    'handlers': {
        'con_handler': ...,
        'file_handler': ...
    },
    'loggers': {
        'other_logger': ...
    },
    'root': {
        'level': 'DEBUG',
        'handlers': ['con_handler', 'file_handler']
    }
}
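Putting it together, a trimmed-down but runnable sketch of the corrected configuration (only one formatter and one handler kept, names taken from the question):

import logging.config

log_config = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'precise_formatter': {
            'format': '%(levelname)s: %(name)s: %(asctime)s : %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        }
    },
    'handlers': {
        'con_handler': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': 'precise_formatter',
            'stream': 'ext://sys.stdout'
        }
    },
    'root': {  # root logger config sits at the top level, not under 'loggers'
        'level': 'DEBUG',
        'handlers': ['con_handler']
    }
}

logging.config.dictConfig(log_config)
logging.getLogger(__name__).critical('This should always be seen!')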
