from fastapi import FastAPI
from fastapi.logger import logger
import logging

# fastapi.logger is a plain std-library logger with no handler attached.
# Without a configured root handler, Python's "last resort" handler drops
# everything below WARNING — which is why only error/warning showed up.
# basicConfig attaches a stream handler so INFO/DEBUG records are emitted.
logging.basicConfig(level=logging.DEBUG)

app = FastAPI()


@app.get("/")
async def pong():
    """Demo endpoint that emits one record at each standard log level.

    Returns:
        dict: a static ``{"ping": "pong"}`` payload.
    """
    # Lowering the logger level alone is not enough — a handler must also
    # accept the records (see basicConfig above).
    logger.setLevel(logging.DEBUG)
    logger.error("Error log")
    logger.warning("Warning log")
    logger.info("Info log")
    logger.debug("Debug log")
    return {"ping": "pong"}
I'm trying to get some info level logs in my FastAPI app. I have tried starting my app with uvicorn app.main:app --reload --log-level debug and also by setting the level in the code as you can see above. Unfortunately I can only get error and warning logs to display. This might be a uvicorn or logging problem or, more likely, my own misunderstanding. Any help would be greatly appreciated!
Error log
Warning log
You also need logging.basicConfig(level=logging.INFO).
It gives very weird results when running under uvicorn but to fix that you will need to go deep into uvicorn logging config.
If it becomes very annoying, I would advise using gunicorn with the uvicorn worker.
That's very useful. Thank you for that!
from loguru import logger

# ------------------------- Log configuration -------------------------
# One rotating, compressed file sink per minimum level. Note that a sink's
# `level` is a floor: e.g. an ERROR record is written to all four files.
# enqueue=True routes writes through a queue, making the sinks safe to use
# from multiple processes/threads.
logger.add("logs/info/runINFO_{time}.log", rotation="100 MB", level="INFO", enqueue=True, compression="zip", retention='2 days')
logger.add("logs/debug/runDEBUG_{time}.log", rotation="100 MB", level="DEBUG", enqueue=True, compression="zip", retention='2 days')
logger.add("logs/warn/runWARN_{time}.log", rotation="100 MB", level="WARNING", enqueue=True, compression="zip", retention='2 days')
logger.add("logs/error/runERROR_{time}.log", rotation="100 MB", level="ERROR", enqueue=True, compression="zip", retention='2 days')
# Maps internal application status codes to human-readable messages.
# Fixed: the 100500 entry previously ended with a curly quote (‘) instead of
# a straight quote and was missing its trailing comma — a SyntaxError.
status_code_dict = {
    100200: 'success',
    100400: 'The syntax of the client request is wrong, the server cannot understand "input is empty, token is empty, input error"',
    100401: 'Permission denied',
    100402: 'existed',
    100404: 'Not found, does not exist',
    100405: 'Unsuccessful, there are unfinished',
    100408: 'time out',
    100500: 'Operation failed, server error',
    100501: 'server error',
}
# NOTE(review): this snippet assumes `app`, `StarletteHTTPException`
# (from starlette.exceptions) and `JSONResponse` (from fastapi.responses)
# are imported elsewhere — confirm against the full module.
@app.exception_handler(StarletteHTTPException)
async def http_exception_handler(request, exc):
    """Log every HTTP exception at the level(s) its status code maps to,
    then return the code and detail message as a JSON body.

    Arguments:
        request: the incoming request (unused, required by the handler API).
        exc: the raised HTTP exception carrying `status_code` and `detail`.
    """
    code = exc.status_code
    msg = exc.detail
    # Level groups: a code appearing in several groups is logged once per
    # group (e.g. 100500 is written at both DEBUG and ERROR).
    debug_ = [100200, 100400, 100401, 100402, 100404, 100408, 100500]
    info_ = [100200, 100400, 100401, 100402, 100404]
    error_ = [100408, 100500]
    content = 'code:{}-msg{}'.format(code, msg)
    if code in debug_:
        logger.debug(content)
    if code in error_:
        logger.error(content)
    if code in info_:
        logger.info(content)
    return JSONResponse(content={"code": code, 'msg': msg})
Consider this library: you can easily configure the log storage location, size, time limits, compression format, and error traceability; it is asynchronous; and it provides global unified error handling.
可以考虑这个库,可以很轻松的配置日志存储位置,大小,时间限制,压缩格式,错误的溯源,且是异步的,还有这个全局统一错误处理
The comment above suggests using loguru, but the example uses a middleware, which is not mandatory for the use case of intercepting logs.
Here is a working example:
import logging
import sys
from enum import Enum
from pathlib import Path
from typing import Optional
from loguru import logger
from loguru._logger import Logger
from pydantic import BaseSettings
class LoggingLevel(str, Enum):
    """Allowed log levels for the application.

    The ``str`` mixin makes members compare equal to (and serialize as)
    their plain string values, e.g. ``LoggingLevel.INFO == "INFO"``.
    """

    # Plain assignments (no `: str` annotations): annotated enum members
    # are rejected by type checkers, and the annotation adds nothing here.
    CRITICAL = "CRITICAL"
    ERROR = "ERROR"
    WARNING = "WARNING"
    INFO = "INFO"
    DEBUG = "DEBUG"
class LoggingSettings(BaseSettings):
    """Configure your service logging using a LoggingSettings instance.

    All arguments are optional and can also be supplied through environment
    variables prefixed with ``logging_`` (see ``Config`` below).

    Arguments:
        level (str): the minimum log-level to log. (default: "DEBUG")
        format (str): the logformat to use. (default: "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> | <level>{message}</level>")
        filepath (Path): the path where to store the logfiles. (default: None)
        rotation (str): when to rotate the logfile. (default: "1 days")
        retention (str): when to remove logfiles. (default: "1 months")
    """

    # NOTE(review): BaseSettings moved to the separate `pydantic-settings`
    # package in pydantic v2 — confirm which major version the project pins.
    level: LoggingLevel = "DEBUG"
    format: str = (
        "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
        "<level>{level: <8}</level> | "
        "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> | "
        "<level>{message}</level>"
    )
    filepath: Optional[Path] = None
    rotation: str = "1 days"
    retention: str = "1 months"

    class Config:
        # Environment variables are read as e.g. LOGGING_LEVEL, LOGGING_FILEPATH.
        env_prefix = "logging_"
class InterceptHandler(logging.Handler):
    """A std-library ``logging`` handler that forwards every record to loguru.

    Install it on the root logger (and/or existing named loggers) so that all
    ``logging`` output is emitted through loguru's sinks.
    """

    def emit(self, record):
        # Map the std-library level name to the corresponding loguru level if
        # one exists; otherwise fall back to the raw numeric level.
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            level = record.levelno
        # Walk up the stack past logging-internal frames so loguru attributes
        # the message to the original call site rather than this handler.
        frame, depth = logging.currentframe(), 2
        while frame.f_code.co_filename == logging.__file__:
            frame = frame.f_back
            depth += 1
        logger.opt(depth=depth, exception=record.exc_info).log(
            level, record.getMessage()
        )
def setup_logger(
    level: str,
    format: str,
    filepath: Optional[Path] = None,
    rotation: Optional[str] = None,
    retention: Optional[str] = None,
) -> Logger:
    """Define the global logger to be used by your entire service.

    Arguments:
        level: the minimum log-level to log.
        format: the logformat to use.
        filepath: the path where to store the logfiles.
        rotation: when to rotate the logfile.
        retention: when to remove logfiles.

    Returns:
        the logger to be used by the service.

    References:
        - [Loguru: Intercepting logging logs #247](https://github.com/Delgan/loguru/issues/247)
        - [Gunicorn: generic logging options #1572](https://github.com/benoitc/gunicorn/issues/1572#issuecomment-638391953)
    """
    # Remove loguru default logger
    logger.remove()
    # Catch all loggers that already exist BEFORE reconfiguring, so their
    # handlers can be rewired below.
    LOGGERS = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
    # Add stdout logger
    logger.add(
        sys.stdout,
        enqueue=True,
        colorize=True,
        backtrace=True,
        level=level.upper(),
        format=format,
    )
    # Optionally add filepath logger
    if filepath:
        Path(filepath).parent.mkdir(parents=True, exist_ok=True)
        logger.add(
            str(filepath),
            rotation=rotation,
            retention=retention,
            enqueue=True,
            colorize=False,
            backtrace=True,
            level=level.upper(),
            format=format,
        )
    # Overwrite config of the standard-library root logger: everything
    # funnels through InterceptHandler into loguru (level=0 lets loguru
    # sinks do the level filtering).
    logging.basicConfig(handlers=[InterceptHandler()], level=0)
    # Overwrite handlers of all existing loggers from standard library logging
    for _logger in LOGGERS:
        _logger.handlers = [InterceptHandler()]
        _logger.propagate = False
    return logger
def setup_logger_from_settings(settings: Optional[LoggingSettings] = None) -> Logger:
    """Define the global logger to be used by your entire service.

    Arguments:
        settings: the logging settings to apply; parsed from the
            environment when omitted.

    Returns:
        the logger instance.
    """
    # Parse from env when no settings are given
    if not settings:
        settings = LoggingSettings()
    # Return logger even though it's not necessary
    return setup_logger(
        settings.level,
        settings.format,
        settings.filepath,
        settings.rotation,
        settings.retention,
    )
It's quite long, but with this you simply need to:
Call setup_logger_from_settings when your application starts, then do from loguru import logger anywhere in your code and use logger.debug(), logger.info(), logger.warning() regardless of the location of the code. Those logs will be nicely formatted and printed to stdout by default, and optionally to a file as well. All logs produced using the logging library will be intercepted and formatted too.
Here is an example result:

Most helpful comment
You also need
logging.basicConfig(level=logging.INFO). It gives very weird results when running under uvicorn, but to fix that you will need to go deep into the uvicorn logging config.
If it becomes very annoying, I would advise using
gunicorn with the uvicorn worker.