Please help.
I am trying to use SQLAlchemy with dataclasses.
Here is the code:
from dataclasses import dataclass, field, fields
from sqlalchemy import Column
from sqlalchemy import Integer, Boolean, String
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import create_engine
from sqlalchemy import orm
from sqlalchemy.orm import registry
from models import Project, Model, Test

TABLES: dict = {}

# registry and metadata objects referenced in create_table() and __main__ below
mapper_registry = registry()
metadata_obj = MetaData()

@dataclass
class Test:
    identifier: int = field(init=False)
    name: str = None
def create_table(
    sql_mapper_registry: registry, sql_metadata: MetaData, model: type[Test]
) -> None:
    table_name: str = model.__name__.lower() + "_table"
    TABLES[table_name] = Table(table_name, sql_metadata)
    for i in range(len(fields(model))):
        if fields(model)[i].type == type(bool()):
            type_var = Boolean
        elif fields(model)[i].type == type(int()):
            type_var = Integer
        elif fields(model)[i].type == type(str()):
            type_var = String(60)
        else:
            raise TypeError
        TABLES[table_name].append_column(
            Column(
                fields(model)[i].name,
                type_var,
                primary_key=(fields(model)[i].name == "identifier"),
            )
        )
    sql_mapper_registry.map_imperatively(
        model,
        TABLES[table_name],
    )
    return None
if __name__ == '__main__':
    engine = create_engine("mariadb+mariadbconnector://root:Password@127.0.0.1:3306/company")
    metadata_obj.create_all(engine)
    session = orm.sessionmaker(bind=engine)()
    create_table(mapper_registry, metadata_obj, Test)
    test = Test()
    session.add(test)
    session.commit()
But this doesn't work and I don't know why.
I've got this error:
sqlalchemy.exc.OperationalError: (mariadb.OperationalError)
Unknown prepared statement handler (4294967295) given to mysqld_stmt_execute
[SQL: INSERT INTO test_table (name) VALUES (?)]
[parameters: (None,)]
I want to populate the table with the data of the Test instance.
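For reference, the per-field type dispatch in create_table can also be written with a lookup table. This is only a sketch of the same idea (SQL_TYPES and build_table are illustrative names, not from the original code), and it assumes the dataclass annotations are actual types rather than strings:

from dataclasses import fields
from sqlalchemy import Boolean, Column, Integer, String, MetaData, Table
from sqlalchemy.orm import registry

# Map Python annotation types to SQLAlchemy column types.
SQL_TYPES = {bool: Boolean, int: Integer, str: String(60)}

def build_table(mapper_registry: registry, metadata: MetaData, model: type) -> Table:
    """Create a Table for a dataclass and map the class imperatively."""
    table = Table(
        model.__name__.lower() + "_table",
        metadata,
        *(
            Column(f.name, SQL_TYPES[f.type], primary_key=(f.name == "identifier"))
            for f in fields(model)
        ),
    )
    mapper_registry.map_imperatively(model, table)
    return table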
Related
I have some questions about using dataclasses in Python.
I am using dataclasses to create JSON objects.
I share this protocol with C# classes using Newtonsoft.Json's JsonConvert functions.
I'd like to share some of the dataclasses, as in the commented-out style below.
Or show me a clearer way of using dataclasses for creating JSON objects.
import os
import sys
import inspect
import json
from datetime import datetime
from dataclasses import dataclass, field, asdict
from typing import List
from dacite.core import from_dict

currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)

from protocol.ofrobot import RobotRaw
from protocol.ofcrawl import Crawl
from protocol.ofcomm import Comm
from protocol.ofgeneral import General
class Protocol(RobotRaw, Comm, Crawl, General):
    @dataclass
    class Data:  # I'd like to move some dataclass objects for sharing other classes
        request_id: int = -1
        result: str = ""
        message: str = ""

    @dataclass
    class Response:
        key: str = ""
        cmd: str = ""
        # @dataclass
        # class Data:
        #     request_id: int = -1
        #     result: str = ""
        #     message: str = ""
        data: Data = field(default_factory=Data)  # cannot create Data class instance

    @dataclass
    class Action:
        key: str = ""
        cmd: str = ""

        @dataclass
        class Data:
            request_id: int = -1

            @dataclass
            class ActionItem:
                ridx: int = -1
                action: str = ""
                fidx: int = -1
                fry_count: int = -1

            items: List[ActionItem] = field(default_factory=list)

        data: Data = field(default_factory=Data)
    @staticmethod
    def todict(class_object):
        return asdict(class_object)

    @staticmethod
    def tojson(class_object):
        return json.dumps(asdict(class_object))

    @staticmethod
    def toclass(protocol_type, data_dict: dict, type_recovery: bool = False):
        for key in data_dict.keys():
            if str(key).find("datetime") > -1 and type(data_dict[key]) == str:
                data_dict[key] = datetime.strptime(data_dict[key], "%Y-%m-%d %H:%M:%S")
            else:
                if type_recovery:
                    type_dict = Protocol.todict(protocol_type())
                    data_dict[key] = type(type_dict[key])(data_dict[key])
        return from_dict(protocol_type, data_dict)
if __name__ == "__main__":
print("test code")
items = Protocol.Action.Data.ActionItem(0, "grip", 0, 3)
data = Protocol.Action.Data("id123", items)
action = Protocol.Action("key", "action", data)
json_str = Protocol.tojson(action)
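One possible way to share the nested Data dataclass, sketched here on the assumption that lifting it to module level (e.g. a shared protocol module) is acceptable. Class bodies do not act as enclosing scopes for name lookup, which is why field(default_factory=Data) cannot see a Data that only exists as an attribute of Protocol:

from dataclasses import dataclass, field

# Hypothetical shared module, e.g. protocol/shared.py: the nested Data class is
# lifted to module level so any protocol class can reference it.
@dataclass
class Data:
    request_id: int = -1
    result: str = ""
    message: str = ""

@dataclass
class Response:
    key: str = ""
    cmd: str = ""
    data: Data = field(default_factory=Data)  # resolves because Data is module-level

@dataclass
class Action:
    key: str = ""
    cmd: str = ""
    data: Data = field(default_factory=Data)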
Goal:
I want a scalable way to add factories that represent my models so I can manipulate them at run time and have a test DB from test setup to teardown.
Issue:
cls = <class 'tests.factories.analysis.Analysis'>
model_class = <class 'src.api.someModel'>, session = <function dbsession at 0xffffb8e6a5e0>, args = ()
kwargs = {'analysis_id': 365, 'created_by': 619, 'file_extension': 'Stop.', 'original_file_name': 'Share.', ...}

    @classmethod
    def _save(cls, model_class, session, args, kwargs):
        session_persistence = cls._meta.sqlalchemy_session_persistence
        obj = model_class(*args, **kwargs)
>       session.add(obj)
E       AttributeError: 'function' object has no attribute 'add'
What this means to me: my Session object is not behaving the way it should, and I don't know why.
conftest.py (I got this code snippet from https://gist.github.com/kissgyorgy/e2365f25a213de44b9a2?permalink_comment_id=3496042)
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from domain.declarative import Base
from sqlalchemy.orm import sessionmaker

TEST_DATABASE = "postgresql://postgres:postgres@localhost:5432/testDB"

@pytest.fixture(scope="session")
def engine():
    return create_engine(TEST_DATABASE)

@pytest.fixture(scope="session")
def tables(engine):
    Base.metadata.create_all(engine)
    yield
    Base.metadata.drop_all(engine)
@pytest.fixture
def dbsession(engine, tables):
    """Returns an sqlalchemy session, and after the test tears down everything properly."""
    connection = engine.connect()
    # begin the nested transaction
    transaction = connection.begin()
    # use the connection with the already started transaction
    session = Session(bind=connection)
    yield session
    session.close()
    # roll back the broader transaction
    transaction.rollback()
    # release connection
    connection.close()
Base_factory.py
import factory.alchemy
from sqlalchemy.orm import scoped_session, sessionmaker
from the.code.above import engine, dbsession

session = scoped_session(sessionmaker(bind=engine))

class BaseFactory(factory.alchemy.SQLAlchemyModelFactory):
    class Meta:
        abstract = True
        sqlalchemy_session = dbsession
        sqlalchemy_session_persistence = "commit"
Child Factory
import random
from datetime import datetime
from faker import Faker
from domain.declarative import TheModel
from tests.factories.base_factory import BaseFactory

faker = Faker()

class ChildFactory(BaseFactory):
    class Meta:
        model = TheModel

    analysis_id = random.randrange(1, 1000)
    created_by = random.randrange(1, 1000)
    file_extension = faker.text(max_nb_chars=10)
    original_file_name = faker.text(max_nb_chars=10)
    upload_date = faker.date_between_dates(
        date_start=datetime(2000, 1, 1), date_end=datetime(2019, 12, 31)
    )
Some test:
class TestSimple(unittest.TestCase):
    def test_important_stuff(self):
        f = ChildFactory()
So it looks to me like the Session object is not being instantiated properly and is actually of type function? It's calling add at some point, and we get an AttributeError because of that. Where am I going wrong?
session.add(obj)
E AttributeError: 'function' object has no attribute 'add'
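For context, factory_boy's SQLAlchemyModelFactory expects Meta.sqlalchemy_session to be an actual Session (or scoped_session), but the dbsession imported into Base_factory.py above is the pytest fixture function itself, which is why .add fails. A rough sketch of one common arrangement, using a module-level scoped_session that the fixture binds to the test connection (the name TestSession is illustrative, not from the original code):

# tests/factories/base_factory.py (sketch)
import factory.alchemy
from sqlalchemy.orm import scoped_session, sessionmaker

# Unbound proxy; the dbsession fixture binds it to the test connection later.
TestSession = scoped_session(sessionmaker())

class BaseFactory(factory.alchemy.SQLAlchemyModelFactory):
    class Meta:
        abstract = True
        sqlalchemy_session = TestSession
        sqlalchemy_session_persistence = "commit"

# conftest.py (sketch)
import pytest
from tests.factories.base_factory import TestSession

@pytest.fixture
def dbsession(engine, tables):
    connection = engine.connect()
    transaction = connection.begin()
    TestSession.configure(bind=connection)  # bind the proxy the factories use
    yield TestSession
    TestSession.remove()
    transaction.rollback()
    connection.close()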
I am trying to use the SQLAlchemy Enum type without creating a native DB ENUM column, while still having SQLAlchemy validate enum inputs.
So I have this declaration:
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import Enum as SaEnum
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import validates
import enum

Base = declarative_base()

class DummyEnum(enum.Enum):
    NAME_1 = "NAME_1"
    NAME_2 = "NAME_2"

class SAEnumVarcharTable(Base):
    __tablename__ = "test_table"
    id = Column(Integer, primary_key=True)
    uut_column = Column(SaEnum(DummyEnum, native_enum=False, validate_strings=True), default=DummyEnum.NAME_1)

    @validates('uut_column')
    def validate_uut_column(self, key, uut_value):
        return uut_value.upper()
I want the validation to return the string in upper case and only then do the Enum validation. Is that possible?
Because when I do:
connection.execute(insert(SAEnumVarcharTable), {"uut_column": "name_1"})
it raises a LookupError, where I would expect the value to be accepted as valid input (first it goes to the validates function -> returns an uppercase string -> then the Enum validation runs). Is it somehow possible?
Validators are only called when modifying an instance's attributes, e.g.:
instance = SAEnumVarcharTable()
instance.uut_column = "name_1"
In your case, you would need to override sqlalchemy.Enum's _db_value_for_elem:
class UpperCaseEnum(SaEnum):
    def _db_value_for_elem(self, elem):
        return super()._db_value_for_elem(elem.upper())

# uut_column = Column(SaEnum(...), ...)
uut_column = Column(UpperCaseEnum(...), ...)
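A minimal usage sketch, filling in the elided arguments with the ones from the question's original column definition:

uut_column = Column(
    UpperCaseEnum(DummyEnum, native_enum=False, validate_strings=True),
    default=DummyEnum.NAME_1,
)

# The override upper-cases the bound value before the Enum lookup,
# so a lowercase string is now accepted:
connection.execute(insert(SAEnumVarcharTable), {"uut_column": "name_1"})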
Aside
Validator implementation
validate_uut_column is not implemented correctly.
instance = SAEnumVarcharTable()
instance.uut_column = "name_0" # No error, expected LookupError
instance.uut_column = "name_1" # Ok
print(instance.uut_column == DummyEnum.NAME_1) # False, expected True
print(instance.uut_column.value == "NAME_1") # AttributeError: 'str' object has no attribute 'value'
instance.uut_column = DummyEnum.NAME_1 # AttributeError: 'DummyEnum' object has no attribute 'upper'
Instead, it should be:
@validates('uut_column')
def validate_uut_column(self, key, uut_value):
    # return uut_value.upper()
    return uut_value if uut_value in DummyEnum else DummyEnum[uut_value.upper()]
instance = SAEnumVarcharTable()
instance.uut_column = "name_0" # LookupError, as expected
instance.uut_column = "name_1" # Ok
print(instance.uut_column == DummyEnum.NAME_1) # True, as expected
print(instance.uut_column.value == "NAME_1") # True, as expected
instance.uut_column = DummyEnum.NAME_1 # Ok
I have used Flask-SQLAlchemy to create a mixin in a file called itemAbstract.py, to be shared by two model classes: ItemModel and ItemHistoryModel respectively. Below is the code I have written in itemAbstract.py:
from databaseHandler import databaseHandler
from sqlalchemy.ext.declarative import declared_attr

# pylint: disable=maybe-no-member
class Item(databaseHandler.Model):
    __abstract__ = True
    itemName = databaseHandler.Column(databaseHandler.String(80), nullable = False)
    price = databaseHandler.Column(databaseHandler.Numeric, nullable = False)
    itemImage = databaseHandler.Column(databaseHandler.String(1000), nullable = False)

    @classmethod
    @declared_attr
    def restaurantId(cls):
        return databaseHandler.Column(
            databaseHandler.Integer, databaseHandler.ForeignKey("restaurant.restaurantId"))

    @classmethod
    @declared_attr
    def restaurant(cls):
        return databaseHandler.relationship(
            "RestaurantModel", backref=databaseHandler.backref('items', lazy=True))

    @classmethod
    @declared_attr
    def productTypeId(cls):
        return databaseHandler.Column(
            databaseHandler.Integer, databaseHandler.ForeignKey("product_type.productTypeId"))

    @classmethod
    @declared_attr
    def productType(cls):
        return databaseHandler.relationship(
            "ProductTypeModel", backref=databaseHandler.backref('items', lazy=True))
And I have inherited it in the itemModel.py and itemHistoryModel.py like so:
from databaseHandler import databaseHandler
from sqlalchemy import and_, or_
from abstracts.itemAbstract import Item

# pylint: disable=maybe-no-member
class ItemModel(Item):
    __tablename__ = 'item'
    itemId = databaseHandler.Column(databaseHandler.Integer, primary_key = True)
And
from databaseHandler import databaseHandler
from sqlalchemy import and_, or_
from abstracts.itemAbstract import Item

# pylint: disable=maybe-no-member
class ItemHistoryModel(Item):
    __tablename__ = 'item_history'
    historyId = databaseHandler.Column(databaseHandler.Integer, primary_key = True)
I have a class method in both files that is supposed to help me get a list of the items a restaurant sells by passing in the restaurantId as a parameter:
@classmethod
def findItemsByRestaurant(cls, param):
    return cls.query.filter_by(restaurantId = param)
However, anytime I execute this method it returns a query string in the resultset instead of a list of items. Here is a sample resultset:
SELECT item_history.`itemName` AS `item_history_itemName`, item_history.price AS item_history_price, item_history.`itemImage` AS `item_history_itemImage`, item_history.`historyId` AS `item_history_historyId`
FROM item_history
WHERE false = 1
Somehow, SQLAlchemy renders my filter as false = 1, even though the actual ID of the restaurant I pass in is 10. What am I doing wrong?
This is the databaseHandler.py file:
from flask_sqlalchemy import SQLAlchemy
databaseHandler = SQLAlchemy()
The Query object has a number of API methods for getting Pythonic objects rather than amending the query (see the sketch after this list):
get
all
from_statement
first
one_or_none
one
scalar (as_scalar is to be deprecated)
count
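For example, a sketch of the class method from the question with the query actually executed, so it returns model instances instead of the Query object:

@classmethod
def findItemsByRestaurant(cls, param):
    # .all() executes the SELECT and returns a list of model instances;
    # without it, the Query object itself is returned (and printed as SQL).
    return cls.query.filter_by(restaurantId=param).all()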
I'm attempting to add custom data types to Cerberus. The UUID class works as expected (it's a standard library class) but I'm not able to validate with the UUID type using Cerberus.
Secondarily, I was not able to register multiple types in an __init__ function, but that should probably be its own question.
Here's my custom validator that should register the given types.
import cerberus
import uuid

class UUID:
    name = "UUID"

    def __init__(self, potential_uuid: str):
        self.uuid = uuid.UUID(potential_uuid)

    def __str__(self):
        return str(self.uuid)

class Validator(cerberus.Validator):
    def _register_types(self) -> cerberus.Validator.types_mapping:
        types_mapping = Validator.types_mapping.copy()
        for schema_type in datatypes.ALL_TYPES:
            cerberus_type = cerberus.TypeDefinition(
                schema_type.name,
                (schema_type,),
                ())
            types_mapping[schema_type.name] = cerberus_type
        return types_mapping

    cerberus_type = cerberus.TypeDefinition(
        "UUID",
        (datatypes.UUID,),
        ())
    types_mapping = cerberus.Validator.types_mapping.copy()
    types_mapping["UUID"] = cerberus_type

    # def __init__(self, *args, **kwargs):
    #     types_mapping = self._register_types()
    #     super().__init__(*args, **kwargs)
And here are my unit tests for this code.

import pytest

@pytest.mark.unit
def test_valid_uuid():
    test_input = "35d6d5a0-6f37-4794-a493-2712eda41c1a"
    actual = UUID(test_input)
    assert str(actual) == "35d6d5a0-6f37-4794-a493-2712eda41c1a"

@pytest.mark.unit
def test_invalid_uuid():
    test_input = "Not a Valid UUID"
    with pytest.raises(ValueError):
        actual = UUID(test_input)

@pytest.mark.unit
def test_uuid_type_registration():
    test_schema = {"test_name": {"type": "UUID"}}
    validator = Validator(test_schema)
    test_record = {"test_name": "35d6d5a0-6f37-4794-a493-2712eda41c1a"}
    result = validator.validate(test_record)
    print(validator._errors)
    assert result == True
If we just give the UUID class a valid UUID it succeeds but if we attempt to validate it through Cerberus we get a BAD_TYPE validation error.
pytest tests/test_datatypes/test_datatypes.py
============================================================================================================================= test session starts ==============================================================================================================================
platform linux -- Python 3.7.2, pytest-4.3.1, py-1.8.0, pluggy-0.9.0
benchmark: 3.2.2 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000)
rootdir: /home/vdev, inifile: setup.cfg
plugins: cov-2.6.1, benchmark-3.2.2
collected 4 items
tests/test_datatypes/test_datatypes.py ...F [100%]
=================================================================================================================================== FAILURES ===================================================================================================================================
_________________________________________________________________________________________________________________________ test_uuid_type_registration __________________________________________________________________________________________________________________________
    @pytest.mark.unit
    def test_uuid_type_registration():
        test_schema = {"test_name": {"type": "UUID"}}
        validator = Validator(test_schema)
        test_record = {"test_name": "35d6d5a0-6f37-4794-a493-2712eda41c1a"}
        result = validator.validate(test_record)
        print(validator._errors)
>       assert result == True
E       assert False == True

tests/test_datatypes/test_datatypes.py:30: AssertionError
----------------------------------------------------------------------------------------------------------------------------- Captured stdout call -----------------------------------------------------------------------------------------------------------------------------
[ValidationError @ 0x7fa477e10278 ( document_path=('test_name',),schema_path=('test_name', 'type'),code=0x24,constraint="UUID",value="35d6d5a0-6f37-4794-a493-2712eda41c1a",info=() )]
=============================================================================================================================== warnings summary ===============================================================================================================================
/usr/local/lib/python3.7/site-packages/cerberus/validator.py:14
/usr/local/lib/python3.7/site-packages/cerberus/validator.py:14
/usr/local/lib/python3.7/site-packages/cerberus/validator.py:14
/usr/local/lib/python3.7/site-packages/cerberus/validator.py:14
/usr/local/lib/python3.7/site-packages/cerberus/validator.py:14: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
from collections import Hashable, Iterable, Mapping, Sequence
/usr/local/lib/python3.7/site-packages/cerberus/errors.py:6
/usr/local/lib/python3.7/site-packages/cerberus/errors.py:6: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
from collections import defaultdict, namedtuple, MutableMapping
/usr/local/lib/python3.7/site-packages/cerberus/schema.py:3
/usr/local/lib/python3.7/site-packages/cerberus/schema.py:3: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
from collections import (Callable, Hashable, Iterable, Mapping,
-- Docs: https://docs.pytest.org/en/latest/warnings.html
================================================================================================================ 1 failed, 3 passed, 6 warnings in 0.33 seconds ===============================================================================================================
EDIT 1
Simplified example code
import cerberus
import uuid

class Validator(cerberus.Validator):
    types_mapping = {
        **cerberus.Validator.types_mapping,
        'UUID': cerberus.TypeDefinition('UUID', (uuid.UUID,), ())
    }
Same failure
    @pytest.mark.unit
    def test_uuid_type_registration():
        test_schema = {"test_name": {"type": "UUID"}}
        validator = es_client.Validator(test_schema)
        test_record = {"test_name": "35d6d5a0-6f37-4794-a493-2712eda41c1a"}
        result = validator.validate(test_record)
        print(validator._errors)
>       assert result == True
E       assert False == True

tests/test_datatypes/test_datatypes.py:30: AssertionError
------------------------------------------------------------ Captured stdout call -------------------------------------------
[ValidationError @ 0x7fd9cdeed0b8 ( document_path=('test_name',),schema_path=('test_name', 'type'),code=0x24,constraint="UUID",value="35d6d5a0-6f37-4794-a493-2712eda41c1a",info=() )]
Could you clarify what the _register_types method is meant to do and when it is called?
This works, maybe it helps you find your error:
def test_issue_475():
    class UUID:
        def __init__(self, data):
            self.data = data

    class MyValidator(Validator):
        types_mapping = {
            **Validator.types_mapping,
            'UUID': TypeDefinition('UUID', (UUID,), ())
        }

    assert_success(
        {'field': UUID(0)},
        {'field': {'type': 'UUID'}},
        validator=MyValidator()
    )
Note that you mention the stdlib's UUID class while you implement another one with the same name in your example.
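Also note that in the failing test the document value is a plain string, not an instance of the registered type, so the type check fails regardless of how the type is registered. One possible sketch, using Cerberus's coerce rule together with the stdlib uuid.UUID type from EDIT 1 (the schema below is illustrative):

import uuid
import cerberus

class Validator(cerberus.Validator):
    types_mapping = {
        **cerberus.Validator.types_mapping,
        'UUID': cerberus.TypeDefinition('UUID', (uuid.UUID,), ()),
    }

# coerce converts the incoming string into a uuid.UUID instance during
# normalization, before the type check runs, so a valid UUID string validates.
schema = {"test_name": {"type": "UUID", "coerce": uuid.UUID}}
validator = Validator(schema)
print(validator.validate({"test_name": "35d6d5a0-6f37-4794-a493-2712eda41c1a"}))  # True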