diff --git a/README.md b/README.md index 49455c8..7e322fc 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,16 @@ dataloom

+#### Why choose `dataloom`? + +1. **Ease of Use**: `dataloom` offers a user-friendly interface, making it straightforward to work with. +2. **Flexible SQL Driver**: Write one codebase and seamlessly switch between `PostgreSQL`, `MySQL`, and `SQLite3` drivers as needed. +3. **Lightweight**: Despite its powerful features, `dataloom` remains lightweight, ensuring efficient performance. +4. **Comprehensive Documentation**: Benefit from extensive documentation that guides users through various functionalities and use cases. +5. **Active Maintenance**: `dataloom` is actively maintained, ensuring ongoing support and updates for a reliable development experience. +6. **Cross-platform Compatibility**: `dataloom` works seamlessly across different operating systems, including `Windows`, `macOS`, and `Linux`. +7. **Scalability**: Scale your application effortlessly with `dataloom`, whether it's a small project or a large-scale enterprise application. + ### ⚠️ Warning > **⚠️ Experimental Status of `dataloom`**: The `dataloom` module is currently in an experimental phase. As such, we strongly advise against using it in production environments until a major version is officially released and stability is ensured. During this experimental phase, the `dataloom` module may undergo significant changes, and its features are subject to refinement. We recommend monitoring the project updates and waiting for a stable release before incorporating it into production systems. 
Please exercise caution and consider alternative solutions for production use until the module reaches a stable release.\*\* @@ -13,6 +23,7 @@ ### Table of Contents - [dataloom](#dataloom) + - [Why choose `dataloom`?](#why-choose-dataloom) - [⚠️ Warning](#️-warning) - [Table of Contents](#table-of-contents) - [Key Features:](#key-features) @@ -27,6 +38,10 @@ - [`ForeignKeyColumn` Class](#foreignkeycolumn-class) - [`CreatedAtColumn` Class](#createdatcolumn-class) - [`UpdatedAtColumn` Class](#updatedatcolumn-class) + - [`Filter` Class](#filter-class) + - [`ColumnValue` Class](#columnvalue-class) + - [`Order` Class](#order-class) + - [`Include`](#include) - [Syncing Tables](#syncing-tables) - [1. The `sync` method.](#1-the-sync-method) - [2. The `connect_and_sync` method.](#2-the-connect_and_sync-method) @@ -90,7 +105,7 @@ pg_loom = Dataloom( password="root", user="postgres", host="localhost", - logging=True, + sql_logger="console", logs_filename="logs.sql", port=5432, ) @@ -116,7 +131,7 @@ mysql_loom = Dataloom( password="root", user="root", host="localhost", - logging=True, + sql_logger="console", logs_filename="logs.sql", port=3306, ) @@ -140,7 +155,8 @@ sqlite_loom = Dataloom( dialect="sqlite", database="hi.db", logs_filename="sqlite-logs.sql", - logging=True + logging=True, + sql_logger="console", ) # Connect to the SQLite database @@ -159,7 +175,7 @@ The `Dataloom` class takes in the following options: | `password` | Password for the database user (only for `mysql` and `postgres`) | `str` or `None` | `None` | `No` | | `user` | Database user (only for `mysql` and `postgres`) | `str` or `None` | `None` | `No` | | `host` | Database host (only for `mysql` and `postgres`) | `str` or `None` | `localhost` | `No` | -| `logging` | Enable logging for the database queries | `bool` | `True` | `No` | +| `sql_logger` | Enable logging for the database queries. If you don't want to see the sql logs you can set this option to `None` which is the default value. 
If you set it to `file` then you will see the logs in the default `dataloom.sql` file, you can override this by passing a `logs_filename` option. If you set it to `console`, sql statements will be printed on the console. | `console` or `file` or `None` | `None` | `No` | | `logs_filename` | Filename for the query logs | `str` or `None` | `dataloom.sql` | `No` | | `port` | Port number for the database connection (only for `mysql` and `postgres`) | `int` or `None` | `None` | `No` | @@ -180,10 +196,9 @@ from dataloom import ( TableColumn, ForeignKeyColumn, ) -from typing import Optional class User(Model): - __tablename__: Optional[TableColumn] = TableColumn(name="users") + __tablename__:TableColumn = TableColumn(name="users") id = PrimaryKeyColumn(type="int", auto_increment=True) name = Column(type="text", nullable=False, default="Bob") username = Column(type="varchar", unique=True, length=255) @@ -193,10 +208,8 @@ class User(Model): updatedAt = UpdatedAtColumn() - - class Post(Model): - __tablename__: Optional[TableColumn] = TableColumn(name="posts") + __tablename__: TableColumn = TableColumn(name="posts") id = PrimaryKeyColumn(type="int", auto_increment=True, nullable=False, unique=True) completed = Column(type="boolean", default=False) title = Column( @@ -214,7 +227,10 @@ class Post(Model): ) ``` -- Within the `User` model definition, the table name is explicitly specified using the `__tablename__` property, set to `"users"`. 
This informs `dataloom` to use the provided name instead of automatically deriving it from the class name. If `TableColumn` is not specified, the class name becomes the default table name during the synchronization of tables. To achieve this, the `TableColumn` class is used, accepting the specified table name as an argument. + +> 👉 **Note:** When defining a table name, it's not necessary to specify the property as `__tablename__`. However, it's considered good practice to use the `__tablename__` name to avoid potential clashes with other columns in the table. + - Every table must include exactly one primary key column. To define this, the `PrimaryKeyColumn` class is employed, signaling to `dataloom` that the specified field is a primary key. - The `Column` class represents a regular column, allowing the inclusion of various options such as type and whether it is required. - The `CreatedAtColumn` and `UpdatedAtColumn` column types are automatically generated by the database as timestamps. If timestamps are unnecessary or only one of them is needed, they can be omitted. @@ -381,6 +397,87 @@ When a column is designated as `CreatedAtColumn`, its value will be automaticall When a column is designated as `UpdatedAtColumn`, its value will be automatically generated each time you create a new record or update an existing record in a database table, acting as a timestamp. +#### `Filter` Class + +The `Filter` class in `dataloom` is designed to facilitate the application of filters when executing queries and mutations. It allows users to specify conditions that must be met for the operation to affect certain rows in a database table. 
Below is an example demonstrating how this class can be used: + +```python +affected_rows = pg_loom.update_one( + Post, + values=[ + ColumnValue(name="title", value="Hey"), + ColumnValue(name="completed", value=True), + ], + filters=[ + Filter(column="id", value=1, join_next_filter_with="AND"), + Filter(column="userId", value=1, join_next_filter_with="AND"), + ], +) +``` + +So from the above example we are applying filters while updating a `Post` here are the options that you can pass on that filter class: +| Argument | Description | Type | Default | +|-------------------------|------------------------------------------------------------|-------------------------------------------|------------------------| +| `column` | The name of the column to apply the filter on | `String` | - | +| `value` | The value to filter against | `Any` | - | +| `operator` | The comparison operator to use for the filter | `'eq'`, `'lt'`, `'gt'`, `'leq'`, `'geq'`, `'in'`, `'notIn'`, `'like'` | `'eq'` | +| `join_next_filter_with` | The logical operator to join this filter with the next one | `'AND'`, `'OR'` | `'AND'` | + +> 👉 : **Note:** You can apply either a list of filters or a single filter when filtering records. + +#### `ColumnValue` Class + +Just like the `Filter` class, `dataloom` also provides a `ColumnValue` class. This class acts as a setter to update the values of columns in your database table. + +The following code snippet demonstrates how the `ColumnValue` class is used to update records in the database: + +```py +re = pg_loom.update_one( + Post, + values=[ + ColumnValue(name="title", value="Hey"), + ColumnValue(name="completed", value=True), + ], + filters=[ + Filter(column="id", value=1, join_next_filter_with="AND"), + Filter(column="userId", value=1, join_next_filter_with="AND"), + ], +) +``` + +It accepts two arguments: `name` and `value`. name represents the column name, while value corresponds to the new value to be assigned to that column. 
+ +| Argument | Description | Type | Default | +| -------- | ---------------------------------------------------------- | ----- | ------- | +| `name` | The name of the column to be updated or inserted. | `str` | - | +| `value` | The value to assign to the column during update or insert. | `Any` | - | + +#### `Order` Class + +The `Order` class enables us to specify the desired order in which documents should be returned. Below is an example illustrating its usage: + +```py +posts = pg_loom.find_all( + Post, + select=["id", "completed", "title", "createdAt"], + limit=3, + offset=0, + order=[ + Order(column="createdAt", order="ASC"), + Order(column="id", order="DESC"), + ] +) +``` + +> 👉 **Note:** When utilizing a list of orders, they are applied sequentially, one after the other: + +| Argument | Description | Type | Default | +| -------- | ------------------------------------------------------------------------- | ------------------- | ------- | +| `column` | The name of the column to order by. | `str` | - | +| `order` | The order direction, either `"ASC"` (ascending) or `"DESC"` (descending). | `"ASC"` or `"DESC"` | `"ASC"` | + +#### `Include` + ### Syncing Tables Syncing tables involves the process of creating tables from models and saving them to a database. After defining your tables, you will need to synchronize your database tables using the `sync` method. 
diff --git a/dataloom/loom/__init__.py b/dataloom/loom/__init__.py index 146da3e..f2c5d98 100644 --- a/dataloom/loom/__init__.py +++ b/dataloom/loom/__init__.py @@ -10,7 +10,12 @@ from dataloom.model import Model from dataloom.statements import GetStatement from dataloom.conn import ConnectionOptionsFactory -from dataloom.utils import file_logger, console_logger, get_child_table_columns +from dataloom.utils import ( + file_logger, + console_logger, + get_child_table_columns, + get_insert_bulk_attrs, +) from typing import Optional from dataloom.types import ( Order, @@ -266,8 +271,8 @@ def sync(self, models: list[Model], drop=False, force=False, alter=False): except Exception as e: raise Exception(e) - def insert_one(self, instance: Model): - sql, values = instance._get_insert_one_stm(dialect=self.dialect) + def insert_one(self, instance: Model, values: ColumnValue | list[ColumnValue]): + sql, values = instance._get_insert_one_stm(dialect=self.dialect, values=values) row = self._execute_sql( sql, args=tuple(values), @@ -276,30 +281,28 @@ def insert_one(self, instance: Model): ) return row[0] if type(row) in [list, tuple] else row - def insert_bulk(self, instances: list[Model]): + def insert_bulk( + self, instance: Model, values: list[list[ColumnValue] | ColumnValue] + ): columns = None placeholders = None - data = list() - for instance in instances: - ( - column_names, - placeholder_values, - _values, - ) = instance._get_insert_bulk_attrs(dialect=self.dialect) + data = [] + for _value in values: + (column_names, placeholder_values, _values) = get_insert_bulk_attrs( + dialect=self.dialect, instance=instance, values=_value + ) if columns is None: columns = column_names if placeholders is None: placeholders = placeholder_values - data.append(_values) - sql, values = instance._get_insert_bulk_smt( - dialect=self.dialect, - placeholders=placeholder_values, - columns=columns, - data=data, - ) - row_count = self._execute_sql(sql, args=tuple(values), fetchall=True, 
bulk=True) - return row_count + print(data) + + # sql = instance._get_insert_bulk_smt( + # dialect=self.dialect, column_names=columns, placeholder_values=placeholders + # ) + # row_count = self._execute_sql(sql, args=tuple(data), fetchall=True, bulk=True) + # return row_count def find_many( self, diff --git a/dataloom/model/__init__.py b/dataloom/model/__init__.py index 56f4cff..3be51e6 100644 --- a/dataloom/model/__init__.py +++ b/dataloom/model/__init__.py @@ -2,8 +2,6 @@ from dataloom.constants import CURRENT_TIME_STAMP, SQLITE_CURRENT_TIME_STAMP from dataloom.exceptions import UnknownColumnException, UnsupportedDialectException from dataloom.columns import ( - Column, - ForeignKeyColumn, PrimaryKeyColumn, TableColumn, ) @@ -25,16 +23,7 @@ class Model: - def __init__(self, **args) -> None: - self._data = {} - for k, v in args.items(): - self._data[k] = v - - def __getattribute__(self, key: str): - _data = object.__getattribute__(self, "_data") - if key in _data: - return _data[key] - return object.__getattribute__(self, key) + # def __init__(self) -> None: @classmethod def _create_sql(cls, dialect: DIALECT_LITERAL, ignore_exists=True): @@ -79,86 +68,50 @@ def _drop_sql(cls, dialect: DIALECT_LITERAL): ) return sql - def _get_insert_one_stm(self, dialect: DIALECT_LITERAL): - cls = self.__class__ - fields = [] - placeholders = [] - values = [] - pk = None - for _name, field in inspect.getmembers(cls): - if isinstance(field, Column): - value = getattr(self, _name) - if not isinstance(value, Column): - fields.append(_name) - values.append(value) - placeholders.append("?" if dialect == "sqlite" else "%s") - elif isinstance(field, ForeignKeyColumn): - value = getattr(self, _name) - if not isinstance(value, ForeignKeyColumn): - fields.append(_name) - values.append(value) - placeholders.append("?" 
if dialect == "sqlite" else "%s") - elif isinstance(field, PrimaryKeyColumn): - pk = f'"{_name}"' - value = getattr(self, _name) - if not isinstance(value, PrimaryKeyColumn): - fields.append(_name) - values.append(value) - placeholders.append("?" if dialect == "sqlite" else "%s") - data = (values, placeholders, fields) + @classmethod + def _get_insert_one_stm( + cls, dialect: DIALECT_LITERAL, values: list[ColumnValue] | ColumnValue + ): + fields, pk_name, fks, updatedAtColumName = get_table_fields( + cls, dialect=dialect + ) + placeholders, column_values, column_names = get_column_values( + table_name=cls._get_table_name(), + dialect=dialect, + fields=fields, + values=values, + ) if dialect == "postgres" or "mysql" or "sqlite": - values = GetStatement( - dialect=dialect, model=cls, table_name=self._get_table_name() - )._get_insert_one_command(data=data, pk=pk) + sql = GetStatement( + dialect=dialect, model=cls, table_name=cls._get_table_name() + )._get_insert_one_command( + fields=column_names, + pk_name=pk_name, + placeholders=[ + "?" if dialect == "sqlite" else "%s" for _ in placeholders + ], + ) else: raise UnsupportedDialectException( "The dialect passed is not supported the supported dialects are: {'postgres', 'mysql', 'sqlite'}" ) - return values - - def _get_insert_bulk_attrs(self, dialect: DIALECT_LITERAL): - cls = self.__class__ - fields = [] - placeholders = [] - values = [] - for _name, field in inspect.getmembers(cls): - if isinstance(field, Column): - value = getattr(self, _name) - if not isinstance(value, Column): - fields.append(_name) - values.append(value) - placeholders.append("?" if dialect == "sqlite" else "%s") - elif isinstance(field, ForeignKeyColumn): - value = getattr(self, _name) - if not isinstance(value, ForeignKeyColumn): - fields.append(_name) - values.append(value) - placeholders.append("?" 
if dialect == "sqlite" else "%s") - elif isinstance(field, PrimaryKeyColumn): - value = getattr(self, _name) - if not isinstance(value, PrimaryKeyColumn): - fields.append(_name) - values.append(value) - placeholders.append("?" if dialect == "sqlite" else "%s") - column_names = ", ".join( - [f'"{f}"' if dialect == "postgres" else f"`{f}`" for f in fields] - ) - placeholder_values = ", ".join(placeholders) - return column_names, placeholder_values, values + return sql, column_values @classmethod def _get_insert_bulk_smt( - cls, dialect: DIALECT_LITERAL, placeholders, columns, data + cls, dialect: DIALECT_LITERAL, column_names: str, placeholder_values: str ): if dialect == "postgres" or "mysql" or "sqlite": - sql, values = GetStatement( + sql = GetStatement( dialect=dialect, model=cls, table_name=cls._get_table_name() - )._get_insert_bulk_command(data=(placeholders, columns, data)) + )._get_insert_bulk_command( + column_names=column_names, placeholder_values=placeholder_values + ) else: raise UnsupportedDialectException( "The dialect passed is not supported the supported dialects are: {'postgres', 'mysql', 'sqlite'}" ) - return sql, values + return sql @classmethod def _get_select_where_stm( @@ -285,7 +238,7 @@ def _get_update_by_pk_stm( fields, pk_name, fks, updatedAtColumName = get_table_fields( cls, dialect=dialect ) - placeholders, column_values = get_column_values( + placeholders, column_values, column_names = get_column_values( table_name=cls._get_table_name(), dialect=dialect, fields=fields, @@ -327,7 +280,7 @@ def _get_update_one_stm( filters=filters, ) - placeholders_of_column_values, column_values = get_column_values( + placeholders_of_column_values, column_values, column_names = get_column_values( table_name=cls._get_table_name(), dialect=dialect, fields=fields, @@ -372,7 +325,7 @@ def _get_update_bulk_where_stm( fields=fields, filters=filters, ) - placeholders_of_column_values, column_values = get_column_values( + placeholders_of_column_values, 
column_values, column_names = get_column_values( table_name=cls._get_table_name(), dialect=dialect, fields=fields, @@ -496,7 +449,7 @@ def _get_increment_decrement_stm( filters=filters, ) - placeholders_of_column_values, column_values = get_column_values( + placeholders_of_column_values, column_values, column_names = get_column_values( table_name=cls._get_table_name(), dialect=dialect, fields=fields, diff --git a/dataloom/statements/__init__.py b/dataloom/statements/__init__.py index 5c0193c..2044a0f 100644 --- a/dataloom/statements/__init__.py +++ b/dataloom/statements/__init__.py @@ -39,14 +39,15 @@ def __init__( self.table_name = table_name self.ignore_exists = ignore_exists - def _get_insert_one_command(self, pk, data) -> tuple[Optional[str], list]: - (values, placeholders, fields) = data + def _get_insert_one_command( + self, pk_name: str, placeholders: list[str], fields: list[str] + ) -> tuple[Optional[str], list]: if self.dialect == "postgres": sql = PgStatements.INSERT_COMMAND_ONE.format( table_name=f'"{self.table_name}"', column_names=", ".join([f'"{f}"' for f in fields]), placeholder_values=", ".join(placeholders), - pk=pk, + pk_name=pk_name, ) elif self.dialect == "mysql": sql = MySqlStatements.INSERT_COMMAND_ONE.format( @@ -64,10 +65,11 @@ def _get_insert_one_command(self, pk, data) -> tuple[Optional[str], list]: raise UnsupportedDialectException( "The dialect passed is not supported the supported dialects are: {'postgres', 'mysql', 'sqlite'}" ) - return sql, values + return sql - def _get_insert_bulk_command(self, data) -> tuple[Optional[str], list]: - placeholder_values, column_names, values = data + def _get_insert_bulk_command( + self, placeholder_values: str, column_names: str + ) -> str: if self.dialect == "postgres": sql = PgStatements.INSERT_COMMAND_MANY.format( table_name=f'"{self.table_name}"', @@ -90,7 +92,7 @@ def _get_insert_bulk_command(self, data) -> tuple[Optional[str], list]: raise UnsupportedDialectException( "The dialect passed is not 
supported the supported dialects are: {'postgres', 'mysql', 'sqlite'}" ) - return sql, values + return sql @property def _get_drop_table_command(self) -> Optional[str]: diff --git a/dataloom/statements/statements.py b/dataloom/statements/statements.py index 1e2e434..be479bd 100644 --- a/dataloom/statements/statements.py +++ b/dataloom/statements/statements.py @@ -261,7 +261,7 @@ class PgStatements: """ # insert - INSERT_COMMAND_ONE = "INSERT INTO {table_name} ({column_names}) VALUES ({placeholder_values}) RETURNING {pk};" + INSERT_COMMAND_ONE = "INSERT INTO {table_name} ({column_names}) VALUES ({placeholder_values}) RETURNING {pk_name};" INSERT_COMMAND_MANY = "INSERT INTO {table_name} ({column_names}) VALUES ({placeholder_values}) RETURNING *;" # creating table diff --git a/dataloom/utils/__init__.py b/dataloom/utils/__init__.py index d10836e..ccf4066 100644 --- a/dataloom/utils/__init__.py +++ b/dataloom/utils/__init__.py @@ -88,6 +88,7 @@ def get_column_values( ): column_values = [] placeholders_of_column_values = [] + column_names = [] if values is not None: if isinstance(values, list): @@ -102,6 +103,7 @@ def get_column_values( key}` = {'%s' if dialect == 'mysql' else '?'}" ) placeholders_of_column_values.append(_key) + column_names.append(key) column_values.append(v) else: raise UnknownColumnException( @@ -119,12 +121,13 @@ def get_column_values( key}` = {'%s' if dialect == 'mysql' else '?'}" ) placeholders_of_column_values.append(_key) + column_names.append(key) column_values.append(v) else: raise UnknownColumnException( f"Table {table_name} does not have column '{key}'." 
) - return placeholders_of_column_values, column_values + return placeholders_of_column_values, column_values, column_names def get_operator(op: OPERATOR_LITERAL) -> str: @@ -405,3 +408,26 @@ def get_formatted_query( ) return sql + + +def get_insert_bulk_attrs( + instance, + dialect: DIALECT_LITERAL, + values: list[ColumnValue] | ColumnValue, +): + fields, pk_name, fks, updatedAtColumName = get_table_fields( + instance, dialect=dialect + ) + placeholders, column_values, column_names = get_column_values( + table_name=instance._get_table_name(), + dialect=dialect, + fields=fields, + values=values, + ) + _placeholders = ", ".join( + ["?" if dialect == "sqlite" else "%s" for _ in placeholders] + ) + _column_names = ", ".join( + [f'"{f}"' if dialect == "postgres" else f"`{f}`" for f in column_names] + ) + return _column_names, _placeholders, column_values diff --git a/playground.py b/playground.py index d6af4f7..03801cf 100644 --- a/playground.py +++ b/playground.py @@ -9,6 +9,8 @@ ForeignKeyColumn, Filter, ColumnValue, + Include, + Order, ) from typing import Optional @@ -68,16 +70,25 @@ class Post(Model): conn, tables = pg_loom.connect_and_sync([Post, User, Category], drop=True, force=True) -print(tables) -user = User(username="@miller") -userId = pg_loom.insert_one(user) -pg_loom.decrement( +userId = pg_loom.insert_bulk( User, - filters=Filter(column="id", value=1), - column=ColumnValue(name="tokenVersion", value=2.6), + values=[ + [ + ColumnValue(name="username", value="@miller"), + ColumnValue(name="name", value="Jonh"), + ], + ColumnValue(name="username", value="@brown"), + ColumnValue(name="username", value="@blue"), + ], ) +# pg_loom.decrement( +# User, +# filters=Filter(column="id", value=1), +# column=ColumnValue(name="tokenVersion", value=2.6), +# ) + # cate = Category(name="general") # categoryId = pg_loom.insert_one(cate) @@ -119,17 +130,17 @@ class Post(Model): # return_dict=True, # ) -# re = pg_loom.update_one( -# Post, -# values=[ -# 
ColumnValue(name="title", value="Hey"), -# ColumnValue(name="completed", value=True), -# ], -# filters=[ -# Filter(column="id", value=1, join_next_filter_with="AND"), -# Filter(column="userId", value=1, join_next_filter_with="AND"), -# ], -# ) +re = pg_loom.update_one( + Post, + values=[ + ColumnValue(name="title", value="Hey"), + ColumnValue(name="completed", value=True), + ], + filters=[ + Filter(column="id", value=1, join_next_filter_with="AND"), + Filter(column="userId", value=1, join_next_filter_with="AND"), + ], +) # print(post) # print(post) @@ -153,25 +164,28 @@ class Post(Model): # ) # print(posts) -# posts = pg_loom.find_all( -# Post, -# select=["id", "completed", "title", "createdAt"], -# limit=3, -# offset=0, -# order=[ -# Order(column="createdAt", order="ASC"), -# Order(column="id", order="DESC"), -# ], -# include=[ -# Include( -# model=User, -# select=["id", "username", "name"], -# limit=1, -# offset=0, -# ), -# ], -# return_dict=True, -# ) +posts = pg_loom.find_all( + Post, + select=["id", "completed", "title", "createdAt"], + limit=3, + offset=0, + order=[ + Order(column="createdAt", order="ASC"), + Order( + column="id", + order="DESC", + ), + ], + include=[ + Include( + model=User, + select=["id", "username", "name"], + limit=1, + offset=0, + ), + ], + return_dict=True, +) # print(posts) # posts = pg_loom.find_many( # Post, @@ -199,5 +213,7 @@ class Post(Model): # print(posts) +print(max(9, 5, 3)) + if __name__ == "__main__": conn.close()