diff --git a/.github/workflows/get-matrix.yml b/.github/workflows/get-matrix.yml index c169bc2d6..196deb68a 100644 --- a/.github/workflows/get-matrix.yml +++ b/.github/workflows/get-matrix.yml @@ -154,7 +154,7 @@ jobs: - name: Get Core matrix id: matrix-core - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/core/matrix.yml @@ -184,7 +184,7 @@ jobs: - name: Get Clickhouse matrix id: matrix-clickhouse - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/clickhouse/matrix.yml @@ -214,7 +214,7 @@ jobs: - name: Get Greenplum matrix id: matrix-greenplum - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/greenplum/matrix.yml @@ -244,7 +244,7 @@ jobs: - name: Get Hive matrix id: matrix-hive - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/hive/matrix.yml @@ -274,7 +274,7 @@ jobs: - name: Get Kafka matrix id: matrix-kafka - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/kafka/matrix.yml @@ -304,7 +304,7 @@ jobs: - name: Get LocalFS matrix id: matrix-local-fs - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/local-fs/matrix.yml @@ -334,7 +334,7 @@ jobs: - name: Get MongoDB matrix id: matrix-mongodb - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/mongodb/matrix.yml @@ -364,7 +364,7 @@ jobs: - name: Get MSSQL matrix id: matrix-mssql - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/mssql/matrix.yml @@ -394,7 +394,7 @@ jobs: - name: Get MySQL matrix id: matrix-mysql - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' 
.github/workflows/data/mysql/matrix.yml @@ -424,7 +424,7 @@ jobs: - name: Get Oracle matrix id: matrix-oracle - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/oracle/matrix.yml @@ -454,7 +454,7 @@ jobs: - name: Get Postgres matrix id: matrix-postgres - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/postgres/matrix.yml @@ -484,7 +484,7 @@ jobs: - name: Get Teradata matrix id: matrix-teradata - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/teradata/matrix.yml @@ -514,7 +514,7 @@ jobs: - name: Get FTP matrix id: matrix-ftp - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/ftp/matrix.yml @@ -544,7 +544,7 @@ jobs: - name: Get FTPS matrix id: matrix-ftps - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/ftps/matrix.yml @@ -574,7 +574,7 @@ jobs: - name: Get HDFS matrix id: matrix-hdfs - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/hdfs/matrix.yml @@ -604,7 +604,7 @@ jobs: - name: Get S3 matrix id: matrix-s3 - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/s3/matrix.yml @@ -634,7 +634,7 @@ jobs: - name: Get SFTP matrix id: matrix-sftp - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/sftp/matrix.yml @@ -664,7 +664,7 @@ jobs: - name: Get Samba matrix id: matrix-samba - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' .github/workflows/data/samba/matrix.yml @@ -694,6 +694,6 @@ jobs: - name: Get WebDAV matrix id: matrix-webdav - uses: mikefarah/yq@v4.44.3 + uses: mikefarah/yq@v4.44.5 with: cmd: yq -o=json '.matrix' 
.github/workflows/data/webdav/matrix.yml diff --git a/.github/workflows/test-mssql.yml b/.github/workflows/test-mssql.yml index cb7252f91..2980ece05 100644 --- a/.github/workflows/test-mssql.yml +++ b/.github/workflows/test-mssql.yml @@ -45,6 +45,13 @@ jobs: with: python-version: ${{ inputs.python-version }} + # https://github.com/pymssql/pymssql/issues/372#issuecomment-742386160 + - name: Add missing sqlfront.h file + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install --no-install-recommends freetds-dev libkrb5-dev gcc + - name: Cache Ivy uses: actions/cache@v4 if: inputs.with-cache diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index fcd6352ad..279b96614 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -392,7 +392,7 @@ jobs: run: ./combine_coverage.sh - name: Check coverage - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: token: ${{ secrets.CODECOV_TOKEN }} file: ./reports/coverage.xml diff --git a/README.rst b/README.rst index 79561c1df..ac5f550a7 100644 --- a/README.rst +++ b/README.rst @@ -3,24 +3,35 @@ onETL ===== -|Repo Status| |PyPI| |PyPI License| |PyPI Python Version| -|Documentation| |Build Status| |Coverage| |pre-commit.ci| +|Repo Status| |PyPI Latest Release| |PyPI License| |PyPI Python Version| |PyPI Downloads| +|Documentation| |CI Status| |Test Coverage| |pre-commit.ci Status| .. |Repo Status| image:: https://www.repostatus.org/badges/latest/active.svg + :alt: Repo status - Active :target: https://github.com/MobileTeleSystems/onetl -.. |PyPI| image:: https://img.shields.io/pypi/v/onetl +.. |PyPI Latest Release| image:: https://img.shields.io/pypi/v/onetl + :alt: PyPI - Latest Release :target: https://pypi.org/project/onetl/ .. |PyPI License| image:: https://img.shields.io/pypi/l/onetl.svg + :alt: PyPI - License :target: https://github.com/MobileTeleSystems/onetl/blob/develop/LICENSE.txt .. 
|PyPI Python Version| image:: https://img.shields.io/pypi/pyversions/onetl.svg - :target: https://badge.fury.io/py/onetl + :alt: PyPI - Python Version + :target: https://pypi.org/project/onetl/ +.. |PyPI Downloads| image:: https://img.shields.io/pypi/dm/onetl + :alt: PyPI - Downloads + :target: https://pypi.org/project/onetl/ .. |Documentation| image:: https://readthedocs.org/projects/onetl/badge/?version=stable + :alt: Documentation - ReadTheDocs :target: https://onetl.readthedocs.io/ -.. |Build Status| image:: https://github.com/MobileTeleSystems/onetl/workflows/Tests/badge.svg +.. |CI Status| image:: https://github.com/MobileTeleSystems/onetl/workflows/Tests/badge.svg + :alt: Github Actions - latest CI build status :target: https://github.com/MobileTeleSystems/onetl/actions -.. |Coverage| image:: https://codecov.io/gh/MobileTeleSystems/onetl/branch/develop/graph/badge.svg?token=RIO8URKNZJ +.. |Test Coverage| image:: https://codecov.io/gh/MobileTeleSystems/onetl/branch/develop/graph/badge.svg?token=RIO8URKNZJ + :alt: Test coverage - percent :target: https://codecov.io/gh/MobileTeleSystems/onetl -.. |pre-commit.ci| image:: https://results.pre-commit.ci/badge/github/MobileTeleSystems/onetl/develop.svg +.. 
|pre-commit.ci Status| image:: https://results.pre-commit.ci/badge/github/MobileTeleSystems/onetl/develop.svg + :alt: pre-commit.ci - status :target: https://results.pre-commit.ci/latest/github/MobileTeleSystems/onetl/develop |Logo| diff --git a/codecov.yml b/codecov.yml index 7291c7aad..54c78e626 100644 --- a/codecov.yml +++ b/codecov.yml @@ -2,5 +2,5 @@ coverage: status: project: default: - target: 94% + target: 91% threshold: 1% diff --git a/docs/changelog/0.12.3.rst b/docs/changelog/0.12.3.rst new file mode 100644 index 000000000..f931d2eed --- /dev/null +++ b/docs/changelog/0.12.3.rst @@ -0,0 +1,7 @@ +0.12.3 (2024-11-22) +=================== + +Bug Fixes +--------- + +- Allow passing table names in format ``schema."table.with.dots"`` to ``DBReader(name=...)`` and ``DBWriter(name=...)``. diff --git a/docs/changelog/index.rst b/docs/changelog/index.rst index 8e0370bb9..bb23a9875 100644 --- a/docs/changelog/index.rst +++ b/docs/changelog/index.rst @@ -3,6 +3,7 @@ :caption: Changelog DRAFT + 0.12.3 0.12.2 0.12.1 0.12.0 diff --git a/docs/changelog/next_release/+.dependency.rst b/docs/changelog/next_release/+.dependency.rst deleted file mode 100644 index 33ffb4eea..000000000 --- a/docs/changelog/next_release/+.dependency.rst +++ /dev/null @@ -1 +0,0 @@ -Allow using `etl-entities==2.4.0 `_. diff --git a/docs/changelog/next_release/+.doc.1.rst b/docs/changelog/next_release/+.doc.1.rst deleted file mode 100644 index 5e1a4f3e2..000000000 --- a/docs/changelog/next_release/+.doc.1.rst +++ /dev/null @@ -1 +0,0 @@ -Fix links to MSSQL date & time type documentation. diff --git a/docs/changelog/next_release/+.improvement.1.rst b/docs/changelog/next_release/+.improvement.1.rst deleted file mode 100644 index e80ec7425..000000000 --- a/docs/changelog/next_release/+.improvement.1.rst +++ /dev/null @@ -1 +0,0 @@ -Change Spark ``jobDescription`` for DBReader & FileDFReader from ``DBReader.run() -> Connection`` to ``Connection -> DBReader.run()``. 
diff --git a/docs/changelog/next_release/316.bugfix.1.rst b/docs/changelog/next_release/316.bugfix.1.rst deleted file mode 100644 index 88bad0477..000000000 --- a/docs/changelog/next_release/316.bugfix.1.rst +++ /dev/null @@ -1 +0,0 @@ -Fix ``log_hwm`` result for ``KeyValueIntHWM`` (used by Kafka). diff --git a/docs/changelog/next_release/316.bugfix.2.rst b/docs/changelog/next_release/316.bugfix.2.rst deleted file mode 100644 index 485a73455..000000000 --- a/docs/changelog/next_release/316.bugfix.2.rst +++ /dev/null @@ -1 +0,0 @@ -Fix ``log_collection`` hiding values of ``Kafka.addresses`` in logs with ``INFO`` level. diff --git a/onetl/VERSION b/onetl/VERSION index 26acbf080..aa22d3ce3 100644 --- a/onetl/VERSION +++ b/onetl/VERSION @@ -1 +1 @@ -0.12.2 +0.12.3 diff --git a/onetl/connection/db_connection/dialect_mixins/support_name_with_schema_only.py b/onetl/connection/db_connection/dialect_mixins/support_name_with_schema_only.py index 0e66b9800..db369eb1f 100644 --- a/onetl/connection/db_connection/dialect_mixins/support_name_with_schema_only.py +++ b/onetl/connection/db_connection/dialect_mixins/support_name_with_schema_only.py @@ -5,7 +5,7 @@ class SupportNameWithSchemaOnly: def validate_name(self, value: str) -> str: - if value.count(".") != 1: + if "." 
not in value: raise ValueError( f"Name should be passed in `schema.name` format, got '{value}'", ) diff --git a/setup.py b/setup.py index 2c1126f06..efadc2523 100644 --- a/setup.py +++ b/setup.py @@ -64,7 +64,7 @@ def parse_requirements(file: Path) -> list[str]: license_files=("LICENSE.txt",), url="https://github.com/MobileTeleSystems/onetl", classifiers=[ - "Development Status :: 3 - Alpha", + "Development Status :: 4 - Beta", "Framework :: Pydantic", "Framework :: Pydantic :: 1", "Framework :: Pydantic :: 2", diff --git a/tests/tests_unit/test_db/test_db_reader_unit/test_clickhouse_reader_unit.py b/tests/tests_unit/test_db/test_db_reader_unit/test_clickhouse_reader_unit.py index 598e9d1b5..5df85fed3 100644 --- a/tests/tests_unit/test_db/test_db_reader_unit/test_clickhouse_reader_unit.py +++ b/tests/tests_unit/test_db/test_db_reader_unit/test_clickhouse_reader_unit.py @@ -35,14 +35,13 @@ def test_clickhouse_reader_snapshot_error_pass_df_schema(spark_mock): ) -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_clickhouse_reader_wrong_table_name(spark_mock, table): +def test_clickhouse_reader_wrong_table_name(spark_mock): clickhouse = Clickhouse(host="some_host", user="user", database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBReader( connection=clickhouse, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_reader_unit/test_greenplum_reader_unit.py b/tests/tests_unit/test_db/test_db_reader_unit/test_greenplum_reader_unit.py index 4f4820d4d..7ad9a1a60 100644 --- a/tests/tests_unit/test_db/test_db_reader_unit/test_greenplum_reader_unit.py +++ b/tests/tests_unit/test_db/test_db_reader_unit/test_greenplum_reader_unit.py @@ -36,14 +36,13 @@ def test_greenplum_reader_snapshot_error_pass_df_schema(spark_mock): ) -@pytest.mark.parametrize("table", 
["table", "table.table.table"]) -def test_greenplum_reader_wrong_table_name(spark_mock, table): +def test_greenplum_reader_wrong_table_name(spark_mock): greenplum = Greenplum(host="some_host", user="user", database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBReader( connection=greenplum, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_reader_unit/test_hive_reader_unit.py b/tests/tests_unit/test_db/test_db_reader_unit/test_hive_reader_unit.py index acb467e3b..400fe2ace 100644 --- a/tests/tests_unit/test_db/test_db_reader_unit/test_hive_reader_unit.py +++ b/tests/tests_unit/test_db/test_db_reader_unit/test_hive_reader_unit.py @@ -36,14 +36,13 @@ def test_hive_reader_snapshot_error_pass_df_schema(spark_mock): ) -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_hive_reader_wrong_table_name(spark_mock, table): +def test_hive_reader_wrong_table_name(spark_mock): hive = Hive(cluster="rnd-dwh", spark=spark_mock) with pytest.raises(ValueError): DBReader( connection=hive, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_reader_unit/test_mssql_reader_unit.py b/tests/tests_unit/test_db/test_db_reader_unit/test_mssql_reader_unit.py index ea9953e68..291fff179 100644 --- a/tests/tests_unit/test_db/test_db_reader_unit/test_mssql_reader_unit.py +++ b/tests/tests_unit/test_db/test_db_reader_unit/test_mssql_reader_unit.py @@ -36,14 +36,13 @@ def test_mssql_reader_snapshot_error_pass_df_schema(spark_mock): ) -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_mssql_reader_wrong_table_name(spark_mock, table): +def test_mssql_reader_wrong_table_name(spark_mock): mssql = MSSQL(host="some_host", user="user", 
database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBReader( connection=mssql, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_reader_unit/test_mysql_reader_unit.py b/tests/tests_unit/test_db/test_db_reader_unit/test_mysql_reader_unit.py index 8976393e3..41aeb8d77 100644 --- a/tests/tests_unit/test_db/test_db_reader_unit/test_mysql_reader_unit.py +++ b/tests/tests_unit/test_db/test_db_reader_unit/test_mysql_reader_unit.py @@ -36,14 +36,13 @@ def test_mysql_reader_snapshot_error_pass_df_schema(spark_mock): ) -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_mysql_reader_wrong_table_name(spark_mock, table): +def test_mysql_reader_wrong_table_name(spark_mock): mysql = MySQL(host="some_host", user="user", database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBReader( connection=mysql, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_reader_unit/test_oracle_reader_unit.py b/tests/tests_unit/test_db/test_db_reader_unit/test_oracle_reader_unit.py index 444bc596f..60f40f6e1 100644 --- a/tests/tests_unit/test_db/test_db_reader_unit/test_oracle_reader_unit.py +++ b/tests/tests_unit/test_db/test_db_reader_unit/test_oracle_reader_unit.py @@ -36,13 +36,12 @@ def test_oracle_reader_error_df_schema(spark_mock): ) -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_oracle_reader_wrong_table_name(spark_mock, table): +def test_oracle_reader_wrong_table_name(spark_mock): oracle = Oracle(host="some_host", user="user", sid="sid", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in 
`schema.name` format"): DBReader( connection=oracle, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_reader_unit/test_postgres_reader_unit.py b/tests/tests_unit/test_db/test_db_reader_unit/test_postgres_reader_unit.py index 03c646811..e973675fa 100644 --- a/tests/tests_unit/test_db/test_db_reader_unit/test_postgres_reader_unit.py +++ b/tests/tests_unit/test_db/test_db_reader_unit/test_postgres_reader_unit.py @@ -36,14 +36,13 @@ def test_postgres_reader_snapshot_error_pass_df_schema(spark_mock): ) -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_postgres_reader_wrong_table_name(spark_mock, table): +def test_postgres_reader_wrong_table_name(spark_mock): postgres = Postgres(host="some_host", user="user", database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBReader( connection=postgres, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_reader_unit/test_teradata_reader_unit.py b/tests/tests_unit/test_db/test_db_reader_unit/test_teradata_reader_unit.py index c61abca72..f68ded1ae 100644 --- a/tests/tests_unit/test_db/test_db_reader_unit/test_teradata_reader_unit.py +++ b/tests/tests_unit/test_db/test_db_reader_unit/test_teradata_reader_unit.py @@ -36,14 +36,13 @@ def test_teradata_reader_snapshot_error_pass_df_schema(spark_mock): ) -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_teradata_reader_wrong_table_name(spark_mock, table): +def test_teradata_reader_wrong_table_name(spark_mock): teradata = Teradata(host="some_host", user="user", database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBReader( 
connection=teradata, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_writer_unit/test_clickhouse_writer_unit.py b/tests/tests_unit/test_db/test_db_writer_unit/test_clickhouse_writer_unit.py index a631c95b7..801138e30 100644 --- a/tests/tests_unit/test_db/test_db_writer_unit/test_clickhouse_writer_unit.py +++ b/tests/tests_unit/test_db/test_db_writer_unit/test_clickhouse_writer_unit.py @@ -6,12 +6,11 @@ pytestmark = pytest.mark.clickhouse -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_clickhouse_writer_wrong_table_name(spark_mock, table): +def test_clickhouse_writer_wrong_table_name(spark_mock): clickhouse = Clickhouse(host="some_host", user="user", database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBWriter( connection=clickhouse, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_writer_unit/test_greenplum_writer_unit.py b/tests/tests_unit/test_db/test_db_writer_unit/test_greenplum_writer_unit.py index 784d43580..0d79b2fc2 100644 --- a/tests/tests_unit/test_db/test_db_writer_unit/test_greenplum_writer_unit.py +++ b/tests/tests_unit/test_db/test_db_writer_unit/test_greenplum_writer_unit.py @@ -6,12 +6,11 @@ pytestmark = pytest.mark.greenplum -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_greenplum_writer_wrong_table_name(spark_mock, table): +def test_greenplum_writer_wrong_table_name(spark_mock): greenplum = Greenplum(host="some_host", user="user", database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBWriter( connection=greenplum, - table=table, # Required format: table="schema.table" + table="table", # 
Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_writer_unit/test_hive_writer_unit.py b/tests/tests_unit/test_db/test_db_writer_unit/test_hive_writer_unit.py index c6c6ddc70..d96cd5210 100644 --- a/tests/tests_unit/test_db/test_db_writer_unit/test_hive_writer_unit.py +++ b/tests/tests_unit/test_db/test_db_writer_unit/test_hive_writer_unit.py @@ -6,12 +6,11 @@ pytestmark = pytest.mark.hive -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_hive_writer_wrong_table_name(spark_mock, table): +def test_hive_writer_wrong_table_name(spark_mock): hive = Hive(cluster="rnd-dwh", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBWriter( connection=hive, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_writer_unit/test_mssql_writer_unit.py b/tests/tests_unit/test_db/test_db_writer_unit/test_mssql_writer_unit.py index 0690e6a45..3bb30b655 100644 --- a/tests/tests_unit/test_db/test_db_writer_unit/test_mssql_writer_unit.py +++ b/tests/tests_unit/test_db/test_db_writer_unit/test_mssql_writer_unit.py @@ -6,12 +6,11 @@ pytestmark = pytest.mark.mssql -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_mssql_writer_wrong_table_name(spark_mock, table): +def test_mssql_writer_wrong_table_name(spark_mock): mssql = MSSQL(host="some_host", user="user", database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBWriter( connection=mssql, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_writer_unit/test_mysql_writer_unit.py b/tests/tests_unit/test_db/test_db_writer_unit/test_mysql_writer_unit.py index fc00e96c3..c8b84c6a5 100644 --- 
a/tests/tests_unit/test_db/test_db_writer_unit/test_mysql_writer_unit.py +++ b/tests/tests_unit/test_db/test_db_writer_unit/test_mysql_writer_unit.py @@ -6,12 +6,11 @@ pytestmark = pytest.mark.mysql -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_mysql_writer_wrong_table_name(spark_mock, table): +def test_mysql_writer_wrong_table_name(spark_mock): mysql = MySQL(host="some_host", user="user", database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBWriter( connection=mysql, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_writer_unit/test_oracle_writer_unit.py b/tests/tests_unit/test_db/test_db_writer_unit/test_oracle_writer_unit.py index ae53e4515..6ab40bcb7 100644 --- a/tests/tests_unit/test_db/test_db_writer_unit/test_oracle_writer_unit.py +++ b/tests/tests_unit/test_db/test_db_writer_unit/test_oracle_writer_unit.py @@ -6,12 +6,11 @@ pytestmark = pytest.mark.oracle -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_oracle_writer_wrong_table_name(spark_mock, table): +def test_oracle_writer_wrong_table_name(spark_mock): oracle = Oracle(host="some_host", user="user", sid="sid", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBWriter( connection=oracle, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_writer_unit/test_postgres_writer_unit.py b/tests/tests_unit/test_db/test_db_writer_unit/test_postgres_writer_unit.py index a2794466f..cecb6864d 100644 --- a/tests/tests_unit/test_db/test_db_writer_unit/test_postgres_writer_unit.py +++ b/tests/tests_unit/test_db/test_db_writer_unit/test_postgres_writer_unit.py @@ -6,12 +6,11 @@ 
pytestmark = pytest.mark.postgres -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_postgres_writer_wrong_table_name(spark_mock, table): +def test_postgres_writer_wrong_table_name(spark_mock): postgres = Postgres(host="some_host", user="user", database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBWriter( connection=postgres, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" ) diff --git a/tests/tests_unit/test_db/test_db_writer_unit/test_teradata_writer_unit.py b/tests/tests_unit/test_db/test_db_writer_unit/test_teradata_writer_unit.py index 76bc48358..57bf81eac 100644 --- a/tests/tests_unit/test_db/test_db_writer_unit/test_teradata_writer_unit.py +++ b/tests/tests_unit/test_db/test_db_writer_unit/test_teradata_writer_unit.py @@ -6,12 +6,11 @@ pytestmark = pytest.mark.teradata -@pytest.mark.parametrize("table", ["table", "table.table.table"]) -def test_teradata_writer_wrong_table_name(spark_mock, table): +def test_teradata_writer_wrong_table_name(spark_mock): teradata = Teradata(host="some_host", user="user", database="database", password="passwd", spark=spark_mock) with pytest.raises(ValueError, match="Name should be passed in `schema.name` format"): DBWriter( connection=teradata, - table=table, # Required format: table="schema.table" + table="table", # Required format: table="schema.table" )