diff --git a/.github/workflows/test_scripts_version_bump.yml b/.github/workflows/test_scripts_version_bump.yml
index 1c9bc10a..f72a4f4d 100644
--- a/.github/workflows/test_scripts_version_bump.yml
+++ b/.github/workflows/test_scripts_version_bump.yml
@@ -12,6 +12,4 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      # Bump version on merging Pull Requests with specific labels.
-      # (bump:major,bump:minor,bump:patch)
       - uses: haya14busa/action-bumpr@v1
diff --git a/README.md b/README.md
index 0a230551..01614824 100755
--- a/README.md
+++ b/README.md
@@ -2,6 +2,11 @@
 Pytest Automation Boilerplate
 ==================================
 [![contributions welcome](https://img.shields.io/badge/contributions-welcome-1EAEDB)]() [![license](https://img.shields.io/badge/license-MIT-blue)](https://opensource.org/license/mit)
+![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/tweag/pytest-automation-boilerplate/docker_android_workflow.yml)
+![GitHub repo size](https://img.shields.io/github/repo-size/tweag/pytest-automation-boilerplate)
+![GitHub last commit](https://img.shields.io/github/last-commit/tweag/pytest-automation-boilerplate)
+![GitHub Issues or Pull Requests](https://img.shields.io/github/issues/tweag/pytest-automation-boilerplate)
+
 
 About the Project
 -----------------
diff --git a/main/frontend/common/step_definitions/__init__.py b/main/frontend/common/step_definitions/__init__.py
index 91d94b52..5d51775d 100644
--- a/main/frontend/common/step_definitions/__init__.py
+++ b/main/frontend/common/step_definitions/__init__.py
@@ -1,11 +1,8 @@
 from .browser_navigation import *
 from .click_touch_and_keyboard_actions import *
-from .date_time import *
 from .dropdowns import *
 from .attribute_assertion import *
 from .environment_variables import *
-from .excel_and_csv import *
-from .html_tables import *
 from .mobile_device_actions import *
 from .swipe_drag_and_drop import *
 from .text_assertion_editing import *
diff --git a/main/frontend/common/step_definitions/date_time.py b/main/frontend/common/step_definitions/date_time.py
deleted file mode 100644
index 91cbddea..00000000
--- a/main/frontend/common/step_definitions/date_time.py
+++ /dev/null
@@ -1,126 +0,0 @@
-"""
-----------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------
-
-----------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------
-"""
-import random
-import time
-import structlog
-
-from datetime import datetime, timedelta
-from pytest_bdd import parsers, given, when
-from main.frontend.common.helpers.app import context_manager
-from main.frontend.common.helpers.selenium_generics import SeleniumGenerics
-from main.frontend.common.step_definitions.steps_common import MOBILE_SUFFIX
-from main.frontend.common.utils.locator_parser import Locators
-
-logger = structlog.get_logger(__name__)
-
-
-# WEB context Predefined Step
-@given(parsers.re("I pause for '(?P<seconds>.*)' s"), converters=dict(seconds=int))
-@when(parsers.re("I pause for '(?P<seconds>.*)' s"), converters=dict(seconds=int))
-def pause_execution(seconds: int):
-    time.sleep(seconds)
-
-
-# WEB & MOBILE contexts Predefined Step
-@given(parsers.re(r"I add current date to '(?P<locator_path>.*)' with '(?P<date_format>MM/dd/yyyy|MM/dd/yy|dd/MM/yyyy|dd/MM/yy|dd MMM yyyy)'"))
-@when(parsers.re(r"I add current date to '(?P<locator_path>.*)' with '(?P<date_format>MM/dd/yyyy|MM/dd/yy|dd/MM/yyyy|dd/MM/yy|dd MMM yyyy)'"))
-def add_current_date_for_element(selenium_generics: SeleniumGenerics, locators: Locators, locator_path, date_format: str):
-    if date_format == "MM/dd/yyyy":
-        date_text = datetime.now().date().strftime("%m/%d/%Y")
-    elif date_format == "MM/dd/yy":
-        date_text = datetime.now().date().strftime("%m/%d/%y")
-    elif date_format == "dd/MM/yyyy":
-        date_text = datetime.now().date().strftime("%d/%m/%Y")
-    elif date_format == "dd/MM/yy":
-        date_text = datetime.now().date().strftime("%d/%m/%y")
-    elif date_format == "dd MMM yyyy":
-        date_text = datetime.now().date().strftime("%d %b %Y")
-    else:
-        raise ValueError(f"Date format: {date_format} is invalid")
-    if MOBILE_SUFFIX in locator_path:
-        with context_manager(selenium_generics):
-            selenium_generics.enter_text(locators.parse_and_get(locator_path, selenium_generics), date_text)
-    else:
-        selenium_generics.enter_text(locators.parse_and_get(locator_path, selenium_generics), date_text)
-
-
-# WEB & MOBILE contexts Predefined Step
-@given(parsers.re(r"I add random '(?P<direction>future|past)' date to '(?P<locator_path>.*)' with '(?P<date_format>MM/dd/yyyy|MM/dd/yy|dd/MM/yyyy|dd/MM/yy|dd MMM yyyy)' format"))
-@when(parsers.re(r"I add random '(?P<direction>future|past)' date to '(?P<locator_path>.*)' with '(?P<date_format>MM/dd/yyyy|MM/dd/yy|dd/MM/yyyy|dd/MM/yy|dd MMM yyyy)' format"))
-def add_custom_date_for_element(selenium_generics: SeleniumGenerics, locators: Locators, locator_path, date_format: str, direction: str):
-    now = datetime.now()
-    delta = timedelta(days=random.SystemRandom().randint(1, 365 * 20))
-    if direction == "future":
-        random_date = now + delta
-    elif direction == "past":
-        random_date = now - delta
-    else:
-        raise ValueError(f"Time direction: {direction} is invalid")
-
-    if date_format == "MM/dd/yyyy":
-        date_text = random_date.strftime("%m/%d/%Y")
-    elif date_format == "MM/dd/yy":
-        date_text = random_date.strftime("%m/%d/%y")
-    elif date_format == "dd/MM/yyyy":
-        date_text = random_date.strftime("%d/%m/%Y")
-    elif date_format == "dd/MM/yy":
-        date_text = random_date.strftime("%d/%m/%y")
-    elif date_format == "dd MMM yyyy":
-        date_text = random_date.strftime("%d %b %Y")
-    else:
-        raise ValueError(f"Date format: {date_format} is invalid")
-    if MOBILE_SUFFIX in locator_path:
-        with context_manager(selenium_generics):
-            selenium_generics.enter_text(locators.parse_and_get(locator_path, selenium_generics), date_text)
-    else:
-        selenium_generics.enter_text(locators.parse_and_get(locator_path, selenium_generics), date_text)
-
-
-# WEB & MOBILE contexts Predefined Step
-@given(parsers.re(r"I add '(?P<direction>Past|Current|Future)' time to '(?P<locator_path>.*)' with '(?P<time_format>HH:MM:SS|HH:MM)' format(\s+)?((?:and clock format)\s+(?:')(?P<clock_format>\w+)(?:'))?(\s+)?((?:and delimiter)\s+(?:')(?P<delimiter>.*)(?:'))?$"))
-@when(parsers.re(r"I add '(?P<direction>Past|Current|Future)' time to '(?P<locator_path>.*)' with '(?P<time_format>HH:MM:SS|HH:MM)' format(\s+)?((?:and clock format)\s+(?:')(?P<clock_format>\w+)(?:'))?(\s+)?((?:and delimiter)\s+(?:')(?P<delimiter>.*)(?:'))?$"))
-def add_custom_time_for_element(selenium_generics: SeleniumGenerics, locators: Locators, locator_path, time_format: str, direction: str, delimiter: str, clock_format: str):
-    delimiter = delimiter if delimiter else ":"
-    clock_format = clock_format if clock_format else "24h"
-    now = datetime.now()
-    _from = now - now.replace(hour=0, minute=0, second=0, microsecond=0)
-    _to = now.replace(hour=23, minute=59, second=59, microsecond=0) - now
-    if clock_format == "24h":
-        cf = "%H"
-    elif clock_format == "12h":
-        cf = "%I"
-    else:
-        raise ValueError(f"Clock format: {clock_format} is invalid")
-
-    if direction == "Future":
-        random_seconds = random.SystemRandom().randint(1, _to.seconds)
-        delta = timedelta(seconds=random_seconds)
-        future_date = now + delta
-        _time = future_date
-    elif direction == "Past":
-        random_seconds = random.SystemRandom().randint(1, _from.seconds)
-        delta = timedelta(seconds=random_seconds)
-        past_date = now - delta
-        _time = past_date
-    elif direction == "Current":
-        _time = now
-    else:
-        raise ValueError(f"Time direction: {direction} is invalid")
-
-    if time_format == "HH:MM:SS":
-        time_text = _time.strftime(f"{cf}{delimiter}%M{delimiter}%S")
-    elif time_format == "HH:MM":
-        time_text = _time.strftime(f"{cf}{delimiter}%M")
-    else:
-        raise ValueError(f"Time format: {time_format} is invalid")
-
-    if MOBILE_SUFFIX in locator_path:
-        with context_manager(selenium_generics):
-            selenium_generics.enter_text(locators.parse_and_get(locator_path, selenium_generics), time_text)
-    else:
-        selenium_generics.enter_text(locators.parse_and_get(locator_path, selenium_generics), time_text)
diff --git a/main/frontend/common/step_definitions/excel_and_csv.py b/main/frontend/common/step_definitions/excel_and_csv.py
deleted file mode 100644
index be9002cc..00000000
--- a/main/frontend/common/step_definitions/excel_and_csv.py
+++ /dev/null
@@ -1,346 +0,0 @@
-import os
-import csv
-import structlog
-import openpyxl
-
-from pathlib import Path
-from openpyxl.reader.excel import load_workbook
-from pytest_bdd import parsers, given, when, then
-from assertpy import assert_that
-from main.utils import data_manager
-
-logger = structlog.get_logger(__name__)
-
-
-@given(parsers.re("Text inside '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' is equal to '(?P<expected_text>.+)'"),
-       converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted, expected_text=data_manager.text_formatted))
-@when(parsers.re("Text inside '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' is equal to '(?P<expected_text>.+)'"),
-      converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted, expected_text=data_manager.text_formatted))
-@then(parsers.re("Text inside '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' is equal to '(?P<expected_text>.+)'"),
-      converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted, expected_text=data_manager.text_formatted))
-def cell_text_is_equal_to(cell: str, sheet_name: str, file_path: str, expected_text: str):
-    sheet = load_workbook(file_path)[sheet_name]
-    assert_that(sheet[cell].value).is_equal_to(expected_text)
-
-
-# ID 902
-@given(parsers.re("Text inside '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' contains '(?P<expected_text>.+)'"),
-       converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted, expected_text=data_manager.text_formatted))
-@when(parsers.re("Text inside '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' contains '(?P<expected_text>.+)'"),
-      converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted, expected_text=data_manager.text_formatted))
-@then(parsers.re("Text inside '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' contains '(?P<expected_text>.+)'"),
-      converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted, expected_text=data_manager.text_formatted))
-def cell_text_contains(cell:str, sheet_name: str, file_path: str, expected_text: str):
-    sheet = load_workbook(file_path)[sheet_name]
-    assert_that(sheet[cell].value).contains(expected_text)
-
-
-@given(parsers.re("The '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' does not contain any text"),
-       converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-@when(parsers.re("The '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' does not contain any text"),
-      converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-@then(parsers.re("The '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' does not contain any text"),
-      converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-def cell_text_is_empty(cell:str, sheet_name: str, file_path: str):
-    sheet = load_workbook(file_path)[sheet_name]
-    assert_that(sheet[cell].value).is_none()
-
-
-@given(parsers.re("The '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' text is equal with the text of the '(?P<locator_path>.+)'"),
-       converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-@when(parsers.re("The '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' text is equal with the text of the '(?P<locator_path>.+)'"),
-      converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-@then(parsers.re("The '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' text is equal with the text of the '(?P<locator_path>.+)'"),
-      converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-def cell_text_equals_element_text(selenium_generics, locators, cell:str, sheet_name: str, file_path: str, locator_path: str):
-    sheet = load_workbook(file_path)[sheet_name]
-    assert_that(sheet[cell].value).is_equal_to(selenium_generics.get_element_text(locators.parse_and_get(locator_path, selenium_generics)))
-
-
-@given(parsers.re("The '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' is contained in the text of '(?P<locator_path>.+)'"),
-       converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-@when(parsers.re("The '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' is contained in the text of '(?P<locator_path>.+)'"),
-      converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-@then(parsers.re("The '(?P<cell>.+)' on sheet '(?P<sheet_name>.+)' of excel file '(?P<file_path>.+)' is contained in the text of '(?P<locator_path>.+)'"),
-      converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-def element_text_contains_cell_text(selenium_generics, locators, cell:str, sheet_name: str, file_path: str, locator_path: str):
-    sheet = load_workbook(file_path)[sheet_name]
-    assert_that(selenium_generics.get_element_text(locators.parse_and_get(locator_path, selenium_generics))).contains(sheet[cell].value)
-
-
-@given(parsers.re("I delete '(?P<file_path>.*)' file"))
-@when(parsers.re("I delete '(?P<file_path>.*)' file"))
-def delete_file(file_path: str):
-    absolute_path = Path(file_path).absolute()
-    if absolute_path.exists():
-        os.remove(absolute_path)
-
-
-# ID 907
-@given(parsers.re("I create excel file '(?P<file_name>.*)' and save on '(?P<file_path>.+)'"))
-@when(parsers.re("I create excel file '(?P<file_name>.*)' and save on '(?P<file_path>.+)'"))
-def create_excel_file(file_name: str, file_path: str):
-    if Path(file_name).suffix != '.xlsx':
-        raise ValueError(f"Invalid file extension for {file_name}")
-    excel_file = (Path(file_path) / file_name).absolute()
-    openpyxl.Workbook().save(excel_file.as_posix())
-
-
-@given(parsers.re("I write '(?P<text>.*)' to '(?P<cell>.*)' on '(?P<sheet_name>.*)' of excel file '(?P<file_path>.*)'"),
-       converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted,
-                       text=data_manager.text_formatted))
-@when(parsers.re("I write '(?P<text>.*)' to '(?P<cell>.*)' on '(?P<sheet_name>.*)' of excel file '(?P<file_path>.*)'"),
-      converters=dict(cell=data_manager.text_formatted, sheet_name=data_manager.text_formatted,
-                      text=data_manager.text_formatted))
-def write_text_to_excel_file(text: str, cell: str, sheet_name: str, file_path: str):
-    excel_file = Path(file_path).absolute().as_posix()
-    wb = load_workbook(excel_file)
-    wb[sheet_name][cell] = text
-    wb.save(excel_file)
-
-
-@given(parsers.re("Number of total rows on '(?P<sheet_name>.*)' of excel file '(?P<file_path>.*)' is '(?P<row_count>.*)'"),
-       converters=dict(row_count=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-@when(parsers.re("Number of total rows on '(?P<sheet_name>.*)' of excel file '(?P<file_path>.*)' is '(?P<row_count>.*)'"),
-      converters=dict(row_count=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-@then(parsers.re("Number of total rows on '(?P<sheet_name>.*)' of excel file '(?P<file_path>.*)' is '(?P<row_count>.*)'"),
-      converters=dict(row_count=data_manager.text_formatted, sheet_name=data_manager.text_formatted))
-def total_rows_number_with_data_is_equal_to(sheet_name: str, file_path: str, row_count: str):
-    sheet = load_workbook(file_path)[sheet_name]
-    num_rows = 0
-    for row in sheet.iter_rows():
-        if any(cell.value is not None for cell in row):
-            num_rows += 1
-    assert_that(int(num_rows)).is_equal_to(int(row_count))
-
-
-@given(parsers.re("Number of rows containing '(?P<expected_text>.*)' on '(?P<sheet_name>.*)' of excel file '(?P<file_path>.*)' is '(?P<row_count>.*)'"),
-       converters=dict(row_count=data_manager.text_formatted, sheet_name=data_manager.text_formatted, text=data_manager.text_formatted))
-@when(parsers.re("Number of rows containing '(?P<expected_text>.*)' on '(?P<sheet_name>.*)' of excel file '(?P<file_path>.*)' is '(?P<row_count>.*)'"),
-      converters=dict(row_count=data_manager.text_formatted, sheet_name=data_manager.text_formatted, text=data_manager.text_formatted))
-@then(parsers.re("Number of rows containing '(?P<expected_text>.*)' on '(?P<sheet_name>.*)' of excel file '(?P<file_path>.*)' is '(?P<row_count>.*)'"),
-      converters=dict(row_count=data_manager.text_formatted, sheet_name=data_manager.text_formatted, text=data_manager.text_formatted))
-def number_rows_with_text_is_equal_to(expected_text: str, sheet_name: str, file_path: str, row_count: str):
-    sheet = load_workbook(file_path)[sheet_name]
-    num_rows = 0
-    for row in sheet.iter_rows():
-        if any(cell.value == f"{expected_text}" for cell in row):
-            num_rows += 1
-    assert_that(int(num_rows)).is_equal_to(int(row_count))
-
-
-@given(parsers.re("Text inside '(?P<cell>.*)' cell of csv file '(?P<file_path>.*)' is equal to '(?P<expected_text>.+)'"),
-       converters=dict(expected_text=data_manager.text_formatted, cell=data_manager.text_formatted))
-@when(parsers.re("Text inside '(?P<cell>.*)' cell of csv file '(?P<file_path>.*)' is equal to '(?P<expected_text>.+)'"),
-      converters=dict(expected_text=data_manager.text_formatted, cell=data_manager.text_formatted))
-@then(parsers.re("Text inside '(?P<cell>.*)' cell of csv file '(?P<file_path>.*)' is equal to '(?P<expected_text>.+)'"),
-      converters=dict(expected_text=data_manager.text_formatted, cell=data_manager.text_formatted))
-def csv_cell_text_is_equal_to(cell: str, file_path: str, expected_text: str):
-    csv_file = Path(file_path).absolute()
-    if not all([csv_file.exists(), csv_file.is_file(), csv_file.suffix == '.csv']):
-        raise FileNotFoundError(f"File {file_path} is not a valid csv file")
-    wb = openpyxl.Workbook()
-    sheet = wb.active
-    with open(csv_file, newline="") as f:
-        _csv_file = f.read()
-        dialect = csv.Sniffer()
-        delimiter = dialect.sniff(_csv_file).delimiter
-    with open(csv_file, newline="") as file:
-        reader = csv.reader(file, delimiter=delimiter)
-        for row in reader:
-            sheet.append(row)
-    assert_that(sheet[cell].value).is_equal_to(expected_text)
-
-
-@given(parsers.re("Text inside '(?P.*)' cell of csv file '(?P.*)' contains '(?P.+)'"), - converters=dict(expected_text=data_manager.text_formatted, cell=data_manager.text_formatted)) -@when(parsers.re("Text inside '(?P.*)' cell of csv file '(?P.*)' contains '(?P.+)'"), - converters=dict(expected_text=data_manager.text_formatted, cell=data_manager.text_formatted)) -@then(parsers.re("Text inside '(?P.*)' cell of csv file '(?P.*)' contains '(?P.+)'"), - converters=dict(expected_text=data_manager.text_formatted, cell=data_manager.text_formatted)) -def csv_cell_text_contains(cell: str, file_path: str, expected_text: str): - csv_file = Path(file_path).absolute() - if not all([csv_file.exists(), csv_file.is_file(), csv_file.suffix == '.csv']): - raise FileNotFoundError(f"File {file_path} is not a valid csv file") - wb = openpyxl.Workbook() - sheet = wb.active - with open(csv_file, newline="") as f: - _csv_file = f.read() - dialect = csv.Sniffer() - delimiter = dialect.sniff(_csv_file).delimiter - with open(csv_file, newline="") as file: - reader = csv.reader(file, delimiter=delimiter) - for row in reader: - sheet.append(row) - assert_that(sheet[cell].value).contains(expected_text) - - -@given(parsers.re("The '(?P.*)' cell of csv file '(?P.*)' text is equal with the text of the '(?P.+)'"), - converters=dict(cell=data_manager.text_formatted)) -@when(parsers.re("The '(?P.*)' cell of csv file '(?P.*)' text is equal with the text of the '(?P.+)'"), - converters=dict(cell=data_manager.text_formatted)) -@then(parsers.re("The '(?P.*)' cell of csv file '(?P.*)' text is equal with the text of the '(?P.+)'"), - converters=dict(cell=data_manager.text_formatted)) -def csv_cell_text_equals_element_text(selenium_generics, locators, cell: str, file_path: str, locator_path: str): - csv_file = Path(file_path).absolute() - if not all([csv_file.exists(), csv_file.is_file(), csv_file.suffix == '.csv']): - raise FileNotFoundError(f"File {file_path} is not a valid csv file") - wb = openpyxl.Workbook() - sheet = wb.active - with open(csv_file, newline="") as f: - _csv_file = f.read() - dialect = csv.Sniffer() - delimiter = dialect.sniff(_csv_file).delimiter - with open(csv_file, newline="") as file: - reader = csv.reader(file, delimiter=delimiter) - for row in reader: - sheet.append(row) - assert_that(sheet[cell].value).is_equal_to(selenium_generics.get_element_text(locators.parse_and_get(locator_path, selenium_generics))) - - -@given(parsers.re("The '(?P.*)' cell of csv file '(?P.*)' is contained in the text of '(?P.+)'"), - converters=dict(cell=data_manager.text_formatted)) -@when(parsers.re("The '(?P.*)' cell of csv file '(?P.*)' is contained in the text of '(?P.+)'"), - converters=dict(cell=data_manager.text_formatted)) -@then(parsers.re("The '(?P.*)' cell of csv file '(?P.*)' is contained in the text of '(?P.+)'"), - converters=dict(cell=data_manager.text_formatted)) -def element_text_contains_csv_cell_text(selenium_generics, locators, cell: str, file_path: str, locator_path: str): - csv_file = Path(file_path).absolute() - if not all([csv_file.exists(), csv_file.is_file(), csv_file.suffix == '.csv']): - raise FileNotFoundError(f"File {file_path} is not a valid csv file") - wb = openpyxl.Workbook() - sheet = wb.active - with open(csv_file, newline="") as f: - _csv_file = f.read() - dialect = csv.Sniffer() - delimiter = dialect.sniff(_csv_file).delimiter - with open(csv_file, newline="") as file: - reader = csv.reader(file, delimiter=delimiter) - for row in reader: - sheet.append(row) - 
assert_that(selenium_generics.get_element_text(locators.parse_and_get(locator_path, selenium_generics))).contains( - sheet[cell].value) - - -@given(parsers.re("The '(?P.*)' cell of csv file '(?P.*)' does not contain any text"), - converters=dict(cell=data_manager.text_formatted)) -@when(parsers.re("The '(?P.*)' cell of csv file '(?P.*)' does not contain any text"), - converters=dict(cell=data_manager.text_formatted)) -@then(parsers.re("The '(?P.*)' cell of csv file '(?P.*)' does not contain any text"), - converters=dict(cell=data_manager.text_formatted)) -def csv_cell_text_is_empty(cell: str, file_path: str): - csv_file = Path(file_path).absolute() - if not all([csv_file.exists(), csv_file.is_file(), csv_file.suffix == '.csv']): - raise FileNotFoundError(f"File {file_path} is not a valid csv file") - wb = openpyxl.Workbook() - sheet = wb.active - with open(csv_file, newline="") as f: - _csv_file = f.read() - dialect = csv.Sniffer() - delimiter = dialect.sniff(_csv_file).delimiter - with open(csv_file, newline="") as file: - reader = csv.reader(file, delimiter=delimiter) - for row in reader: - sheet.append(row) - assert_that(sheet[cell].value).is_empty() - - -@given(parsers.re("Number of rows containing '(?P.*)' of csv file '(?P.*)' is '(?P.*)'"), - converters=dict(expected_text=data_manager.text_formatted, row_count=data_manager.text_formatted)) -@when(parsers.re("Number of rows containing '(?P.*)' of csv file '(?P.*)' is '(?P.*)'"), - converters=dict(expected_text=data_manager.text_formatted, row_count=data_manager.text_formatted)) -@then(parsers.re("Number of rows containing '(?P.*)' of csv file '(?P.*)' is '(?P.*)'"), - converters=dict(expected_text=data_manager.text_formatted, row_count=data_manager.text_formatted)) -def number_csv_rows_with_text_is_equal_to(expected_text: str, file_path: str, row_count: str): - csv_file = Path(file_path).absolute() - if not all([csv_file.exists(), csv_file.is_file(), csv_file.suffix == '.csv']): - raise FileNotFoundError(f"File {file_path} is not a valid csv file") - wb = openpyxl.Workbook() - sheet = wb.active - with open(csv_file, newline="") as f: - _csv_file = f.read() - dialect = csv.Sniffer() - delimiter = dialect.sniff(_csv_file).delimiter - with open(csv_file, newline="") as file: - reader = csv.reader(file, delimiter=delimiter) - for row in reader: - sheet.append(row) - - num_rows = 0 - for row in sheet.iter_rows(): - if any(cell.value == f"{expected_text}" for cell in row): - num_rows += 1 - assert_that(int(num_rows)).is_equal_to(int(row_count)) - - -@given(parsers.re("Number of total rows of csv file '(?P.*)' is '(?P.*)'"), - converters=dict(row_count=data_manager.text_formatted)) -@when(parsers.re("Number of total rows of csv file '(?P.*)' is '(?P.*)'"), - converters=dict(row_count=data_manager.text_formatted)) -@then(parsers.re("Number of total rows of csv file '(?P.*)' is '(?P.*)'"), - converters=dict(row_count=data_manager.text_formatted)) -def total_csv_rows_number_with_data_is_equal_to(file_path: str, row_count: str): - csv_file = Path(file_path).absolute() - if not all([csv_file.exists(), csv_file.is_file(), csv_file.suffix == '.csv']): - raise FileNotFoundError(f"File {file_path} is not a valid csv file") - wb = openpyxl.Workbook() - sheet = wb.active - with open(csv_file, newline="") as f: - _csv_file = f.read() - dialect = csv.Sniffer() - delimiter = dialect.sniff(_csv_file).delimiter - with open(csv_file, newline="") as file: - reader = csv.reader(file, delimiter=delimiter) - for row in reader: - sheet.append(row) - - num_rows = 
0 - for row in sheet.iter_rows(): - if any(cell.value is not None for cell in row): - num_rows += 1 - assert_that(int(num_rows)).is_equal_to(int(row_count)) - - -@given(parsers.re("I Write '(?P.+)' to '(?P.*)' cell of csv file '(?P.*)'"), - converters=dict(cell=data_manager.text_formatted, text=data_manager.text_formatted)) -@when(parsers.re("I Write '(?P.+)' to '(?P.*)' cell of csv file '(?P.*)'"), - converters=dict(cell=data_manager.text_formatted, text=data_manager.text_formatted)) -def write_text_to_csv_cell(text: str, cell: str, file_path: str): - csv_file = Path(file_path).absolute() - wb = openpyxl.Workbook() - sheet = wb.active - delimiter = None - - if csv_file.exists(): - with open(csv_file, newline="") as f: - _csv_file = f.read() - if _csv_file: - dialect = csv.Sniffer() - delimiter = dialect.sniff(_csv_file).delimiter - else: - with open(csv_file, "w", newline="") as f: - f.write("") # Creating the file as it does not exist - delimiter = delimiter if delimiter else ',' - - with open(csv_file, newline="") as f: - reader = csv.reader(f, delimiter=delimiter) - for row in reader: - sheet.append(row) - sheet[cell] = text - - with open(csv_file, 'w', newline="") as f: - writer = csv.writer(f, delimiter=delimiter) - for row in sheet.iter_rows(): - writer.writerow([cell.value for cell in row]) - - -@given(parsers.re("I create csv file '(?P.*)' and save on '(?P.+)'")) -@when(parsers.re("I create csv file '(?P.*)' and save on '(?P.+)'")) -def create_csv_file(file_name: str, file_path: str): - if Path(file_name).suffix != '.csv': - raise ValueError(f"Invalid file extension for {file_name}") - csv_file = (Path(file_path) / file_name).absolute() - - # Create an empty csv file - with open(csv_file, "w", newline="") as f: - f.write("") diff --git a/main/frontend/common/step_definitions/html_tables.py b/main/frontend/common/step_definitions/html_tables.py deleted file mode 100644 index d091d498..00000000 --- a/main/frontend/common/step_definitions/html_tables.py +++ /dev/null @@ -1,111 +0,0 @@ -import structlog -import re - -from pytest_bdd import parsers, then -from assertpy import assert_that -from main.frontend.common.helpers.selenium_generics import SeleniumGenerics -from main.frontend.common.utils.locator_parser import Locators -from main.utils.exceptions import DataTableException -from main.utils.gherkin_utils import data_table_horizontal_converter -from main.utils import data_manager - -logger = structlog.get_logger(__name__) - - -@then(parsers.re("I expect table '(?P.*)' headers ('(?P.*)' )?to match:(?P.*)", - flags=re.S, ), converters=dict(data_table=data_table_horizontal_converter),) -def verify_table_headers_match_exactly(selenium_generics: SeleniumGenerics, locators: Locators, locator_path, data_table, header_tag_path): - header_tag = locators.parse_and_get(header_tag_path, selenium_generics) if header_tag_path else "//th" - table_headers = selenium_generics.get_elements(f"{locators.parse_and_get(locator_path, selenium_generics)}{header_tag}") - expected_columns = data_table[list(data_table.keys())[0]] - columns_present = list() - for table_header in table_headers: - columns_present.append(table_header.text) - assert_that(columns_present).is_equal_to(expected_columns) - - -@then(parsers.re("I expect table '(?P.*)' headers ('(?P.*)' )?to contain:(?P.*)", - flags=re.S, ), converters=dict(data_table=data_table_horizontal_converter), ) -def verify_table_headers_contain_columns(selenium_generics: SeleniumGenerics, locators: Locators, locator_path, data_table, header_tag_path): - 
-    header_tag = locators.parse_and_get(header_tag_path, selenium_generics) if header_tag_path else "//th"
-    table_headers = selenium_generics.get_elements(f"{locators.parse_and_get(locator_path, selenium_generics)}{header_tag}")
-    expected_columns = data_table[list(data_table.keys())[0]]
-    columns_present = list()
-    for table_header in table_headers:
-        columns_present.append(table_header.text)
-    assert_that(columns_present).contains(*expected_columns)
-
-
-@then(parsers.re("I expect the column in table '(?P<locator_path>.*)' has the values:(?P<data_table>.*)",
-                 flags=re.S, ), converters=dict(data_table=data_table_horizontal_converter), )
-def verify_table_column_contain_values(selenium_generics: SeleniumGenerics, locators: Locators, locator_path, data_table):
-    table_locator = locators.parse_and_get(locator_path, selenium_generics)
-    table_header_locator = f"{table_locator}//th | {table_locator}//th//*"
-    table_headers = selenium_generics.get_elements(table_header_locator)
-    column_names = list(data_table.keys())
-    column_index_dict = dict()
-    td_index = 0
-    for table_header in table_headers:
-        if table_header.tag_name.lower() == 'th':
-            td_index += 1
-            if table_header.text in column_names:
-                column_index_dict[table_header.text] = td_index
-
-    for column in column_names:
-        for value in data_table[column]:
-            cell_locator = f"{table_locator}//tr//td[{column_index_dict[column]}][text()='{value}'] | " \
-                           f"{table_locator}//tr//td[{column_index_dict[column]}]//*[text()='{value}']"
-            assert len(selenium_generics.get_elements(cell_locator)) > 0
-
-
-@then(parsers.re("I expect that '(?P<row>.*)' row has the value '(?P<expected_text>.*)' in column '(?P<column>.*)' of table '(?P<locator_path>.*)'"),
-      converters=dict(expected_text=data_manager.text_formatted), )
-def verify_column_contain_value(selenium_generics: SeleniumGenerics, locators: Locators, locator_path, row, column, expected_text: str):
-    table_locator = locators.parse_and_get(locator_path, selenium_generics)
-    table_header_locator = f"{table_locator}//th | {table_locator}//th//*"
-    table_headers = selenium_generics.get_elements(table_header_locator)
-    td_index = 0
-    row_number = ''
-    row_number = [row_number+i for i in row if i.isdigit()][0]
-    for table_header in table_headers:
-        if table_header.tag_name.lower() == 'th':
-            td_index += 1
-            if table_header.text == column:
-                break
-
-    cell_locator = f"{table_locator}//tr[{row_number}]//td[{td_index}][text()='{expected_text}'] | " \
-                   f"{table_locator}//tr[{row_number}]//td[{td_index}]//*[text()='{expected_text}']"
-    assert len(selenium_generics.get_elements(cell_locator)) > 0
-
-
-@then(parsers.re("I expect that '(?P<row>.*)' row in table '(?P<locator_path>.*)' has the following values:(?P<data_table>.*)",
-                 flags=re.S, ), converters=dict(data_table=data_table_horizontal_converter), )
-def verify_table_row_contain_values(selenium_generics: SeleniumGenerics, locators: Locators, row, locator_path, data_table):
-    table_locator = locators.parse_and_get(locator_path, selenium_generics)
-    table_header_locator = f"{table_locator}//th | {table_locator}//th//*"
-    table_headers = selenium_generics.get_elements(table_header_locator)
-    column_names = list(data_table.keys())
-    column_index_dict = dict()
-    td_index = 0
-    row_number = ''
-    row_number = [row_number+i for i in row if i.isdigit()][0]
-    for table_header in table_headers:
-        if table_header.tag_name.lower() == 'th':
-            td_index += 1
-            if table_header.text in column_names:
-                column_index_dict[table_header.text] = td_index
-
-    if len(data_table[column_names[0]]) > 1:
-        raise DataTableException(f"This step can only validate data in one specific row. Data Table from BDD has {len(data_table[column_names[0]]) } rows.")
-
-    for column in column_names:
-        cell_locator = f"{table_locator}//tr[{row_number}]//td[{column_index_dict[column]}][text()='{data_table[column][0]}'] | " \
-                       f"{table_locator}//tr[{row_number}]//td[{column_index_dict[column]}]//*[text()='{data_table[column][0]}']"
-        assert len(selenium_generics.get_elements(cell_locator)) > 0
-
-
-@then(parsers.re("I expect that table '(?P<locator_path>.*)' has '(?P<value>.*)' rows"),
-      converters=dict(value=data_manager.text_formatted), )
-def verify_column_contain_value(selenium_generics: SeleniumGenerics, locators: Locators, locator_path, value: int):
-    table_row_locator = f"{locators.parse_and_get(locator_path, selenium_generics)}//tr"
-    assert_that(len(selenium_generics.get_elements(table_row_locator))).is_equal_to(int(value))
diff --git a/main/frontend/common/step_definitions/visual_comparison.py b/main/frontend/common/step_definitions/visual_comparison.py
index 29fe399b..2e0e3edd 100644
--- a/main/frontend/common/step_definitions/visual_comparison.py
+++ b/main/frontend/common/step_definitions/visual_comparison.py
@@ -15,19 +15,7 @@
 @then(parsers.re("(With soft assertion '(?P<soft_assert>.*)' )?I verify images '(?P<name>.*)' have no visual regression"))
 def image_visual_is_valid(soft_assert: str, name):
-    """Step Definition to verify if two images are same (Standalone Visual Testing)
-    Both Base Image and Test Image are saved in respective directories as defined by boilerplate
-    framework, i.e. test_data/visualtesting/base and test_data/visualtesting/test directories, with the
-    same name (argument name passed in feature file).
-
-    Args:
-        name: str - Image Name to perform visual regression.
-
-    Asserts:
-        If Base and Test Images are same. Else raises AssertionError.
-
-    """
     if soft_assert is not None and soft_assert.lower() == 'true':
         with check:
             assert are_two_images_look_same(name)
@@ -38,21 +26,6 @@ def image_visual_is_valid(soft_assert: str, name):
 @then(parsers.re("(With soft assertion '(?P<soft_assert>.*)' )?I verify that element '(?P<locator_path>.*)' is not visually regressed:(?P<data_table>.*)",
                  flags=re.S, ), converters=dict(data_table=data_table_horizontal_converter), )
 def element_visual_is_valid(selenium_generics: SeleniumGenerics, locators: Locators, soft_assert: str, locator_path: str, data_table: dict):
-    """Step Definition to verify if a particular webelement is not visually regressed.
-
-    Base Image should be saved in output/screenshots/base directory (name as provided in
-    the data table). Test Function would take screenshot of corresponding webelement as
-    provided in locator path, and asserts if it is same as base image.
-
-    Args:
-        selenium_generics - SeleniumGenerics instance
-        locators - Locators instance
-        locator_path - as provided in feature file step.
-        data_table - retrieved from feature file - scenario - step.
-
-    Asserts:
-        If the webelement captured during test is same as the base image provided.
-    """
     locator = locators.parse_and_get(locator_path, selenium_generics)
     if soft_assert is not None and soft_assert.lower() == 'true':
         with check:
diff --git a/main/frontend/common/utils/visual_utils.py b/main/frontend/common/utils/visual_utils.py
index 36628609..48cebe04 100644
--- a/main/frontend/common/utils/visual_utils.py
+++ b/main/frontend/common/utils/visual_utils.py
@@ -1,5 +1,4 @@
-"""Utility to compare images using Pillow
-"""
+
 
 from collections import namedtuple
 from pathlib import Path
@@ -15,29 +14,12 @@ class ImageSizeMismatchError(Exception):
 
 def read_image(img_pth: Path, color_mode: str = "RGB") -> Image:
-    """Read Image File
-
-    Args:
-        img_pth: Path - Absolute Path of Image File to be read
-        color_mode: str - Either "L" or "RGB". Color Conversion mode for diffing.
-
-    Returns:
-        Image Object
-    """
     with Image.open(img_pth) as f:
         img = f.convert(color_mode)
     return img
 
 
 def file_paths(image_name_with_ext: str):
-    """Function to return a collection of file paths - base, test and diff
-
-    Args:
-        image_name_with_ext: str - Image File Name to test. Not the absolute path.
-
-    Returns:
-        screenshot_paths - namedtuple object containing base, test and diff image absolute paths.
-    """
     screenshot_paths = namedtuple("screenshot_paths", "base test diff")
     return screenshot_paths(
         Path(Path.cwd() / "test_data" / "visualtesting" / "base" / f"{image_name_with_ext}").resolve(),
@@ -47,29 +29,11 @@ def file_paths(image_name_with_ext: str):
 
 def raise_for_missing_file(file_path: Path, exc_msg: str):
-    """Checks if a path provided is a file
-
-    Args:
-        exc_msg: error message
-        file_path: Path
-
-    Raises:
-        FileNotFoundError
-    """
     if not file_path.is_file():
         raise FileNotFoundError(exc_msg)
 
 
 def raise_for_missing_images(bse_img_pth: Path, tst_img_pth: Path):
-    """Raise Exception for files not found at provided path
-
-    Args:
-        bse_img_pth: Path - Absolute Path of Base Image
-        tst_img_pth: Path - Absolute Path of Test Image
-
-    Raises:
-        FileNotFoundError
-    """
     raise_for_missing_file(
         bse_img_pth, f"No Base Image found at location {bse_img_pth}"
     )
@@ -79,15 +43,6 @@ def raise_for_missing_images(bse_img_pth: Path, tst_img_pth: Path):
 
 def raise_for_format_mismatch(bse_img: Image, tst_img: Image):
-    """Raise Exception for files not found at provided path
-
-    Args:
-        bse_img: Image
-        tst_img: Image
-
-    Raises:
-        FileFormatMismatchError - In case base image and test image have different file formats like PNG vs JPEG, etc.
-    """
     if (bse_img_frmt := bse_img.format) != (tst_img_frmt := tst_img.format):
         raise FileFormatMismatchError(
             f"Cannot compare images with different format."
@@ -96,15 +51,6 @@ def raise_for_format_mismatch(bse_img: Image, tst_img: Image):
 
 def raise_for_size_mismatch(bse_img: Image, tst_img: Image):
-    """Raise Exception for files not found at provided path
-
-    Args:
-        bse_img: Image
-        tst_img: Image
-
-    Raises:
-        ImageSizeMismatchError - In case base image and test image have different size/resolution.
-    """
     if (bse_img_size := bse_img.size) != (tst_img_size := tst_img.size):
         raise ImageSizeMismatchError(
             f"Cannot compare images with different sizes. "
@@ -113,16 +59,6 @@ def raise_for_size_mismatch(bse_img: Image, tst_img: Image):
 
 def _diff_img(bse_img: Image, tst_img: Image) -> Image:
-    """Function to return diff image based on comparing base and test images.
-
-    Args:
-        bse_img: Image
-        tst_img: Image
-
-    Returns: Image | None
-        Image - If there is a difference
-        None - If base and test images are same.
-    """
     diff_img = ImageChops.difference(bse_img, tst_img)
 
     # parameter to know if there is a difference - getbbox() is None implies no change in base vs test.
@@ -134,17 +70,6 @@ def _diff_img(bse_img: Image, tst_img: Image) -> Image:
 
 def are_images_same(image_name_with_ext: str, color_mode: str = "RGB") -> bool:
-    """Compares Images using Pillow library and returns boolean value indicating if the images are same.
-
-    Args:
-        image_name_with_ext: str - File Name of Image to Compare
-        color_mode: str - Could be either of RGB or L (B&W). Defaulted to RGB.
-                    Color Conversion mode for comparison
-
-    Returns:
-        True - if base image and test image has no difference in them
-        False - if base image and test image has difference.
-    """
     base_img_pth, tst_img_pth, diff_img_pth = file_paths(image_name_with_ext)
     raise_for_missing_images(base_img_pth, tst_img_pth)
     # Read Base Image & Test Image.
diff --git a/main/plugin.py b/main/plugin.py
index 814930f6..c894a30c 100644
--- a/main/plugin.py
+++ b/main/plugin.py
@@ -82,17 +82,14 @@ def pytest_configure(config: pytest_config.Config) -> None:
     if os.path.exists("html_env_vars.pickle"):
         os.remove("html_env_vars.pickle")
 
-    # pre-load env. variables in order to meet TestRail requirements
     load_env_from_local_dotenv_file()
 
-    # delete temporary screenshots directory if exist
     if Path(f"{os.getcwd()}/{TEMP_SCREENSHOTS}").exists():
         try:
             shutil.rmtree(Path(f"{os.getcwd()}/{TEMP_SCREENSHOTS}"))
         finally:
             ...
 
-    # HTML Report: Report name
     if config.option.htmlpath:
         not_allowed_filename_characters = ["/", "\\"]
         report_path: Path = Path(config.option.htmlpath)
@@ -118,44 +115,7 @@ def pytest_configure(config: pytest_config.Config) -> None:
 # CLI params are added (we need this hook for every new parameters that will need to be added)
 def pytest_addoption(parser: pytest_argparsing.Parser) -> None:
-    """Register argparse-style options and ini-style config values,
-    called once at the beginning of a test run.
-    .. note::
-
-        This function should be implemented only in plugins or ``conftest.py``
-        files situated at the tests root directoy due to how pytest
-        :ref:`discovers plugins during startup <pluginorder>`.
-
-    :param _pytest.config.argparsing.Parser parser:
-        To add command line options, call
-        :py:func:`parser.addoption(...) <_pytest.config.argparsing.Parser.addoption>`.
-        To add ini-file values call :py:func:`parser.addini(...)
-        <_pytest.config.argparsing.Parser.addini>`.
-
-    :param _pytest.config.PytestPluginManager pluginmanager:
-        pytest plugin manager, which can be used to install :py:func:`hookspec`'s
-        or :py:func:`hookimpl`'s and allow one plugin to call another plugin's hooks
-        to change how command line options are added.
-
-    Options can later be accessed through the
-    :py:class:`config <_pytest.config.Config>` object, respectively:
-
-    - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to
-      retrieve the value of a command line option.
-
-    - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve
-      a value read from an ini-style file.
-
-    The config object is passed around on many internal objects via the ``.config``
-    attribute or can be retrieved as the ``pytestconfig`` fixture.
-
-    .. note::
-        This hook is incompatible with ``hookwrapper=True``.
-
-    Reference for docstring:
-    https://docs.pytest.org/en/6.2.x/_modules/_pytest/hookspec.html#pytest_addoption
-    """
     parser.addoption(
         "--language",
         action="store",
@@ -191,16 +151,7 @@ def pytest_addoption(parser: pytest_argparsing.Parser) -> None:
     )
 
 
-# Initialize output directories & override selenium fixture based on the platform
 def pytest_sessionstart(session: pytest.Session) -> None:
-    """Called after the ``Session`` object has been created and before performing collection
-    and entering the run test loop.
-
-    :param pytest.Session session: The pytest session object.
-
-    Reference for docstring:
-    https://docs.pytest.org/en/6.2.x/_modules/_pytest/hookspec.html#pytest_sessionstart
-    """
     initialize_output_dirs()
 
     # HTML Report: Environment section: removing unnecessary data
@@ -221,11 +172,6 @@ def pytest_sessionstart(session: pytest.Session) -> None:
 
 @pytest.hookimpl(tryfirst=True)
 def pytest_sessionfinish(session, exitstatus) -> None:
-    """Called after whole test run finished, right before returning the exit status to the system.
-
-    :param pytest.Session session: The pytest session object.
-    :param int exitstatus: The status which pytest will return to the system.
-    """
     if exitstatus == 0 or exitstatus == 1 or exitstatus == 6:
 
         command_generate_allure_report = [
@@ -298,19 +244,9 @@ def pytest_sessionfinish(session, exitstatus) -> None:
     session.config._metadata = ordered_metadata
 
 
-# Collect all tags / markers for the tests
 def pytest_collection_modifyitems(
         config: pytest_config.Config, items: List[pytest.Item]
 ) -> None:
-    """Called after collection has been performed. May filter or re-order
-    the items in-place.
-
-    :param _pytest.config.Config config: The pytest config object.
-    :param List[pytest.Item] items: List of item objects.
-
-    Reference for docstring:
-    https://docs.pytest.org/en/6.2.x/_modules/_pytest/hookspec.html#pytest_collection_modifyitems
-    """
     for item in items:
         if item.cls:
             for marker in item.cls.pytestmark:
@@ -336,19 +272,12 @@ def pytest_collection_modifyitems(
     config.pluginmanager.import_plugin("main.frontend.frontend_plugin")
 
 
-"2. API & UI - common implementation"
-
-
-# Define the environment variables fixture
-# This is API & UI specific implementation
 @pytest.fixture(scope='session')
 def env_variables(request):
     env_vars_file_path = f"{request.session.config.known_args_namespace.confcutdir}/configs/.local.env"
     return EnvVariables(env_vars_file_path)
 
 
-# Define the base url fixture
-# This is API & UI specific implementation
 @pytest.fixture(scope="session")
 def base_url(request, env_variables) -> str:
     # get base url value from command line
@@ -358,16 +287,12 @@ def base_url(request, env_variables) -> str:
     )
 
 
-# Define the language fixture
-# This is API & UI specific implementation
 @pytest.fixture(scope="session")
 def language(request):
     language_value = request.config.getoption("language")
     return language_value if language_value else None
 
 
-# Define the project directory fixture
-# This is API & UI specific implementation
 @pytest.fixture(scope="session", autouse=True)
 def project_dir(request, pytestconfig) -> str:
     path_str = request.session.config.known_args_namespace.confcutdir
@@ -375,8 +300,6 @@ def project_dir(request, pytestconfig) -> str:
     return path_str if path_str else str(pytestconfig.rootdir)
 
 
-# Define proxy-url as a fixture
-# This is API & UI specific implementation
 @pytest.fixture(scope="session")
 def proxy_url(request):
     proxy_url_value = request.config.getoption("--proxy-url")
@@ -493,8 +416,6 @@ def pytest_runtest_makereport(item, call):
     rep.test_name = [test_name.args[0] for test_name in item.iter_markers() if test_name.name == 'test_name']
 
 
-# Define an extra column for HTML report: Section
-# Set the order of the columns
 def pytest_html_results_table_header(cells):
     if cells:
         if not bp_storage.is_api_testing():
diff --git a/main/setup/setup_tests/test_installation_check.py b/main/setup/setup_tests/test_installation_check.py
index cba6fe8f..b601b372 100644
--- a/main/setup/setup_tests/test_installation_check.py
+++ b/main/setup/setup_tests/test_installation_check.py
@@ -68,13 +68,10 @@ def test_check_step_definitions_folder():
     assert_that(os.path.isdir("./main/frontend/common/step_definitions")).is_true()
     assert_that(os.path.isfile("./main/frontend/common/step_definitions/browser_navigation.py")).is_true()
     assert_that(os.path.isfile("./main/frontend/common/step_definitions/click_touch_and_keyboard_actions.py")).is_true()
-    assert_that(os.path.isfile("./main/frontend/common/step_definitions/date_time.py")).is_true()
     assert_that(os.path.isfile("./main/frontend/common/step_definitions/dropdowns.py")).is_true()
     assert_that(os.path.isfile("./main/frontend/common/step_definitions/email.py")).is_true()
     assert_that(os.path.isfile("./main/frontend/common/step_definitions/attribute_assertion.py")).is_true()
     assert_that(os.path.isfile("./main/frontend/common/step_definitions/environment_variables.py")).is_true()
-    assert_that(os.path.isfile("./main/frontend/common/step_definitions/excel_and_csv.py")).is_true()
-    assert_that(os.path.isfile("./main/frontend/common/step_definitions/html_tables.py")).is_true()
     assert_that(os.path.isfile("./main/frontend/common/step_definitions/mobile_device_actions.py")).is_true()
     assert_that(os.path.isfile("./main/frontend/common/step_definitions/swipe_drag_and_drop.py")).is_true()
     assert_that(os.path.isfile("./main/frontend/common/step_definitions/text_assertion_editing.py")).is_true()