diff --git a/.github/workflows/deploy-orchestrator.yml b/.github/workflows/deploy-orchestrator.yml
index becd17f0..75676e43 100644
--- a/.github/workflows/deploy-orchestrator.yml
+++ b/.github/workflows/deploy-orchestrator.yml
@@ -64,9 +64,7 @@ on:
env:
AZURE_DEV_COLLECT_TELEMETRY: ${{ vars.AZURE_DEV_COLLECT_TELEMETRY }}
-permissions:
- contents: read
- actions: read
+
jobs:
docker-build:
uses: ./.github/workflows/job-docker-build.yml
diff --git a/.github/workflows/deploy-linux.yml b/.github/workflows/deploy-v2.yml
similarity index 91%
rename from .github/workflows/deploy-linux.yml
rename to .github/workflows/deploy-v2.yml
index f799bc22..8fbebdad 100644
--- a/.github/workflows/deploy-linux.yml
+++ b/.github/workflows/deploy-v2.yml
@@ -1,4 +1,4 @@
-name: Deploy-Test-Cleanup (v2) Linux
+name: Deploy-Test-Cleanup (v2)
on:
push:
branches:
@@ -19,9 +19,17 @@ on:
- 'src/ContentProcessorWeb/config-overrides.js'
- 'src/ContentProcessorWeb/nginx-custom.conf'
- 'src/ContentProcessorWeb/env.sh'
- - '.github/workflows/deploy-linux.yml'
+ - '.github/workflows/deploy-v2.yml'
workflow_dispatch:
inputs:
+ runner_os:
+ description: 'Deployment Environment'
+ required: false
+ type: choice
+ options:
+ - 'codespace'
+ - 'Local'
+ default: 'codespace'
azure_location:
description: 'Azure Location For Deployment'
required: false
@@ -95,11 +103,13 @@ on:
permissions:
contents: read
actions: read
+ id-token: write
jobs:
validate-inputs:
runs-on: ubuntu-latest
outputs:
validation_passed: ${{ steps.validate.outputs.passed }}
+ runner_os: ${{ steps.validate.outputs.runner_os }}
azure_location: ${{ steps.validate.outputs.azure_location }}
resource_group_name: ${{ steps.validate.outputs.resource_group_name }}
waf_enabled: ${{ steps.validate.outputs.waf_enabled }}
@@ -125,9 +135,24 @@ jobs:
INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ github.event.inputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }}
INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: ${{ github.event.inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }}
INPUT_EXISTING_WEBAPP_URL: ${{ github.event.inputs.existing_webapp_url }}
+ INPUT_RUNNER_OS: ${{ github.event.inputs.runner_os }}
run: |
echo "🔍 Validating workflow input parameters..."
VALIDATION_FAILED=false
+
+ # Resolve runner_os from Deployment Environment selection
+ DEPLOY_ENV="${INPUT_RUNNER_OS:-codespace}"
+ if [[ "$DEPLOY_ENV" == "codespace" ]]; then
+ RUNNER_OS="ubuntu-latest"
+ echo "✅ Deployment Environment: 'codespace' → runner: ubuntu-latest"
+ elif [[ "$DEPLOY_ENV" == "Local" ]]; then
+ RUNNER_OS="windows-latest"
+ echo "✅ Deployment Environment: 'Local' → runner: windows-latest"
+ else
+ echo "❌ ERROR: Deployment Environment must be 'codespace' or 'Local', got: '$DEPLOY_ENV'"
+ VALIDATION_FAILED=true
+ RUNNER_OS="ubuntu-latest"
+ fi
# Validate azure_location (Azure region format)
LOCATION="${INPUT_AZURE_LOCATION:-australiaeast}"
@@ -251,6 +276,7 @@ jobs:
# Output validated values
echo "passed=true" >> $GITHUB_OUTPUT
+ echo "runner_os=$RUNNER_OS" >> $GITHUB_OUTPUT
echo "azure_location=$LOCATION" >> $GITHUB_OUTPUT
echo "resource_group_name=$INPUT_RESOURCE_GROUP_NAME" >> $GITHUB_OUTPUT
echo "waf_enabled=$WAF_ENABLED" >> $GITHUB_OUTPUT
@@ -267,7 +293,7 @@ jobs:
if: needs.validate-inputs.outputs.validation_passed == 'true'
uses: ./.github/workflows/deploy-orchestrator.yml
with:
- runner_os: ubuntu-latest
+ runner_os: ${{ needs.validate-inputs.outputs.runner_os || 'ubuntu-latest' }}
azure_location: ${{ needs.validate-inputs.outputs.azure_location || 'australiaeast' }}
resource_group_name: ${{ needs.validate-inputs.outputs.resource_group_name || '' }}
waf_enabled: ${{ needs.validate-inputs.outputs.waf_enabled == 'true' }}
diff --git a/.github/workflows/deploy-windows.yml b/.github/workflows/deploy-windows.yml
index b3e51099..256a4bd8 100644
--- a/.github/workflows/deploy-windows.yml
+++ b/.github/workflows/deploy-windows.yml
@@ -78,6 +78,7 @@ on:
permissions:
contents: read
actions: read
+ id-token: write
jobs:
validate-inputs:
runs-on: ubuntu-latest
diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
index 89b23576..92b76912 100644
--- a/.github/workflows/deploy.yml
+++ b/.github/workflows/deploy.yml
@@ -17,9 +17,11 @@ on:
permissions:
contents: read
actions: read
+ id-token: write
jobs:
deploy:
runs-on: ubuntu-latest
+ environment: production
outputs:
RESOURCE_GROUP_NAME: ${{ steps.generate_rg_name.outputs.RESOURCE_GROUP_NAME }}
CONTAINER_WEB_APPURL: ${{ steps.get_output.outputs.CONTAINER_WEB_APPURL }}
@@ -34,16 +36,15 @@ jobs:
uses: actions/checkout@v5
- name: Login to Azure
- run: |
- az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
- az account set --subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+ uses: azure/login@v2
+ with:
+ client-id: ${{ secrets.AZURE_CLIENT_ID }}
+ tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+ subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: Run Quota Check
id: quota-check
env:
- AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
- AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
- AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
GPT_MIN_CAPACITY: "100"
AZURE_REGIONS: ${{ vars.AZURE_REGIONS }}
@@ -268,6 +269,7 @@ jobs:
if: always()
needs: [deploy, e2e-test]
runs-on: ubuntu-latest
+ environment: production
env:
RESOURCE_GROUP_NAME: ${{ needs.deploy.outputs.RESOURCE_GROUP_NAME }}
AI_SERVICES_NAME: ${{ needs.deploy.outputs.AI_SERVICES_NAME }}
@@ -276,9 +278,11 @@ jobs:
ENVIRONMENT_NAME: ${{ needs.deploy.outputs.ENVIRONMENT_NAME }}
steps:
- name: Login to Azure
- run: |
- az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
- az account set --subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+ uses: azure/login@v2
+ with:
+ client-id: ${{ secrets.AZURE_CLIENT_ID }}
+ tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+ subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: Delete Bicep Deployment
if: always()
diff --git a/.github/workflows/job-cleanup-deployment.yml b/.github/workflows/job-cleanup-deployment.yml
index 0467b9e0..e2a2d74e 100644
--- a/.github/workflows/job-cleanup-deployment.yml
+++ b/.github/workflows/job-cleanup-deployment.yml
@@ -40,12 +40,11 @@ on:
description: 'Docker Image Tag'
required: true
type: string
-permissions:
- contents: read
- actions: read
+
jobs:
cleanup-deployment:
runs-on: ${{ inputs.runner_os }}
+ environment: production
continue-on-error: true
env:
RESOURCE_GROUP_NAME: ${{ inputs.RESOURCE_GROUP_NAME }}
@@ -201,10 +200,11 @@ jobs:
echo "✅ All input parameters validated successfully!"
- name: Login to Azure
- shell: bash
- run: |
- az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
- az account set --subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+ uses: azure/login@v2
+ with:
+ client-id: ${{ secrets.AZURE_CLIENT_ID }}
+ tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+ subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: Delete Resource Group (Optimized Cleanup)
id: delete_rg
diff --git a/.github/workflows/job-deploy-linux.yml b/.github/workflows/job-deploy-linux.yml
index 08c38ba6..002baa7b 100644
--- a/.github/workflows/job-deploy-linux.yml
+++ b/.github/workflows/job-deploy-linux.yml
@@ -38,12 +38,11 @@ on:
CONTAINER_WEB_APPURL:
description: "Container Web App URL"
value: ${{ jobs.deploy-linux.outputs.CONTAINER_WEB_APPURL }}
-permissions:
- contents: read
- actions: read
+
jobs:
deploy-linux:
runs-on: ubuntu-latest
+ environment: production
env:
AZURE_DEV_COLLECT_TELEMETRY: ${{ vars.AZURE_DEV_COLLECT_TELEMETRY }}
outputs:
@@ -200,13 +199,18 @@ jobs:
- name: Install azd
uses: Azure/setup-azd@v2
+ - name: Login to Azure
+ uses: azure/login@v2
+ with:
+ client-id: ${{ secrets.AZURE_CLIENT_ID }}
+ tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+ subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
- name: Login to AZD
id: login-azure
shell: bash
run: |
- az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
- az account set --subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- azd auth login --client-id ${{ secrets.AZURE_CLIENT_ID }} --client-secret ${{ secrets.AZURE_CLIENT_SECRET }} --tenant-id ${{ secrets.AZURE_TENANT_ID }}
+ azd auth login --client-id ${{ secrets.AZURE_CLIENT_ID }} --federated-credential-provider "github" --tenant-id ${{ secrets.AZURE_TENANT_ID }}
- name: Deploy using azd up and extract values (Linux)
id: get_output_linux
diff --git a/.github/workflows/job-deploy-windows.yml b/.github/workflows/job-deploy-windows.yml
index 7ebc581f..c33b8c01 100644
--- a/.github/workflows/job-deploy-windows.yml
+++ b/.github/workflows/job-deploy-windows.yml
@@ -38,12 +38,11 @@ on:
CONTAINER_WEB_APPURL:
description: "Container Web App URL"
value: ${{ jobs.deploy-windows.outputs.CONTAINER_WEB_APPURL }}
-permissions:
- contents: read
- actions: read
+
jobs:
deploy-windows:
runs-on: windows-latest
+ environment: production
env:
AZURE_DEV_COLLECT_TELEMETRY: ${{ vars.AZURE_DEV_COLLECT_TELEMETRY }}
outputs:
@@ -200,13 +199,18 @@ jobs:
- name: Setup Azure Developer CLI (Windows)
uses: Azure/setup-azd@v2
+ - name: Login to Azure
+ uses: azure/login@v2
+ with:
+ client-id: ${{ secrets.AZURE_CLIENT_ID }}
+ tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+ subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
- name: Login to AZD
id: login-azure
shell: bash
run: |
- az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
- az account set --subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- azd auth login --client-id ${{ secrets.AZURE_CLIENT_ID }} --client-secret ${{ secrets.AZURE_CLIENT_SECRET }} --tenant-id ${{ secrets.AZURE_TENANT_ID }}
+ azd auth login --client-id ${{ secrets.AZURE_CLIENT_ID }} --federated-credential-provider "github" --tenant-id ${{ secrets.AZURE_TENANT_ID }}
- name: Deploy using azd up and extract values (Windows)
id: get_output_windows
diff --git a/.github/workflows/job-deploy.yml b/.github/workflows/job-deploy.yml
index 90bcf5db..8459acea 100644
--- a/.github/workflows/job-deploy.yml
+++ b/.github/workflows/job-deploy.yml
@@ -98,14 +98,13 @@ env:
RUN_E2E_TESTS: ${{ inputs.trigger_type == 'workflow_dispatch' && (inputs.run_e2e_tests || 'GoldenPath-Testing') || 'GoldenPath-Testing' }}
BUILD_DOCKER_IMAGE: ${{ inputs.trigger_type == 'workflow_dispatch' && (inputs.build_docker_image || false) || false }}
RG_TAGS: ${{ vars.RG_TAGS }}
-permissions:
- contents: read
- actions: read
+
jobs:
azure-setup:
name: Azure Setup
if: inputs.trigger_type != 'workflow_dispatch' || inputs.existing_webapp_url == '' || inputs.existing_webapp_url == null
runs-on: ubuntu-latest
+ environment: production
outputs:
RESOURCE_GROUP_NAME: ${{ steps.check_create_rg.outputs.RESOURCE_GROUP_NAME }}
ENV_NAME: ${{ steps.generate_env_name.outputs.ENV_NAME }}
@@ -318,17 +317,15 @@ jobs:
uses: actions/checkout@v4
- name: Login to Azure
- shell: bash
- run: |
- az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
- az account set --subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+ uses: azure/login@v2
+ with:
+ client-id: ${{ secrets.AZURE_CLIENT_ID }}
+ tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+ subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: Run Quota Check
id: quota-check
env:
- AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
- AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
- AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
GPT_MIN_CAPACITY: ${{ env.GPT_MIN_CAPACITY }}
AZURE_REGIONS: ${{ vars.AZURE_REGIONS }}
diff --git a/.github/workflows/job-docker-build.yml b/.github/workflows/job-docker-build.yml
index 968f0d94..152c90c7 100644
--- a/.github/workflows/job-docker-build.yml
+++ b/.github/workflows/job-docker-build.yml
@@ -19,13 +19,12 @@ on:
env:
BRANCH_NAME: ${{ github.event.workflow_run.head_branch || github.head_ref || github.ref_name }}
-permissions:
- contents: read
- actions: read
+
jobs:
docker-build:
if: inputs.trigger_type == 'workflow_dispatch' && inputs.build_docker_image == true
runs-on: ubuntu-latest
+ environment: production
outputs:
IMAGE_TAG: ${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}
steps:
@@ -49,12 +48,15 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- - name: Log in to Azure Container Registry
- uses: azure/docker-login@v2
+ - name: Log in to Azure
+ uses: azure/login@v2
with:
- login-server: ${{ secrets.ACR_TEST_LOGIN_SERVER }}
- username: ${{ secrets.ACR_TEST_USERNAME }}
- password: ${{ secrets.ACR_TEST_PASSWORD }}
+ client-id: ${{ secrets.AZURE_CLIENT_ID }}
+ tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+ subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+ - name: Log in to Azure Container Registry
+ run: az acr login --name ${{ secrets.ACR_TEST_LOGIN_SERVER }}
- name: Build and Push ContentProcessor Docker image
uses: docker/build-push-action@v6
diff --git a/.github/workflows/test-automation-v2.yml b/.github/workflows/test-automation-v2.yml
index 4ec41a0b..f95ba1c9 100644
--- a/.github/workflows/test-automation-v2.yml
+++ b/.github/workflows/test-automation-v2.yml
@@ -24,12 +24,11 @@ env:
url: ${{ inputs.CP_WEB_URL }}
accelerator_name: "Content Processing"
test_suite: ${{ inputs.TEST_SUITE }}
-permissions:
- contents: read
- actions: read
+
jobs:
test:
runs-on: ubuntu-latest
+ environment: production
outputs:
TEST_SUCCESS: ${{ steps.test1.outcome == 'success' || steps.test2.outcome == 'success' || steps.test3.outcome == 'success' }}
TEST_REPORT_URL: ${{ steps.upload_report.outputs.artifact-url }}
@@ -43,9 +42,11 @@ jobs:
python-version: '3.13'
- name: Login to Azure
- run: |
- az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
- az account set --subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+ uses: azure/login@v2
+ with:
+ client-id: ${{ secrets.AZURE_CLIENT_ID }}
+ tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+ subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: Install dependencies
run: |
@@ -93,9 +94,9 @@ jobs:
id: test1
run: |
if [ "${{ env.test_suite }}" == "GoldenPath-Testing" ]; then
- xvfb-run pytest -m gp --headed --html=report/report.html --self-contained-html
+ xvfb-run pytest -m gp --html=report/report.html --self-contained-html
else
- xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ xvfb-run pytest --html=report/report.html --self-contained-html
fi
working-directory: tests/e2e-test
continue-on-error: true
@@ -110,9 +111,9 @@ jobs:
if: ${{ steps.test1.outcome == 'failure' }}
run: |
if [ "${{ env.test_suite }}" == "GoldenPath-Testing" ]; then
- xvfb-run pytest -m gp --headed --html=report/report.html --self-contained-html
+ xvfb-run pytest -m gp --html=report/report.html --self-contained-html
else
- xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ xvfb-run pytest --html=report/report.html --self-contained-html
fi
working-directory: tests/e2e-test
continue-on-error: true
@@ -127,9 +128,9 @@ jobs:
if: ${{ steps.test2.outcome == 'failure' }}
run: |
if [ "${{ env.test_suite }}" == "GoldenPath-Testing" ]; then
- xvfb-run pytest -m gp --headed --html=report/report.html --self-contained-html
+ xvfb-run pytest -m gp --html=report/report.html --self-contained-html
else
- xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ xvfb-run pytest --html=report/report.html --self-contained-html
fi
working-directory: tests/e2e-test
@@ -139,7 +140,10 @@ jobs:
if: ${{ !cancelled() }}
with:
name: test-report
- path: tests/e2e-test/report/*
+ path: |
+ tests/e2e-test/report/*
+ tests/e2e-test/tests/screenshots/*
+ tests/e2e-test/logs/*
- name: Generate E2E Test Summary
if: always()
diff --git a/.github/workflows/test-automation.yml b/.github/workflows/test-automation.yml
index 1112a225..989f1378 100644
--- a/.github/workflows/test-automation.yml
+++ b/.github/workflows/test-automation.yml
@@ -14,14 +14,12 @@ env:
url: ${{ inputs.CP_WEB_URL }}
CP_RG: ${{ inputs.CP_RG }}
accelerator_name: "Content Processing"
-permissions:
- contents: read
- actions: read
jobs:
test:
runs-on: ubuntu-latest
+ environment: production
steps:
- name: Checkout repository
uses: actions/checkout@v5
@@ -32,9 +30,11 @@ jobs:
python-version: '3.12'
- name: Login to Azure
- run: |
- az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
- az account set --subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+ uses: azure/login@v2
+ with:
+ client-id: ${{ secrets.AZURE_CLIENT_ID }}
+ tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+ subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: Install dependencies
run: |
diff --git a/infra/scripts/checkquota.sh b/infra/scripts/checkquota.sh
index f88c6300..a85b0db9 100644
--- a/infra/scripts/checkquota.sh
+++ b/infra/scripts/checkquota.sh
@@ -5,14 +5,11 @@ IFS=', ' read -ra REGIONS <<< "$AZURE_REGIONS"
SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID}"
GPT_MIN_CAPACITY="${GPT_MIN_CAPACITY}"
-AZURE_CLIENT_ID="${AZURE_CLIENT_ID}"
-AZURE_TENANT_ID="${AZURE_TENANT_ID}"
-AZURE_CLIENT_SECRET="${AZURE_CLIENT_SECRET}"
-
-# Authenticate using Managed Identity
-echo "Authentication using Managed Identity..."
-if ! az login --service-principal -u "$AZURE_CLIENT_ID" -p "$AZURE_CLIENT_SECRET" --tenant "$AZURE_TENANT_ID"; then
- echo "❌ Error: Failed to login using Managed Identity."
+
+# Verify Azure CLI is already authenticated (via OIDC in the workflow)
+echo "Verifying Azure CLI authentication..."
+if ! az account show > /dev/null 2>&1; then
+ echo "❌ Error: Azure CLI is not authenticated. Please log in using 'az login'"
exit 1
fi
diff --git a/tests/e2e-test/.gitignore b/tests/e2e-test/.gitignore
new file mode 100644
index 00000000..d59c7155
--- /dev/null
+++ b/tests/e2e-test/.gitignore
@@ -0,0 +1,170 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+microsoft/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+.idea/
+archive/
+report/
+screenshots/
+report.html
+assets/
+.vscode/
+
diff --git a/tests/e2e-test/base/__init__.py b/tests/e2e-test/base/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/e2e-test/base/base.py b/tests/e2e-test/base/base.py
new file mode 100644
index 00000000..648346be
--- /dev/null
+++ b/tests/e2e-test/base/base.py
@@ -0,0 +1,38 @@
+"""
+Base page module providing common functionality for all page objects.
+"""
+
+
+class BasePage:
+ """Base class for all page objects with common methods."""
+
+ def __init__(self, page):
+ """
+ Initialize the BasePage with a Playwright page instance.
+
+ Args:
+ page: Playwright page object
+ """
+ self.page = page
+
+ def scroll_into_view(self, locator):
+ """
+ Scroll the last element matching the locator into view.
+
+ Args:
+ locator: Playwright locator object
+ """
+ reference_list = locator
+ locator.nth(reference_list.count() - 1).scroll_into_view_if_needed()
+
+ def is_visible(self, locator):
+ """
+ Check if an element is visible on the page.
+
+ Args:
+ locator: Playwright locator object
+
+ Returns:
+ bool: True if visible, False otherwise
+ """
+ return locator.is_visible()
diff --git a/tests/e2e-test/config/constants.py b/tests/e2e-test/config/constants.py
new file mode 100644
index 00000000..28566894
--- /dev/null
+++ b/tests/e2e-test/config/constants.py
@@ -0,0 +1,12 @@
+"""
+Configuration constants module for test environment settings.
+"""
+
+import os
+
+from dotenv import load_dotenv
+
+load_dotenv()
+URL = os.getenv("url")
+if URL and URL.endswith("/"):
+ URL = URL[:-1]
diff --git a/tests/e2e-test/pages/HomePageV2.py b/tests/e2e-test/pages/HomePageV2.py
new file mode 100644
index 00000000..d588bae8
--- /dev/null
+++ b/tests/e2e-test/pages/HomePageV2.py
@@ -0,0 +1,1239 @@
+"""
+Home page module for Content Processing Solution Accelerator V2.
+Supports Auto Claim collection with expandable rows, AI Summary, and AI Gap Analysis.
+"""
+
+import os
+import glob
+import logging
+
+from base.base import BasePage
+from playwright.sync_api import expect
+
+logger = logging.getLogger(__name__)
+
+
+class HomePageV2(BasePage):
+ """
+ V2 Home page object containing all locators and methods for interacting
+ with the Content Processing home page (Auto Claim workflow).
+ """
+
+ # HOMEPAGE PANELS
+ PROCESSING_QUEUE = "//span[normalize-space()='Processing Queue']"
+ OUTPUT_REVIEW = "//span[contains(normalize-space(),'Output Review')]"
+ SOURCE_DOC = "//span[normalize-space()='Source Document']"
+ PROCESSING_QUEUE_BTN = "//button[normalize-space()='Processing Queue']"
+ OUTPUT_REVIEW_BTN = "//button[contains(normalize-space(),'Output Review')]"
+ SOURCE_DOC_BTN = "//button[normalize-space()='Source Document']"
+ COLLAPSE_PANEL_BTN = "//button[@title='Collapse Panel']"
+
+ # COLLECTION & ACTIONS
+ SELECT_COLLECTION = "//input[contains(@placeholder,'Select Collection')]"
+ IMPORT_DOCUMENTS_BTN = "//button[normalize-space()='Import Document(s)']"
+ REFRESH_BTN = "//button[normalize-space()='Refresh']"
+
+ # IMPORT DIALOG
+ BROWSE_FILES_BTN = "//button[normalize-space()='Browse Files']"
+ IMPORT_BTN = "//button[normalize-space()='Import']"
+ CLOSE_BTN = "//button[normalize-space()='Close']"
+ SELECTED_COLLECTION_INFO = "//div[contains(text(),'Selected Collection')]"
+ SELECT_SCHEMA_COMBOBOX = "//input[@placeholder='Select Schema']"
+
+ # File name to schema mapping for Auto Claim collection
+ FILE_SCHEMA_MAP = {
+ "claim_form.pdf": "Auto Insurance Claim Form",
+ "damage_photo.png": "Damaged Vehicle Image Assessment",
+ "police_report.pdf": "Police Report Document",
+ "repair_estimate.pdf": "Repair Estimate Document",
+ }
+
+ # TABLE (uses div with role="table", not native
)
+ CLAIMS_TABLE = "div[role='table']"
+ DATA_ROWS = "div[role='table'] div[role='rowgroup']:nth-child(2) div[role='row']"
+ NO_DATA = "//p[normalize-space()='No data available']"
+
+ # OUTPUT REVIEW TABS (Claim level)
+ AI_SUMMARY_TAB = "//span[.='AI Summary']"
+ AI_GAP_ANALYSIS_TAB = "//span[.='AI Gap Analysis']"
+
+ AI_SUMMARY_CONTENT = "//p[contains(text(),'1) Claim & Policy')]"
+ AI_GAP_ANALYSIS_CONTENT = "//p[contains(text(),'Executive Summary:')]"
+
+ # OUTPUT REVIEW TABS (Document/child file level)
+ EXTRACTED_RESULTS_TAB = "//span[.='Extracted Results']"
+ PROCESS_STEPS_TAB = "//span[.='Process Steps']"
+
+ # COMMENTS
+ COMMENTS = "//textarea"
+ SAVE_BTN = "//button[normalize-space()='Save']"
+
+ # SOURCE DOCUMENT PANE
+ SOURCE_DOC_NO_DATA = "//p[normalize-space()='No document available']"
+
+ # API DOCUMENTATION
+ API_DOCUMENTATION_TAB = "//div[normalize-space()='API Documentation']"
+
+ def __init__(self, page):
+ """
+ Initialize the HomePageV2.
+
+ Args:
+ page: Playwright page object
+ """
+ super().__init__(page)
+ self.page = page
+
+ def dismiss_any_dialog(self):
+ """Dismiss any open dialog or backdrop to ensure a clean state."""
+ # Try closing via Close button first with a short timeout
+ try:
+ close_btn = self.page.locator(self.CLOSE_BTN)
+ if close_btn.count() > 0 and close_btn.is_visible():
+ close_btn.click(timeout=5000)
+ self.page.wait_for_timeout(500)
+ except (TimeoutError, Exception): # pylint: disable=broad-exception-caught
+ # Button may be unstable or detached — ignore and continue
+ pass
+
+ # Press Escape to dismiss any remaining backdrop
+ self.page.keyboard.press("Escape")
+ self.page.wait_for_timeout(500)
+
+ def validate_home_page(self):
+ """Validate that all main sections are visible on the home page."""
+ logger.info("Starting home page validation...")
+
+ logger.info("Validating Processing Queue is visible...")
+ expect(self.page.locator(self.PROCESSING_QUEUE)).to_be_visible()
+ logger.info("✓ Processing Queue is visible")
+
+ logger.info("Validating Output Review is visible...")
+ expect(self.page.locator(self.OUTPUT_REVIEW)).to_be_visible()
+ logger.info("✓ Output Review is visible")
+
+ logger.info("Validating Source Document is visible...")
+ expect(self.page.locator(self.SOURCE_DOC)).to_be_visible()
+ logger.info("✓ Source Document is visible")
+
+ self.page.wait_for_timeout(2000)
+ logger.info("Home page validation completed successfully")
+
+ def select_collection(self, collection_name="Auto Claim"):
+ """
+ Select a collection from the Select Collection dropdown.
+
+ Args:
+ collection_name: Name of the collection to select (default: Auto Claim)
+ """
+ logger.info(f"Starting collection selection for: {collection_name}")
+
+ self.page.wait_for_timeout(3000)
+
+ logger.info("Clicking on Select Collection dropdown...")
+ self.page.locator(self.SELECT_COLLECTION).click()
+ logger.info("✓ Select Collection dropdown clicked")
+
+ logger.info(f"Selecting '{collection_name}' option...")
+ self.page.get_by_role("option", name=collection_name).click()
+ logger.info(f"✓ '{collection_name}' option selected")
+
+ self.page.wait_for_timeout(2000)
+ logger.info(f"Collection selection completed for: {collection_name}")
+
+ def get_testdata_files(self):
+ """
+ Dynamically get all files from the testdata folder.
+
+ Returns:
+ list: List of absolute file paths from testdata folder
+ """
+ current_working_dir = os.getcwd()
+ testdata_dir = os.path.join(current_working_dir, "testdata")
+ files = glob.glob(os.path.join(testdata_dir, "*"))
+ # Filter only files (not directories)
+ files = [f for f in files if os.path.isfile(f)]
+ logger.info(f"Found {len(files)} files in testdata folder: {[os.path.basename(f) for f in files]}")
+ return files
+
+ def select_schema_for_file(self, file_name, schema_name):
+ """
+ Select a schema from the dropdown for a specific file in the import dialog.
+
+ Args:
+ file_name: Name of the file (e.g. 'claim_form.pdf')
+ schema_name: Schema to select (e.g. 'Auto Insurance Claim Form')
+ """
+ logger.info(f"Selecting schema '{schema_name}' for file '{file_name}'...")
+
+ # Get all schema comboboxes and file labels in the import dialog
+ schema_dropdowns = self.page.get_by_role(
+ "alertdialog", name="Import Content"
+ ).get_by_placeholder("Select Schema")
+ file_labels = self.page.get_by_role(
+ "alertdialog", name="Import Content"
+ ).locator("strong")
+
+ # Find the index of this file among all listed files
+ count = file_labels.count()
+ target_index = -1
+ for i in range(count):
+ label_text = file_labels.nth(i).inner_text().strip()
+ if label_text == file_name:
+ target_index = i
+ break
+
+ if target_index == -1:
+ raise Exception(f"File '{file_name}' not found in import dialog")
+
+ # Click on the schema dropdown for this file
+ schema_dropdowns.nth(target_index).click()
+ logger.info(f"✓ Schema dropdown clicked for '{file_name}'")
+
+ self.page.wait_for_timeout(1000)
+
+ # Select the schema option
+ self.page.get_by_role("option", name=schema_name).click()
+ logger.info(f"✓ Schema '{schema_name}' selected for '{file_name}'")
+
+ self.page.wait_for_timeout(1000)
+
+ def upload_files(self):
+ """
+ Upload all files from the testdata folder dynamically.
+ After browsing files, selects the appropriate schema for each file
+ before clicking Import.
+ """
+ logger.info("Starting file upload for Auto Claim documents...")
+
+ files = self.get_testdata_files()
+ if not files:
+ raise Exception("No files found in testdata folder")
+
+ with self.page.expect_file_chooser() as fc_info:
+ logger.info("Clicking Import Document(s) button...")
+ self.page.locator(self.IMPORT_DOCUMENTS_BTN).click()
+ logger.info("✓ Import Document(s) button clicked")
+
+ logger.info("Clicking Browse Files button...")
+ self.page.locator(self.BROWSE_FILES_BTN).click()
+ logger.info("✓ Browse Files button clicked")
+
+ self.page.wait_for_timeout(3000)
+
+ file_chooser = fc_info.value
+ logger.info(f"Selecting {len(files)} files: {[os.path.basename(f) for f in files]}")
+ file_chooser.set_files(files)
+ logger.info("✓ All files selected")
+
+ self.page.wait_for_timeout(5000)
+
+ # Select schema for each uploaded file
+ for file_path in files:
+ file_name = os.path.basename(file_path)
+ schema_name = self.FILE_SCHEMA_MAP.get(file_name)
+ if schema_name:
+ self.select_schema_for_file(file_name, schema_name)
+ else:
+ logger.warning(
+ f"No schema mapping found for '{file_name}', skipping schema selection"
+ )
+
+ self.page.wait_for_timeout(2000)
+
+ logger.info("Clicking Import button...")
+ self.page.locator(self.IMPORT_BTN).click()
+ logger.info("✓ Import button clicked")
+
+ self.page.wait_for_timeout(10000)
+
+ logger.info("Validating upload success...")
+ expect(
+ self.page.get_by_role("alertdialog", name="Import Content")
+ .locator("path")
+ .nth(1)
+ ).to_be_visible()
+ logger.info("✓ Upload success message is visible")
+
+ logger.info("Closing upload dialog...")
+ self.page.locator(self.CLOSE_BTN).click()
+ logger.info("✓ Upload dialog closed")
+
+ logger.info("File upload completed successfully")
+
+ def refresh_until_completed(self, max_retries=60):
+ """
+ Refresh and wait for the first claim row (parent) to show Completed status.
+ Processing goes through: Processing → Summarizing → GapAnalysis → Completed.
+
+ Args:
+ max_retries: Maximum number of refresh attempts (default: 60)
+ """
+ logger.info("Starting refresh process to monitor claim processing status...")
+
+ for i in range(max_retries):
+ self.page.wait_for_timeout(3000)
+ # Get the status of the first data row (parent claim row)
+ first_row = self.page.locator(self.DATA_ROWS).first
+ status_cell = first_row.locator("div[role='cell']").nth(3)
+ status_text = status_cell.inner_text().strip()
+ logger.info(f"Attempt {i + 1}/{max_retries}: Current status = '{status_text}'")
+
+ if status_text == "Completed":
+ logger.info("✓ Claim processing completed successfully")
+ return
+
+ if status_text == "Error":
+ logger.error(f"Process failed with status: 'Error' after {i + 1} retries")
+ raise Exception(
+ f"Process failed with status: 'Error' after {i + 1} retries."
+ )
+
+ logger.info("Clicking Refresh button...")
+ self.page.locator(self.REFRESH_BTN).click()
+ logger.info("✓ Refresh button clicked, waiting...")
+ self.page.wait_for_timeout(15000)
+
+ raise Exception(
+ f"Process did not complete after {max_retries} retries."
+ )
+
+ def expand_first_claim_row(self):
+ """Expand the first claim row to reveal child file rows."""
+ logger.info("Expanding first claim row...")
+
+ first_row = self.page.locator(self.DATA_ROWS).first
+ expand_btn = first_row.locator("button").first
+ expand_btn.click()
+ logger.info("✓ First claim row expanded")
+
+ self.page.wait_for_timeout(3000)
+
+ def get_child_file_rows(self):
+ """
+ Get child file rows belonging to the first expanded claim row.
+ Child rows appear immediately after the parent row and don't have
+ a button in the first cell. Stops when hitting the next parent row.
+
+ Returns:
+ list: List of (index, row_locator) tuples for child rows
+ """
+ all_rows = self.page.locator(self.DATA_ROWS)
+ total = all_rows.count()
+ child_indices = []
+ found_first_parent = False
+
+ for i in range(total):
+ row = all_rows.nth(i)
+ first_cell = row.locator("div[role='cell']").first
+ has_button = first_cell.locator("button").count() > 0
+
+ if has_button:
+ if found_first_parent:
+ # Hit the next parent row — stop collecting children
+ break
+ found_first_parent = True
+ continue
+
+ if found_first_parent:
+ child_indices.append(i)
+
+ logger.info(f"Found {len(child_indices)} child file rows for first claim")
+ self.child_indices = child_indices
+ return all_rows
+
+ def validate_all_child_files_completed(self):
+ """Validate that all child file rows show Completed status with Entity/Schema scores."""
+ logger.info("Validating all child file statuses...")
+
+ all_rows = self.get_child_file_rows()
+ child_indices = self.child_indices
+
+ if len(child_indices) == 0:
+ raise Exception("No child file rows found after expanding claim row")
+
+ for idx in child_indices:
+ row = all_rows.nth(idx)
+ cells = row.locator("div[role='cell']")
+
+ # Get file name from second cell (index 1)
+ file_name = cells.nth(1).inner_text().strip()
+
+ # Get status from fourth cell (index 3)
+ status_text = cells.nth(3).inner_text().strip()
+ logger.info(f"File '{file_name}': Status = '{status_text}'")
+
+ if status_text != "Completed":
+ raise Exception(
+ f"File '{file_name}' has status '{status_text}', expected 'Completed'"
+ )
+ logger.info(f"✓ File '{file_name}' status is Completed")
+
+ # Validate Entity score exists (index 5)
+ entity_score = cells.nth(5).inner_text().strip()
+ if not entity_score or entity_score == "":
+ raise Exception(f"File '{file_name}' has no Entity score")
+ logger.info(f"✓ File '{file_name}' Entity score: {entity_score}")
+
+ # Validate Schema score exists (index 6)
+ schema_score = cells.nth(6).inner_text().strip()
+ if not schema_score or schema_score == "":
+ raise Exception(f"File '{file_name}' has no Schema score")
+ logger.info(f"✓ File '{file_name}' Schema score: {schema_score}")
+
+ logger.info(f"All {len(child_indices)} child files validated successfully")
+
+ def validate_ai_summary(self):
+ """Validate that the AI Summary tab has content."""
+ logger.info("Starting AI Summary validation...")
+
+ logger.info("Clicking on AI Summary tab...")
+ self.page.locator(self.AI_SUMMARY_TAB).first.click()
+ logger.info("✓ AI Summary tab clicked")
+
+ self.page.wait_for_timeout(3000)
+
+ logger.info("Validating AI Summary content is visible...")
+ expect(self.page.locator(self.AI_SUMMARY_CONTENT)).to_be_visible()
+ logger.info("✓ AI Summary content is visible")
+
+ logger.info("AI Summary validation completed successfully")
+
+ def validate_ai_gap_analysis(self):
+ """Validate that the AI Gap Analysis tab has content."""
+ logger.info("Starting AI Gap Analysis validation...")
+
+ logger.info("Clicking on AI Gap Analysis tab...")
+ self.page.locator(self.AI_GAP_ANALYSIS_TAB).first.click()
+ logger.info("✓ AI Gap Analysis tab clicked")
+
+ self.page.wait_for_timeout(3000)
+
+ logger.info("Validating AI Gap Analysis content is visible...")
+ expect(self.page.locator(self.AI_GAP_ANALYSIS_CONTENT)).to_be_visible()
+ logger.info("✓ AI Gap Analysis content is visible")
+
+ logger.info("AI Gap Analysis validation completed successfully")
+
+ def click_on_first_claim_row(self):
+ """Click on the first claim row to select it and load its Output Review."""
+ logger.info("Clicking on first claim row to load Output Review...")
+
+ first_row = self.page.locator(self.DATA_ROWS).first
+ # Click on the file name cell to select the row
+ first_row.locator("div[role='cell']").nth(1).click()
+ logger.info("✓ First claim row clicked")
+
+ self.page.wait_for_timeout(5000)
+
+ def click_on_child_file_row(self, file_name="claim_form.pdf"):
+ """
+ Click on a specific child file row to load its Extracted Results and Source Document.
+
+ Args:
+ file_name: Name of the child file to click (default: claim_form.pdf)
+ """
+ logger.info(f"Clicking on child file '{file_name}' to load Output Review...")
+
+ all_rows = self.page.locator(self.DATA_ROWS)
+ total = all_rows.count()
+ clicked = False
+
+ for i in range(total):
+ row = all_rows.nth(i)
+ file_cell = row.locator("div[role='cell']").nth(1)
+ cell_text = file_cell.inner_text().strip()
+ if cell_text == file_name:
+ file_cell.click()
+ clicked = True
+ break
+
+ if not clicked:
+ raise Exception(f"Child file '{file_name}' not found in table rows")
+
+ logger.info(f"✓ Child file '{file_name}' clicked")
+ self.page.wait_for_timeout(5000)
+
+ def validate_extracted_results(self):
+ """Validate that the Extracted Results tab is visible and has JSON content."""
+ logger.info("Starting Extracted Results validation...")
+
+ logger.info("Clicking on Extracted Results tab...")
+ self.page.locator(self.EXTRACTED_RESULTS_TAB).first.click()
+ logger.info("✓ Extracted Results tab clicked")
+
+ self.page.wait_for_timeout(3000)
+
+ logger.info("Validating Extracted Results content is visible...")
+ # The Extracted Results tab shows a JSON editor with extracted data
+ tabpanel = self.page.locator("div[role='tabpanel']")
+ expect(tabpanel).to_be_visible()
+ # JSON content should not be empty — look for the react-json-view container
+ json_content = tabpanel.locator(
+ "//div[contains(@class,'react-json-view')] | "
+ "//div[contains(@class,'json-editor')] | "
+ "//span[contains(@class,'object-key')]"
+ )
+ if json_content.count() > 0:
+ logger.info("✓ Extracted Results JSON content is visible")
+ else:
+ # Fallback: check tabpanel has any text content
+ panel_text = tabpanel.inner_text().strip()
+ if len(panel_text) > 0:
+ logger.info(f"✓ Extracted Results has content ({len(panel_text)} chars)")
+ else:
+ raise Exception("Extracted Results tab has no content")
+
+ logger.info("Extracted Results validation completed successfully")
+
+ def validate_source_document_visible(self):
+ """Validate that the Source Document pane shows the document (not 'No document available')."""
+ logger.info("Starting Source Document pane validation...")
+
+ logger.info("Validating Source Document pane has content...")
+ source_doc_pane = self.page.locator(
+ "//div[contains(text(),'Source Document')]/ancestor::div[1]/following-sibling::*"
+ )
+
+ # Verify "No document available" is NOT shown
+ no_data = self.page.locator(self.SOURCE_DOC_NO_DATA)
+ if no_data.count() > 0 and no_data.is_visible():
+ raise Exception("Source Document pane shows 'No document available'")
+
+ logger.info("✓ Source Document pane is displaying a document")
+ logger.info("Source Document validation completed successfully")
+
+ def modify_comments_and_save(self, comment_text="Automated test comment"):
+ """
+ Click on claim_form.pdf child document, find the 'name' field with value
+ 'Camille Roy', update it to 'Camille Royy', add a comment, click Save,
+ and verify the updated value is persisted.
+
+ Args:
+ comment_text: Text to enter in the comments field
+ """
+ logger.info("Starting modify JSON, add comment, and save...")
+
+ updated_value = "Camille Royy"
+ original_value = "Camille Roy"
+
+ # Step 1: Click on claim_form.pdf child document
+ logger.info("Clicking on claim_form.pdf child document...")
+ self.click_on_child_file_row("claim_form.pdf")
+ logger.info("✓ claim_form.pdf selected")
+
+ # Step 2: Ensure Extracted Results tab is active
+ logger.info("Ensuring Extracted Results tab is active...")
+ self.page.locator(self.EXTRACTED_RESULTS_TAB).first.click()
+ self.page.wait_for_timeout(3000)
+ logger.info("✓ Extracted Results tab is active")
+
+ # Step 3: Find the name field by its ID and double-click to edit
+ logger.info("Locating policyholder name field in JSON editor...")
+ name_field = self.page.locator(
+ "//div[@id='policyholder_information.name_display']"
+ )
+
+ if name_field.count() == 0:
+ logger.warning("⚠ policyholder_information.name_display not found — skipping edit")
+ else:
+ name_field.first.scroll_into_view_if_needed()
+ logger.info("✓ Found policyholder_information.name_display field")
+
+ # Double-click to enter edit mode
+ name_field.first.dblclick()
+ logger.info("✓ Double-clicked on name field to enter edit mode")
+ self.page.wait_for_timeout(2000)
+
+ # Find the input/textarea in edit mode and update the value
+ edit_input = self.page.locator(
+ ".jer-input-component input, "
+ ".jer-input-component textarea, "
+ ".JSONEditor-contentDiv input[type='text'], "
+ ".JSONEditor-contentDiv textarea"
+ )
+
+ if edit_input.count() > 0:
+ logger.info("Edit mode activated — updating value...")
+ edit_input.first.clear()
+ edit_input.first.fill(updated_value)
+ logger.info(f"✓ Value changed from '{original_value}' to '{updated_value}'")
+
+ # Confirm the edit
+ confirm_btn = self.page.locator(
+ ".jer-confirm-buttons button:first-child, "
+ "[class*='jer-confirm'] button, "
+ ".jer-edit-buttons button:first-child"
+ )
+ if confirm_btn.count() > 0:
+ confirm_btn.first.click()
+ logger.info("✓ Edit confirmed via confirm button")
+ else:
+ edit_input.first.press("Enter")
+ logger.info("✓ Edit confirmed via Enter key")
+
+ self.page.wait_for_timeout(1000)
+ else:
+ logger.warning("⚠ Edit input not found after double-click")
+
+ # Step 4: Add comment text
+ logger.info("Locating Comments textarea...")
+ comments_field = self.page.locator(self.COMMENTS)
+ expect(comments_field).to_be_visible()
+ logger.info("✓ Comments textarea is visible")
+
+ logger.info("Clearing and entering comment text...")
+ comments_field.fill(comment_text)
+ logger.info(f"✓ Comment entered: '{comment_text}'")
+
+ self.page.wait_for_timeout(1000)
+
+ # Step 5: Click Save
+ logger.info("Clicking Save button...")
+ save_btn = self.page.locator(self.SAVE_BTN)
+ expect(save_btn).to_be_enabled(timeout=5000)
+ save_btn.click()
+ logger.info("✓ Save button clicked")
+
+ self.page.wait_for_timeout(8000)
+
+ # Step 6: Verify the updated value is persisted
+ logger.info("Verifying saved data persisted...")
+
+ # Re-click claim_form.pdf to reload Extracted Results
+ self.click_on_child_file_row("claim_form.pdf")
+ self.page.locator(self.EXTRACTED_RESULTS_TAB).first.click()
+ self.page.wait_for_timeout(3000)
+
+ # Search for the updated value in the JSON editor content
+ page_content = self.page.locator(".JSONEditor-contentDiv").inner_text()
+ if updated_value in page_content:
+ logger.info(f"✓ Updated value '{updated_value}' found — data persisted successfully")
+ else:
+ logger.warning(f"⚠ '{updated_value}' not found after save — may have been reset")
+
+ # Verify comment is persisted
+ comments_after = self.page.locator(self.COMMENTS).input_value()
+ if comment_text in comments_after:
+ logger.info(f"✓ Comment '{comment_text}' is persisted after save")
+ else:
+ logger.info(f"✓ Save completed (comment field value: '{comments_after[:50]}')")
+
+ logger.info("Modify JSON, add comment, and save completed successfully")
+
+ def validate_process_steps(self):
+ """
+ Validate the Process Steps tab for all child files in the expanded claim.
+ Clicks each child file, opens Process Steps tab, and expands the accordion
+ sections (Extract, Map, Evaluate) to verify content loads.
+ """
+ logger.info("Starting Process Steps validation for all child files...")
+
+ # Get the list of child file names from FILE_SCHEMA_MAP
+ child_files = list(self.FILE_SCHEMA_MAP.keys())
+ logger.info(f"Will validate Process Steps for {len(child_files)} files: {child_files}")
+
+ for file_name in child_files:
+ logger.info(f"--- Validating Process Steps for '{file_name}' ---")
+
+ # Click on the child file row
+ logger.info(f"Clicking on child file '{file_name}'...")
+ all_rows = self.page.locator(self.DATA_ROWS)
+ total = all_rows.count()
+ clicked = False
+
+ for i in range(total):
+ row = all_rows.nth(i)
+ file_cell = row.locator("div[role='cell']").nth(1)
+ cell_text = file_cell.inner_text().strip()
+ if cell_text == file_name:
+ file_cell.click()
+ clicked = True
+ break
+
+ if not clicked:
+ logger.warning(f"⚠ Child file '{file_name}' not found in table — skipping")
+ continue
+
+ logger.info(f"✓ Child file '{file_name}' clicked")
+ self.page.wait_for_timeout(5000)
+
+ # Click on Process Steps tab
+ logger.info(f"Clicking Process Steps tab for '{file_name}'...")
+ self.page.locator(self.PROCESS_STEPS_TAB).first.click()
+ self.page.wait_for_timeout(3000)
+ logger.info(f"✓ Process Steps tab clicked for '{file_name}'")
+
+ # Validate tab panel is visible
+ tabpanel = self.page.locator("div[role='tabpanel']")
+ expect(tabpanel).to_be_visible()
+
+ # Process Steps uses FluentUI Accordion — each step has an AccordionHeader button
+ accordion_headers = tabpanel.locator("button").filter(has=self.page.locator("span"))
+
+ header_count = accordion_headers.count()
+ if header_count == 0:
+ logger.warning(f"⚠ No accordion headers found for '{file_name}'")
+ else:
+ logger.info(f"Found {header_count} process step sections for '{file_name}'")
+
+ for j in range(min(header_count, 3)):
+ header = accordion_headers.nth(j)
+ header_text = header.inner_text().strip()
+ logger.info(f"Expanding '{header_text}' for '{file_name}'...")
+ header.click()
+ self.page.wait_for_timeout(3000)
+ logger.info(f"✓ '{header_text}' expanded for '{file_name}'")
+
+ logger.info(f"✓ Process Steps validated for '{file_name}'")
+
+ logger.info(f"Process Steps validation completed for all {len(child_files)} child files")
+
+ def delete_first_claim(self):
+ """Delete the first claim via More actions menu."""
+ logger.info("Starting claim deletion process...")
+
+ logger.info("Clicking on More actions button...")
+ self.page.get_by_role("button", name="More actions").first.click()
+ logger.info("✓ More actions button clicked")
+
+ logger.info("Clicking on Delete menu item...")
+ self.page.get_by_role("menuitem", name="Delete").click()
+ logger.info("✓ Delete menu item clicked")
+
+ logger.info("Clicking on Confirm button...")
+ self.page.get_by_role("button", name="Confirm").click()
+ logger.info("✓ Confirm button clicked")
+
+ self.page.wait_for_timeout(2000)
+
+ logger.info("Validating deletion confirmation message...")
+ delete_msg = self.page.locator("//div[contains(text(),'Claim process with')]")
+ expect(delete_msg).to_be_visible(timeout=10000)
+ logger.info("✓ Deletion confirmation message is visible")
+
+ logger.info("Claim deletion completed successfully")
+
+ def validate_collapsible_panels(self):
+ """Validate collapsible section functionality for each panel."""
+ logger.info("Starting collapsible panels validation...")
+
+ # Collapse Processing Queue panel
+ logger.info("Collapsing Processing Queue panel...")
+ self.page.locator(self.COLLAPSE_PANEL_BTN).nth(0).click()
+ self.page.wait_for_timeout(2000)
+ logger.info("✓ Processing Queue collapsed")
+
+ # Expand Processing Queue panel
+ logger.info("Expanding Processing Queue panel...")
+ self.page.locator(self.PROCESSING_QUEUE_BTN).click()
+ self.page.wait_for_timeout(2000)
+ logger.info("✓ Processing Queue expanded")
+
+ # Collapse Output Review panel
+ logger.info("Collapsing Output Review panel...")
+ self.page.locator(self.COLLAPSE_PANEL_BTN).nth(1).click()
+ self.page.wait_for_timeout(2000)
+ logger.info("✓ Output Review collapsed")
+
+ # Expand Output Review panel
+ logger.info("Expanding Output Review panel...")
+ self.page.locator(self.OUTPUT_REVIEW_BTN).click()
+ self.page.wait_for_timeout(2000)
+ logger.info("✓ Output Review expanded")
+
+ # Collapse Source Document panel
+ logger.info("Collapsing Source Document panel...")
+ self.page.locator(self.COLLAPSE_PANEL_BTN).nth(2).click()
+ self.page.wait_for_timeout(2000)
+ logger.info("✓ Source Document collapsed")
+
+ # Expand Source Document panel
+ logger.info("Expanding Source Document panel...")
+ self.page.locator(self.SOURCE_DOC_BTN).click()
+ self.page.wait_for_timeout(2000)
+ logger.info("✓ Source Document expanded")
+
+ logger.info("Collapsible panels validation completed successfully")
+
+ def validate_api_document_link(self):
+ """Validate API Documentation tab opens and displays correct content."""
+ logger.info("Starting API Documentation validation...")
+
+ original_page = self.page
+
+ with self.page.context.expect_page() as new_page_info:
+ logger.info("Clicking on API Documentation tab...")
+ self.page.get_by_role("tab", name="API Documentation").click()
+ logger.info("✓ API Documentation tab clicked")
+
+ new_page = new_page_info.value
+ new_page.wait_for_load_state()
+ logger.info("New tab opened successfully")
+
+ logger.info("Switching to new tab...")
+ new_page.bring_to_front()
+ logger.info("✓ Switched to new tab")
+
+ logger.info("Validating API documentation title is visible...")
+ expect(new_page.locator("//h1[@class='title']")).to_be_visible()
+ logger.info("✓ API documentation title is visible")
+
+ logger.info("Closing API Documentation tab...")
+ new_page.close()
+ logger.info("✓ API Documentation tab closed")
+
+ logger.info("Switching back to original tab...")
+ original_page.bring_to_front()
+ logger.info("✓ Switched back to original tab")
+
+ logger.info("API Documentation validation completed successfully")
+
+ def validate_import_without_collection(self):
+ """Validate that import button shows validation when no collection is selected."""
+ logger.info("Starting validation for import without collection selection...")
+
+ # Clear the collection dropdown if it has a value
+ clear_btn = self.page.locator(
+ "//input[contains(@placeholder,'Select Collection')]/following-sibling::*[contains(@class,'clearIcon')]"
+ )
+ if clear_btn.count() > 0 and clear_btn.is_visible():
+ logger.info("Clearing existing collection selection...")
+ clear_btn.click()
+ self.page.wait_for_timeout(1000)
+ logger.info("✓ Collection selection cleared")
+ else:
+ # Try pressing Escape to clear any selection, then clear via keyboard
+ collection_input = self.page.locator(self.SELECT_COLLECTION)
+ collection_input.click()
+ collection_input.fill("")
+ self.page.keyboard.press("Escape")
+ self.page.wait_for_timeout(1000)
+
+ logger.info("Clicking on Import Document(s) button without selecting collection...")
+ self.page.locator(self.IMPORT_DOCUMENTS_BTN).click()
+ logger.info("✓ Import Document(s) button clicked")
+
+ self.page.wait_for_timeout(2000)
+
+ logger.info("Validating validation message is visible...")
+ # V2 may show "Please Select Collection" or open dialog with warning
+ validation_msg = self.page.locator(
+ "//div[contains(text(),'Please Select') or contains(text(),'Please select')]"
+ )
+ dialog = self.page.get_by_role("alertdialog")
+
+ if validation_msg.count() > 0 and validation_msg.first.is_visible():
+ logger.info("✓ Validation message is visible")
+ elif dialog.count() > 0 and dialog.is_visible():
+ logger.info("✓ Import dialog opened — checking for collection warning")
+
+ # Close any open dialog to avoid blocking subsequent tests
+ close_btn = self.page.locator(self.CLOSE_BTN)
+ if close_btn.count() > 0 and close_btn.is_visible():
+ close_btn.click()
+ self.page.wait_for_timeout(1000)
+ logger.info("✓ Dialog closed")
+
+ # Dismiss any remaining backdrop by pressing Escape
+ self.page.keyboard.press("Escape")
+ self.page.wait_for_timeout(1000)
+
+ logger.info("Import without collection validation completed successfully")
+
+ def refresh_page(self):
+ """Refresh the current page using browser reload."""
+ logger.info("Starting page refresh...")
+
+ self.page.reload()
+ logger.info("✓ Page reloaded")
+
+ self.page.wait_for_timeout(3000)
+ logger.info("Page refresh completed successfully")
+
+ def validate_schema_selection_warning(self):
+ """
+ Validate that the import dialog shows the correct collection warning message
+ and that each file requires schema selection before Import is enabled.
+ ADO TC 17305: Alert user to upload file correctly as per selected schema.
+ """
+ logger.info("Starting schema selection warning validation...")
+
+ logger.info("Clicking Import Document(s) button...")
+ self.page.locator(self.IMPORT_DOCUMENTS_BTN).click()
+ logger.info("✓ Import Document(s) button clicked")
+
+ self.page.wait_for_timeout(3000)
+
+ # Validate the selected collection info message
+ logger.info("Validating 'Selected Collection: Auto Claim' message...")
+ dialog = self.page.get_by_role("alertdialog", name="Import Content")
+ expect(dialog).to_be_visible()
+ logger.info("✓ Import Content dialog is visible")
+
+ # The collection info is in a span with class fui-MessageBarTitle
+ collection_text = dialog.locator("//span[.='Selected Collection: Auto Claim']")
+ expect(collection_text).to_be_visible(timeout=10000)
+ logger.info("✓ 'Selected Collection: Auto Claim' message is visible")
+
+ # Validate the warning text about importing specific files
+ # Text is inside div.fui-MessageBarBody
+ logger.info("Validating import warning message...")
+ warning_text = dialog.locator(
+ "//div[contains(@class,'fui-MessageBarBody') and contains(.,'Please import files specific')]"
+ )
+ expect(warning_text.first).to_be_visible(timeout=10000)
+ logger.info("✓ Import warning message is visible")
+
+ # Validate Import button is disabled before file selection
+ logger.info("Validating Import button is disabled...")
+ expect(dialog.locator("//button[normalize-space()='Import']")).to_be_disabled()
+ logger.info("✓ Import button is disabled before file/schema selection")
+
+ logger.info("Closing dialog...")
+ dialog.locator("//button[normalize-space()='Close']").click()
+ logger.info("✓ Dialog closed")
+
+ logger.info("Schema selection warning validation completed successfully")
+
+ def validate_unsupported_file_upload(self):
+ """
+ Validate that uploading unsupported file types (e.g., .txt, .docx, .json)
+ shows an appropriate error or is rejected.
+ ADO TC 26004: Validate upload of unsupported files.
+ """
+ logger.info("Starting unsupported file upload validation...")
+
+ # Create a temporary unsupported file
+ import tempfile
+ temp_dir = tempfile.mkdtemp()
+ unsupported_file = os.path.join(temp_dir, "test_document.txt")
+ with open(unsupported_file, "w") as f:
+ f.write("This is an unsupported test file")
+
+ with self.page.expect_file_chooser() as fc_info:
+ logger.info("Clicking Import Document(s) button...")
+ self.page.locator(self.IMPORT_DOCUMENTS_BTN).click()
+ logger.info("✓ Import Document(s) button clicked")
+
+ logger.info("Clicking Browse Files button...")
+ self.page.locator(self.BROWSE_FILES_BTN).click()
+ logger.info("✓ Browse Files button clicked")
+
+ self.page.wait_for_timeout(3000)
+
+ file_chooser = fc_info.value
+ logger.info(f"Selecting unsupported file: {unsupported_file}")
+ file_chooser.set_files([unsupported_file])
+ logger.info("✓ Unsupported file selected")
+
+ self.page.wait_for_timeout(3000)
+
+ # Check for validation message about unsupported file types
+ logger.info("Validating unsupported file error message...")
+ error_msg = self.page.locator(
+ "//p[contains(.,'Only PDF and JPEG, PNG image files are available')]"
+ )
+ if error_msg.is_visible():
+ logger.info("✓ Unsupported file error message is visible")
+ else:
+ # Check if Import button remains disabled
+ dialog = self.page.get_by_role("alertdialog", name="Import Content")
+ import_btn = dialog.locator("//button[normalize-space()='Import']")
+ expect(import_btn).to_be_disabled()
+ logger.info("✓ Import button remains disabled for unsupported file")
+
+ logger.info("Closing dialog...")
+ self.page.locator(self.CLOSE_BTN).click()
+ logger.info("✓ Dialog closed")
+
+ # Cleanup temp file
+ os.remove(unsupported_file)
+ os.rmdir(temp_dir)
+
+ logger.info("Unsupported file upload validation completed successfully")
+
+ def validate_network_disconnect_error(self):
+ """
+ Validate error handling when network is disconnected during file upload.
+ ADO TC 17306: Unclear Error Notification on Network Disconnect.
+ Simulates offline mode using Playwright's route abort.
+ """
+ logger.info("Starting network disconnect error validation...")
+
+ # First, select files normally
+ with self.page.expect_file_chooser() as fc_info:
+ logger.info("Clicking Import Document(s) button...")
+ self.page.locator(self.IMPORT_DOCUMENTS_BTN).click()
+ logger.info("✓ Import Document(s) button clicked")
+
+ logger.info("Clicking Browse Files button...")
+ self.page.locator(self.BROWSE_FILES_BTN).click()
+ logger.info("✓ Browse Files button clicked")
+
+ self.page.wait_for_timeout(3000)
+
+ file_chooser = fc_info.value
+ files = self.get_testdata_files()
+ file_chooser.set_files(files)
+ logger.info("✓ Files selected")
+
+ self.page.wait_for_timeout(3000)
+
+ # Select schemas for all files
+ for file_path in files:
+ file_name = os.path.basename(file_path)
+ schema_name = self.FILE_SCHEMA_MAP.get(file_name)
+ if schema_name:
+ self.select_schema_for_file(file_name, schema_name)
+
+ self.page.wait_for_timeout(2000)
+
+ # Simulate network disconnect by blocking all requests
+ logger.info("Simulating network disconnect...")
+ self.page.context.set_offline(True)
+ logger.info("✓ Network set to offline mode")
+
+ # Click Import — should trigger an error
+ logger.info("Clicking Import button while offline...")
+ self.page.locator(self.IMPORT_BTN).click()
+ logger.info("✓ Import button clicked")
+
+ self.page.wait_for_timeout(5000)
+
+ # Verify an error notification or warning is displayed
+ logger.info("Checking for error notification...")
+ # Look for any toast/notification or error dialog
+ error_visible = (
+ self.page.locator("//div[contains(@class,'Toastify')]").is_visible()
+ or self.page.locator("//div[contains(@role,'alert')]").is_visible()
+ or self.page.locator("//div[contains(text(),'error')]").is_visible()
+ or self.page.locator("//div[contains(text(),'Error')]").is_visible()
+ or self.page.locator("//div[contains(text(),'failed')]").is_visible()
+ or self.page.locator("//div[contains(text(),'Failed')]").is_visible()
+ )
+
+ if error_visible:
+ logger.info("✓ Error notification is displayed on network disconnect")
+ else:
+ logger.warning("⚠ No visible error notification found — may need locator update")
+
+ # Restore network
+ logger.info("Restoring network connection...")
+ self.page.context.set_offline(False)
+ logger.info("✓ Network restored to online mode")
+
+ # Close dialog
+ logger.info("Closing dialog...")
+ self.page.locator(self.CLOSE_BTN).click()
+ logger.info("✓ Dialog closed")
+
+ self.page.wait_for_timeout(3000)
+ logger.info("Network disconnect error validation completed")
+
+ def open_import_dialog_with_files(self):
+ """
+ Open the import dialog and browse all testdata files without selecting schemas.
+ Leaves the dialog open for further validation.
+
+ Returns:
+ dialog: The alertdialog locator for further assertions
+ """
+ logger.info("Opening import dialog and browsing files...")
+
+ files = self.get_testdata_files()
+ if not files:
+ raise Exception("No files found in testdata folder")
+
+ with self.page.expect_file_chooser() as fc_info:
+ logger.info("Clicking Import Document(s) button...")
+ self.page.locator(self.IMPORT_DOCUMENTS_BTN).click()
+ logger.info("✓ Import Document(s) button clicked")
+
+ logger.info("Clicking Browse Files button...")
+ self.page.locator(self.BROWSE_FILES_BTN).click()
+ logger.info("✓ Browse Files button clicked")
+
+ self.page.wait_for_timeout(3000)
+
+ file_chooser = fc_info.value
+ logger.info(f"Selecting {len(files)} files: {[os.path.basename(f) for f in files]}")
+ file_chooser.set_files(files)
+ logger.info("✓ All files selected")
+
+ self.page.wait_for_timeout(5000)
+
+ dialog = self.page.get_by_role("alertdialog", name="Import Content")
+ logger.info("Import dialog opened with files ready for schema selection")
+ return dialog
+
+ def validate_import_disabled_without_schemas(self):
+ """
+ Validate that the Import button remains disabled when files are uploaded
+ but no schemas have been selected for any file.
+ """
+ logger.info("Starting validation: Import disabled without schema selection...")
+
+ dialog = self.open_import_dialog_with_files()
+
+ logger.info("Validating Import button is disabled without schema selection...")
+ import_btn = dialog.locator("//button[normalize-space()='Import']")
+ expect(import_btn).to_be_disabled()
+ logger.info("✓ Import button is disabled when no schemas are selected")
+
+ logger.info("Closing dialog...")
+ self.page.locator(self.CLOSE_BTN).click()
+ self.page.wait_for_timeout(1000)
+ logger.info("✓ Dialog closed")
+
+ logger.info("Validation completed: Import disabled without schemas")
+
+ def validate_import_disabled_with_partial_schemas(self):
+ """
+ Validate that the Import button remains disabled when schemas are selected
+ for only some files but not all.
+ """
+ logger.info("Starting validation: Import disabled with partial schema selection...")
+
+ dialog = self.open_import_dialog_with_files()
+
+ # Select schema for only the first file
+ files = self.get_testdata_files()
+ first_file = os.path.basename(files[0])
+ first_schema = self.FILE_SCHEMA_MAP.get(first_file)
+
+ if first_schema:
+ logger.info(f"Selecting schema only for first file: '{first_file}' → '{first_schema}'")
+ self.select_schema_for_file(first_file, first_schema)
+ logger.info(f"✓ Schema selected for '{first_file}' only")
+ else:
+ raise Exception(f"No schema mapping for '{first_file}'")
+
+ self.page.wait_for_timeout(2000)
+
+ logger.info("Validating Import button is still disabled with partial schemas...")
+ import_btn = dialog.locator("//button[normalize-space()='Import']")
+ expect(import_btn).to_be_disabled()
+ logger.info("✓ Import button remains disabled with partial schema selection")
+
+ logger.info("Closing dialog...")
+ self.page.locator(self.CLOSE_BTN).click()
+ self.page.wait_for_timeout(1000)
+ logger.info("✓ Dialog closed")
+
+ logger.info("Validation completed: Import disabled with partial schemas")
+
+ def upload_files_with_mismatched_schemas(self):
+ """
+ Upload files with deliberately mismatched/swapped schemas to validate
+ that the system handles incorrect schema assignments.
+ Swaps schemas: claim_form.pdf gets Repair Estimate schema and vice versa.
+ """
+ logger.info("Starting file upload with mismatched schemas...")
+
+ # Define mismatched schema mapping (swap schemas around)
+ mismatched_map = {
+ "claim_form.pdf": "Repair Estimate Document",
+ "damage_photo.png": "Police Report Document",
+ "police_report.pdf": "Damaged Vehicle Image Assessment",
+ "repair_estimate.pdf": "Auto Insurance Claim Form",
+ }
+
+ dialog = self.open_import_dialog_with_files()
+
+ # Select mismatched schemas for each file
+ files = self.get_testdata_files()
+ for file_path in files:
+ file_name = os.path.basename(file_path)
+ schema_name = mismatched_map.get(file_name)
+ if schema_name:
+ logger.info(f"Assigning MISMATCHED schema '{schema_name}' to '{file_name}'...")
+ self.select_schema_for_file(file_name, schema_name)
+ logger.info(f"✓ Mismatched schema '{schema_name}' assigned to '{file_name}'")
+
+ self.page.wait_for_timeout(2000)
+
+ logger.info("Clicking Import button with mismatched schemas...")
+ self.page.locator(self.IMPORT_BTN).click()
+ logger.info("✓ Import button clicked")
+
+ self.page.wait_for_timeout(10000)
+
+ logger.info("Validating upload success (system accepts mismatched schemas)...")
+ expect(
+ self.page.get_by_role("alertdialog", name="Import Content")
+ .locator("path")
+ .nth(1)
+ ).to_be_visible()
+ logger.info("✓ Upload accepted with mismatched schemas")
+
+ logger.info("Closing upload dialog...")
+ self.page.locator(self.CLOSE_BTN).click()
+ logger.info("✓ Upload dialog closed")
+
+ logger.info("File upload with mismatched schemas completed")
+
+ def validate_schema_dropdown_after_file_removal(self):
+ """
+ Validate that removing a file from the import dialog preserves the
+ schema selections of remaining files.
+ """
+ logger.info("Starting validation: Schema dropdown after file removal...")
+
+ dialog = self.open_import_dialog_with_files()
+
+ # Select schemas for all files first
+ files = self.get_testdata_files()
+ for file_path in files:
+ file_name = os.path.basename(file_path)
+ schema_name = self.FILE_SCHEMA_MAP.get(file_name)
+ if schema_name:
+ self.select_schema_for_file(file_name, schema_name)
+
+ self.page.wait_for_timeout(2000)
+ logger.info("✓ Schemas selected for all files")
+
+ # Try to remove the first file using the delete/remove button next to it
+ logger.info("Attempting to remove first file from the list...")
+ file_labels = dialog.locator("strong")
+ first_file_name = os.path.basename(files[0])
+
+ # Look for a delete/remove button near the first file entry
+ remove_buttons = dialog.locator(
+ "//button[contains(@aria-label,'Remove') or contains(@aria-label,'Delete') "
+ "or contains(@aria-label,'remove') or contains(@title,'Remove') "
+ "or contains(@title,'Delete')]"
+ )
+
+ if remove_buttons.count() > 0:
+ remove_buttons.first.click()
+ self.page.wait_for_timeout(2000)
+ logger.info(f"✓ First file '{first_file_name}' removed from list")
+
+ # Validate remaining files still have their schema selections
+ remaining_files = [os.path.basename(f) for f in files[1:]]
+ schema_dropdowns = dialog.get_by_placeholder("Select Schema")
+
+ for idx, file_name in enumerate(remaining_files):
+ dropdown = schema_dropdowns.nth(idx)
+ dropdown_value = dropdown.input_value()
+ expected_schema = self.FILE_SCHEMA_MAP.get(file_name, "")
+ logger.info(f"File '{file_name}': Schema dropdown value = '{dropdown_value}'")
+
+ if expected_schema and dropdown_value == expected_schema:
+ logger.info(f"✓ Schema '{expected_schema}' preserved for '{file_name}'")
+ else:
+ logger.warning(
+ f"⚠ Schema may have changed for '{file_name}': "
+ f"expected '{expected_schema}', got '{dropdown_value}'"
+ )
+ else:
+ logger.info("No remove button found — file removal not supported in import dialog")
+ logger.info("✓ Skipping file removal validation (UI does not support it)")
+
+ logger.info("Closing dialog...")
+ self.page.locator(self.CLOSE_BTN).click()
+ self.page.wait_for_timeout(1000)
+ logger.info("✓ Dialog closed")
+
+ logger.info("Schema dropdown after file removal validation completed")
diff --git a/tests/e2e-test/pages/__init__.py b/tests/e2e-test/pages/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/e2e-test/pages/loginPage.py b/tests/e2e-test/pages/loginPage.py
new file mode 100644
index 00000000..490e8b4b
--- /dev/null
+++ b/tests/e2e-test/pages/loginPage.py
@@ -0,0 +1,55 @@
+"""
+Login page module for authentication functionality.
+"""
+
+from base.base import BasePage
+
+
+class LoginPage(BasePage):
+ """Login page object with authentication methods."""
+
+ EMAIL_TEXT_BOX = "//input[@type='email']"
+ NEXT_BUTTON = "//input[@type='submit']"
+ PASSWORD_TEXT_BOX = "//input[@type='password']"
+ SIGNIN_BUTTON = "//input[@id='idSIButton9']"
+ YES_BUTTON = "//input[@id='idSIButton9']"
+ PERMISSION_ACCEPT_BUTTON = "//input[@type='submit']"
+
+ def __init__(self, page):
+ """
+ Initialize the LoginPage.
+
+ Args:
+ page: Playwright page object
+ """
+ super().__init__(page)
+ self.page = page
+
+ def authenticate(self, username, password):
+ """
+ Authenticate user with username and password.
+
+ Args:
+ username: User email address
+ password: User password
+ """
+ # login with username and password in web url
+ self.page.locator(self.EMAIL_TEXT_BOX).fill(username)
+ self.page.locator(self.NEXT_BUTTON).click()
+ # Wait for the password input field to be available and fill it
+ self.page.wait_for_load_state("networkidle")
+ # Enter password
+ self.page.locator(self.PASSWORD_TEXT_BOX).fill(password)
+ # Click on SignIn button
+ self.page.locator(self.SIGNIN_BUTTON).click()
+ # Wait for 5 seconds to ensure the login process completes
+ self.page.wait_for_timeout(20000) # Wait for 20 seconds
+ if self.page.locator(self.PERMISSION_ACCEPT_BUTTON).is_visible():
+ self.page.locator(self.PERMISSION_ACCEPT_BUTTON).click()
+ self.page.wait_for_timeout(10000)
+ else:
+ # Click on YES button
+ self.page.locator(self.YES_BUTTON).click()
+ self.page.wait_for_timeout(10000)
+ # Wait for the "Articles" button to be available and click it
+ self.page.wait_for_load_state("networkidle")
diff --git a/tests/e2e-test/pytest.ini b/tests/e2e-test/pytest.ini
new file mode 100644
index 00000000..31a3bee1
--- /dev/null
+++ b/tests/e2e-test/pytest.ini
@@ -0,0 +1,9 @@
+[pytest]
+log_cli = true
+log_cli_level = INFO
+log_file = logs/tests.log
+log_file_level = INFO
+addopts = -p no:warnings --tb=short
+
+markers =
+ gp: Golden Path tests
\ No newline at end of file
diff --git a/tests/e2e-test/readme.MD b/tests/e2e-test/readme.MD
new file mode 100644
index 00000000..13d4aa47
--- /dev/null
+++ b/tests/e2e-test/readme.MD
@@ -0,0 +1,35 @@
+# cto-test-automation
+
+Write end-to-end tests for your web apps with [Playwright](https://github.com/microsoft/playwright-python) and [pytest](https://docs.pytest.org/en/stable/).
+
+- Support for **all modern browsers** including Chromium, WebKit and Firefox.
+- Support for **headless and headed** execution.
+- **Built-in fixtures** that provide browser primitives to test functions.
+
+Pre-Requisites:
+
+- Install Visual Studio Code: Download and Install Visual Studio Code(VSCode).
+- Install NodeJS: Download and Install Node JS
+
+Create and Activate Python Virtual Environment
+
+- From your directory open and run cmd : "python -m venv microsoft"
+This will create a virtual environment directory named microsoft inside your current directory
+- To enable virtual environment, copy location for "microsoft\Scripts\activate.bat" and run from cmd
+
+Installing Playwright Pytest from Virtual Environment
+
+- To install libraries run "pip install -r requirements.txt"
+- Install the required browsers "playwright install"
+
+Run test cases
+
+- To run test cases from your 'tests/e2e-test' folder : "pytest --html=report.html --self-contained-html"
+
+Create .env file in project root level with web app url and client credentials
+
+- create a .env file in project root level and the application url. please refer 'sample_dotenv_file.txt' file.
+
+## Documentation
+
+See on [playwright.dev](https://playwright.dev/python/docs/test-runners) for examples and more detailed information.
diff --git a/tests/e2e-test/requirements.txt b/tests/e2e-test/requirements.txt
new file mode 100644
index 00000000..4e488e55
--- /dev/null
+++ b/tests/e2e-test/requirements.txt
@@ -0,0 +1,7 @@
+pytest-playwright
+pytest-reporter-html1
+python-dotenv
+pytest-check
+pytest-html
+py
+beautifulsoup4
\ No newline at end of file
diff --git a/tests/e2e-test/testdata/claim_form.pdf b/tests/e2e-test/testdata/claim_form.pdf
new file mode 100644
index 00000000..2109366f
Binary files /dev/null and b/tests/e2e-test/testdata/claim_form.pdf differ
diff --git a/tests/e2e-test/testdata/damage_photo.png b/tests/e2e-test/testdata/damage_photo.png
new file mode 100644
index 00000000..e61b4ce2
Binary files /dev/null and b/tests/e2e-test/testdata/damage_photo.png differ
diff --git a/tests/e2e-test/testdata/police_report.pdf b/tests/e2e-test/testdata/police_report.pdf
new file mode 100644
index 00000000..2e9f2309
Binary files /dev/null and b/tests/e2e-test/testdata/police_report.pdf differ
diff --git a/tests/e2e-test/testdata/repair_estimate.pdf b/tests/e2e-test/testdata/repair_estimate.pdf
new file mode 100644
index 00000000..8a21ec22
Binary files /dev/null and b/tests/e2e-test/testdata/repair_estimate.pdf differ
diff --git a/tests/e2e-test/tests/__init__.py b/tests/e2e-test/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/e2e-test/tests/conftest.py b/tests/e2e-test/tests/conftest.py
new file mode 100644
index 00000000..7e83f821
--- /dev/null
+++ b/tests/e2e-test/tests/conftest.py
@@ -0,0 +1,257 @@
+"""
+Pytest configuration and fixtures for KM Generic Golden Path tests
+"""
+import os
+import io
+import logging
+import atexit
+from datetime import datetime
+
+import pytest
+from playwright.sync_api import sync_playwright
+from bs4 import BeautifulSoup
+
+from config.constants import URL
+
+# Create screenshots directory if it doesn't exist
+SCREENSHOTS_DIR = os.path.join(os.path.dirname(__file__), "screenshots")
+os.makedirs(SCREENSHOTS_DIR, exist_ok=True)
+
+
+@pytest.fixture
+def subtests(request):
+ """Fixture to enable subtests for step-by-step reporting in HTML"""
+ class SubTests:
+ """SubTests class for managing subtest contexts"""
+ def __init__(self, request):
+ self.request = request
+ self._current_subtest = None
+
+ def test(self, msg=None):
+ """Create a new subtest context"""
+ return SubTestContext(self, msg)
+
+ class SubTestContext:
+ """Context manager for individual subtests"""
+ def __init__(self, parent, msg):
+ self.parent = parent
+ self.msg = msg
+ self.logger = logging.getLogger()
+ self.stream = None
+ self.handler = None
+
+ def __enter__(self):
+ # Create a dedicated log stream for this subtest
+ self.stream = io.StringIO()
+ self.handler = logging.StreamHandler(self.stream)
+ self.handler.setLevel(logging.INFO)
+ self.logger.addHandler(self.handler)
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Flush logs
+ if self.handler:
+ self.handler.flush()
+ log_output = self.stream.getvalue()
+ self.logger.removeHandler(self.handler)
+
+ # Create a report entry for this subtest
+ if hasattr(self.parent.request.node, 'user_properties'):
+ self.parent.request.node.user_properties.append(
+ ("subtest", {
+ "msg": self.msg,
+ "logs": log_output,
+ "passed": exc_type is None
+ })
+ )
+
+ # Don't suppress exceptions - let them propagate
+ return False
+
+ return SubTests(request)
+
+
+@pytest.fixture(scope="session")
+def login_logout():
+ """Perform login and browser close once in a session"""
+ with sync_playwright() as playwright_instance:
+ browser = playwright_instance.chromium.launch(
+ headless=False,
+ args=["--start-maximized"]
+ )
+ context = browser.new_context(no_viewport=True)
+ context.set_default_timeout(150000)
+ page = context.new_page()
+ # Navigate to the login URL
+ page.goto(URL, wait_until="domcontentloaded")
+ # Wait for the login form to appear
+ page.wait_for_timeout(6000)
+
+ yield page
+ # Perform close the browser
+ browser.close()
+
+
+log_streams = {}
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_runtest_setup(item):
+ """Prepare StringIO for capturing logs"""
+ stream = io.StringIO()
+ handler = logging.StreamHandler(stream)
+ handler.setLevel(logging.INFO)
+
+ logger = logging.getLogger()
+ logger.addHandler(handler)
+
+ # Save handler and stream
+ log_streams[item.nodeid] = (handler, stream)
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_html_report_title(report):
+ """Set custom HTML report title"""
+ report.title = "Content_Processing_Test_Automation_Report"
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+ """Generate test report with logs, subtest details, and screenshots on failure"""
+ outcome = yield
+ report = outcome.get_result()
+
+ # Capture screenshot on failure
+ if report.when == "call" and report.failed:
+ # Get the page fixture if it exists
+ if "login_logout" in item.fixturenames:
+ page = item.funcargs.get("login_logout")
+ if page:
+ try:
+ # Generate screenshot filename with timestamp
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ test_name = item.name.replace(" ", "_").replace("/", "_")
+ screenshot_name = f"screenshot_{test_name}_{timestamp}.png"
+ screenshot_path = os.path.join(SCREENSHOTS_DIR, screenshot_name)
+
+ # Take screenshot
+ page.screenshot(path=screenshot_path)
+
+ # Add screenshot link to report
+ if not hasattr(report, 'extra'):
+ report.extra = []
+
+ # Add screenshot as a link in the Links column
+ # Use relative path from report.html location
+ relative_path = os.path.relpath(
+ screenshot_path,
+ os.path.dirname(os.path.abspath("report.html"))
+ )
+
+ # pytest-html expects this format for extras
+ from pytest_html import extras
+ report.extra.append(extras.url(relative_path, name='Screenshot'))
+
+ logging.info("Screenshot saved: %s", screenshot_path)
+ except Exception as exc: # pylint: disable=broad-exception-caught
+ logging.error("Failed to capture screenshot: %s", str(exc))
+
+ handler, stream = log_streams.get(item.nodeid, (None, None))
+
+ if handler and stream:
+ # Make sure logs are flushed
+ handler.flush()
+ log_output = stream.getvalue()
+
+ # Only remove the handler, don't close the stream yet
+ logger = logging.getLogger()
+ logger.removeHandler(handler)
+
+ # Check if there are subtests
+ subtests_html = ""
+ if hasattr(item, 'user_properties'):
+ item_subtests = [
+ prop[1] for prop in item.user_properties if prop[0] == "subtest"
+ ]
+ if item_subtests:
+ subtests_html = (
+ ""
+ "
Step-by-Step Details:"
+ "
"
+
+ # Combine main log output with subtests
+ if subtests_html:
+ report.description = f"{log_output.strip()}{subtests_html}"
+ else:
+ report.description = f"{log_output.strip()}"
+
+ # Clean up references
+ log_streams.pop(item.nodeid, None)
+ else:
+ report.description = ""
+
+
+def pytest_collection_modifyitems(items):
+ """Modify test items to use custom node IDs"""
+ for item in items:
+ if hasattr(item, 'callspec'):
+ # Check for 'description' parameter first (for Golden Path tests)
+ description = item.callspec.params.get("description")
+ if description:
+ # pylint: disable=protected-access
+ item._nodeid = f"Golden Path - KM Generic - {description}"
+ # Fallback to 'prompt' parameter for other tests
+ else:
+ prompt = item.callspec.params.get("prompt")
+ if prompt:
+ # This controls how the test name appears in the report
+ # pylint: disable=protected-access
+ item._nodeid = prompt
+
+
+def rename_duration_column():
+ """Rename Duration column to Execution Time in HTML report"""
+ report_path = os.path.abspath("report.html")
+ if not os.path.exists(report_path):
+ print("Report file not found, skipping column rename.")
+ return
+
+ with open(report_path, 'r', encoding='utf-8') as report_file:
+ soup = BeautifulSoup(report_file, 'html.parser')
+
+ # Find and rename the header
+ headers = soup.select('table#results-table thead th')
+ for header_th in headers:
+ if header_th.text.strip() == 'Duration':
+ header_th.string = 'Execution Time'
+ break
+ else:
+ print("'Duration' column not found in report.")
+
+ with open(report_path, 'w', encoding='utf-8') as report_file:
+ report_file.write(str(soup))
+
+
+# Register this function to run after everything is done
+atexit.register(rename_duration_column)
diff --git a/tests/e2e-test/tests/test_contentProcessing_st_tc.py b/tests/e2e-test/tests/test_contentProcessing_st_tc.py
new file mode 100644
index 00000000..adaf46c6
--- /dev/null
+++ b/tests/e2e-test/tests/test_contentProcessing_st_tc.py
@@ -0,0 +1,467 @@
+"""
+Test module for Content Processing Solution Accelerator V2 end-to-end tests.
+"""
+# pylint: disable=protected-access,broad-exception-caught
+
+import logging
+import pytest
+from pages.HomePageV2 import HomePageV2
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.mark.gp
+def test_content_processing_golden_path(login_logout, request):
+ """
+ Content Processing V2 - Validate Golden path works as expected
+
+ Executes golden path test steps for Content Processing V2 with Auto Claim workflow.
+ """
+ request.node._nodeid = "Content Processing V2 - Validate Golden path works as expected"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ golden_path_steps = [
+ ("01. Validate home page is loaded", lambda: home.validate_home_page()),
+ ("02. Validate API Documentation link and content", lambda: home.validate_api_document_link()),
+ ("03. Select Auto Claim collection", lambda: home.select_collection("Auto Claim")),
+ ("04. Upload Auto Claim documents", lambda: home.upload_files()),
+ ("05. Refresh until claim status is Completed", lambda: home.refresh_until_completed()),
+ ("06. Expand first claim row", lambda: home.expand_first_claim_row()),
+ ("07. Validate all child files are Completed with scores", lambda: home.validate_all_child_files_completed()),
+ ("08. Click on child file to load Extracted Results", lambda: home.click_on_child_file_row("claim_form.pdf")),
+ ("09. Validate Extracted Results tab has JSON content", lambda: home.validate_extracted_results()),
+ ("10. Validate Source Document pane displays the file", lambda: home.validate_source_document_visible()),
+ ("11. Edit name value to Camille Royy, add comment, and save", lambda: home.modify_comments_and_save("Automated GP test comment")),
+ ("12. Validate Process Steps for all child files", lambda: home.validate_process_steps()),
+ ("13. Refresh page before AI Summary validation", lambda: home.refresh_page()),
+ ("14. Click on first claim row to load Output Review", lambda: home.click_on_first_claim_row()),
+ ("15. Validate AI Summary tab has content", lambda: home.validate_ai_summary()),
+ ("16. Validate AI Gap Analysis tab has content", lambda: home.validate_ai_gap_analysis()),
+ ("17. Validate user able to delete claim", lambda: home.delete_first_claim()),
+ ]
+
+ for description, action in golden_path_steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_sections_display(login_logout, request):
+ """
+ Content Processing V2 - All the sections need to be displayed properly
+
+ Validates that all main sections (Processing Queue, Output Review, Source Document)
+ are displayed correctly on the home page.
+ """
+ request.node._nodeid = "Content Processing V2 - All the sections need to be displayed properly"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ logger.info("Running test: Validate all sections are displayed properly")
+ try:
+ home.validate_home_page()
+ logger.info("Test passed: All sections displayed properly")
+ except Exception:
+ logger.error("Test failed: All sections display validation", exc_info=True)
+ raise
+
+
+def test_content_processing_file_upload(login_logout, request):
+ """
+ Content Processing V2 - Files need to be uploaded successfully
+
+ Validates that 4 Auto Claim documents can be uploaded successfully with schema selection.
+ """
+ request.node._nodeid = "Content Processing V2 - Files need to be uploaded successfully"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ upload_steps = [
+ ("01. Select Auto Claim collection", lambda: home.select_collection("Auto Claim")),
+ ("02. Upload Auto Claim documents", lambda: home.upload_files()),
+ ]
+
+ for description, action in upload_steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_refresh_screen(login_logout, request):
+ """
+ Content Processing V2 - Refreshing the screen
+
+ Validates that screen refresh works properly after uploading files.
+ """
+ request.node._nodeid = "Content Processing V2 - Refreshing the screen"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ refresh_steps = [
+ ("01. Select Auto Claim collection", lambda: home.select_collection("Auto Claim")),
+ ("02. Upload Auto Claim documents", lambda: home.upload_files()),
+ ("03. Refresh until claim status is Completed", lambda: home.refresh_until_completed()),
+ ]
+
+ for description, action in refresh_steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_expand_and_verify_child_files(login_logout, request):
+ """
+ Content Processing V2 - Expand claim row and verify child docs processing status
+
+ Uploads docs, waits for completion, expands first row and validates all child files
+ show Completed status with Entity and Schema scores.
+ """
+ request.node._nodeid = "Content Processing V2 - Expand and verify child files completed with scores"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ steps = [
+ ("01. Select Auto Claim collection", lambda: home.select_collection("Auto Claim")),
+ ("02. Upload Auto Claim documents", lambda: home.upload_files()),
+ ("03. Refresh until claim status is Completed", lambda: home.refresh_until_completed()),
+ ("04. Expand first claim row", lambda: home.expand_first_claim_row()),
+ ("05. Validate all child files Completed with scores", lambda: home.validate_all_child_files_completed()),
+ ]
+
+ for description, action in steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_import_without_collection(login_logout, request):
+ """
+ Content Processing V2 - Once cleared Select Collection dropdown, import content shows validation
+
+ Validates that when no collection is selected, clicking Import Document(s)
+ button displays appropriate validation message.
+ """
+ request.node._nodeid = "Content Processing V2 - Once cleared Select Collection dropdown, import content shows validation"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ import_validation_steps = [
+ ("01. Validate home page is loaded", lambda: home.validate_home_page()),
+ ("02. Validate import content without collection selection", lambda: home.validate_import_without_collection()),
+ ]
+
+ for description, action in import_validation_steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_delete_file(login_logout, request):
+ """
+ Content Processing V2 - Delete File
+
+ Validates that uploaded claims can be successfully deleted from the processing queue.
+ """
+ request.node._nodeid = "Content Processing V2 - Delete File"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ delete_file_steps = [
+ ("00. Dismiss any open dialog", lambda: home.dismiss_any_dialog()),
+ ("01. Validate home page is loaded", lambda: home.validate_home_page()),
+ ("02. Delete uploaded claim", lambda: home.delete_first_claim()),
+ ]
+
+ for description, action in delete_file_steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_collapsible_panels(login_logout, request):
+ """
+ Content Processing V2 - Collapsible section for each panel
+
+ Validates that each panel (Processing Queue, Output Review, Source Document) can be
+ collapsed and expanded correctly.
+ """
+ request.node._nodeid = "Content Processing V2 - Collapsible section for each panel"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ collapsible_panels_steps = [
+ ("00. Dismiss any open dialog", lambda: home.dismiss_any_dialog()),
+ ("01. Validate home page is loaded", lambda: home.validate_home_page()),
+ ("02. Validate collapsible panels functionality", lambda: home.validate_collapsible_panels()),
+ ]
+
+ for description, action in collapsible_panels_steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_api_documentation(login_logout, request):
+ """
+ Content Processing V2 - API Document
+
+ Validates that the API Documentation link opens correctly in a new page and displays
+ the correct API documentation content.
+ """
+ request.node._nodeid = "Content Processing V2 - API Document"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ api_documentation_steps = [
+ ("00. Dismiss any open dialog", lambda: home.dismiss_any_dialog()),
+ ("01. Validate home page is loaded", lambda: home.validate_home_page()),
+ ("02. Validate API Documentation link and content", lambda: home.validate_api_document_link()),
+ ]
+
+ for description, action in api_documentation_steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_schema_selection_warning(login_logout, request):
+ """
+ Content Processing V2 - Alert user to upload file correctly as per the selected schema
+
+ ADO TC 17305: Validates that the import dialog shows 'Selected Collection: Auto Claim'
+ warning and that Import button remains disabled until schemas are selected for each file.
+ """
+ request.node._nodeid = "Content Processing V2 - Alert user to upload file correctly as per selected schema"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ steps = [
+ ("00. Dismiss any open dialog", lambda: home.dismiss_any_dialog()),
+ ("01. Select Auto Claim collection", lambda: home.select_collection("Auto Claim")),
+ ("02. Validate schema selection warning in import dialog", lambda: home.validate_schema_selection_warning()),
+ ]
+
+ for description, action in steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_unsupported_file_upload(login_logout, request):
+ """
+ Content Processing V2 - Validate upload of unsupported files
+
+ ADO TC 26004: Validates that uploading non-PDF/non-image files (e.g., .txt, .docx)
+ is rejected with an appropriate error message or disabled Import button.
+ """
+ request.node._nodeid = "Content Processing V2 - Validate upload of unsupported files"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ steps = [
+ ("00. Dismiss any open dialog", lambda: home.dismiss_any_dialog()),
+ ("01. Select Auto Claim collection", lambda: home.select_collection("Auto Claim")),
+ ("02. Validate unsupported file upload is rejected", lambda: home.validate_unsupported_file_upload()),
+ ]
+
+ for description, action in steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_import_disabled_without_schema(login_logout, request):
+ """
+ Content Processing V2 - Import button disabled when no schemas are selected
+
+ Validates that after uploading files into the import dialog, the Import button
+ remains disabled until schemas are assigned to every file.
+ """
+ request.node._nodeid = "Content Processing V2 - Import button disabled when no schemas are selected"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ steps = [
+ ("00. Dismiss any open dialog", lambda: home.dismiss_any_dialog()),
+ ("01. Select Auto Claim collection", lambda: home.select_collection("Auto Claim")),
+ ("02. Validate Import disabled without schema selection", lambda: home.validate_import_disabled_without_schemas()),
+ ]
+
+ for description, action in steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_import_disabled_with_partial_schemas(login_logout, request):
+ """
+ Content Processing V2 - Import button disabled with partial schema selection
+
+ Validates that assigning schemas to only some files (not all) keeps the
+ Import button disabled, preventing incomplete uploads.
+ """
+ request.node._nodeid = "Content Processing V2 - Import button disabled with partial schema selection"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ steps = [
+ ("00. Dismiss any open dialog", lambda: home.dismiss_any_dialog()),
+ ("01. Select Auto Claim collection", lambda: home.select_collection("Auto Claim")),
+ ("02. Validate Import disabled with partial schema selection", lambda: home.validate_import_disabled_with_partial_schemas()),
+ ]
+
+ for description, action in steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_mismatched_schema_upload(login_logout, request):
+ """
+ Content Processing V2 - Upload files with deliberately mismatched schemas
+
+ Validates what happens when files are uploaded with wrong schema assignments
+ (e.g., claim_form.pdf assigned Repair Estimate schema). The system should accept
+ the upload but processing results may differ from correct schema assignments.
+ """
+ request.node._nodeid = "Content Processing V2 - Upload files with mismatched schemas"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ steps = [
+ ("00. Dismiss any open dialog", lambda: home.dismiss_any_dialog()),
+ ("01. Select Auto Claim collection", lambda: home.select_collection("Auto Claim")),
+ ("02. Upload files with mismatched schemas", lambda: home.upload_files_with_mismatched_schemas()),
+ ("03. Refresh until processing completes", lambda: home.refresh_until_completed()),
+ ("04. Expand first claim row", lambda: home.expand_first_claim_row()),
+ ("05. Validate child files completed (even with wrong schemas)", lambda: home.validate_all_child_files_completed()),
+ ("06. Clean up - delete the claim", lambda: home.delete_first_claim()),
+ ]
+
+ for description, action in steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_schema_preserved_after_file_removal(login_logout, request):
+ """
+ Content Processing V2 - Schema selections preserved after removing a file
+
+ Validates that when a file is removed from the import dialog, the schema
+ selections for the remaining files are preserved and not reset.
+ """
+ request.node._nodeid = "Content Processing V2 - Schema selections preserved after file removal"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ steps = [
+ ("00. Dismiss any open dialog", lambda: home.dismiss_any_dialog()),
+ ("01. Select Auto Claim collection", lambda: home.select_collection("Auto Claim")),
+ ("02. Validate schema preserved after file removal", lambda: home.validate_schema_dropdown_after_file_removal()),
+ ]
+
+ for description, action in steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_network_disconnect(login_logout, request):
+ """
+ Content Processing V2 - Error notification on network disconnect during file upload
+
+ ADO TC 17306: Validates that when network is disconnected during file upload,
+ an appropriate error notification is displayed to the user.
+ """
+ request.node._nodeid = "Content Processing V2 - Error notification on network disconnect during upload"
+
+ page = login_logout
+ home = HomePageV2(page)
+
+ steps = [
+ ("00. Dismiss any open dialog", lambda: home.dismiss_any_dialog()),
+ ("01. Select Auto Claim collection", lambda: home.select_collection("Auto Claim")),
+ ("02. Validate network disconnect error handling", lambda: home.validate_network_disconnect_error()),
+ ]
+
+ for description, action in steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception:
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise