diff --git a/.github/workflows/test-automation.yml b/.github/workflows/test-automation.yml
new file mode 100644
index 00000000..d0c2412d
--- /dev/null
+++ b/.github/workflows/test-automation.yml
@@ -0,0 +1,133 @@
+name: Test Automation Content Processing
+
+on:
+ push:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'tests/e2e-test/**'
+ schedule:
+ - cron: '0 13 * * *' # Runs at 1 PM UTC
+ workflow_dispatch:
+
+env:
+ url: ${{ vars.CP_WEB_URL }}
+ accelerator_name: "Content Processing"
+
+jobs:
+ test:
+
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.12'
+
+ - name: Azure CLI Login
+ uses: azure/login@v2
+ with:
+ creds: '{"clientId":"${{ secrets.AZURE_MAINTENANCE_CLIENT_ID }}","clientSecret":"${{ secrets.AZURE_MAINTENANCE_CLIENT_SECRET }}","subscriptionId":"${{ secrets.AZURE_MAINTENANCE_SUBSCRIPTION_ID }}","tenantId":"${{ secrets.AZURE_TENANT_ID }}"}'
+
+ - name: Start Container App
+ id: start-container-app
+ uses: azure/cli@v2
+ with:
+ azcliversion: 'latest'
+ inlineScript: |
+ az rest -m post -u "/subscriptions/${{ secrets.AZURE_MAINTENANCE_SUBSCRIPTION_ID }}/resourceGroups/${{ vars.CP_RG }}/providers/Microsoft.App/containerApps/${{ vars.CP_CONTAINERAPP_PREFIX }}-app/start?api-version=2025-01-01"
+ az rest -m post -u "/subscriptions/${{ secrets.AZURE_MAINTENANCE_SUBSCRIPTION_ID }}/resourceGroups/${{ vars.CP_RG }}/providers/Microsoft.App/containerApps/${{ vars.CP_CONTAINERAPP_PREFIX }}-api/start?api-version=2025-01-01"
+ az rest -m post -u "/subscriptions/${{ secrets.AZURE_MAINTENANCE_SUBSCRIPTION_ID }}/resourceGroups/${{ vars.CP_RG }}/providers/Microsoft.App/containerApps/${{ vars.CP_CONTAINERAPP_PREFIX }}-web/start?api-version=2025-01-01"
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r tests/e2e-test/requirements.txt
+
+ - name: Ensure browsers are installed
+ run: python -m playwright install --with-deps chromium
+
+ - name: Run tests(1)
+ id: test1
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: tests/e2e-test
+ continue-on-error: true
+
+ - name: Sleep for 30 seconds
+ if: ${{ steps.test1.outcome == 'failure' }}
+ run: sleep 30s
+ shell: bash
+
+ - name: Run tests(2)
+ id: test2
+ if: ${{ steps.test1.outcome == 'failure' }}
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: tests/e2e-test
+ continue-on-error: true
+
+ - name: Sleep for 60 seconds
+ if: ${{ steps.test2.outcome == 'failure' }}
+ run: sleep 60s
+ shell: bash
+
+ - name: Run tests(3)
+ id: test3
+ if: ${{ steps.test2.outcome == 'failure' }}
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: tests/e2e-test
+
+ - name: Upload test report
+ id: upload_report
+ uses: actions/upload-artifact@v4
+ if: ${{ !cancelled() }}
+ with:
+ name: test-report
+ path: tests/e2e-test/report/*
+
+ - name: Send Notification
+ if: always()
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+ REPORT_URL=${{ steps.upload_report.outputs.artifact-url }}
+ IS_SUCCESS=${{ steps.test1.outcome == 'success' || steps.test2.outcome == 'success' || steps.test3.outcome == 'success' }}
+ # Construct the email body
+ if [ "$IS_SUCCESS" = "true" ]; then
+          EMAIL_BODY=$(cat <<EOF
+        {
+          "body": "Dear Team,\n\nWe would like to inform you that the ${{ env.accelerator_name }} Test Automation process has completed successfully.\n\nRun URL: ${RUN_URL}\nTest Report: ${REPORT_URL}\n\nBest regards,\nYour Automation Team",
+          "subject": "${{ env.accelerator_name }} Test Automation - Success"
+        }
+        EOF
+        )
+ else
+          EMAIL_BODY=$(cat <<EOF
+        {
+          "body": "Dear Team,\n\nWe would like to inform you that the ${{ env.accelerator_name }} Test Automation process has encountered an issue and has failed to complete successfully.\n\nRun URL: ${RUN_URL}\n${OUTPUT}\nTest Report: ${REPORT_URL}\n\nPlease investigate the matter at your earliest convenience.\n\nBest regards,\nYour Automation Team",
+          "subject": "${{ env.accelerator_name }} Test Automation - Failure"
+        }
+        EOF
+        )
+ fi
+
+ # Send the notification
+ curl -X POST "${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA }}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send notification"
+
+ - name: Stop Container App
+ if: always()
+ uses: azure/cli@v2
+ with:
+ azcliversion: 'latest'
+ inlineScript: |
+ az rest -m post -u "/subscriptions/${{ secrets.AZURE_MAINTENANCE_SUBSCRIPTION_ID }}/resourceGroups/${{ vars.CP_RG }}/providers/Microsoft.App/containerApps/${{ vars.CP_CONTAINERAPP_PREFIX }}-app/stop?api-version=2025-01-01"
+ az rest -m post -u "/subscriptions/${{ secrets.AZURE_MAINTENANCE_SUBSCRIPTION_ID }}/resourceGroups/${{ vars.CP_RG }}/providers/Microsoft.App/containerApps/${{ vars.CP_CONTAINERAPP_PREFIX }}-api/stop?api-version=2025-01-01"
+ az rest -m post -u "/subscriptions/${{ secrets.AZURE_MAINTENANCE_SUBSCRIPTION_ID }}/resourceGroups/${{ vars.CP_RG }}/providers/Microsoft.App/containerApps/${{ vars.CP_CONTAINERAPP_PREFIX }}-web/stop?api-version=2025-01-01"
+ az logout
\ No newline at end of file
diff --git a/azure.yaml b/azure.yaml
index bf46bfb5..b5b9121e 100644
--- a/azure.yaml
+++ b/azure.yaml
@@ -11,10 +11,10 @@ hooks:
preprovision:
posix:
shell: sh
- run: timestamp=$(date +"%Y%m%d-%H%M%S"); logFile="azd_preprovision_$timestamp.log"; sed -i 's/\r$//' ./infra/scripts/docker-build.sh; ./infra/scripts/docker-build.sh "$AZURE_SUBSCRIPTION_ID" "$AZURE_ENV_NAME" "$AZURE_LOCATION" "$AZURE_RESOURCE_GROUP" "$USE_LOCAL_BUILD" 2>&1 | tee "$logFile"
+ run: timestamp=$(date +"%Y%m%d-%H%M%S"); logFile="azd_preprovision_$timestamp.log"; sed -i 's/\r$//' ./infra/scripts/docker-build.sh; ./infra/scripts/docker-build.sh "$AZURE_SUBSCRIPTION_ID" "$AZURE_ENV_NAME" "$AZURE_LOCATION" "$AZURE_RESOURCE_GROUP" "$USE_LOCAL_BUILD" "$AZURE_ENV_IMAGETAG" 2>&1 | tee "$logFile"
windows:
shell: pwsh
- run: $timestamp = Get-Date -Format "yyyyMMdd-HHmmss"; $logFile = "azd_preprovision_$timestamp.log"; ./infra/scripts/docker-build.ps1 $env:AZURE_SUBSCRIPTION_ID $env:AZURE_ENV_NAME $env:AZURE_LOCATION $env:AZURE_RESOURCE_GROUP $env:USE_LOCAL_BUILD *>&1 | Tee-Object -FilePath $logFile
+ run: $timestamp = Get-Date -Format "yyyyMMdd-HHmmss"; $logFile = "azd_preprovision_$timestamp.log"; ./infra/scripts/docker-build.ps1 $env:AZURE_SUBSCRIPTION_ID $env:AZURE_ENV_NAME $env:AZURE_LOCATION $env:AZURE_RESOURCE_GROUP $env:USE_LOCAL_BUILD $env:AZURE_ENV_IMAGETAG *>&1 | Tee-Object -FilePath $logFile
postprovision:
posix:
shell: sh
diff --git a/docs/CustomizingAzdParameters.md b/docs/CustomizingAzdParameters.md
index 3f26f246..d16f28ab 100644
--- a/docs/CustomizingAzdParameters.md
+++ b/docs/CustomizingAzdParameters.md
@@ -3,40 +3,34 @@
By default this template will use the environment name as the prefix to prevent naming collisions within Azure. The parameters below show the default values. You only need to run the statements below if you need to change the values.
-> To override any of the parameters, run `azd env set ` before running `azd up`. On the first azd command, it will prompt you for the environment name. Be sure to choose 3-20 charaters alphanumeric unique name.
+> To override any of the parameters, run `azd env set <PARAMETER_NAME> <VALUE>` before running `azd up`. On the first azd command, it will prompt you for the environment name. Be sure to choose a unique, 3-20 character alphanumeric name.
+## Parameters
-Set the Environment Name Prefix
-```shell
-azd env set AZURE_ENV_NAME 'cps'
-```
+| Name | Type | Example Value | Purpose |
+| -------------------------------------- | ------- | --------------------------- | ------------------------------------------------------------------------------------- |
+| `AZURE_ENV_NAME` | string | `cps` | Sets the environment name prefix for all Azure resources. |
+| `AZURE_ENV_SECONDARY_LOCATION` | string | `eastus2` | Specifies a secondary Azure region. |
+| `AZURE_ENV_CU_LOCATION` | string | `WestUS` | Sets the location for the Azure Content Understanding service. |
+| `AZURE_ENV_MODEL_DEPLOYMENT_TYPE` | string | `GlobalStandard` | Defines the model deployment type (allowed values: `Standard`, `GlobalStandard`). |
+| `AZURE_ENV_MODEL_NAME`                 | string  | `gpt-4o`                    | Specifies the GPT model name (allowed values: `gpt-4o`).                              |
+| `AZURE_ENV_MODEL_VERSION` | string | `2024-08-06` | Specifies the GPT model version (allowed values: `2024-08-06`). |
+| `AZURE_ENV_MODEL_CAPACITY` | integer | `30` | Sets the model capacity (choose based on your subscription's available GPT capacity). |
+| `USE_LOCAL_BUILD` | boolean | `false` | Indicates whether to use a local container build for deployment. |
+| `AZURE_ENV_IMAGETAG`                   | string  | `latest`                    | Sets the container image tag (allowed values: `latest`, `dev`, `hotfix`).             |
+| `AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID` | string | `` | Reuses an existing Log Analytics Workspace instead of provisioning a new one. |
-Change the Azure Content Understanding Service Location (example: eastus2, westus2, etc.)
-```shell
-azd env set AZURE_ENV_CU_LOCATION 'West US'
-```
-Change the Deployment Type (allowed values: Standard, GlobalStandard)
-```shell
-azd env set AZURE_ENV_MODEL_DEPLOYMENT_TYPE 'GlobalStandard'
-```
+## How to Set a Parameter
-Set the Model Name (allowed values: gpt-4o)
-```shell
-azd env set AZURE_ENV_MODEL_NAME 'gpt-4o'
-```
+To customize any of the above values, run the following command **before** `azd up`:
-Change the Model Capacity (choose a number based on available GPT model capacity in your subscription)
-```shell
-azd env set AZURE_ENV_MODEL_CAPACITY '30'
+```bash
+azd env set <PARAMETER_NAME> <VALUE>
```
-Change if the deployment should use a local build of the containers
-```shell
-azd env set USE_LOCAL_BUILD 'false'
-```
+**Example:**
-Set the Log Analytics Workspace Id if you need to reuse the existing workspace
-```shell
-azd env set AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID '/subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/'
+```bash
+azd env set AZURE_LOCATION westus2
```
\ No newline at end of file
diff --git a/docs/DeploymentGuide.md b/docs/DeploymentGuide.md
index 82667631..0735575f 100644
--- a/docs/DeploymentGuide.md
+++ b/docs/DeploymentGuide.md
@@ -36,6 +36,7 @@ This will allow the scripts to run for the current session without permanently c
+
## Deployment Options & Steps
Pick from the options below to see step-by-step instructions for GitHub Codespaces, VS Code Dev Containers, and Local Environments.
@@ -111,15 +112,19 @@ Consider the following settings during your deployment to modify specific settin
When you start the deployment, most parameters will have **default values**, but you can update the following settings by following the steps [here](../docs/CustomizingAzdParameters.md):
-| **Setting** | **Description** | **Default value** |
-|-------------|-----------------|-------------------|
-| **Azure Region** | The region where resources will be created. | East US |
-| **Azure AI Content Understanding Location** | Select from a drop-down list of values. | Sweden Central |
-| **Secondary Location** | A **less busy** region for **Azure Cosmos DB**, useful in case of availability constraints. | eastus2 |
-| **Deployment Type** | Select from a drop-down list. | GlobalStandard |
-| **GPT Model** | Choose from **gpt-4o**. | gpt-4o |
-| **GPT Model Deployment Capacity** | Configure capacity for **GPT models**. | 30k |
-| **Existing Log analytics workspace** | To reuse the existing Log analytics workspace Id. | |
+| **Setting** | **Description** | **Default Value** |
+| ------------------------------------------- | ------------------------------------------------------------------------------------------- | ----------------- |
+| **Azure Region** | The region where resources will be created. | East US |
+| **Azure AI Content Understanding Location** | Location for the **Content Understanding** service. | Sweden Central |
+| **Secondary Location** | A **less busy** region for **Azure Cosmos DB**, useful in case of availability constraints. | eastus2 |
+| **Deployment Type** | Select from a drop-down list. | GlobalStandard |
+| **GPT Model** | Choose from **gpt-4o**. | gpt-4o |
+| **GPT Model Version** | GPT model version used in the deployment. | 2024-08-06 |
+| **GPT Model Deployment Capacity** | Configure capacity for **GPT models**. | 30k |
+| **Use Local Build** | Boolean flag to determine if local container builds should be used. | false |
+| **Image Tag** | Image version for deployment (allowed values: `latest`, `dev`, `hotfix`). | latest |
+| **Existing Log Analytics Workspace** | To reuse an existing Log Analytics Workspace ID instead of creating a new one. | *(none)* |
+
diff --git a/infra/container_app/deploy_container_app_api_web.bicep b/infra/container_app/deploy_container_app_api_web.bicep
index 9c4c23de..41272dd2 100644
--- a/infra/container_app/deploy_container_app_api_web.bicep
+++ b/infra/container_app/deploy_container_app_api_web.bicep
@@ -26,6 +26,7 @@ param maxReplicaContainerWeb int = 1
param azureContainerRegistry string
param containerRegistryReaderId string
param useLocalBuild string = 'false'
+param imageTag string
var abbrs = loadJsonContent('../abbreviations.json')
@@ -76,7 +77,7 @@ module containerApp 'deploy_container_app.bicep' = {
containerEnvId: containerAppEnvId
azureContainerRegistry: azureContainerRegistry
azureContainerRegistryImage: 'contentprocessor'
- azureContainerRegistryImageTag: 'latest'
+ azureContainerRegistryImageTag: imageTag
managedIdentityId: containerRegistryReaderId
containerEnvVars: [
{
@@ -99,7 +100,7 @@ module containerAppApi 'deploy_container_app.bicep' = {
containerEnvId: containerAppEnvId
azureContainerRegistry: azureContainerRegistry
azureContainerRegistryImage: 'contentprocessorapi'
- azureContainerRegistryImageTag: 'latest'
+ azureContainerRegistryImageTag: imageTag
managedIdentityId: containerRegistryReaderId
allowedOrigins: [containerAppWebEndpoint]
containerEnvVars: [
@@ -123,7 +124,7 @@ module containerAppWeb 'deploy_container_app.bicep' = {
containerEnvId: containerAppEnvId
azureContainerRegistry: azureContainerRegistry
azureContainerRegistryImage: 'contentprocessorweb'
- azureContainerRegistryImageTag: 'latest'
+ azureContainerRegistryImageTag: imageTag
managedIdentityId: containerRegistryReaderId
containerEnvVars: [
{
diff --git a/infra/main.bicep b/infra/main.bicep
index 575d5ecc..99a5b016 100644
--- a/infra/main.bicep
+++ b/infra/main.bicep
@@ -30,20 +30,10 @@ param contentUnderstandingLocation string
])
param deploymentType string = 'GlobalStandard'
-@minLength(1)
@description('Name of the GPT model to deploy:')
-@allowed([
- 'gpt-4o-mini'
- 'gpt-4o'
- 'gpt-4'
-])
param gptModelName string = 'gpt-4o'
-@minLength(1)
@description('Version of the GPT model to deploy:')
-@allowed([
- '2024-08-06'
-])
param gptModelVersion string = '2024-08-06'
//var gptModelVersion = '2024-02-15-preview'
@@ -78,6 +68,8 @@ param useLocalBuild string = 'false'
@description('Optional: Existing Log Analytics Workspace Resource ID')
param existingLogAnalyticsWorkspaceId string = ''
+param imageTag string = 'latest'
+
var containerImageEndPoint = 'cpscontainerreg.azurecr.io'
var resourceGroupLocation = resourceGroup().location
@@ -186,6 +178,7 @@ module containerApps './container_app/deploy_container_app_api_web.bicep' = {
minReplicaContainerWeb: minReplicaContainerWeb
maxReplicaContainerWeb: maxReplicaContainerWeb
useLocalBuild: 'false'
+ imageTag: 'latest'
}
}
@@ -254,6 +247,7 @@ module updateContainerApp './container_app/deploy_container_app_api_web.bicep' =
minReplicaContainerWeb: minReplicaContainerWeb
maxReplicaContainerWeb: maxReplicaContainerWeb
useLocalBuild: useLocalBuildLower
+ imageTag: imageTag
}
dependsOn: [roleAssignments]
}
diff --git a/infra/main.bicepparam b/infra/main.bicepparam
index c7549d8b..b6a15d71 100644
--- a/infra/main.bicepparam
+++ b/infra/main.bicepparam
@@ -1,9 +1,12 @@
using './main.bicep'
param environmentName = readEnvironmentVariable('AZURE_ENV_NAME', 'cps')
+param secondaryLocation = readEnvironmentVariable('AZURE_ENV_SECONDARY_LOCATION', 'EastUs2')
param contentUnderstandingLocation = readEnvironmentVariable('AZURE_ENV_CU_LOCATION', 'WestUS')
param deploymentType = readEnvironmentVariable('AZURE_ENV_MODEL_DEPLOYMENT_TYPE', 'GlobalStandard')
param gptModelName = readEnvironmentVariable('AZURE_ENV_MODEL_NAME', 'gpt-4o')
+param gptModelVersion = readEnvironmentVariable('AZURE_ENV_MODEL_VERSION', '2024-08-06')
param gptDeploymentCapacity = int(readEnvironmentVariable('AZURE_ENV_MODEL_CAPACITY', '30'))
param useLocalBuild = readEnvironmentVariable('USE_LOCAL_BUILD', 'false')
+param imageTag = readEnvironmentVariable('AZURE_ENV_IMAGETAG', 'latest')
param existingLogAnalyticsWorkspaceId = readEnvironmentVariable('AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID', '')
diff --git a/infra/main.json b/infra/main.json
index 27bf0437..4f5351e4 100644
--- a/infra/main.json
+++ b/infra/main.json
@@ -5,7 +5,7 @@
"_generator": {
"name": "bicep",
"version": "0.35.1.17967",
- "templateHash": "12841296004328754819"
+ "templateHash": "15234454470886032390"
}
},
"parameters": {
@@ -54,12 +54,6 @@
"gptModelName": {
"type": "string",
"defaultValue": "gpt-4o",
- "allowedValues": [
- "gpt-4o-mini",
- "gpt-4o",
- "gpt-4"
- ],
- "minLength": 1,
"metadata": {
"description": "Name of the GPT model to deploy:"
}
@@ -67,10 +61,6 @@
"gptModelVersion": {
"type": "string",
"defaultValue": "2024-08-06",
- "allowedValues": [
- "2024-08-06"
- ],
- "minLength": 1,
"metadata": {
"description": "Version of the GPT model to deploy:"
}
diff --git a/infra/scripts/docker-build.ps1 b/infra/scripts/docker-build.ps1
index 9c998810..10b5632a 100644
--- a/infra/scripts/docker-build.ps1
+++ b/infra/scripts/docker-build.ps1
@@ -4,7 +4,8 @@ param (
[string]$ENV_NAME,
[string]$AZURE_LOCATION,
[string]$AZURE_RESOURCE_GROUP,
- [string]$USE_LOCAL_BUILD
+ [string]$USE_LOCAL_BUILD,
+ [string]$AZURE_ENV_IMAGETAG
)
# Convert USE_LOCAL_BUILD to Boolean
@@ -12,7 +13,7 @@ $USE_LOCAL_BUILD = if ($USE_LOCAL_BUILD -match "^(?i:true)$") { $true } else { $
# Validate required parameters
if (-not $AZURE_SUBSCRIPTION_ID -or -not $ENV_NAME -or -not $AZURE_LOCATION -or -not $AZURE_RESOURCE_GROUP) {
- Write-Error "Missing required arguments. Usage: docker-build.ps1 "
+    Write-Error "Missing required arguments. Usage: docker-build.ps1 <AZURE_SUBSCRIPTION_ID> <ENV_NAME> <AZURE_LOCATION> <AZURE_RESOURCE_GROUP> <USE_LOCAL_BUILD> <AZURE_ENV_IMAGETAG>"
exit 1
}
@@ -57,7 +58,7 @@ function Build-And-Push-Image {
[string]$BUILD_PATH
)
- $IMAGE_URI = "$ACR_NAME.azurecr.io/$($IMAGE_NAME):latest"
+ $IMAGE_URI = "$ACR_NAME.azurecr.io/$($IMAGE_NAME):$AZURE_ENV_IMAGETAG"
Write-Host "Building Docker image: $IMAGE_URI"
docker build $BUILD_PATH --no-cache -t $IMAGE_URI
diff --git a/infra/scripts/docker-build.sh b/infra/scripts/docker-build.sh
index ddb22b2f..d1833938 100644
--- a/infra/scripts/docker-build.sh
+++ b/infra/scripts/docker-build.sh
@@ -7,10 +7,11 @@ echo $2
echo $3
echo $4
echo $5
+echo $6
# Check if the required arguments are provided
-if [ "$#" -ne 5 ]; then
-    echo "Usage: docker-build.sh "
+if [ "$#" -ne 6 ]; then
+    echo "Usage: docker-build.sh <AZURE_SUBSCRIPTION_ID> <ENV_NAME> <AZURE_LOCATION> <AZURE_RESOURCE_GROUP> <USE_LOCAL_BUILD> <AZURE_ENV_IMAGETAG>"
exit 1
fi
@@ -19,6 +20,7 @@ ENV_NAME=$2
AZURE_LOCATION=$3
AZURE_RESOURCE_GROUP=$4
USE_LOCAL_BUILD=$5
+AZURE_ENV_IMAGETAG=$6
USE_LOCAL_BUILD=$(echo "$USE_LOCAL_BUILD" | grep -iq "^true$" && echo "true" || echo "false")
@@ -49,9 +51,9 @@ if [ "$USE_LOCAL_BUILD" = "true" ]; then
echo "Deployed container registry in location."
# Construct full image names
- CONTENTPROCESSOR_IMAGE_URI="$ACR_NAME.azurecr.io/contentprocessor:latest"
- CONTENTPROCESSORAPI_IMAGE_URI="$ACR_NAME.azurecr.io/contentprocessorapi:latest"
- CONTENTPROCESSORWEB_IMAGE_URI="$ACR_NAME.azurecr.io/contentprocessorweb:latest"
+ CONTENTPROCESSOR_IMAGE_URI="$ACR_NAME.azurecr.io/contentprocessor:$AZURE_ENV_IMAGETAG"
+ CONTENTPROCESSORAPI_IMAGE_URI="$ACR_NAME.azurecr.io/contentprocessorapi:$AZURE_ENV_IMAGETAG"
+ CONTENTPROCESSORWEB_IMAGE_URI="$ACR_NAME.azurecr.io/contentprocessorweb:$AZURE_ENV_IMAGETAG"
# Azure login
echo "Logging into Azure Container Registry: $ACR_NAME"
diff --git a/tests/e2e-test/.gitignore b/tests/e2e-test/.gitignore
new file mode 100644
index 00000000..79644b65
--- /dev/null
+++ b/tests/e2e-test/.gitignore
@@ -0,0 +1,169 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+microsoft/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+.idea/
+archive/
+report/
+screenshots/
+report.html
+assets/
+.vscode/
diff --git a/tests/e2e-test/base/__init__.py b/tests/e2e-test/base/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/e2e-test/base/base.py b/tests/e2e-test/base/base.py
new file mode 100644
index 00000000..5992ab6a
--- /dev/null
+++ b/tests/e2e-test/base/base.py
@@ -0,0 +1,10 @@
+class BasePage:
+ def __init__(self, page):
+ self.page = page
+
+ def scroll_into_view(self, locator):
+ reference_list = locator
+ locator.nth(reference_list.count() - 1).scroll_into_view_if_needed()
+
+ def is_visible(self, locator):
+ locator.is_visible()
diff --git a/tests/e2e-test/config/constants.py b/tests/e2e-test/config/constants.py
new file mode 100644
index 00000000..f5f4c9ac
--- /dev/null
+++ b/tests/e2e-test/config/constants.py
@@ -0,0 +1,8 @@
+import os
+
+from dotenv import load_dotenv
+
+load_dotenv()
+URL = os.getenv("url")
+if URL.endswith("/"):
+ URL = URL[:-1]
diff --git a/tests/e2e-test/pages/HomePage.py b/tests/e2e-test/pages/HomePage.py
new file mode 100644
index 00000000..ce091f44
--- /dev/null
+++ b/tests/e2e-test/pages/HomePage.py
@@ -0,0 +1,266 @@
+import os.path
+
+from base.base import BasePage
+from playwright.sync_api import expect
+
+
+class HomePage(BasePage):
+ TITLE_TEXT = "//span[normalize-space()='Processing Queue']"
+ SELECT_SCHEMA = "//input[@placeholder='Select Schema']"
+ IMPORT_CONTENT = "//button[normalize-space()='Import Content']"
+ REFRESH = "//button[normalize-space()='Refresh']"
+ BROWSE_FILES = "//button[normalize-space()='Browse Files']"
+ UPLOAD_BTN = "//button[normalize-space()='Upload']"
+ SUCCESS_MSG = "/div[@class='file-item']//*[name()='svg']"
+ CLOSE_BTN = "//button[normalize-space()='Close']"
+ STATUS = "//div[@role='cell']"
+ PROCESS_STEPS = "//button[@value='process-history']"
+ EXTRACT = "//span[normalize-space()='extract']"
+ MAP = "//span[normalize-space()='map']"
+ EVALUATE = "//span[normalize-space()='evaluate']"
+ EXTRACTED_RESULT = "//button[@value='extracted-results']"
+ COMMENTS = "//textarea"
+ SAVE_BTN = "//button[normalize-space()='Save']"
+ EDIT_CONFIRM = "//div[@class='jer-confirm-buttons']//div[1]"
+ SHIPPING_ADD_STREET = "//textarea[@id='shipping_address.street_textarea']"
+ DELETE_FILE = "//button[@aria-haspopup='menu']"
+
+ # INVOICE_JSON_ENTITIES
+ CUSTOMER_NAME = "//div[@id='customer_name_display']"
+ CUSTOMER_STREET = "//div[@id='customer_address.street_display']"
+ CUSTOMER_CITY = "//div[@id='customer_address.city_display']"
+ CUSTOMER_ZIP_CODE = "//div[@id='customer_address.postal_code_display']"
+ CUSTOMER_COUNTRY = "//div[@id='customer_address.country_display']"
+ SHIPPING_STREET = "//div[@id='shipping_address.street_display']"
+ SHIPPING_CITY = "//div[@id='shipping_address.city_display']"
+ SHIPPING_POSTAL_CODE = "//div[@id='shipping_address.postal_code_display']"
+ SHIPPING_COUNTRY = "//div[@id='shipping_address.country_display']"
+ PURCHASE_ORDER = "//div[@id='purchase_order_display']"
+ INVOICE_ID = "//div[@id='invoice_id_display']"
+ INVOICE_DATE = "//div[@id='invoice_date_display']"
+ payable_by = "//div[@id='payable_by_display']"
+ vendor_name = "//div[@id='vendor_name_display']"
+ v_street = "//div[@id='vendor_address.street_display']"
+ v_city = "//div[@id='vendor_address.city_display']"
+ v_state = "//div[@id='vendor_address.state_display']"
+ v_zip_code = "//div[@id='vendor_address.postal_code_display']"
+ vendor_tax_id = "//div[@id='vendor_tax_id_display']"
+ SUBTOTAL = "//span[normalize-space()='16859.1']"
+ TOTAL_TAX = "//span[normalize-space()='11286']"
+ INVOICE_TOTAL = "//span[normalize-space()='22516.08']"
+ PAYMENT_TERMS = "//div[@id='payment_terms_display']"
+ product_code1 = "//div[@id='items.0.product_code_display']"
+ p1_description = "//div[@id='items.0.description_display']"
+ p1_quantity = "//span[normalize-space()='163']"
+ p1_tax = "//span[normalize-space()='2934']"
+ p1_unit_price = "//span[normalize-space()='2.5']"
+ p1_total = "//span[normalize-space()='407.5']"
+
+ # PROPERTY_JSON_DATA
+
+ first_name = "//div[@id='policy_claim_info.first_name_display']"
+ last_name = "//div[@id='policy_claim_info.last_name_display']"
+ tel_no = "//div[@id='policy_claim_info.telephone_number_display']"
+ policy_no = "//div[@id='policy_claim_info.policy_number_display']"
+ coverage_type = "//div[@id='policy_claim_info.coverage_type_display']"
+ claim_number = "//div[@id='policy_claim_info.claim_number_display']"
+ policy_effective_date = (
+ "//div[@id='policy_claim_info.policy_effective_date_display']"
+ )
+ policy_expiration_date = (
+ "//div[@id='policy_claim_info.policy_expiration_date_display']"
+ )
+ damage_deductible = "//span[normalize-space()='1000']"
+ damage_deductible_currency = (
+ "//div[@id='policy_claim_info.damage_deductible_currency_display']"
+ )
+ date_of_damage_loss = "//div[@id='policy_claim_info.date_of_damage_loss_display']"
+ time_of_loss = "//div[@id='policy_claim_info.time_of_loss_display']"
+ date_prepared = "//div[@id='policy_claim_info.date_prepared_display']"
+ item = "//div[@id='property_claim_details.0.item_display']"
+ description = "//div[@id='property_claim_details.0.description_display']"
+ date_acquired = "//div[@id='property_claim_details.0.date_acquired_display']"
+ cost_new = "//body[1]/div[1]/div[1]/div[1]/div[1]/main[1]/div[1]/div[2]/div[2]/div[2]/div[3]/div[1]/div[1]/div[2]/div[1]/div[1]/div[3]/div[2]/div[1]/div[3]/div[1]/div[1]/div[3]/div[4]/div[1]/div[1]/div[1]/div[1]/span[1]"
+ cost_new_currency = (
+ "//div[@id='property_claim_details.0.cost_new_currency_display']"
+ )
+ replacement_repair = "//span[normalize-space()='350']"
+ replacement_repair_currency = (
+ "//div[@id='property_claim_details.0.replacement_repair_currency_display']"
+ )
+
+ def __init__(self, page):
+ self.page = page
+
+ def validate_home_page(self):
+ expect(self.page.locator(self.TITLE_TEXT)).to_be_visible()
+ self.page.wait_for_timeout(2000)
+
+ def select_schema(self, SchemaName):
+ self.page.wait_for_timeout(5000)
+ self.page.locator(self.SELECT_SCHEMA).click()
+ if SchemaName == "Invoice":
+ self.page.get_by_role("option", name="Invoice").click()
+ else:
+ self.page.get_by_role("option", name="Property Loss Damage Claim").click()
+
+ def upload_files(self, schemaType):
+ with self.page.expect_file_chooser() as fc_info:
+ self.page.locator(self.IMPORT_CONTENT).click()
+ self.page.locator(self.BROWSE_FILES).click()
+ self.page.wait_for_timeout(5000)
+ # self.page.wait_for_load_state('networkidle')
+ file_chooser = fc_info.value
+ current_working_dir = os.getcwd()
+ file_path1 = os.path.join(
+ current_working_dir, "testdata", "FabrikamInvoice_1.pdf"
+ )
+ file_path2 = os.path.join(current_working_dir, "testdata", "ClaimForm_1.pdf")
+
+ if schemaType == "Invoice":
+ file_chooser.set_files([file_path1])
+ else:
+ file_chooser.set_files([file_path2])
+ self.page.wait_for_timeout(5000)
+ self.page.wait_for_load_state("networkidle")
+ self.page.locator(self.UPLOAD_BTN).click()
+ self.page.wait_for_timeout(10000)
+ expect(
+ self.page.get_by_role("alertdialog", name="Import Content")
+ .locator("path")
+ .nth(1)
+ ).to_be_visible()
+ self.page.locator(self.CLOSE_BTN).click()
+
+ def refresh(self):
+ status_ele = self.page.locator(self.STATUS).nth(2)
+ max_retries = 15
+
+ for i in range(max_retries):
+ status_text = status_ele.inner_text().strip()
+
+ if status_text == "Completed":
+ break
+ elif status_text == "Error":
+ raise Exception(
+ f"Process failed with status: 'Error' after {i + 1} retries."
+ )
+
+ self.page.locator(self.REFRESH).click()
+ self.page.wait_for_timeout(5000)
+ else:
+ # Executed only if the loop did not break (i.e., status is neither Completed nor Error)
+ raise Exception(
+ f"Process did not complete. Final status was '{status_text}' after {max_retries} retries."
+ )
+
+    def validate_invoice_extracted_result(self):
+        """Assert every extracted invoice field matches FabrikamInvoice_1.pdf test data."""
+        expect(self.page.locator(self.CUSTOMER_NAME)).to_contain_text(
+            "Paris Fashion Group SARL"
+        )
+        expect(self.page.locator(self.CUSTOMER_STREET)).to_contain_text(
+            "10 Rue de Rivoli"
+        )
+        expect(self.page.locator(self.CUSTOMER_CITY)).to_contain_text("Paris")
+        expect(self.page.locator(self.CUSTOMER_ZIP_CODE)).to_contain_text("75001")
+        expect(self.page.locator(self.CUSTOMER_COUNTRY)).to_contain_text("France")
+        expect(self.page.locator(self.SHIPPING_STREET)).to_contain_text(
+            "25 Avenue Montaigne"
+        )
+        expect(self.page.locator(self.SHIPPING_CITY)).to_contain_text("Paris")
+        expect(self.page.locator(self.SHIPPING_POSTAL_CODE)).to_contain_text("75008")
+        expect(self.page.locator(self.SHIPPING_COUNTRY)).to_contain_text("France")
+        expect(self.page.locator(self.PURCHASE_ORDER)).to_contain_text("PO-34567")
+        expect(self.page.locator(self.INVOICE_ID)).to_contain_text("INV-20231005")
+        expect(self.page.locator(self.INVOICE_DATE)).to_contain_text("2023-10-05")
+        expect(self.page.locator(self.payable_by)).to_contain_text("2023-11-04")
+        expect(self.page.locator(self.vendor_name)).to_contain_text(
+            "Fabrikam Unlimited Company"
+        )
+        expect(self.page.locator(self.v_street)).to_contain_text("Wilton Place")
+        expect(self.page.locator(self.v_city)).to_contain_text("Brooklyn")
+        expect(self.page.locator(self.v_state)).to_contain_text("NY")
+        expect(self.page.locator(self.v_zip_code)).to_contain_text("22345")
+        expect(self.page.locator(self.vendor_tax_id)).to_contain_text("FR123456789")
+        expect(self.page.locator(self.SUBTOTAL)).to_contain_text("16859.1")
+        expect(self.page.locator(self.TOTAL_TAX)).to_contain_text("11286")
+        expect(self.page.locator(self.INVOICE_TOTAL)).to_contain_text("22516.08")
+        expect(self.page.locator(self.PAYMENT_TERMS)).to_contain_text("Net 30")
+        expect(self.page.locator(self.product_code1)).to_contain_text("EM032")
+        expect(self.page.locator(self.p1_description)).to_contain_text(
+            "Item: Terminal Lug"
+        )
+        expect(self.page.locator(self.p1_quantity)).to_contain_text("163")
+        expect(self.page.locator(self.p1_tax)).to_contain_text("2934")
+        expect(self.page.locator(self.p1_unit_price)).to_contain_text("2.5")
+        expect(self.page.locator(self.p1_total)).to_contain_text("407.5")
+
+    def modify_and_submit_extracted_data(self):  # edit one extracted field, then save with a comment
+        self.page.get_by_text('"25 Avenue Montaigne"').dblclick()  # double-click is expected to open the inline editor — confirm
+        self.page.locator(self.SHIPPING_ADD_STREET).fill("25 Avenue Montaigne updated")
+        self.page.locator(self.EDIT_CONFIRM).click()  # confirm the inline edit
+        self.page.locator(self.COMMENTS).fill("Updated Shipping street address")
+        self.page.locator(self.SAVE_BTN).click()
+        self.page.wait_for_timeout(6000)  # allow the save request to complete
+
+    def validate_process_steps(self):
+        """Open each pipeline step, verify its JSON output, then return to results."""
+        self.page.locator(self.PROCESS_STEPS).click()
+        # Toggle each step panel open, assert its payload is rendered, toggle it closed.
+        steps = (
+            (self.EXTRACT, '"extract"'),
+            (self.MAP, '"map"'),
+            (self.EVALUATE, '"evaluate"'),
+        )
+        for step_locator, step_marker in steps:
+            self.page.locator(step_locator).click()
+            self.page.wait_for_timeout(3000)
+            expect(self.page.get_by_text(step_marker)).to_be_visible()
+            if step_locator == self.EXTRACT:
+                # Only the extract step is also checked for an explicit status string.
+                expect(self.page.get_by_text('"Succeeded"')).to_be_visible()
+            self.page.locator(step_locator).click()
+            self.page.wait_for_timeout(3000)
+        self.page.locator(self.EXTRACTED_RESULT).click()
+        self.page.wait_for_timeout(3000)
+
+    def validate_property_extracted_result(self):
+        """Assert every extracted claim-form field matches the
+        ClaimForm_1.pdf test data uploaded for the Property schema.
+        """
+        # (locator, expected substring) pairs, checked in on-screen order.
+        expected_fields = (
+            # Claimant details
+            (self.first_name, "Sophia"),
+            (self.last_name, "Kim"),
+            (self.tel_no, "646-555-0789"),
+            # Policy details
+            (self.policy_no, "PH5678901"),
+            (self.coverage_type, "Homeowners"),
+            (self.claim_number, "CLM5432109"),
+            (self.policy_effective_date, "2022-07-01"),
+            (self.policy_expiration_date, "2023-07-01"),
+            # Loss / damage details
+            (self.damage_deductible, "1000"),
+            (self.damage_deductible_currency, "USD"),
+            (self.date_of_damage_loss, "2023-05-10"),
+            (self.time_of_loss, "13:20"),
+            (self.date_prepared, "2023-05-11"),
+            # Damaged-item line entry
+            (self.item, "Apple"),
+            (self.description, '"High-performance tablet with a large, vibrant display'),
+            (self.date_acquired, "2022-01-20"),
+            (self.cost_new, "1100"),
+            (self.cost_new_currency, "USD"),
+            (self.replacement_repair, "350"),
+            (self.replacement_repair_currency, "USD"),
+        )
+        for field_locator, expected_text in expected_fields:
+            expect(self.page.locator(field_locator)).to_contain_text(expected_text)
+
+    def delete_files(self):  # delete the first listed file via its row menu
+        self.page.locator(self.DELETE_FILE).nth(0).click()
+        self.page.get_by_role("menuitem", name="Delete").click()
+        self.page.get_by_role("button", name="Confirm").click()  # confirm the deletion dialog
+        self.page.wait_for_timeout(6000)  # allow the deletion to propagate to the list
diff --git a/tests/e2e-test/pages/__init__.py b/tests/e2e-test/pages/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/e2e-test/pages/loginPage.py b/tests/e2e-test/pages/loginPage.py
new file mode 100644
index 00000000..0b412556
--- /dev/null
+++ b/tests/e2e-test/pages/loginPage.py
@@ -0,0 +1,39 @@
+from base.base import BasePage
+
+
+class LoginPage(BasePage):
+    """Page object for the Microsoft (Entra ID) sign-in flow."""
+
+    EMAIL_TEXT_BOX = "//input[@type='email']"
+    NEXT_BUTTON = "//input[@type='submit']"
+    PASSWORD_TEXT_BOX = "//input[@type='password']"
+    SIGNIN_BUTTON = "//input[@id='idSIButton9']"
+    # The "Stay signed in?" Yes button reuses the same element id as Sign in.
+    YES_BUTTON = "//input[@id='idSIButton9']"
+    PERMISSION_ACCEPT_BUTTON = "//input[@type='submit']"
+
+    def __init__(self, page):
+        # Playwright Page used for all locator interactions.
+        self.page = page
+
+    def authenticate(self, username, password):
+        """Sign in with the given credentials, accepting any consent prompt."""
+        # Enter the account email and advance to the password screen.
+        self.page.locator(self.EMAIL_TEXT_BOX).fill(username)
+        self.page.locator(self.NEXT_BUTTON).click()
+        # Wait for the password input field to be available, then fill it.
+        self.page.wait_for_load_state("networkidle")
+        self.page.locator(self.PASSWORD_TEXT_BOX).fill(password)
+        self.page.locator(self.SIGNIN_BUTTON).click()
+        # Allow up to 20 seconds for the post-sign-in redirect to settle.
+        self.page.wait_for_timeout(20000)
+        if self.page.locator(self.PERMISSION_ACCEPT_BUTTON).is_visible():
+            # First-run consent screen: grant the requested permissions.
+            self.page.locator(self.PERMISSION_ACCEPT_BUTTON).click()
+            self.page.wait_for_timeout(10000)
+        else:
+            # "Stay signed in?" prompt: answer Yes.
+            self.page.locator(self.YES_BUTTON).click()
+            self.page.wait_for_timeout(10000)
+        # Let any remaining network activity finish before tests proceed.
+        self.page.wait_for_load_state("networkidle")
diff --git a/tests/e2e-test/pytest.ini b/tests/e2e-test/pytest.ini
new file mode 100644
index 00000000..76eb64fc
--- /dev/null
+++ b/tests/e2e-test/pytest.ini
@@ -0,0 +1,6 @@
+[pytest]
+log_cli = true
+log_cli_level = INFO
+log_file = logs/tests.log
+log_file_level = INFO
+addopts = -p no:warnings
diff --git a/tests/e2e-test/readme.MD b/tests/e2e-test/readme.MD
new file mode 100644
index 00000000..941d3653
--- /dev/null
+++ b/tests/e2e-test/readme.MD
@@ -0,0 +1,35 @@
+# cto-test-automation
+
+Write end-to-end tests for your web apps with [Playwright](https://github.com/microsoft/playwright-python) and [pytest](https://docs.pytest.org/en/stable/).
+
+- Support for **all modern browsers** including Chromium, WebKit and Firefox.
+- Support for **headless and headed** execution.
+- **Built-in fixtures** that provide browser primitives to test functions.
+
+Pre-Requisites:
+
+- Install Visual Studio Code: Download and install Visual Studio Code (VSCode).
+- Install Node.js: Download and install Node.js.
+
+Create and Activate Python Virtual Environment
+
+- From your directory, open cmd and run: "python -m venv microsoft".
+This will create a virtual environment directory named "microsoft" inside your current directory.
+- To activate the virtual environment, copy the path to "microsoft\Scripts\activate.bat" and run it from cmd.
+
+Installing Playwright Pytest from Virtual Environment
+
+- To install libraries run "pip install -r requirements.txt"
+- Install the required browsers "playwright install"
+
+Run test cases
+
+- To run test cases from your 'tests' folder: "pytest --html=report.html --self-contained-html"
+
+Create .env file in project root level with web app url and client credentials
+
+- Create a .env file at the project root level and add the application URL. Please refer to the 'sample_dotenv_file.txt' file.
+
+## Documentation
+
+See on [playwright.dev](https://playwright.dev/python/docs/test-runners) for examples and more detailed information.
diff --git a/tests/e2e-test/requirements.txt b/tests/e2e-test/requirements.txt
new file mode 100644
index 00000000..7aad0cfb
--- /dev/null
+++ b/tests/e2e-test/requirements.txt
@@ -0,0 +1,6 @@
+pytest-playwright
+pytest-reporter-html1
+python-dotenv
+pytest-check
+pytest-html
+py
\ No newline at end of file
diff --git a/tests/e2e-test/testdata/ClaimForm_1.pdf b/tests/e2e-test/testdata/ClaimForm_1.pdf
new file mode 100644
index 00000000..85be1dbf
Binary files /dev/null and b/tests/e2e-test/testdata/ClaimForm_1.pdf differ
diff --git a/tests/e2e-test/testdata/FabrikamInvoice_1.pdf b/tests/e2e-test/testdata/FabrikamInvoice_1.pdf
new file mode 100644
index 00000000..cc3b7dba
Binary files /dev/null and b/tests/e2e-test/testdata/FabrikamInvoice_1.pdf differ
diff --git a/tests/e2e-test/tests/__init__.py b/tests/e2e-test/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/e2e-test/tests/conftest.py b/tests/e2e-test/tests/conftest.py
new file mode 100644
index 00000000..d356dc40
--- /dev/null
+++ b/tests/e2e-test/tests/conftest.py
@@ -0,0 +1,53 @@
+import os
+
+import pytest
+from config.constants import URL
+from playwright.sync_api import sync_playwright
+from py.xml import html # type: ignore
+
+
+@pytest.fixture(scope="session")
+def login_logout():
+    # Session-scoped fixture: one browser launch/login shared by the whole test session.
+    with sync_playwright() as p:
+        browser = p.chromium.launch(headless=False, args=["--start-maximized"])  # NOTE(review): headed mode needs a display server (e.g. xvfb) on CI — confirm
+        context = browser.new_context(no_viewport=True)  # no_viewport lets --start-maximized take effect
+        context.set_default_timeout(80000)  # generous default timeout for slow page loads
+        page = context.new_page()
+        # Navigate to the login URL
+        page.goto(URL, wait_until="domcontentloaded")
+        # login to web url with username and password (currently disabled)
+        # login_page = LoginPage(page)
+        # load_dotenv()
+        # login_page.authenticate(os.getenv('user_name'), os.getenv('pass_word'))
+
+        yield page
+        # Close the browser once the session's tests finish.
+        browser.close()
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_html_report_title(report):  # pytest-html hook: sets the report heading
+    report.title = "Automation_Content_Processing"
+
+
+# Add a column for descriptions
+def pytest_html_results_table_header(cells):  # pytest-html hook: header row cells
+    cells.insert(1, html.th("Description"))  # extra "Description" column after the result column
+
+
+def pytest_html_results_table_row(report, cells):  # pytest-html hook: one cell per result row
+    cells.insert(
+        1, html.td(report.description if hasattr(report, "description") else "")
+    )
+
+
+# Add logs and docstring to report
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+    # Attach the test docstring to the report so the HTML table can display it.
+    outcome = yield
+    report = outcome.get_result()
+    report.description = item.function.__doc__ or ""
+    # Ensure the log directory exists before pytest writes logs/tests.log.
+    os.makedirs("logs", exist_ok=True)
diff --git a/tests/e2e-test/tests/test_contentProcessing_gp_tc.py b/tests/e2e-test/tests/test_contentProcessing_gp_tc.py
new file mode 100644
index 00000000..cbe99797
--- /dev/null
+++ b/tests/e2e-test/tests/test_contentProcessing_gp_tc.py
@@ -0,0 +1,32 @@
+import logging
+
+import pytest
+from pages.HomePage import HomePage
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.mark.testcase_id("TC001")
+def test_ContentProcessing_Golden_path_test(login_logout):
+    """Validate Golden path test case for Content Processing Accelerator"""
+    page = login_logout
+    home_page = HomePage(page)
+    # Each entry is (step description, zero-arg action) executed in order.
+    steps = [
+        ("Validate home page is loaded.", lambda: home_page.validate_home_page()),
+        ("Select Invoice Schema.", lambda: home_page.select_schema("Invoice")),
+        ("Upload Invoice documents.", lambda: home_page.upload_files("Invoice")),
+        ("Refresh page till status is updated to Completed.", lambda: home_page.refresh()),
+        ("Validate extracted result for Invoice.", lambda: home_page.validate_invoice_extracted_result()),
+        ("Modify Extracted Data JSON & submit comments.", lambda: home_page.modify_and_submit_extracted_data()),
+        ("Validate process steps for Invoice", lambda: home_page.validate_process_steps()),
+        ("Select Property Loss Damage Claim Form Schema.", lambda: home_page.select_schema("Property")),
+        ("Upload Property Loss Damage Claim Form documents.", lambda: home_page.upload_files("Property")),
+        ("Refresh page till status is updated to Completed.", lambda: home_page.refresh()),
+        ("Validate extracted result for Property Loss Damage Claim Form.", lambda: home_page.validate_property_extracted_result()),
+        ("Validate process steps for Property Loss Damage Claim Form.", lambda: home_page.validate_process_steps()),
+        ("Validate Delete files.", lambda: home_page.delete_files()),
+    ]
+    for step_number, (description, action) in enumerate(steps, start=1):
+        logger.info("Step %d: %s", step_number, description)
+        action()