From 55e240e282b89abb52aa593e05226e05073bc3c9 Mon Sep 17 00:00:00 2001
From: Vamshi-Microsoft
Date: Mon, 17 Nov 2025 14:55:16 +0530
Subject: [PATCH 1/2] Added Deploy-v2 pipeline with manual dispatch and input
parameter support and Updated Smoke Testing scenarios
---
.github/workflows/deploy-v2.yml | 893 ++++++++++++++++++
.github/workflows/test-automation-v2.yml | 195 ++++
docs/CustomizingAzdParameters.md | 3 +
infra/main.bicep | 13 +-
infra/main.parameters.json | 9 +
infra/main.waf.parameters.json | 9 +
tests/e2e-test/base/base.py | 30 +-
tests/e2e-test/config/constants.py | 6 +-
tests/e2e-test/pages/HomePage.py | 512 +++++++++-
tests/e2e-test/pages/loginPage.py | 19 +
tests/e2e-test/pytest.ini | 2 +
tests/e2e-test/tests/conftest.py | 13 +-
.../tests/test_contentProcessing_gp_tc.py | 392 ++++++--
13 files changed, 2024 insertions(+), 72 deletions(-)
create mode 100644 .github/workflows/deploy-v2.yml
create mode 100644 .github/workflows/test-automation-v2.yml
diff --git a/.github/workflows/deploy-v2.yml b/.github/workflows/deploy-v2.yml
new file mode 100644
index 00000000..f4b38b1b
--- /dev/null
+++ b/.github/workflows/deploy-v2.yml
@@ -0,0 +1,893 @@
+name: Deploy-Test-Cleanup (v2)
+on:
+ pull_request:
+ branches:
+ - main
+ workflow_dispatch:
+ inputs:
+ azure_location:
+ description: 'Azure Location For Deployment'
+ required: false
+ default: 'australiaeast'
+ type: choice
+ options:
+ - 'australiaeast'
+ - 'centralus'
+ - 'eastasia'
+ - 'eastus2'
+ - 'japaneast'
+ - 'northeurope'
+ - 'southeastasia'
+ - 'uksouth'
+ - 'eastus'
+ resource_group_name:
+ description: 'Resource Group Name (Optional)'
+ required: false
+ default: ''
+ type: string
+
+ waf_enabled:
+ description: 'Enable WAF'
+ required: false
+ default: false
+ type: boolean
+ EXP:
+ description: 'Enable EXP'
+ required: false
+ default: false
+ type: boolean
+ build_docker_image:
+ description: 'Build And Push Docker Image (Optional)'
+ required: false
+ default: false
+ type: boolean
+
+ cleanup_resources:
+ description: 'Cleanup Deployed Resources'
+ required: false
+ default: false
+ type: boolean
+
+ run_e2e_tests:
+ description: 'Run End-to-End Tests'
+ required: false
+ default: 'GoldenPath-Testing'
+ type: choice
+ options:
+ - 'GoldenPath-Testing'
+ - 'Smoke-Testing'
+ - 'None'
+
+ AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID:
+ description: 'Log Analytics Workspace ID (Optional)'
+ required: false
+ default: ''
+ type: string
+ AZURE_EXISTING_AI_PROJECT_RESOURCE_ID:
+ description: 'AI Project Resource ID (Optional)'
+ required: false
+ default: ''
+ type: string
+ existing_webapp_url:
+ description: 'Existing Container WebApp URL (Skips Deployment)'
+ required: false
+ default: ''
+ type: string
+
+ schedule:
+ - cron: '0 9,21 * * *' # Runs at 9:00 AM and 9:00 PM GMT
+env:
+ GPT_MIN_CAPACITY: 100
+ BRANCH_NAME: ${{ github.event.workflow_run.head_branch || github.head_ref || github.ref_name }}
+ # For automatic triggers (pull_request, workflow_run, schedule): force Non-WAF + Non-EXP
+ # For manual dispatch: use input values or defaults
+ WAF_ENABLED: ${{ github.event_name == 'workflow_dispatch' && (github.event.inputs.waf_enabled || false) || false }}
+ EXP: ${{ github.event_name == 'workflow_dispatch' && (github.event.inputs.EXP || false) || false }}
+ CLEANUP_RESOURCES: ${{ github.event_name == 'workflow_dispatch' && (github.event.inputs.cleanup_resources || true) || true }}
+ RUN_E2E_TESTS: ${{ github.event_name == 'workflow_dispatch' && (github.event.inputs.run_e2e_tests || 'GoldenPath-Testing') || 'GoldenPath-Testing' }}
+ BUILD_DOCKER_IMAGE: ${{ github.event_name == 'workflow_dispatch' && (github.event.inputs.build_docker_image || false) || false }}
+
+jobs:
+ docker-build:
+ if: github.event_name == 'workflow_dispatch' && github.event.inputs.build_docker_image == 'true'
+ runs-on: ubuntu-latest
+ outputs:
+ IMAGE_TAG: ${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v4
+
+ - name: Generate Unique Docker Image Tag
+ id: generate_docker_tag
+ run: |
+          echo "🔨 Building new Docker image - generating unique tag..."
+ # Generate unique tag for manual deployment runs
+ TIMESTAMP=$(date +%Y%m%d-%H%M%S)
+ RUN_ID="${{ github.run_id }}"
+ BRANCH_NAME="${{ github.head_ref || github.ref_name }}"
+ # Sanitize branch name for Docker tag (replace invalid characters with hyphens)
+ CLEAN_BRANCH_NAME=$(echo "$BRANCH_NAME" | sed 's/[^a-zA-Z0-9._-]/-/g' | sed 's/--*/-/g' | sed 's/^-\|-$//g')
+ UNIQUE_TAG="${CLEAN_BRANCH_NAME}-${TIMESTAMP}-${RUN_ID}"
+ echo "IMAGE_TAG=$UNIQUE_TAG" >> $GITHUB_ENV
+ echo "IMAGE_TAG=$UNIQUE_TAG" >> $GITHUB_OUTPUT
+ echo "Generated unique Docker tag: $UNIQUE_TAG"
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Log in to Azure Container Registry
+ uses: azure/docker-login@v2
+ with:
+ login-server: ${{ secrets.ACR_TEST_LOGIN_SERVER }}
+ username: ${{ secrets.ACR_TEST_USERNAME }}
+ password: ${{ secrets.ACR_TEST_PASSWORD }}
+
+ - name: Build and Push ContentProcessor Docker image
+ uses: docker/build-push-action@v6
+ env:
+ DOCKER_BUILD_SUMMARY: false
+ with:
+ context: ./src/ContentProcessor
+ file: ./src/ContentProcessor/Dockerfile
+ push: true
+ tags: |
+ ${{ secrets.ACR_TEST_LOGIN_SERVER }}/contentprocessor:${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}
+ ${{ secrets.ACR_TEST_LOGIN_SERVER }}/contentprocessor:${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}_${{ github.run_number }}
+
+ - name: Build and Push ContentProcessorAPI Docker image
+ uses: docker/build-push-action@v6
+ env:
+ DOCKER_BUILD_SUMMARY: false
+ with:
+ context: ./src/ContentProcessorAPI
+ file: ./src/ContentProcessorAPI/Dockerfile
+ push: true
+ tags: |
+ ${{ secrets.ACR_TEST_LOGIN_SERVER }}/contentprocessorapi:${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}
+ ${{ secrets.ACR_TEST_LOGIN_SERVER }}/contentprocessorapi:${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}_${{ github.run_number }}
+
+ - name: Build and Push ContentProcessorWeb Docker image
+ uses: docker/build-push-action@v6
+ env:
+ DOCKER_BUILD_SUMMARY: false
+ with:
+ context: ./src/ContentProcessorWeb
+ file: ./src/ContentProcessorWeb/Dockerfile
+ push: true
+ tags: |
+ ${{ secrets.ACR_TEST_LOGIN_SERVER }}/contentprocessorweb:${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}
+ ${{ secrets.ACR_TEST_LOGIN_SERVER }}/contentprocessorweb:${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}_${{ github.run_number }}
+
+ - name: Verify Docker Image Build
+ run: |
+          echo "✅ Docker image successfully built and pushed"
+ echo "Image tag: ${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}"
+
+ - name: Generate Docker Build Summary
+ if: always()
+ run: |
+ # Extract ACR name from the secret
+ ACR_NAME=$(echo "${{ secrets.ACR_TEST_LOGIN_SERVER }}")
+          echo "## 🐳 Docker Build Job Summary" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "| Field | Value |" >> $GITHUB_STEP_SUMMARY
+ echo "|-------|--------|" >> $GITHUB_STEP_SUMMARY
+          echo "| **Job Status** | ${{ job.status == 'success' && '✅ Success' || '❌ Failed' }} |" >> $GITHUB_STEP_SUMMARY
+ echo "| **Image Tag** | \`${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}\` |" >> $GITHUB_STEP_SUMMARY
+ echo "| **Trigger** | ${{ github.event_name }} |" >> $GITHUB_STEP_SUMMARY
+ echo "| **Branch** | ${{ env.BRANCH_NAME }} |" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ if [[ "${{ job.status }}" == "success" ]]; then
+            echo "### ✅ Build Details" >> $GITHUB_STEP_SUMMARY
+ echo "Successfully built and pushed three Docker images to ACR:" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "**Built Images:**" >> $GITHUB_STEP_SUMMARY
+ echo "- \`${ACR_NAME}.azurecr.io/contentprocessor:${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}\`" >> $GITHUB_STEP_SUMMARY
+ echo "- \`${ACR_NAME}.azurecr.io/contentprocessorapi:${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}\`" >> $GITHUB_STEP_SUMMARY
+ echo "- \`${ACR_NAME}.azurecr.io/contentprocessorweb:${{ steps.generate_docker_tag.outputs.IMAGE_TAG }}\`" >> $GITHUB_STEP_SUMMARY
+ else
+            echo "### ❌ Build Failed" >> $GITHUB_STEP_SUMMARY
+ echo "- Docker build process encountered an error" >> $GITHUB_STEP_SUMMARY
+ echo "- Check the docker-build job for detailed error information" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ deploy:
+ if: always() && (github.event_name != 'workflow_dispatch' || github.event.inputs.existing_webapp_url == '' || github.event.inputs.existing_webapp_url == null)
+ needs: [docker-build]
+ runs-on: ubuntu-latest
+ outputs:
+ invoice_schema_id: ${{ steps.register.outputs.invoice_schema_id }}
+ propertydamageclaimform_schema_id: ${{ steps.register.outputs.propertylossdamageclaimform_schema_id }}
+ RESOURCE_GROUP_NAME: ${{ steps.check_create_rg.outputs.RESOURCE_GROUP_NAME }}
+ CONTAINER_WEB_APPURL: ${{ steps.get_output.outputs.CONTAINER_WEB_APPURL }}
+ ENV_NAME: ${{ steps.generate_env_name.outputs.ENV_NAME }}
+ AZURE_LOCATION: ${{ steps.set_region.outputs.AZURE_LOCATION }}
+ AZURE_ENV_OPENAI_LOCATION: ${{ steps.set_region.outputs.AZURE_ENV_OPENAI_LOCATION }}
+ IMAGE_TAG: ${{ steps.determine_image_tag.outputs.IMAGE_TAG }}
+ QUOTA_FAILED: ${{ steps.quota_failure_output.outputs.QUOTA_FAILED }}
+ env:
+ # For automatic triggers: force Non-WAF + Non-EXP, for manual dispatch: use inputs
+ WAF_ENABLED: ${{ github.event_name == 'workflow_dispatch' && (github.event.inputs.waf_enabled || false) || false }}
+ EXP: ${{ github.event_name == 'workflow_dispatch' && (github.event.inputs.EXP || false) || false }}
+ CLEANUP_RESOURCES: ${{ github.event_name == 'workflow_dispatch' && (github.event.inputs.cleanup_resources || true) || true }}
+
+ steps:
+ - name: Display Workflow Configuration
+ run: |
+          echo "🚀 ==================================="
+          echo "🚀 WORKFLOW CONFIGURATION SUMMARY"
+          echo "🚀 ==================================="
+          echo "Trigger Type: ${{ github.event_name }}"
+          echo "Branch: ${{ env.BRANCH_NAME }}"
+          echo ""
+          echo "Configuration Settings:"
+          echo "  • WAF Enabled: ${{ env.WAF_ENABLED }}"
+          echo "  • EXP Enabled: ${{ env.EXP }}"
+          echo "  • Run E2E Tests: ${{ env.RUN_E2E_TESTS }}"
+          echo "  • Cleanup Resources: ${{ env.CLEANUP_RESOURCES }}"
+          echo "  • Build Docker Image: ${{ env.BUILD_DOCKER_IMAGE }}"
+          if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ github.event.inputs.azure_location }}" ]]; then
+            echo "  • Selected Azure Location: ${{ github.event.inputs.azure_location }}"
+          else
+            echo "  • Azure Location: Will be determined by quota check"
+          fi
+          if [[ "${{ github.event.inputs.existing_webapp_url }}" != "" ]]; then
+            echo "  • Using Existing Webapp URL: ${{ github.event.inputs.existing_webapp_url }}"
+            echo "  • Skip Deployment: Yes"
+          else
+            echo "  • Skip Deployment: No"
+          fi
+          echo ""
+          if [[ "${{ github.event_name }}" != "workflow_dispatch" ]]; then
+            echo "ℹ️ Automatic Trigger: Using Non-WAF + Non-EXP configuration"
+          else
+            echo "ℹ️ Manual Trigger: Using user-specified configuration"
+            # Check if EXP was auto-enabled after user input validation
+            if [[ "${{ env.EXP }}" == "true" && "${{ github.event.inputs.EXP }}" != "true" ]]; then
+              echo "🔧 Note: EXP was automatically enabled due to provided parameter values"
+            fi
+          fi
+          echo "🚀 ==================================="
+
+ - name: Validate and Auto-Configure EXP
+ run: |
+          echo "🔍 Validating EXP configuration..."
+
+          # Check if EXP values were provided but EXP is disabled
+          if [[ "${{ github.event.inputs.EXP }}" != "true" ]]; then
+            if [[ -n "${{ github.event.inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }}" ]] || [[ -n "${{ github.event.inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }}" ]]; then
+              echo "🔧 AUTO-ENABLING EXP: EXP parameter values were provided but EXP was not explicitly enabled."
+              echo ""
+              echo "You provided values for:"
+              [[ -n "${{ github.event.inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }}" ]] && echo "  - Azure Log Analytics Workspace ID: '${{ github.event.inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }}'"
+              [[ -n "${{ github.event.inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }}" ]] && echo "  - Azure AI Project Resource ID: '${{ github.event.inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }}'"
+              echo ""
+              echo "✅ Automatically enabling EXP to use these values."
+              echo "EXP=true" >> $GITHUB_ENV
+              echo "✅ EXP has been automatically enabled for this deployment."
+            fi
+          fi
+
+ - name: Checkout Code
+ uses: actions/checkout@v4
+
+ - name: Setup Azure CLI
+ run: |
+ curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+ az --version # Verify installation
+
+ - name: Login to Azure
+ run: |
+ az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
+ az account set --subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+ - name: Run Quota Check
+ id: quota-check
+ run: |
+ export AZURE_CLIENT_ID=${{ secrets.AZURE_CLIENT_ID }}
+ export AZURE_TENANT_ID=${{ secrets.AZURE_TENANT_ID }}
+ export AZURE_CLIENT_SECRET=${{ secrets.AZURE_CLIENT_SECRET }}
+ export AZURE_SUBSCRIPTION_ID="${{ secrets.AZURE_SUBSCRIPTION_ID }}"
+ export GPT_MIN_CAPACITY=${{ env.GPT_MIN_CAPACITY }}
+ export AZURE_REGIONS="${{ vars.AZURE_REGIONS }}"
+
+ chmod +x infra/scripts/checkquota.sh
+          if ! QUOTA_OUTPUT=$(infra/scripts/checkquota.sh 2>&1); then
+            echo "$QUOTA_OUTPUT"
+            # If quota check fails due to insufficient quota, set the flag
+            # (grep the script's OUTPUT, not the script file itself)
+            if echo "$QUOTA_OUTPUT" | grep -q "No region with sufficient quota found"; then
+ echo "QUOTA_FAILED=true" >> $GITHUB_ENV
+ fi
+ exit 1 # Fail the pipeline if any other failure occurs
+ fi
+
+ - name: Set Quota Failure Output
+ id: quota_failure_output
+ if: env.QUOTA_FAILED == 'true'
+ run: |
+ echo "QUOTA_FAILED=true" >> $GITHUB_OUTPUT
+ echo "Quota check failed - will notify via separate notification job"
+
+ - name: Fail Pipeline if Quota Check Fails
+ if: env.QUOTA_FAILED == 'true'
+ run: exit 1
+
+ - name: Set Deployment Region
+ id: set_region
+ run: |
+ # Set AZURE_ENV_OPENAI_LOCATION from quota check result
+ echo "Selected Region from Quota Check: $VALID_REGION"
+ echo "AZURE_ENV_OPENAI_LOCATION=$VALID_REGION" >> $GITHUB_ENV
+ echo "AZURE_ENV_OPENAI_LOCATION=$VALID_REGION" >> $GITHUB_OUTPUT
+
+ # Set AZURE_LOCATION from user input (for manual dispatch) or default to quota check result (for automatic triggers)
+ if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ github.event.inputs.azure_location }}" ]]; then
+ USER_SELECTED_LOCATION="${{ github.event.inputs.azure_location }}"
+ echo "Using user-selected Azure location: $USER_SELECTED_LOCATION"
+ echo "AZURE_LOCATION=$USER_SELECTED_LOCATION" >> $GITHUB_ENV
+ echo "AZURE_LOCATION=$USER_SELECTED_LOCATION" >> $GITHUB_OUTPUT
+ else
+ echo "Using location from quota check for automatic triggers: $VALID_REGION"
+ echo "AZURE_LOCATION=$VALID_REGION" >> $GITHUB_ENV
+ echo "AZURE_LOCATION=$VALID_REGION" >> $GITHUB_OUTPUT
+ fi
+
+ - name: Generate Resource Group Name
+ id: generate_rg_name
+ run: |
+ # Check if a resource group name was provided as input
+ if [[ -n "${{ github.event.inputs.resource_group_name }}" ]]; then
+ echo "Using provided Resource Group name: ${{ github.event.inputs.resource_group_name }}"
+ echo "RESOURCE_GROUP_NAME=${{ github.event.inputs.resource_group_name }}" >> $GITHUB_ENV
+ else
+ echo "Generating a unique resource group name..."
+ ACCL_NAME="cp" # Account name as specified
+ SHORT_UUID=$(uuidgen | cut -d'-' -f1)
+ UNIQUE_RG_NAME="arg-${ACCL_NAME}-${SHORT_UUID}"
+ echo "RESOURCE_GROUP_NAME=${UNIQUE_RG_NAME}" >> $GITHUB_ENV
+ echo "Generated RESOURCE_GROUP_NAME: ${UNIQUE_RG_NAME}"
+ fi
+
+ - name: Setup Azure Developer CLI
+ run: |
+ curl -fsSL https://aka.ms/install-azd.sh | sudo bash
+ azd version
+
+ - name: Login to Azure
+ id: login-azure
+ run: |
+ az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
+ azd auth login --client-id ${{ secrets.AZURE_CLIENT_ID }} --client-secret ${{ secrets.AZURE_CLIENT_SECRET }} --tenant-id ${{ secrets.AZURE_TENANT_ID }}
+
+ - name: Install Bicep CLI
+ run: az bicep install
+
+ - name: Check and Create Resource Group
+ id: check_create_rg
+ run: |
+ set -e
+          echo "🔍 Checking if resource group '$RESOURCE_GROUP_NAME' exists..."
+          rg_exists=$(az group exists --name $RESOURCE_GROUP_NAME)
+          if [ "$rg_exists" = "false" ]; then
+            echo "📦 Resource group does not exist. Creating new resource group '$RESOURCE_GROUP_NAME' in location '$AZURE_LOCATION'..."
+            az group create --name $RESOURCE_GROUP_NAME --location $AZURE_LOCATION || { echo "❌ Error creating resource group"; exit 1; }
+            echo "✅ Resource group '$RESOURCE_GROUP_NAME' created successfully."
+          else
+            echo "✅ Resource group '$RESOURCE_GROUP_NAME' already exists. Deploying to existing resource group."
+          fi
+          echo "RESOURCE_GROUP_NAME=$RESOURCE_GROUP_NAME" >> $GITHUB_OUTPUT
+          echo "RESOURCE_GROUP_NAME=$RESOURCE_GROUP_NAME" >> $GITHUB_ENV
+
+ - name: Generate Unique Solution Prefix
+ id: generate_solution_prefix
+ run: |
+ set -e
+ COMMON_PART="psldg"
+ TIMESTAMP=$(date +%s)
+ UPDATED_TIMESTAMP=$(echo $TIMESTAMP | tail -c 6)
+ UNIQUE_SOLUTION_PREFIX="${COMMON_PART}${UPDATED_TIMESTAMP}"
+ echo "SOLUTION_PREFIX=${UNIQUE_SOLUTION_PREFIX}" >> $GITHUB_ENV
+ echo "Generated SOLUTION_PREFIX: ${UNIQUE_SOLUTION_PREFIX}"
+
+ - name: Determine Docker Image Tag
+ id: determine_image_tag
+ run: |
+ if [[ "${{ env.BUILD_DOCKER_IMAGE }}" == "true" ]]; then
+ # Use the tag from docker-build job if it was built
+ if [[ "${{ needs.docker-build.result }}" == "success" ]]; then
+ IMAGE_TAG="${{ needs.docker-build.outputs.IMAGE_TAG }}"
+ echo "π Using Docker image tag from build job: $IMAGE_TAG"
+ else
+ echo "β Docker build job failed or was skipped, but BUILD_DOCKER_IMAGE is true"
+ exit 1
+ fi
+ else
+ echo "π·οΈ Using existing Docker image based on branch..."
+ BRANCH_NAME="${{ env.BRANCH_NAME }}"
+ echo "Current branch: $BRANCH_NAME"
+
+ # Determine image tag based on branch
+ if [[ "$BRANCH_NAME" == "main" ]]; then
+ IMAGE_TAG="latest"
+ echo "Using main branch - image tag: latest"
+ elif [[ "$BRANCH_NAME" == "dev" ]]; then
+ IMAGE_TAG="dev"
+ echo "Using dev branch - image tag: dev"
+ elif [[ "$BRANCH_NAME" == "demo" ]]; then
+ IMAGE_TAG="demo"
+ echo "Using demo branch - image tag: demo"
+          elif [[ "$BRANCH_NAME" == "hotfix" ]]; then
+            IMAGE_TAG="hotfix"
+            echo "Using hotfix branch - image tag: hotfix"
+          elif [[ "$BRANCH_NAME" == "dependabotchanges" ]]; then
+            IMAGE_TAG="dependabotchanges"
+            echo "Using dependabotchanges branch - image tag: dependabotchanges"
+ else
+ IMAGE_TAG="latest"
+ echo "Using default for branch '$BRANCH_NAME' - image tag: latest"
+ fi
+
+ echo "Using existing Docker image tag: $IMAGE_TAG"
+ fi
+
+ echo "IMAGE_TAG=$IMAGE_TAG" >> $GITHUB_ENV
+ echo "IMAGE_TAG=$IMAGE_TAG" >> $GITHUB_OUTPUT
+
+ - name: Generate Unique Environment Name
+ id: generate_env_name
+ run: |
+ COMMON_PART="pslc"
+ TIMESTAMP=$(date +%s)
+ UPDATED_TIMESTAMP=$(echo $TIMESTAMP | tail -c 6)
+ UNIQUE_ENV_NAME="${COMMON_PART}${UPDATED_TIMESTAMP}"
+ echo "ENV_NAME=${UNIQUE_ENV_NAME}" >> $GITHUB_ENV
+ echo "Generated Environment Name: ${UNIQUE_ENV_NAME}"
+ echo "ENV_NAME=${UNIQUE_ENV_NAME}" >> $GITHUB_OUTPUT
+
+ - name: Configure Parameters Based on WAF Setting
+ run: |
+ if [[ "${{ env.WAF_ENABLED }}" == "true" ]]; then
+ echo "π§ Configuring WAF deployment - copying main.waf.parameters.json to main.parameters.json..."
+ cp infra/main.waf.parameters.json infra/main.parameters.json
+            echo "✅ Successfully copied WAF parameters to main parameters file"
+ else
+ echo "π§ Configuring Non-WAF deployment - using default main.parameters.json..."
+ # Ensure we have the original parameters file if it was overwritten
+ if [[ -f infra/main.waf.parameters.json ]] && [[ ! -f infra/main.parameters.json.backup ]]; then
+ echo "Backing up original parameters file..."
+ git checkout HEAD -- infra/main.parameters.json || echo "Using existing main.parameters.json"
+ fi
+ fi
+
+ - name: Deploy using azd up and extract values (${{ github.event.inputs.waf_enabled == 'true' && 'WAF' || 'Non-WAF' }}+${{ (github.event.inputs.EXP == 'true' || github.event.inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID != '' || github.event.inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID != '') && 'EXP' || 'Non-EXP' }})
+ id: get_output
+ run: |
+ set -e
+ echo "Starting azd deployment..."
+ echo "WAF Enabled: ${{ env.WAF_ENABLED }}"
+ echo "EXP: ${{ env.EXP }}"
+ echo "Using Docker Image Tag: ${{ steps.determine_image_tag.outputs.IMAGE_TAG }}"
+
+ # Install azd (Azure Developer CLI)
+ curl -fsSL https://aka.ms/install-azd.sh | bash
+
+ # Generate current timestamp in desired format: YYYY-MM-DDTHH:MM:SS.SSSSSSSZ
+ current_date=$(date -u +"%Y-%m-%dT%H:%M:%S.%7NZ")
+
+ echo "Creating environment..."
+ azd env new $ENV_NAME --no-prompt
+ echo "Environment created: $ENV_NAME"
+
+ echo "Setting default subscription..."
+ azd config set defaults.subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+ # Set additional parameters
+ azd env set AZURE_SUBSCRIPTION_ID="${{ secrets.AZURE_SUBSCRIPTION_ID }}"
+ azd env set AZURE_ENV_AI_DEPLOYMENTS_LOCATION="$AZURE_ENV_OPENAI_LOCATION"
+ azd env set AZURE_LOCATION="$AZURE_LOCATION"
+ azd env set AZURE_RESOURCE_GROUP="$RESOURCE_GROUP_NAME"
+ azd env set AZURE_ENV_CONTAINER_IMAGE_TAG="${{ steps.determine_image_tag.outputs.IMAGE_TAG }}"
+ azd env set AZURE_DEV_COLLECT_TELEMETRY="${{ vars.AZURE_DEV_COLLECT_TELEMETRY }}"
+ # Set ACR name only when building Docker image
+ if [[ "${{ env.BUILD_DOCKER_IMAGE }}" == "true" ]]; then
+ # Extract ACR name from login server and set as environment variable
+ ACR_NAME=$(echo "${{ secrets.ACR_TEST_LOGIN_SERVER }}" )
+ azd env set AZURE_ENV_CONTAINER_REGISTRY_ENDPOINT="$ACR_NAME"
+ echo "Set ACR name to: $ACR_NAME"
+ else
+ echo "Skipping ACR name configuration (using existing image)"
+ fi
+
+ if [[ "${{ env.EXP }}" == "true" ]]; then
+            echo "✅ EXP ENABLED - Setting EXP parameters..."
+
+ # Set EXP variables dynamically
+ if [[ -n "${{ github.event.inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }}" ]]; then
+ EXP_LOG_ANALYTICS_ID="${{ github.event.inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }}"
+ else
+ EXP_LOG_ANALYTICS_ID="${{ secrets.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }}"
+ fi
+
+ if [[ -n "${{ github.event.inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }}" ]]; then
+ EXP_AI_PROJECT_ID="${{ github.event.inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }}"
+ else
+ EXP_AI_PROJECT_ID="${{ secrets.AZURE_ENV_FOUNDRY_PROJECT_ID }}"
+ fi
+
+ echo "AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: $EXP_LOG_ANALYTICS_ID"
+ echo "AZURE_ENV_FOUNDRY_PROJECT_ID: $EXP_AI_PROJECT_ID"
+ azd env set AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID="$EXP_LOG_ANALYTICS_ID"
+ azd env set AZURE_ENV_FOUNDRY_PROJECT_ID="$EXP_AI_PROJECT_ID"
+ else
+            echo "❌ EXP DISABLED - Skipping EXP parameters"
+ fi
+
+ # Deploy using azd up
+ azd up --no-prompt
+
+          echo "✅ Deployment succeeded."
+
+ # Get deployment outputs using azd
+ echo "Extracting deployment outputs..."
+ DEPLOY_OUTPUT=$(azd env get-values --output json)
+ echo "Deployment output: $DEPLOY_OUTPUT"
+
+ if [[ -z "$DEPLOY_OUTPUT" ]]; then
+ echo "Error: Deployment output is empty. Please check the deployment logs."
+ exit 1
+ fi
+
+ # Export variables only after successful deploy
+ export CONTAINER_API_APPURL="https://$(echo "$DEPLOY_OUTPUT" | jq -r '.CONTAINER_API_APP_FQDN // empty')"
+ echo "CONTAINER_API_APPURL=$CONTAINER_API_APPURL" >> $GITHUB_ENV
+
+ export CONTAINER_API_APPNAME=$(echo "$DEPLOY_OUTPUT" | jq -r '.CONTAINER_API_APP_NAME // empty')
+ echo "CONTAINER_API_APPNAME=$CONTAINER_API_APPNAME" >> $GITHUB_ENV
+
+ export CONTAINER_WEB_APPURL="https://$(echo "$DEPLOY_OUTPUT" | jq -r '.CONTAINER_WEB_APP_FQDN // empty')"
+ echo "CONTAINER_WEB_APPURL=$CONTAINER_WEB_APPURL" >> $GITHUB_ENV
+ echo "CONTAINER_WEB_APPURL=$CONTAINER_WEB_APPURL" >> $GITHUB_OUTPUT
+
+ export CONTAINER_WEB_APPNAME=$(echo "$DEPLOY_OUTPUT" | jq -r '.CONTAINER_WEB_APP_NAME // empty')
+ echo "CONTAINER_WEB_APPNAME=$CONTAINER_WEB_APPNAME" >> $GITHUB_ENV
+
+ - name: Register schemas
+ id: register
+ run: |
+ echo "Registering schemas..."
+ sleep 40 # Wait for the API to be ready
+
+ cd src/ContentProcessorAPI/samples/schemas
+ chmod +x ./register_schema.sh
+ ./register_schema.sh ${{ env.CONTAINER_API_APPURL }}/schemavault/ schema_info_sh.json
+
+ - name: Upload sample invoice and claim data
+ run: |
+ echo "Uploading sample data..."
+ cd src/ContentProcessorAPI/samples
+ chmod +x ./upload_files.sh
+ ./upload_files.sh ${{ env.CONTAINER_API_APPURL }}/contentprocessor/submit ./invoices '${{ steps.register.outputs.invoice_schema_id }}'
+ ./upload_files.sh ${{ env.CONTAINER_API_APPURL }}/contentprocessor/submit ./propertyclaims '${{ steps.register.outputs.propertylossdamageclaimform_schema_id }}'
+
+
+ - name: Disable Auth in Web App
+ run: |
+ az containerapp update --name ${{ env.CONTAINER_WEB_APPNAME }} \
+ --resource-group ${{ env.RESOURCE_GROUP_NAME }} \
+ --set-env-vars APP_AUTH_ENABLED=false
+
+ - name: Disable Auth in API App
+ run: |
+ sleep 30
+ az containerapp update --name ${{ env.CONTAINER_API_APPNAME }} \
+ --resource-group ${{ env.RESOURCE_GROUP_NAME }} \
+ --set-env-vars APP_AUTH_ENABLED=false
+
+ - name: Logout from Azure
+ if: always()
+ run: |
+ az logout
+ echo "Logged out from Azure."
+
+ - name: Generate Deploy Job Summary
+ if: always()
+ run: |
+          echo "## 🚀 Deploy Job Summary" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          echo "| Field | Value |" >> $GITHUB_STEP_SUMMARY
+          echo "|-------|--------|" >> $GITHUB_STEP_SUMMARY
+          echo "| **Job Status** | ${{ job.status == 'success' && '✅ Success' || '❌ Failed' }} |" >> $GITHUB_STEP_SUMMARY
+          echo "| **Resource Group** | \`${{ steps.check_create_rg.outputs.RESOURCE_GROUP_NAME }}\` |" >> $GITHUB_STEP_SUMMARY
+          echo "| **Azure Region (Infrastructure)** | \`${{ steps.set_region.outputs.AZURE_LOCATION }}\` |" >> $GITHUB_STEP_SUMMARY
+          echo "| **Azure OpenAI Region** | \`${{ steps.set_region.outputs.AZURE_ENV_OPENAI_LOCATION }}\` |" >> $GITHUB_STEP_SUMMARY
+          echo "| **Docker Image Tag** | \`${{ steps.determine_image_tag.outputs.IMAGE_TAG }}\` |" >> $GITHUB_STEP_SUMMARY
+          echo "| **WAF Enabled** | ${{ env.WAF_ENABLED == 'true' && '✅ Yes' || '❌ No' }} |" >> $GITHUB_STEP_SUMMARY
+          echo "| **EXP Enabled** | ${{ env.EXP == 'true' && '✅ Yes' || '❌ No' }} |" >> $GITHUB_STEP_SUMMARY
+          echo "| **Trigger** | ${{ github.event_name }} |" >> $GITHUB_STEP_SUMMARY
+          echo "| **Branch** | ${{ env.BRANCH_NAME }} |" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          if [[ "${{ job.status }}" == "success" ]]; then
+            echo "### ✅ Deployment Details" >> $GITHUB_STEP_SUMMARY
+            echo "- **Container Web App URL**: [${{ steps.get_output.outputs.CONTAINER_WEB_APPURL }}](${{ steps.get_output.outputs.CONTAINER_WEB_APPURL }})" >> $GITHUB_STEP_SUMMARY
+            echo "- **Container API App URL**: [${{ env.CONTAINER_API_APPURL }}](${{ env.CONTAINER_API_APPURL }})" >> $GITHUB_STEP_SUMMARY
+            echo "- **Configuration**: ${{ env.WAF_ENABLED == 'true' && 'WAF' || 'Non-WAF' }}+${{ env.EXP == 'true' && 'EXP' || 'Non-EXP' }}" >> $GITHUB_STEP_SUMMARY
+            echo "- Successfully deployed to Azure with all resources configured" >> $GITHUB_STEP_SUMMARY
+            echo "- Schemas registered and sample data uploaded successfully" >> $GITHUB_STEP_SUMMARY
+          else
+            echo "### ❌ Deployment Failed" >> $GITHUB_STEP_SUMMARY
+            echo "- Deployment process encountered an error" >> $GITHUB_STEP_SUMMARY
+            echo "- Check the deploy job for detailed error information" >> $GITHUB_STEP_SUMMARY
+          fi
+
+ e2e-test:
+ if: always() && ((needs.deploy.result == 'success' && needs.deploy.outputs.CONTAINER_WEB_APPURL != '') || (github.event.inputs.existing_webapp_url != '' && github.event.inputs.existing_webapp_url != null)) && (github.event_name != 'workflow_dispatch' || (github.event.inputs.run_e2e_tests != 'None' && github.event.inputs.run_e2e_tests != '' && github.event.inputs.run_e2e_tests != null))
+ needs: [docker-build, deploy]
+ uses: ./.github/workflows/test-automation-v2.yml
+ with:
+ CP_WEB_URL: ${{ needs.deploy.outputs.CONTAINER_WEB_APPURL || github.event.inputs.existing_webapp_url }}
+ TEST_SUITE: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.run_e2e_tests || 'GoldenPath-Testing' }}
+ secrets: inherit
+
+ send-notification:
+ if: always()
+ needs: [docker-build, deploy, e2e-test]
+ runs-on: ubuntu-latest
+ env:
+ accelerator_name: "Content Processing"
+ steps:
+ - name: Determine Test Suite Display Name
+ id: test_suite
+ run: |
+ # Determine test suite display name based on RUN_E2E_TESTS value
+ if [ "${{ env.RUN_E2E_TESTS }}" = "GoldenPath-Testing" ]; then
+ TEST_SUITE_NAME="Golden Path Testing"
+ elif [ "${{ env.RUN_E2E_TESTS }}" = "Smoke-Testing" ]; then
+ TEST_SUITE_NAME="Smoke Testing"
+ elif [ "${{ env.RUN_E2E_TESTS }}" = "None" ]; then
+ TEST_SUITE_NAME="None"
+ else
+ TEST_SUITE_NAME="${{ env.RUN_E2E_TESTS }}"
+ fi
+ echo "TEST_SUITE_NAME=$TEST_SUITE_NAME" >> $GITHUB_OUTPUT
+ echo "Test Suite: $TEST_SUITE_NAME"
+
+ - name: Send Quota Failure Notification
+ if: needs.deploy.result == 'failure' && needs.deploy.outputs.QUOTA_FAILED == 'true'
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+          # NOTE(review): heredoc reconstructed from a corrupted patch — confirm markup against the original workflow
+          EMAIL_BODY=$(cat <<EOF
+          {
+            "body": "Dear Team,<br><br>We would like to inform you that the ${{ env.accelerator_name }} deployment has failed due to insufficient quota in the requested regions.<br><br>Issue Details:<br>• Quota check failed for GPT model<br>• Required GPT Capacity: ${{ env.GPT_MIN_CAPACITY }}<br>• Checked Regions: ${{ vars.AZURE_REGIONS }}<br><br>Run URL: ${RUN_URL}<br><br>Please resolve the quota issue and retry the deployment.<br><br>Best regards,<br>Your Automation Team",
+            "subject": "${{ env.accelerator_name }} Pipeline - Failed (Insufficient Quota)"
+          }
+          EOF
+          )
+
+ curl -X POST "${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA }}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send quota failure notification"
+
+ - name: Send Deployment Failure Notification
+ if: needs.deploy.result == 'failure' && needs.deploy.outputs.QUOTA_FAILED != 'true'
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+ RESOURCE_GROUP="${{ needs.deploy.outputs.RESOURCE_GROUP_NAME }}"
+
+          # NOTE(review): heredoc reconstructed from a corrupted patch — confirm markup against the original workflow
+          EMAIL_BODY=$(cat <<EOF
+          {
+            "body": "Dear Team,<br><br>We would like to inform you that the ${{ env.accelerator_name }} deployment process has encountered an issue and has failed to complete successfully.<br><br>Deployment Details:<br>• Resource Group: ${RESOURCE_GROUP}<br>• WAF Enabled: ${{ env.WAF_ENABLED }}<br>• EXP Enabled: ${{ env.EXP }}<br><br>Run URL: ${RUN_URL}<br><br>Please investigate the deployment failure at your earliest convenience.<br><br>Best regards,<br>Your Automation Team",
+            "subject": "${{ env.accelerator_name }} Pipeline - Failed"
+          }
+          EOF
+          )
+
+ curl -X POST "${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA }}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send deployment failure notification"
+
+ - name: Send Success Notification
+ if: needs.deploy.result == 'success' && (needs.e2e-test.result == 'skipped' || needs.e2e-test.outputs.TEST_SUCCESS == 'true')
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+ WEBAPP_URL="${{ needs.deploy.outputs.CONTAINER_WEB_APPURL || github.event.inputs.existing_webapp_url }}"
+ RESOURCE_GROUP="${{ needs.deploy.outputs.RESOURCE_GROUP_NAME }}"
+ TEST_REPORT_URL="${{ needs.e2e-test.outputs.TEST_REPORT_URL }}"
+ TEST_SUITE_NAME="${{ steps.test_suite.outputs.TEST_SUITE_NAME }}"
+
+ # Create email body based on test result
+ if [ "${{ needs.e2e-test.result }}" = "skipped" ]; then
+            # NOTE(review): heredoc reconstructed from a corrupted patch — confirm markup against the original workflow
+            EMAIL_BODY=$(cat <<EOF
+            {
+              "body": "Dear Team,<br><br>We would like to inform you that the ${{ env.accelerator_name }} deployment has completed successfully.<br><br>Deployment Details:<br>• Resource Group: ${RESOURCE_GROUP}<br>• Web App URL: ${WEBAPP_URL}<br>• E2E Tests: Skipped (as configured)<br><br>Configuration:<br>• WAF Enabled: ${{ env.WAF_ENABLED }}<br>• EXP Enabled: ${{ env.EXP }}<br><br>Run URL: ${RUN_URL}<br><br>Best regards,<br>Your Automation Team",
+              "subject": "${{ env.accelerator_name }} Pipeline - Deployment Success"
+            }
+          EOF
+            )
+ else
+ EMAIL_BODY=$(cat <<EOF
+ {
+   "body": "Dear Team,\n\nWe would like to inform you that the ${{ env.accelerator_name }} deployment and testing process has completed successfully.\n\nDeployment Details:\n• Resource Group: ${RESOURCE_GROUP}\n• Web App URL: ${WEBAPP_URL}\n• E2E Tests: Passed ✅\n• Test Suite: ${TEST_SUITE_NAME}\n• Test Report: ${TEST_REPORT_URL}\n\nConfiguration:\n• WAF Enabled: ${{ env.WAF_ENABLED }}\n• EXP Enabled: ${{ env.EXP }}\n\nRun URL: ${RUN_URL}\n\nBest regards,\nYour Automation Team",
+   "subject": "${{ env.accelerator_name }} Pipeline - Test Automation - Success"
+ }
+ EOF
+ )
+ fi
+
+ curl -X POST "${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA }}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send success notification"
+
+ - name: Send Test Failure Notification
+ if: needs.deploy.result == 'success' && needs.e2e-test.result != 'skipped' && needs.e2e-test.outputs.TEST_SUCCESS != 'true'
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+ TEST_REPORT_URL="${{ needs.e2e-test.outputs.TEST_REPORT_URL }}"
+ WEBAPP_URL="${{ needs.deploy.outputs.CONTAINER_WEB_APPURL || github.event.inputs.existing_webapp_url }}"
+ RESOURCE_GROUP="${{ needs.deploy.outputs.RESOURCE_GROUP_NAME }}"
+ TEST_SUITE_NAME="${{ steps.test_suite.outputs.TEST_SUITE_NAME }}"
+
+ EMAIL_BODY=$(cat <<EOF
+ {
+   "body": "Dear Team,\n\nWe would like to inform you that ${{ env.accelerator_name }} accelerator test automation process has encountered issues and failed to complete successfully.\n\nDeployment Details:\n• Resource Group: ${RESOURCE_GROUP}\n• Web App URL: ${WEBAPP_URL}\n• Deployment Status: ✅ Success\n• E2E Tests: ❌ Failed\n• Test Suite: ${TEST_SUITE_NAME}\n\nTest Details:\n• Test Report: ${TEST_REPORT_URL}\n\nRun URL: ${RUN_URL}\n\nPlease investigate the matter at your earliest convenience.\n\nBest regards,\nYour Automation Team",
+   "subject": "${{ env.accelerator_name }} Pipeline - Test Automation - Failed"
+ }
+ EOF
+ )
+
+ curl -X POST "${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA }}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send test failure notification"
+
+ - name: Send Existing URL Success Notification
+ # Scenario: Deployment skipped (existing URL provided) AND e2e tests passed
+ if: needs.deploy.result == 'skipped' && github.event.inputs.existing_webapp_url != '' && needs.e2e-test.result == 'success' && (needs.e2e-test.outputs.TEST_SUCCESS == 'true' || needs.e2e-test.outputs.TEST_SUCCESS == '')
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+ EXISTING_URL="${{ github.event.inputs.existing_webapp_url }}"
+ TEST_REPORT_URL="${{ needs.e2e-test.outputs.TEST_REPORT_URL }}"
+ TEST_SUITE_NAME="${{ steps.test_suite.outputs.TEST_SUITE_NAME }}"
+
+ EMAIL_BODY=$(cat <<EOF
+ {
+   "body": "Dear Team,\n\nThe ${{ env.accelerator_name }} pipeline executed against the existing WebApp URL and testing process has completed successfully.\n\nTest Results:\n• Status: ✅ Passed\n• Test Suite: ${TEST_SUITE_NAME}\n${TEST_REPORT_URL:+• Test Report: ${TEST_REPORT_URL}\n}• Target URL: ${EXISTING_URL}\n\nDeployment: Skipped\n\nRun URL: ${RUN_URL}\n\nBest regards,\nYour Automation Team",
+   "subject": "${{ env.accelerator_name }} Pipeline - Test Automation Passed (Existing URL)"
+ }
+ EOF
+ )
+
+ curl -X POST "${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA }}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send existing URL success notification"
+
+ - name: Send Existing URL Test Failure Notification
+ # Scenario: Deployment skipped (existing URL provided) AND e2e tests failed
+ if: needs.deploy.result == 'skipped' && github.event.inputs.existing_webapp_url != '' && needs.e2e-test.result == 'failure'
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+ EXISTING_URL="${{ github.event.inputs.existing_webapp_url }}"
+ TEST_REPORT_URL="${{ needs.e2e-test.outputs.TEST_REPORT_URL }}"
+ TEST_SUITE_NAME="${{ steps.test_suite.outputs.TEST_SUITE_NAME }}"
+
+ EMAIL_BODY=$(cat <<EOF
+ {
+   "body": "Dear Team,\n\nThe ${{ env.accelerator_name }} pipeline executed against the existing WebApp URL and the test automation has encountered issues and failed to complete successfully.\n\nFailure Details:\n• Target URL: ${EXISTING_URL}\n${TEST_REPORT_URL:+• Test Report: ${TEST_REPORT_URL}\n}• Test Suite: ${TEST_SUITE_NAME}\n• Deployment: Skipped\n\nRun URL: ${RUN_URL}\n\nBest regards,\nYour Automation Team",
+   "subject": "${{ env.accelerator_name }} Pipeline - Test Automation Failed (Existing URL)"
+ }
+ EOF
+ )
+
+ curl -X POST "${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA }}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send existing URL test failure notification"
+
+ cleanup-deployment:
+ if: always() && needs.deploy.result == 'success' && needs.deploy.outputs.RESOURCE_GROUP_NAME != '' && github.event.inputs.existing_webapp_url == '' && (github.event_name != 'workflow_dispatch' || github.event.inputs.cleanup_resources == 'true' || github.event.inputs.cleanup_resources == null)
+ needs: [docker-build, deploy, e2e-test]
+ runs-on: ubuntu-latest
+ env:
+ RESOURCE_GROUP_NAME: ${{ needs.deploy.outputs.RESOURCE_GROUP_NAME }}
+ AZURE_LOCATION: ${{ needs.deploy.outputs.AZURE_LOCATION }}
+ AZURE_ENV_OPENAI_LOCATION: ${{ needs.deploy.outputs.AZURE_ENV_OPENAI_LOCATION }}
+ ENV_NAME: ${{ needs.deploy.outputs.ENV_NAME }}
+ IMAGE_TAG: ${{ needs.deploy.outputs.IMAGE_TAG }}
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v4
+
+ - name: Setup Azure Developer CLI
+ run: |
+ curl -fsSL https://aka.ms/install-azd.sh | sudo bash
+ azd version
+
+ - name: Login to Azure
+ run: |
+ azd auth login --client-id ${{ secrets.AZURE_CLIENT_ID }} --client-secret ${{ secrets.AZURE_CLIENT_SECRET }} --tenant-id ${{ secrets.AZURE_TENANT_ID }}
+ azd config set defaults.subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+ - name: Setup Azure CLI for Docker cleanup
+ run: |
+ curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+ az --version
+
+ - name: Login to Azure CLI for Docker cleanup
+ run: |
+ az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
+
+ - name: Delete Docker Images from ACR
+ if: github.event.inputs.existing_webapp_url == ''
+ run: |
+ set -e
+ echo "🗑️ Cleaning up Docker images from Azure Container Registry..."
+
+ # Determine the image tag to delete - check if docker-build job ran
+ if [[ "${{ needs.docker-build.result }}" == "success" ]]; then
+ IMAGE_TAG="${{ needs.docker-build.outputs.IMAGE_TAG }}"
+ echo "Using image tag from docker-build job: $IMAGE_TAG"
+ else
+ IMAGE_TAG="${{ needs.deploy.outputs.IMAGE_TAG }}"
+ echo "Using image tag from deploy job: $IMAGE_TAG"
+ fi
+
+ if [[ -n "$IMAGE_TAG" && "$IMAGE_TAG" != "latest" && "$IMAGE_TAG" != "dev" && "$IMAGE_TAG" != "demo" && "$IMAGE_TAG" != "hotfix" && "$IMAGE_TAG" != "dependabotchanges" ]]; then
+ echo "Deleting Docker images with tag: $IMAGE_TAG"
+
+ # Delete the main image
+ echo "Deleting image: ${{ secrets.ACR_TEST_LOGIN_SERVER }}/webapp:$IMAGE_TAG"
+ az acr repository delete --name $(echo "${{ secrets.ACR_TEST_LOGIN_SERVER }}" | cut -d'.' -f1) \
+ --image webapp:$IMAGE_TAG --yes || echo "Warning: Failed to delete main image or image not found"
+
+ echo "✅ Docker images cleanup completed"
+ else
+ echo "⚠️ Skipping Docker image cleanup (using standard branch image: $IMAGE_TAG)"
+ fi
+
+ - name: Select Environment and Delete deployment using azd
+ run: |
+ set -e
+ # Try to select the environment if it exists, otherwise create a minimal environment for cleanup
+ azd env list
+ if azd env list | grep -q "${{ env.ENV_NAME }}"; then
+ echo "Environment ${{ env.ENV_NAME }} found, selecting it..."
+ azd env select ${{ env.ENV_NAME }}
+ else
+ echo "Environment ${{ env.ENV_NAME }} not found, creating minimal environment for cleanup..."
+ azd env new ${{ env.ENV_NAME }} --no-prompt
+ azd env set AZURE_RESOURCE_GROUP "${{ env.RESOURCE_GROUP_NAME }}"
+ azd env set AZURE_SUBSCRIPTION_ID "${{ secrets.AZURE_SUBSCRIPTION_ID }}"
+ azd env set AZURE_ENV_AI_DEPLOYMENTS_LOCATION="${{ env.AZURE_ENV_OPENAI_LOCATION }}"
+ azd env set AZURE_LOCATION="${{ env.AZURE_LOCATION }}"
+ fi
+
+ echo "Deleting deployment..."
+ azd down --purge --force --no-prompt
+ echo "Deployment deleted successfully."
+
+ - name: Logout from Azure
+ if: always()
+ run: |
+ azd auth logout
+ az logout || echo "Warning: Failed to logout from Azure CLI"
+ echo "Logged out from Azure."
+
+ - name: Generate Cleanup Job Summary
+ if: always()
+ run: |
+ echo "## 🧹 Cleanup Job Summary" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "| Field | Value |" >> $GITHUB_STEP_SUMMARY
+ echo "|-------|--------|" >> $GITHUB_STEP_SUMMARY
+ echo "| **Job Status** | ${{ job.status == 'success' && '✅ Success' || '❌ Failed' }} |" >> $GITHUB_STEP_SUMMARY
+ echo "| **Resource Group** | \`${{ env.RESOURCE_GROUP_NAME }}\` |" >> $GITHUB_STEP_SUMMARY
+ echo "| **Azure Region (Infrastructure)** | \`${{ env.AZURE_LOCATION }}\` |" >> $GITHUB_STEP_SUMMARY
+ echo "| **Azure OpenAI Region** | \`${{ env.AZURE_ENV_OPENAI_LOCATION }}\` |" >> $GITHUB_STEP_SUMMARY
+ echo "| **Docker Image Tag** | \`${{ env.IMAGE_TAG }}\` |" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ if [[ "${{ job.status }}" == "success" ]]; then
+ echo "### ✅ Cleanup Details" >> $GITHUB_STEP_SUMMARY
+ echo "- Successfully deleted Azure deployment using \`azd down --purge\`" >> $GITHUB_STEP_SUMMARY
+ echo "- Resource group \`${{ env.RESOURCE_GROUP_NAME }}\` and all associated resources removed" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "### ❌ Cleanup Failed" >> $GITHUB_STEP_SUMMARY
+ echo "- Cleanup process encountered an error" >> $GITHUB_STEP_SUMMARY
+ echo "- Manual cleanup may be required for:" >> $GITHUB_STEP_SUMMARY
+ echo " - Resource Group: \`${{ env.RESOURCE_GROUP_NAME }}\`" >> $GITHUB_STEP_SUMMARY
+ echo "- Check the cleanup-deployment job logs for detailed error information" >> $GITHUB_STEP_SUMMARY
+ fi
\ No newline at end of file
diff --git a/.github/workflows/test-automation-v2.yml b/.github/workflows/test-automation-v2.yml
new file mode 100644
index 00000000..e865a348
--- /dev/null
+++ b/.github/workflows/test-automation-v2.yml
@@ -0,0 +1,195 @@
+name: Test Automation Content Processing-v2
+
+on:
+ workflow_call:
+ inputs:
+ CP_WEB_URL:
+ required: true
+ type: string
+ description: "Web URL for Content Processing"
+ TEST_SUITE:
+ required: false
+ type: string
+ default: "GoldenPath-Testing"
+ description: "Test suite to run: 'Smoke-Testing', 'GoldenPath-Testing' "
+ secrets:
+ EMAILNOTIFICATION_LOGICAPP_URL_TA:
+ required: false
+ description: "Logic App URL for email notifications"
+ outputs:
+ TEST_SUCCESS:
+ description: "Whether tests passed"
+ value: ${{ jobs.test.outputs.TEST_SUCCESS }}
+ TEST_REPORT_URL:
+ description: "URL to test report artifact"
+ value: ${{ jobs.test.outputs.TEST_REPORT_URL }}
+
+env:
+ url: ${{ inputs.CP_WEB_URL }}
+ accelerator_name: "Content Processing"
+ test_suite: ${{ inputs.TEST_SUITE }}
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ outputs:
+ TEST_SUCCESS: ${{ steps.test1.outcome == 'success' || steps.test2.outcome == 'success' || steps.test3.outcome == 'success' }}
+ TEST_REPORT_URL: ${{ steps.upload_report.outputs.artifact-url }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v5
+
+ - name: Set up Python
+ uses: actions/setup-python@v6
+ with:
+ python-version: '3.13'
+
+ - name: Login to Azure
+ run: |
+ az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
+ az account set --subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r tests/e2e-test/requirements.txt
+
+ - name: Ensure browsers are installed
+ run: python -m playwright install --with-deps chromium
+
+ - name: Validate URL
+ run: |
+ if [ -z "${{ env.url }}" ]; then
+ echo "ERROR: No URL provided for testing"
+ exit 1
+ fi
+ echo "Testing URL: ${{ env.url }}"
+ echo "Test Suite: ${{ env.test_suite }}"
+
+
+ - name: Wait for Application to be Ready
+ run: |
+ echo "Waiting for application to be ready at ${{ env.url }} "
+ max_attempts=10
+ attempt=1
+
+ while [ $attempt -le $max_attempts ]; do
+ echo "Attempt $attempt: Checking if application is ready..."
+ if curl -f -s "${{ env.url }}" > /dev/null; then
+ echo "Application is ready!"
+ break
+
+ fi
+
+ if [ $attempt -eq $max_attempts ]; then
+ echo "Application is not ready after $max_attempts attempts"
+ exit 1
+ fi
+
+ echo "Application not ready, waiting 30 seconds..."
+ sleep 30
+ attempt=$((attempt + 1))
+ done
+
+ - name: Run tests(1)
+ id: test1
+ run: |
+ if [ "${{ env.test_suite }}" == "GoldenPath-Testing" ]; then
+ xvfb-run pytest -m gp --headed --html=report/report.html --self-contained-html
+ else
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ fi
+ working-directory: tests/e2e-test
+ continue-on-error: true
+
+ - name: Sleep for 30 seconds
+ if: ${{ steps.test1.outcome == 'failure' }}
+ run: sleep 30s
+ shell: bash
+
+ - name: Run tests(2)
+ id: test2
+ if: ${{ steps.test1.outcome == 'failure' }}
+ run: |
+ if [ "${{ env.test_suite }}" == "GoldenPath-Testing" ]; then
+ xvfb-run pytest -m gp --headed --html=report/report.html --self-contained-html
+ else
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ fi
+ working-directory: tests/e2e-test
+ continue-on-error: true
+
+ - name: Sleep for 60 seconds
+ if: ${{ steps.test2.outcome == 'failure' }}
+ run: sleep 60s
+ shell: bash
+
+ - name: Run tests(3)
+ id: test3
+ if: ${{ steps.test2.outcome == 'failure' }}
+ run: |
+ if [ "${{ env.test_suite }}" == "GoldenPath-Testing" ]; then
+ xvfb-run pytest -m gp --headed --html=report/report.html --self-contained-html
+ else
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ fi
+ working-directory: tests/e2e-test
+
+ - name: Upload test report
+ id: upload_report
+ uses: actions/upload-artifact@v4
+ if: ${{ !cancelled() }}
+ with:
+ name: test-report
+ path: tests/e2e-test/report/*
+
+ - name: Generate E2E Test Summary
+ if: always()
+ run: |
+ # Determine test suite type for title
+ if [ "${{ env.test_suite }}" == "GoldenPath-Testing" ]; then
+ echo "## 🧪 E2E Test Job Summary : Golden Path Testing" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "## 🧪 E2E Test Job Summary : Smoke Testing" >> $GITHUB_STEP_SUMMARY
+ fi
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "| Field | Value |" >> $GITHUB_STEP_SUMMARY
+ echo "|-------|--------|" >> $GITHUB_STEP_SUMMARY
+
+ # Determine overall test result
+ OVERALL_SUCCESS="${{ steps.test1.outcome == 'success' || steps.test2.outcome == 'success' || steps.test3.outcome == 'success' }}"
+ if [[ "$OVERALL_SUCCESS" == "true" ]]; then
+ echo "| **Job Status** | ✅ Success |" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "| **Job Status** | ❌ Failed |" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ echo "| **Target URL** | [${{ env.url }}](${{ env.url }}) |" >> $GITHUB_STEP_SUMMARY
+ echo "| **Test Suite** | \`${{ env.test_suite }}\` |" >> $GITHUB_STEP_SUMMARY
+ echo "| **Test Report** | [Download Artifact](${{ steps.upload_report.outputs.artifact-url }}) |" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ echo "### 📊 Test Execution Details" >> $GITHUB_STEP_SUMMARY
+ echo "| Attempt | Status | Notes |" >> $GITHUB_STEP_SUMMARY
+ echo "|---------|--------|-------|" >> $GITHUB_STEP_SUMMARY
+ echo "| **Test Run 1** | ${{ steps.test1.outcome == 'success' && '✅ Passed' || '❌ Failed' }} | Initial test execution |" >> $GITHUB_STEP_SUMMARY
+
+ if [[ "${{ steps.test1.outcome }}" == "failure" ]]; then
+ echo "| **Test Run 2** | ${{ steps.test2.outcome == 'success' && '✅ Passed' || steps.test2.outcome == 'failure' && '❌ Failed' || '⏸️ Skipped' }} | Retry after 30s delay |" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ if [[ "${{ steps.test2.outcome }}" == "failure" ]]; then
+ echo "| **Test Run 3** | ${{ steps.test3.outcome == 'success' && '✅ Passed' || steps.test3.outcome == 'failure' && '❌ Failed' || '⏸️ Skipped' }} | Final retry after 60s delay |" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ if [[ "$OVERALL_SUCCESS" == "true" ]]; then
+ echo "### ✅ Test Results" >> $GITHUB_STEP_SUMMARY
+ echo "- End-to-end tests completed successfully" >> $GITHUB_STEP_SUMMARY
+ echo "- Application is functioning as expected" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "### ❌ Test Results" >> $GITHUB_STEP_SUMMARY
+ echo "- All test attempts failed" >> $GITHUB_STEP_SUMMARY
+ echo "- Check the e2e-test/test job for detailed error information" >> $GITHUB_STEP_SUMMARY
+ fi
\ No newline at end of file
diff --git a/docs/CustomizingAzdParameters.md b/docs/CustomizingAzdParameters.md
index 3ffc7635..cc1d10e0 100644
--- a/docs/CustomizingAzdParameters.md
+++ b/docs/CustomizingAzdParameters.md
@@ -17,6 +17,9 @@ By default this template will use the environment name as the prefix to prevent
| `AZURE_ENV_MODEL_VERSION` | string | `2024-08-06` | Specifies the GPT model version (allowed values: `2024-08-06`). |
| `AZURE_ENV_MODEL_CAPACITY` | integer | `30` | Sets the model capacity (choose based on your subscription's available GPT capacity). |
| `AZURE_ENV_IMAGETAG` | boolean | `latest` | Set the Image tag Like (allowed values: latest, dev, hotfix) |
+| `AZURE_ENV_CONTAINER_REGISTRY_ENDPOINT` | string | `cpscontainerreg.azurecr.io` | Sets the Azure Container Registry name (allowed value: `cpscontainerreg.azurecr.io`) |
+| `AZURE_ENV_CONTAINER_IMAGE_TAG` | string | `latest` | Sets the container image tag (e.g., `latest`, `dev`, `hotfix`). |
+| `AZURE_LOCATION` | string | `eastus` | Sets the primary Azure region for resource deployment. |
| `AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID` | string | Guide to get your [Existing Workspace ID](/docs/re-use-log-analytics.md) | Reuses an existing Log Analytics Workspace instead of provisioning a new one. |
| `AZURE_EXISTING_AI_PROJECT_RESOURCE_ID` | string | `` | Reuses an existing AIFoundry and AIFoundryProject instead of creating a new one. |
diff --git a/infra/main.bicep b/infra/main.bicep
index 1b329d0a..6631a08d 100644
--- a/infra/main.bicep
+++ b/infra/main.bicep
@@ -60,6 +60,9 @@ param secondaryLocation string = (location == 'eastus2') ? 'westus2' : 'eastus2'
@description('Optional. The public container image endpoint.')
param publicContainerImageEndpoint string = 'cpscontainerreg.azurecr.io'
+@description('Optional. The image tag for the container images.')
+param imageTag string = 'latest'
+
@description('Optional. The resource group location.')
param resourceGroupLocation string = resourceGroup().location
@@ -717,7 +720,7 @@ module avmContainerApp 'br/public:avm/res/app/container-app:0.17.0' = {
containers: [
{
name: 'ca-${solutionSuffix}'
- image: '${publicContainerImageEndpoint}/contentprocessor:latest'
+ image: '${publicContainerImageEndpoint}/contentprocessor:${imageTag}'
resources: {
cpu: '4'
@@ -766,7 +769,7 @@ module avmContainerApp_API 'br/public:avm/res/app/container-app:0.17.0' = {
containers: [
{
name: 'ca-${solutionSuffix}-api'
- image: '${publicContainerImageEndpoint}/contentprocessorapi:latest'
+ image: '${publicContainerImageEndpoint}/contentprocessorapi:${imageTag}'
resources: {
cpu: '4'
memory: '8.0Gi'
@@ -896,7 +899,7 @@ module avmContainerApp_Web 'br/public:avm/res/app/container-app:0.17.0' = {
containers: [
{
name: 'ca-${solutionSuffix}-web'
- image: '${publicContainerImageEndpoint}/contentprocessorweb:latest'
+ image: '${publicContainerImageEndpoint}/contentprocessorweb:${imageTag}'
resources: {
cpu: '4'
memory: '8.0Gi'
@@ -1190,7 +1193,7 @@ module avmContainerApp_update 'br/public:avm/res/app/container-app:0.17.0' = {
containers: [
{
name: 'ca-${solutionSuffix}'
- image: '${publicContainerImageEndpoint}/contentprocessor:latest'
+ image: '${publicContainerImageEndpoint}/contentprocessor:${imageTag}'
resources: {
cpu: '4'
@@ -1250,7 +1253,7 @@ module avmContainerApp_API_update 'br/public:avm/res/app/container-app:0.17.0' =
containers: [
{
name: 'ca-${solutionSuffix}-api'
- image: '${publicContainerImageEndpoint}/contentprocessorapi:latest'
+ image: '${publicContainerImageEndpoint}/contentprocessorapi:${imageTag}'
resources: {
cpu: '4'
memory: '8.0Gi'
diff --git a/infra/main.parameters.json b/infra/main.parameters.json
index 2d8f9eb3..adcde897 100644
--- a/infra/main.parameters.json
+++ b/infra/main.parameters.json
@@ -31,6 +31,15 @@
},
"existingFoundryProjectResourceId": {
"value": "${AZURE_ENV_FOUNDRY_PROJECT_ID}"
+ },
+ "publicContainerImageEndpoint": {
+ "value": "${AZURE_ENV_CONTAINER_REGISTRY_ENDPOINT}"
+ },
+ "imageTag": {
+ "value": "${AZURE_ENV_CONTAINER_IMAGE_TAG}"
+ },
+ "location": {
+ "value": "${AZURE_LOCATION}"
}
}
}
\ No newline at end of file
diff --git a/infra/main.waf.parameters.json b/infra/main.waf.parameters.json
index c003947d..2159a8dc 100644
--- a/infra/main.waf.parameters.json
+++ b/infra/main.waf.parameters.json
@@ -46,6 +46,15 @@
},
"virtualMachineAdminPassword": {
"value": "${AZURE_ENV_VM_ADMIN_PASSWORD}"
+ },
+ "publicContainerImageEndpoint": {
+ "value": "${AZURE_ENV_CONTAINER_REGISTRY_ENDPOINT}"
+ },
+ "imageTag": {
+ "value": "${AZURE_ENV_CONTAINER_IMAGE_TAG}"
+ },
+ "location": {
+ "value": "${AZURE_LOCATION}"
}
}
}
\ No newline at end of file
diff --git a/tests/e2e-test/base/base.py b/tests/e2e-test/base/base.py
index 5992ab6a..648346be 100644
--- a/tests/e2e-test/base/base.py
+++ b/tests/e2e-test/base/base.py
@@ -1,10 +1,38 @@
+"""
+Base page module providing common functionality for all page objects.
+"""
+
+
class BasePage:
+ """Base class for all page objects with common methods."""
+
def __init__(self, page):
+ """
+ Initialize the BasePage with a Playwright page instance.
+
+ Args:
+ page: Playwright page object
+ """
self.page = page
def scroll_into_view(self, locator):
+ """
+ Scroll the last element matching the locator into view.
+
+ Args:
+ locator: Playwright locator object
+ """
reference_list = locator
locator.nth(reference_list.count() - 1).scroll_into_view_if_needed()
def is_visible(self, locator):
- locator.is_visible()
+ """
+ Check if an element is visible on the page.
+
+ Args:
+ locator: Playwright locator object
+
+ Returns:
+ bool: True if visible, False otherwise
+ """
+ return locator.is_visible()
diff --git a/tests/e2e-test/config/constants.py b/tests/e2e-test/config/constants.py
index f5f4c9ac..28566894 100644
--- a/tests/e2e-test/config/constants.py
+++ b/tests/e2e-test/config/constants.py
@@ -1,8 +1,12 @@
+"""
+Configuration constants module for test environment settings.
+"""
+
import os
from dotenv import load_dotenv
load_dotenv()
URL = os.getenv("url")
-if URL.endswith("/"):
+if URL and URL.endswith("/"):
URL = URL[:-1]
diff --git a/tests/e2e-test/pages/HomePage.py b/tests/e2e-test/pages/HomePage.py
index ce091f44..b4597be7 100644
--- a/tests/e2e-test/pages/HomePage.py
+++ b/tests/e2e-test/pages/HomePage.py
@@ -1,10 +1,37 @@
+"""
+Home page module for Content Processing Solution Accelerator.
+"""
+
import os.path
+import logging
from base.base import BasePage
from playwright.sync_api import expect
+logger = logging.getLogger(__name__)
+
class HomePage(BasePage):
+ """
+ Home page object containing all locators and methods for interacting
+ with the Content Processing home page.
+ """
+ # HOMEPAGE
+ PROCESSING_QUEUE = "//span[normalize-space()='Processing Queue']"
+ OUTPUT_REVIEW = "//span[normalize-space()='Output Review']"
+ SOURCE_DOC = "//span[normalize-space()='Source Document']"
+ PROCESSING_QUEUE_BTN = "//button[normalize-space()='Processing Queue']"
+ OUTPUT_REVIEW_BTN = "//button[normalize-space()='Output Review']"
+ SOURCE_DOC_BTN = "//button[normalize-space()='Source Document']"
+ INVOICE_SELECTED_SCHEMA = "//span[.='Selected Schema : Invoice ']"
+ PROP_SELECTED_SCHEMA = "//span[.='Selected Schema : Property Loss Damage Claim Form ']"
+ INVOICE_SELECT_VALIDATION = "//div[contains(text(),'Please Select Schema')]"
+ SEARCH_BOX = "//input[@placeholder='Search']"
+ PROCESSING_QUEUE_CP = "//div[@class='panelLeft']//button[@title='Collapse Panel']"
+ COLLAPSE_PANEL_BTN = "//button[@title='Collapse Panel']"
+ API_DOCUMENTATION = "//span[.='API Documentation']"
+ INVALID_FILE_VALIDATION = "//p[contains(.,'Only PDF and JPEG, PNG image files are available.')]"
+
TITLE_TEXT = "//span[normalize-space()='Processing Queue']"
SELECT_SCHEMA = "//input[@placeholder='Select Schema']"
IMPORT_CONTENT = "//button[normalize-space()='Import Content']"
@@ -12,6 +39,9 @@ class HomePage(BasePage):
BROWSE_FILES = "//button[normalize-space()='Browse Files']"
UPLOAD_BTN = "//button[normalize-space()='Upload']"
SUCCESS_MSG = "/div[@class='file-item']//*[name()='svg']"
+ UPLOAD_WARNING_MESSAGE = "//div[contains(text(),'Please upload files specific to')]"
+ SCHEMA_NAME_IN_WARNING = "//div[contains(text(),'Invoice')]"
+
CLOSE_BTN = "//button[normalize-space()='Close']"
STATUS = "//div[@role='cell']"
PROCESS_STEPS = "//button[@value='process-history']"
@@ -90,26 +120,71 @@ class HomePage(BasePage):
)
def __init__(self, page):
+ """
+ Initialize the HomePage.
+
+ Args:
+ page: Playwright page object
+ """
+ super().__init__(page)
self.page = page
def validate_home_page(self):
- expect(self.page.locator(self.TITLE_TEXT)).to_be_visible()
+ """Validate that the home page elements are visible."""
+ logger.info("Starting home page validation...")
+
+ logger.info("Validating Processing Queue is visible...")
+ expect(self.page.locator(self.PROCESSING_QUEUE)).to_be_visible()
+ logger.info("✓ Processing Queue is visible")
+
+ logger.info("Validating Output Review is visible...")
+ expect(self.page.locator(self.OUTPUT_REVIEW)).to_be_visible()
+ logger.info("✓ Output Review is visible")
+
+ logger.info("Validating Source Document is visible...")
+ expect(self.page.locator(self.SOURCE_DOC)).to_be_visible()
+ logger.info("✓ Source Document is visible")
+
self.page.wait_for_timeout(2000)
+ logger.info("Home page validation completed successfully")
def select_schema(self, SchemaName):
+ """Select a schema from the dropdown."""
+ logger.info(f"Starting schema selection for: {SchemaName}")
+
self.page.wait_for_timeout(5000)
+
+ logger.info("Clicking on Select Schema dropdown...")
self.page.locator(self.SELECT_SCHEMA).click()
+ logger.info("β Select Schema dropdown clicked")
+
if SchemaName == "Invoice":
+ logger.info("Selecting 'Invoice' option...")
self.page.get_by_role("option", name="Invoice").click()
+ logger.info("β 'Invoice' option selected")
else:
+ logger.info("Selecting 'Property Loss Damage Claim' option...")
self.page.get_by_role("option", name="Property Loss Damage Claim").click()
+ logger.info("β 'Property Loss Damage Claim' option selected")
+
+ logger.info(f"Schema selection completed for: {SchemaName}")
def upload_files(self, schemaType):
+ """Upload files based on schema type."""
+ logger.info(f"Starting file upload for schema type: {schemaType}")
+
with self.page.expect_file_chooser() as fc_info:
+ logger.info("Clicking Import Content button...")
self.page.locator(self.IMPORT_CONTENT).click()
+ logger.info("β Import Content button clicked")
+
+ logger.info("Clicking Browse Files button...")
self.page.locator(self.BROWSE_FILES).click()
+ logger.info("β Browse Files button clicked")
+
self.page.wait_for_timeout(5000)
# self.page.wait_for_load_state('networkidle')
+
file_chooser = fc_info.value
current_working_dir = os.getcwd()
file_path1 = os.path.join(
@@ -118,149 +193,580 @@ def upload_files(self, schemaType):
file_path2 = os.path.join(current_working_dir, "testdata", "ClaimForm_1.pdf")
if schemaType == "Invoice":
+ logger.info(f"Selecting file: {file_path1}")
file_chooser.set_files([file_path1])
+ logger.info("β Invoice file selected")
else:
+ logger.info(f"Selecting file: {file_path2}")
file_chooser.set_files([file_path2])
+ logger.info("β Claim form file selected")
+
self.page.wait_for_timeout(5000)
self.page.wait_for_load_state("networkidle")
+
+ logger.info("Clicking Upload button...")
self.page.locator(self.UPLOAD_BTN).click()
+ logger.info("β Upload button clicked")
+
self.page.wait_for_timeout(10000)
+
+ logger.info("Validating success message is visible...")
expect(
self.page.get_by_role("alertdialog", name="Import Content")
.locator("path")
.nth(1)
).to_be_visible()
+ logger.info("β Success message is visible")
+
+ logger.info("Closing upload dialog...")
self.page.locator(self.CLOSE_BTN).click()
+ logger.info("β Upload dialog closed")
+
+ logger.info(f"File upload completed successfully for schema type: {schemaType}")
def refresh(self):
+ """Refresh and wait for processing to complete."""
+ logger.info("Starting refresh process to monitor file processing status...")
+
status_ele = self.page.locator(self.STATUS).nth(2)
- max_retries = 15
+ max_retries = 20
for i in range(max_retries):
status_text = status_ele.inner_text().strip()
+ logger.info(f"Attempt {i + 1}/{max_retries}: Current status = '{status_text}'")
if status_text == "Completed":
+ logger.info("β Processing completed successfully")
break
elif status_text == "Error":
+ logger.error(f"Process failed with status: 'Error' after {i + 1} retries")
raise Exception(
f"Process failed with status: 'Error' after {i + 1} retries."
)
+ logger.info("Clicking Refresh button...")
self.page.locator(self.REFRESH).click()
+ logger.info("β Refresh button clicked, waiting 5 seconds...")
self.page.wait_for_timeout(5000)
else:
# Executed only if the loop did not break (i.e., status is neither Completed nor Error)
+ logger.error(f"Process did not complete. Final status was '{status_text}' after {max_retries} retries")
raise Exception(
f"Process did not complete. Final status was '{status_text}' after {max_retries} retries."
)
+ logger.info("Refresh process completed successfully")
+
def validate_invoice_extracted_result(self):
+ """Validate all extracted invoice data fields."""
+ logger.info("Starting invoice extracted result validation...")
+
+ logger.info("Validating Customer Name...")
expect(self.page.locator(self.CUSTOMER_NAME)).to_contain_text(
"Paris Fashion Group SARL"
)
+ logger.info("✓ Customer Name validated: Paris Fashion Group SARL")
+
+ logger.info("Validating Customer Street...")
expect(self.page.locator(self.CUSTOMER_STREET)).to_contain_text(
"10 Rue de Rivoli"
)
+ logger.info("✓ Customer Street validated: 10 Rue de Rivoli")
+
+ logger.info("Validating Customer City...")
expect(self.page.locator(self.CUSTOMER_CITY)).to_contain_text("Paris")
+ logger.info("✓ Customer City validated: Paris")
+
+ logger.info("Validating Customer Zip Code...")
expect(self.page.locator(self.CUSTOMER_ZIP_CODE)).to_contain_text("75001")
+ logger.info("✓ Customer Zip Code validated: 75001")
+
+ logger.info("Validating Customer Country...")
expect(self.page.locator(self.CUSTOMER_COUNTRY)).to_contain_text("France")
+ logger.info("✓ Customer Country validated: France")
+
+ logger.info("Validating Shipping Street...")
expect(self.page.locator(self.SHIPPING_STREET)).to_contain_text(
"25 Avenue Montaigne"
)
+ logger.info("✓ Shipping Street validated: 25 Avenue Montaigne")
+
+ logger.info("Validating Shipping City...")
expect(self.page.locator(self.SHIPPING_CITY)).to_contain_text("Paris")
+ logger.info("✓ Shipping City validated: Paris")
+
+ logger.info("Validating Shipping Postal Code...")
expect(self.page.locator(self.SHIPPING_POSTAL_CODE)).to_contain_text("75008")
+ logger.info("✓ Shipping Postal Code validated: 75008")
+
+ logger.info("Validating Shipping Country...")
expect(self.page.locator(self.SHIPPING_COUNTRY)).to_contain_text("France")
+ logger.info("✓ Shipping Country validated: France")
+
+ logger.info("Validating Purchase Order...")
expect(self.page.locator(self.PURCHASE_ORDER)).to_contain_text("PO-34567")
+ logger.info("✓ Purchase Order validated: PO-34567")
+
+ logger.info("Validating Invoice ID...")
expect(self.page.locator(self.INVOICE_ID)).to_contain_text("INV-20231005")
+ logger.info("✓ Invoice ID validated: INV-20231005")
+
+ logger.info("Validating Invoice Date...")
expect(self.page.locator(self.INVOICE_DATE)).to_contain_text("2023-10-05")
- expect(self.page.locator(self.INVOICE_DATE)).to_contain_text("2023-10-05")
+ logger.info("✓ Invoice Date validated: 2023-10-05")
+
+ logger.info("Validating Payable By Date...")
expect(self.page.locator(self.payable_by)).to_contain_text("2023-11-04")
+ logger.info("✓ Payable By Date validated: 2023-11-04")
+
+ logger.info("Validating Vendor Name...")
expect(self.page.locator(self.vendor_name)).to_contain_text(
"Fabrikam Unlimited Company"
)
+ logger.info("✓ Vendor Name validated: Fabrikam Unlimited Company")
+
+ logger.info("Validating Vendor Street...")
expect(self.page.locator(self.v_street)).to_contain_text("Wilton Place")
+ logger.info("✓ Vendor Street validated: Wilton Place")
+
+ logger.info("Validating Vendor City...")
expect(self.page.locator(self.v_city)).to_contain_text("Brooklyn")
+ logger.info("✓ Vendor City validated: Brooklyn")
+
+ logger.info("Validating Vendor State...")
expect(self.page.locator(self.v_state)).to_contain_text("NY")
+ logger.info("✓ Vendor State validated: NY")
+
+ logger.info("Validating Vendor Zip Code...")
expect(self.page.locator(self.v_zip_code)).to_contain_text("22345")
+ logger.info("✓ Vendor Zip Code validated: 22345")
+
+ logger.info("Validating Vendor Tax ID...")
expect(self.page.locator(self.vendor_tax_id)).to_contain_text("FR123456789")
+ logger.info("✓ Vendor Tax ID validated: FR123456789")
+
+ logger.info("Validating Subtotal...")
expect(self.page.locator(self.SUBTOTAL)).to_contain_text("16859.1")
+ logger.info("✓ Subtotal validated: 16859.1")
+
+ logger.info("Validating Total Tax...")
expect(self.page.locator(self.TOTAL_TAX)).to_contain_text("11286")
+ logger.info("✓ Total Tax validated: 11286")
+
+ logger.info("Validating Invoice Total...")
expect(self.page.locator(self.INVOICE_TOTAL)).to_contain_text("22516.08")
+ logger.info("✓ Invoice Total validated: 22516.08")
+
+ logger.info("Validating Payment Terms...")
expect(self.page.locator(self.PAYMENT_TERMS)).to_contain_text("Net 30")
+ logger.info("✓ Payment Terms validated: Net 30")
+
+ logger.info("Validating Product Code...")
expect(self.page.locator(self.product_code1)).to_contain_text("EM032")
+ logger.info("✓ Product Code validated: EM032")
+
+ logger.info("Validating Product Description...")
expect(self.page.locator(self.p1_description)).to_contain_text(
"Item: Terminal Lug"
)
+ logger.info("✓ Product Description validated: Item: Terminal Lug")
+
+ logger.info("Validating Product Quantity...")
expect(self.page.locator(self.p1_quantity)).to_contain_text("163")
+ logger.info("✓ Product Quantity validated: 163")
+
+ logger.info("Validating Product Tax...")
expect(self.page.locator(self.p1_tax)).to_contain_text("2934")
+ logger.info("✓ Product Tax validated: 2934")
+
+ logger.info("Validating Product Unit Price...")
expect(self.page.locator(self.p1_unit_price)).to_contain_text("2.5")
+ logger.info("✓ Product Unit Price validated: 2.5")
+
+ logger.info("Validating Product Total...")
expect(self.page.locator(self.p1_total)).to_contain_text("407.5")
+ logger.info("✓ Product Total validated: 407.5")
+
+ logger.info("Invoice extracted result validation completed successfully")
def modify_and_submit_extracted_data(self):
+ """Modify shipping address and submit the changes."""
+ logger.info("Starting modification of extracted data...")
+
+ logger.info("Double-clicking on Shipping Street field...")
self.page.get_by_text('"25 Avenue Montaigne"').dblclick()
+ logger.info("✓ Shipping Street field double-clicked")
+
+ logger.info("Updating Shipping Street to '25 Avenue Montaigne updated'...")
self.page.locator(self.SHIPPING_ADD_STREET).fill("25 Avenue Montaigne updated")
+ logger.info("✓ Shipping Street updated")
+
+ logger.info("Clicking Edit Confirm button...")
self.page.locator(self.EDIT_CONFIRM).click()
+ logger.info("✓ Edit Confirm button clicked")
+
+ logger.info("Adding comment: 'Updated Shipping street address'...")
self.page.locator(self.COMMENTS).fill("Updated Shipping street address")
+ logger.info("✓ Comment added")
+
+ logger.info("Clicking Save button...")
self.page.locator(self.SAVE_BTN).click()
+ logger.info("✓ Save button clicked")
+
self.page.wait_for_timeout(6000)
+ logger.info("Data modification and submission completed successfully")
def validate_process_steps(self):
+ """Validate all process steps (extract, map, evaluate)."""
+ logger.info("Starting process steps validation...")
+
+ logger.info("Clicking on Process Steps tab...")
self.page.locator(self.PROCESS_STEPS).click()
+ logger.info("✓ Process Steps tab clicked")
+
+ # Extract Step
+ logger.info("Validating Extract step...")
self.page.locator(self.EXTRACT).click()
self.page.wait_for_timeout(3000)
+
+ logger.info("Checking 'extract' text is visible...")
expect(self.page.get_by_text('"extract"')).to_be_visible()
+ logger.info("✓ 'extract' text is visible")
+
+ logger.info("Checking 'Succeeded' status is visible...")
expect(self.page.get_by_text('"Succeeded"')).to_be_visible()
+ logger.info("✓ 'Succeeded' status is visible for Extract step")
+
self.page.locator(self.EXTRACT).click()
self.page.wait_for_timeout(3000)
+
+ # Map Step
+ logger.info("Validating Map step...")
self.page.locator(self.MAP).click()
self.page.wait_for_timeout(3000)
+
+ logger.info("Checking 'map' text is visible...")
expect(self.page.get_by_text('"map"')).to_be_visible()
+ logger.info("✓ 'map' text is visible for Map step")
+
self.page.locator(self.MAP).click()
self.page.wait_for_timeout(3000)
+
+ # Evaluate Step
+ logger.info("Validating Evaluate step...")
self.page.locator(self.EVALUATE).click()
self.page.wait_for_timeout(3000)
+
+ logger.info("Checking 'evaluate' text is visible...")
expect(self.page.get_by_text('"evaluate"')).to_be_visible()
+ logger.info("✓ 'evaluate' text is visible for Evaluate step")
+
self.page.locator(self.EVALUATE).click()
self.page.wait_for_timeout(3000)
+
+ logger.info("Clicking on Extracted Result tab...")
self.page.locator(self.EXTRACTED_RESULT).click()
self.page.wait_for_timeout(3000)
+ logger.info("✓ Extracted Result tab clicked")
+
+ logger.info("Process steps validation completed successfully")
def validate_property_extracted_result(self):
+ """Validate all extracted property claim data fields."""
+ logger.info("Starting property extracted result validation...")
+
+ logger.info("Validating First Name...")
expect(self.page.locator(self.first_name)).to_contain_text("Sophia")
+ logger.info("✓ First Name validated: Sophia")
+
+ logger.info("Validating Last Name...")
expect(self.page.locator(self.last_name)).to_contain_text("Kim")
+ logger.info("✓ Last Name validated: Kim")
+
+ logger.info("Validating Telephone Number...")
expect(self.page.locator(self.tel_no)).to_contain_text("646-555-0789")
+ logger.info("✓ Telephone Number validated: 646-555-0789")
+
+ logger.info("Validating Policy Number...")
expect(self.page.locator(self.policy_no)).to_contain_text("PH5678901")
+ logger.info("✓ Policy Number validated: PH5678901")
+
+ logger.info("Validating Coverage Type...")
expect(self.page.locator(self.coverage_type)).to_contain_text("Homeowners")
+ logger.info("✓ Coverage Type validated: Homeowners")
+
+ logger.info("Validating Claim Number...")
expect(self.page.locator(self.claim_number)).to_contain_text("CLM5432109")
+ logger.info("✓ Claim Number validated: CLM5432109")
+
+ logger.info("Validating Policy Effective Date...")
expect(self.page.locator(self.policy_effective_date)).to_contain_text(
"2022-07-01"
)
+ logger.info("✓ Policy Effective Date validated: 2022-07-01")
+
+ logger.info("Validating Policy Expiration Date...")
expect(self.page.locator(self.policy_expiration_date)).to_contain_text(
"2023-07-01"
)
+ logger.info("✓ Policy Expiration Date validated: 2023-07-01")
+
+ logger.info("Validating Damage Deductible...")
expect(self.page.locator(self.damage_deductible)).to_contain_text("1000")
+ logger.info("✓ Damage Deductible validated: 1000")
+
+ logger.info("Validating Damage Deductible Currency...")
expect(self.page.locator(self.damage_deductible_currency)).to_contain_text(
"USD"
)
+ logger.info("✓ Damage Deductible Currency validated: USD")
+
+ logger.info("Validating Date of Damage/Loss...")
expect(self.page.locator(self.date_of_damage_loss)).to_contain_text(
"2023-05-10"
)
+ logger.info("✓ Date of Damage/Loss validated: 2023-05-10")
+
+ logger.info("Validating Time of Loss...")
expect(self.page.locator(self.time_of_loss)).to_contain_text("13:20")
+ logger.info("✓ Time of Loss validated: 13:20")
+
+ logger.info("Validating Date Prepared...")
expect(self.page.locator(self.date_prepared)).to_contain_text("2023-05-11")
+ logger.info("✓ Date Prepared validated: 2023-05-11")
+
+ logger.info("Validating Item...")
expect(self.page.locator(self.item)).to_contain_text("Apple")
+ logger.info("✓ Item validated: Apple")
+
+ logger.info("Validating Description...")
expect(self.page.locator(self.description)).to_contain_text(
'"High-performance tablet with a large, vibrant display'
)
+ logger.info("✓ Description validated")
+
+ logger.info("Validating Date Acquired...")
expect(self.page.locator(self.date_acquired)).to_contain_text("2022-01-20")
+ logger.info("✓ Date Acquired validated: 2022-01-20")
+
+ logger.info("Validating Cost New...")
expect(self.page.locator(self.cost_new)).to_contain_text("1100")
+ logger.info("✓ Cost New validated: 1100")
+
+ logger.info("Validating Cost New Currency...")
expect(self.page.locator(self.cost_new_currency)).to_contain_text("USD")
+ logger.info("✓ Cost New Currency validated: USD")
+
+ logger.info("Validating Replacement/Repair...")
expect(self.page.locator(self.replacement_repair)).to_contain_text("350")
+ logger.info("✓ Replacement/Repair validated: 350")
+
+ logger.info("Validating Replacement/Repair Currency...")
expect(self.page.locator(self.replacement_repair_currency)).to_contain_text(
"USD"
)
+ logger.info("✓ Replacement/Repair Currency validated: USD")
+
+ logger.info("Property extracted result validation completed successfully")
def delete_files(self):
+ """Delete uploaded files from the processing queue."""
+ logger.info("Starting file deletion process...")
+
+ logger.info("Clicking on Delete File menu button...")
self.page.locator(self.DELETE_FILE).nth(0).click()
+ logger.info("✓ Delete File menu button clicked")
+
+ logger.info("Clicking on Delete menu item...")
self.page.get_by_role("menuitem", name="Delete").click()
+ logger.info("✓ Delete menu item clicked")
+
+ logger.info("Clicking on Confirm button...")
self.page.get_by_role("button", name="Confirm").click()
+ logger.info("✓ Confirm button clicked")
+
self.page.wait_for_timeout(6000)
+ logger.info("File deletion completed successfully")
+
+ def validate_import_without_schema(self):
+ """Validate import content validation when no schema is selected."""
+ logger.info("Starting validation for import without schema selection...")
+
+ logger.info("Clicking on Import Content button without selecting schema...")
+ self.page.locator(self.IMPORT_CONTENT).click()
+ logger.info("✓ Import Content button clicked")
+
+ logger.info("Validating 'Please Select Schema' message is visible...")
+ expect(self.page.locator(self.INVOICE_SELECT_VALIDATION)).to_be_visible()
+ logger.info("✓ 'Please Select Schema' validation message is visible")
+
+ logger.info("Import without schema validation completed successfully")
+
+ def validate_invoice_schema_selected(self):
+ """Validate that Invoice schema is selected and visible."""
+ logger.info("Starting validation for Invoice schema selection...")
+
+ logger.info("Clicking on Import Content button...")
+ self.page.locator(self.IMPORT_CONTENT).click()
+ logger.info("✓ Import Content button clicked")
+
+ logger.info("Validating 'Selected Schema : Invoice' message is visible...")
+ expect(self.page.locator(self.INVOICE_SELECTED_SCHEMA)).to_be_visible()
+ logger.info("✓ 'Selected Schema : Invoice' is visible")
+
+ logger.info("Invoice schema selection validation completed successfully")
+
+ def validate_property_schema_selected(self):
+ """Validate that Property Loss Damage Claim Form schema is selected and visible."""
+ logger.info("Starting validation for Property Loss Damage Claim Form schema selection...")
+
+ logger.info("Clicking on Import Content button...")
+ self.page.locator(self.IMPORT_CONTENT).click()
+ logger.info("✓ Import Content button clicked")
+
+ logger.info("Validating 'Selected Schema : Property Loss Damage Claim Form' message is visible...")
+ expect(self.page.locator(self.PROP_SELECTED_SCHEMA)).to_be_visible()
+ logger.info("✓ 'Selected Schema : Property Loss Damage Claim Form' is visible")
+
+ logger.info("Property Loss Damage Claim Form schema selection validation completed successfully")
+
+ def close_upload_popup(self):
+ """Close the upload popup dialog."""
+ logger.info("Starting to close upload popup...")
+
+ logger.info("Clicking on Close button...")
+ self.page.locator(self.CLOSE_BTN).click()
+ logger.info("β Close button clicked")
+
+ logger.info("Upload popup closed successfully")
+
+ def refresh_page(self):
+ """Refresh the current page using browser reload."""
+ logger.info("Starting page refresh...")
+
+ logger.info("Reloading the page...")
+ self.page.reload()
+ logger.info("✓ Page reloaded")
+
+ self.page.wait_for_timeout(3000)
+ logger.info("Page refresh completed successfully")
+
+ def validate_search_functionality(self):
+ """Validate search functionality in extracted results."""
+ logger.info("Starting search functionality validation...")
+
+ logger.info("Entering search text 'Fabrikam' in Search Box...")
+ self.page.locator(self.SEARCH_BOX).fill("Fabrikam")
+ logger.info("✓ Search text 'Fabrikam' entered")
+
+ self.page.wait_for_timeout(2000)
+
+ logger.info("Validating vendor name contains 'Fabrikam'...")
+ expect(self.page.locator("//div[@id='vendor_name_display']")).to_contain_text("Fabrikam")
+ logger.info("✓ Vendor name contains 'Fabrikam'")
+
+ logger.info("Search functionality validation completed successfully")
+
+ def validate_api_document_link(self):
+ """Validate API Documentation link opens and displays correct content."""
+ logger.info("Starting API Documentation link validation...")
+
+ # Store reference to original page
+ original_page = self.page
+ logger.info("Stored reference to original page/tab")
+
+ with self.page.context.expect_page() as new_page_info:
+ logger.info("Clicking on API Documentation link...")
+ self.page.locator(self.API_DOCUMENTATION).nth(0).click()
+ logger.info("✓ API Documentation link clicked")
+
+ new_page = new_page_info.value
+ new_page.wait_for_load_state()
+ logger.info("New tab/page opened successfully")
+
+ # Switch to new tab
+ logger.info("Switching to new tab...")
+ new_page.bring_to_front()
+ logger.info("✓ Switched to new tab")
+
+ logger.info("Validating title heading is visible...")
+ expect(new_page.locator("//h1[@class='title']")).to_be_visible()
+ logger.info("✓ Title heading is visible")
+
+ logger.info("Validating 'contentprocessor' text is visible...")
+ expect(new_page.locator("//span[normalize-space()='contentprocessor']")).to_be_visible()
+ logger.info("✓ 'contentprocessor' text is visible")
+
+ logger.info("Validating 'schemavault' text is visible...")
+ expect(new_page.locator("//span[normalize-space()='schemavault']")).to_be_visible()
+ logger.info("✓ 'schemavault' text is visible")
+
+ logger.info("Validating 'default' text is visible...")
+ expect(new_page.locator("//span[normalize-space()='default']")).to_be_visible()
+ logger.info("✓ 'default' text is visible")
+
+ logger.info("Validating 'Schemas' text is visible...")
+ expect(new_page.locator("//span[normalize-space()='Schemas']")).to_be_visible()
+ logger.info("✓ 'Schemas' text is visible")
+
+ logger.info("Closing API Documentation tab...")
+ new_page.close()
+ logger.info("✓ API Documentation tab closed")
+
+ # Switch back to original tab
+ logger.info("Switching back to original tab...")
+ original_page.bring_to_front()
+ logger.info("✓ Switched back to original tab")
+
+ logger.info("API Documentation link validation completed successfully")
+
+ def validate_collapsible_panels(self):
+ """Validate collapsible section functionality for each panel (Processing Queue, Output Review, Source Document)."""
+ logger.info("Starting collapsible panels validation...")
+
+ # Collapse Processing Queue panel
+ logger.info("Collapsing Processing Queue panel...")
+ self.page.locator(self.COLLAPSE_PANEL_BTN).nth(0).click()
+ logger.info("✓ Collapse button for Processing Queue clicked")
+
+ self.page.wait_for_timeout(2000)
+ logger.info("Waited 2 seconds after collapsing Processing Queue")
+
+ # Expand Processing Queue panel
+ logger.info("Expanding Processing Queue panel...")
+ self.page.locator(self.PROCESSING_QUEUE_BTN).click()
+ logger.info("✓ Processing Queue clicked to expand")
+
+ self.page.wait_for_timeout(2000)
+
+ # Collapse Output Review panel
+ logger.info("Collapsing Output Review panel...")
+ self.page.locator(self.COLLAPSE_PANEL_BTN).nth(1).click()
+ logger.info("✓ Collapse button for Output Review clicked")
+
+ self.page.wait_for_timeout(2000)
+ logger.info("Waited 2 seconds after collapsing Output Review")
+
+ # Expand Output Review panel
+ logger.info("Expanding Output Review panel...")
+ self.page.locator(self.OUTPUT_REVIEW_BTN).click()
+ logger.info("✓ Output Review clicked to expand")
+
+ self.page.wait_for_timeout(2000)
+
+ # Collapse Source Document panel
+ logger.info("Collapsing Source Document panel...")
+ self.page.locator(self.COLLAPSE_PANEL_BTN).nth(2).click()
+ logger.info("✓ Collapse button for Source Document clicked")
+
+ self.page.wait_for_timeout(2000)
+ logger.info("Waited 2 seconds after collapsing Source Document")
+
+ # Expand Source Document panel
+ logger.info("Expanding Source Document panel...")
+ self.page.locator(self.SOURCE_DOC_BTN).click()
+ logger.info("✓ Source Document clicked to expand")
+
+ self.page.wait_for_timeout(2000)
+
+ logger.info("Collapsible panels validation completed successfully")
diff --git a/tests/e2e-test/pages/loginPage.py b/tests/e2e-test/pages/loginPage.py
index 0b412556..490e8b4b 100644
--- a/tests/e2e-test/pages/loginPage.py
+++ b/tests/e2e-test/pages/loginPage.py
@@ -1,7 +1,12 @@
+"""
+Login page module for authentication functionality.
+"""
+
from base.base import BasePage
class LoginPage(BasePage):
+ """Login page object with authentication methods."""
EMAIL_TEXT_BOX = "//input[@type='email']"
NEXT_BUTTON = "//input[@type='submit']"
@@ -11,9 +16,23 @@ class LoginPage(BasePage):
PERMISSION_ACCEPT_BUTTON = "//input[@type='submit']"
def __init__(self, page):
+ """
+ Initialize the LoginPage.
+
+ Args:
+ page: Playwright page object
+ """
+ super().__init__(page)
self.page = page
def authenticate(self, username, password):
+ """
+ Authenticate user with username and password.
+
+ Args:
+ username: User email address
+ password: User password
+ """
# login with username and password in web url
self.page.locator(self.EMAIL_TEXT_BOX).fill(username)
self.page.locator(self.NEXT_BUTTON).click()
diff --git a/tests/e2e-test/pytest.ini b/tests/e2e-test/pytest.ini
index 05b7f91c..31a3bee1 100644
--- a/tests/e2e-test/pytest.ini
+++ b/tests/e2e-test/pytest.ini
@@ -5,3 +5,5 @@ log_file = logs/tests.log
log_file_level = INFO
addopts = -p no:warnings --tb=short
+markers =
+ gp: Golden Path tests
\ No newline at end of file
diff --git a/tests/e2e-test/tests/conftest.py b/tests/e2e-test/tests/conftest.py
index a6459bf2..4260ec09 100644
--- a/tests/e2e-test/tests/conftest.py
+++ b/tests/e2e-test/tests/conftest.py
@@ -6,7 +6,6 @@
import atexit
import logging
from pathlib import Path
-from venv import logger
import pytest
from bs4 import BeautifulSoup
@@ -17,6 +16,9 @@
# Global dictionary to store log streams for each test
LOG_STREAMS = {}
+# Get logger for this module
+logger = logging.getLogger(__name__)
+
@pytest.fixture(scope="session")
def login_logout():
@@ -42,6 +44,12 @@ def login_logout():
browser.close()
+@pytest.hookimpl(tryfirst=True)
+def pytest_html_report_title(report):
+ """Customize HTML report title."""
+ report.title = "Test Automation Content Processing"
+
+
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
"""
@@ -109,9 +117,10 @@ def rename_duration_column():
for th in headers:
if th.text.strip() == "Duration":
th.string = "Execution Time"
+ logger.info("Renamed 'Duration' column to 'Execution Time'")
break
else:
- print("'Duration' column not found in report.")
+ logger.info("'Duration' column not found in report.")
with report_path.open("w", encoding="utf-8") as file:
file.write(str(soup))
diff --git a/tests/e2e-test/tests/test_contentProcessing_gp_tc.py b/tests/e2e-test/tests/test_contentProcessing_gp_tc.py
index 7fe90c2c..7d30e731 100644
--- a/tests/e2e-test/tests/test_contentProcessing_gp_tc.py
+++ b/tests/e2e-test/tests/test_contentProcessing_gp_tc.py
@@ -1,72 +1,344 @@
+"""
+Test module for Content Processing Solution Accelerator end-to-end tests.
+"""
+
import logging
-import time
import pytest
from pages.HomePage import HomePage
logger = logging.getLogger(__name__)
-# Define step-wise test actions for Golden Path
-golden_path_steps = [
- ("Validate home page is loaded", lambda home: home.validate_home_page()),
- ("Select Invoice Schema", lambda home: home.select_schema("Invoice")),
- ("Upload Invoice documents", lambda home: home.upload_files("Invoice")),
- ("Refreshing the page until the 'Invoice' file status is updated to 'Completed'", lambda home: home.refresh()),
- (
- "Validate extracted result for Invoice",
- lambda home: home.validate_invoice_extracted_result(),
- ),
- (
- "Modify Extracted Data JSON & submit comments",
- lambda home: home.modify_and_submit_extracted_data(),
- ),
- ("Validate process steps for Invoice", lambda home: home.validate_process_steps()),
- (
- "Select Property Loss Damage Claim Form Schema",
- lambda home: home.select_schema("Property"),
- ),
- (
- "Upload Property Loss Damage Claim Form documents",
- lambda home: home.upload_files("Property"),
- ),
- ("Refreshing the page until the 'Claim Form' status is updated to 'Completed'", lambda home: home.refresh()),
- (
- "Validate extracted result for Property Loss Damage Claim Form",
- lambda home: home.validate_property_extracted_result(),
- ),
- (
- "Validate process steps for Property Loss Damage Claim Form",
- lambda home: home.validate_process_steps(),
- ),
- ("Validate user able to delete file", lambda home: home.delete_files()),
-]
-
-# Generate readable test step IDs
-golden_path_ids = [
- f"{i+1:02d}. {desc}" for i, (desc, _) in enumerate(golden_path_steps)
-]
-
-
-@pytest.mark.parametrize("description, action", golden_path_steps, ids=golden_path_ids)
-def test_content_processing_steps(login_logout, description, action, request):
- """
- Executes Golden Path content processing steps with individual log entries.
- """
- request.node._nodeid = description
+
+@pytest.mark.gp
+def test_content_processing_golden_path(login_logout, request):
+ """
+ Content Processing - Validate Golden path works as expected
+
+ Executes golden path test steps for Content Processing Solution Accelerator with detailed logging.
+ """
+ request.node._nodeid = "Content Processing - Validate Golden path works as expected"
+
page = login_logout
home = HomePage(page)
- logger.info(f"Running test step: {description}")
+ # Define step-wise test actions for Golden Path
+ golden_path_steps = [
+ ("01. Validate home page is loaded", lambda: home.validate_home_page()),
+ ("02. Select Invoice Schema", lambda: home.select_schema("Invoice")),
+ ("03. Upload Invoice documents", lambda: home.upload_files("Invoice")),
+ ("04. Refresh until Invoice file status is Completed", lambda: home.refresh()),
+ ("05. Validate extracted result for Invoice", lambda: home.validate_invoice_extracted_result()),
+ ("06. Modify Extracted Data JSON & submit comments", lambda: home.modify_and_submit_extracted_data()),
+ ("07. Validate process steps for Invoice", lambda: home.validate_process_steps()),
+ ("08. Select Property Loss Damage Claim Form Schema", lambda: home.select_schema("Property")),
+ ("09. Upload Property Loss Damage Claim Form documents", lambda: home.upload_files("Property")),
+ ("10. Refresh until Claim Form status is Completed", lambda: home.refresh()),
+ ("11. Validate extracted result for Property Loss Damage Claim Form", lambda: home.validate_property_extracted_result()),
+ ("12. Validate process steps for Property Loss Damage Claim Form", lambda: home.validate_process_steps()),
+ ("13. Validate user able to delete file", lambda: home.delete_files()),
+ ]
+
+ # Execute all steps sequentially
+ for description, action in golden_path_steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
- start_time = time.time()
+
+def test_content_processing_sections_display(login_logout, request):
+ """
+ Content Processing - All the sections need to be displayed properly
+
+ Validates that all main sections (Processing Queue, Output Review, Source Document)
+ are displayed correctly on the home page.
+ """
+ request.node._nodeid = "Content Processing - All the sections need to be displayed properly"
+
+ page = login_logout
+ home = HomePage(page)
+
+ logger.info("Running test: Validate all sections are displayed properly")
try:
- action(home)
- duration = time.time() - start_time
- message = "Step passed: %s (Duration: %.2f seconds)" % (description, duration)
- logger.info(message)
- request.node._report_sections.append(("call", "log", message))
-
- except Exception:
- duration = time.time() - start_time
- logger.error("Step failed: %s (Duration: %.2f seconds)", description, duration, exc_info=True)
+ home.validate_home_page()
+ logger.info("Test passed: All sections displayed properly")
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.error("Test failed: All sections display validation", exc_info=True)
raise
- request.node._report_sections.append(("call", "log", f"Step passed: {description}"))
+
+
+def test_content_processing_file_upload(login_logout, request):
+ """
+ Content Processing - Files need to be uploaded successfully
+
+ Validates that files can be uploaded successfully for both Invoice and Property schemas.
+ """
+ request.node._nodeid = "Content Processing - Files need to be uploaded successfully"
+
+ page = login_logout
+ home = HomePage(page)
+
+ # Define file upload test steps
+ upload_steps = [
+ ("01. Select Invoice Schema", lambda: home.select_schema("Invoice")),
+ ("02. Upload Invoice documents", lambda: home.upload_files("Invoice")),
+ ("03. Select Property Loss Damage Claim Form Schema", lambda: home.select_schema("Property")),
+ ("04. Upload Property Loss Damage Claim Form documents", lambda: home.upload_files("Property")),
+ ]
+
+ # Execute all upload steps sequentially
+ for description, action in upload_steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_refresh_screen(login_logout, request):
+ """
+ Content Processing - Refreshing the screen
+
+ Validates that screen refresh works properly after uploading files.
+ """
+ request.node._nodeid = "Content Processing - Refreshing the screen"
+
+ page = login_logout
+ home = HomePage(page)
+
+ # Define refresh test steps
+ refresh_steps = [
+ ("01. Select Invoice Schema", lambda: home.select_schema("Invoice")),
+ ("02. Upload Invoice documents", lambda: home.upload_files("Invoice")),
+ ("03. Refresh until file status is Completed", lambda: home.refresh()),
+ ]
+
+ # Execute all refresh steps sequentially
+ for description, action in refresh_steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
+def test_content_processing_schema_validation(login_logout, request):
+ """
+ Content Processing - Alert user to upload file correctly as per the selected schema
+
+ Validates that the system correctly displays the selected schema and alerts users to upload
+ files specific to the selected schema (Invoice and Property Loss Damage Claim Form).
+ """
+ request.node._nodeid = "Content Processing - Alert user to upload file correctly as per the selected schema"
+
+ page = login_logout
+ home = HomePage(page)
+
+ # Define schema validation test steps
+ schema_validation_steps = [
+ ("01. Validate home page is loaded", lambda: home.validate_home_page()),
+ ("02. Select Invoice Schema", lambda: home.select_schema("Invoice")),
+ ("03. Validate Invoice schema is selected correctly", lambda: home.validate_invoice_schema_selected()),
+ ("04. Close upload popup", lambda: home.close_upload_popup()),
+ ("05. Select Property Loss Damage Claim Form Schema", lambda: home.select_schema("Property")),
+ ("06. Validate Property schema is selected correctly", lambda: home.validate_property_schema_selected()),
+ ("07. Close upload popup", lambda: home.close_upload_popup()),
+ ("08. Refresh screen", lambda: home.refresh_page()),
+ ]
+
+ # Execute all schema validation steps sequentially
+ for description, action in schema_validation_steps:
+ logger.info(f"Running test step: {description}")
+ try:
+ action()
+ logger.info(f"Step passed: {description}")
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.error(f"Step failed: {description}", exc_info=True)
+ raise
+
+
def test_content_processing_import_without_schema(login_logout, request):
    """
    Content Processing - Once cleared Select Schema dropdown, import content shows validation

    Validates that when no schema is selected (or schema is cleared), clicking Import Content
    button displays appropriate validation message prompting user to select a schema first.
    """
    # Report the business scenario name instead of the function path.
    request.node._nodeid = "Content Processing - Once cleared Select Schema dropdown, import content shows validation"

    home = HomePage(login_logout)

    def run_step(label, step):
        # Log, execute, and re-raise so pytest sees the failing step.
        logger.info(f"Running test step: {label}")
        try:
            step()
            logger.info(f"Step passed: {label}")
        except Exception:  # pylint: disable=broad-exception-caught
            logger.error(f"Step failed: {label}", exc_info=True)
            raise

    run_step("01. Validate home page is loaded", home.validate_home_page)
    run_step("02. Validate import content without schema selection", home.validate_import_without_schema)
+
+
def test_content_processing_delete_file(login_logout, request):
    """
    Content Processing - Delete File

    Validates that uploaded files can be successfully deleted from the processing queue.
    Uploads a file first, then verifies the delete functionality works correctly.
    """
    # Report the business scenario name instead of the function path.
    request.node._nodeid = "Content Processing - Delete File"

    home = HomePage(login_logout)

    def run_step(label, step):
        # Log, execute, and re-raise so pytest sees the failing step.
        logger.info(f"Running test step: {label}")
        try:
            step()
            logger.info(f"Step passed: {label}")
        except Exception:  # pylint: disable=broad-exception-caught
            logger.error(f"Step failed: {label}", exc_info=True)
            raise

    run_step("01. Validate home page is loaded", home.validate_home_page)
    run_step("02. Delete uploaded file", home.delete_files)
+
+
def test_content_processing_search_functionality(login_logout, request):
    """
    Content Processing - Search box inside extracted results

    Validates that the search functionality works correctly in the extracted results section.
    Uploads an Invoice file, waits for processing to complete, and then validates search functionality.
    """
    # Report the business scenario name instead of the function path.
    request.node._nodeid = "Content Processing - Search box inside extracted results"

    home = HomePage(login_logout)

    def run_step(label, step):
        # Log, execute, and re-raise so pytest sees the failing step.
        logger.info(f"Running test step: {label}")
        try:
            step()
            logger.info(f"Step passed: {label}")
        except Exception:  # pylint: disable=broad-exception-caught
            logger.error(f"Step failed: {label}", exc_info=True)
            raise

    run_step("01. Validate home page is loaded", home.validate_home_page)
    run_step("02. Select Invoice Schema", lambda: home.select_schema("Invoice"))
    run_step("03. Upload Invoice documents", lambda: home.upload_files("Invoice"))
    run_step("04. Refresh until file status is Completed", home.refresh)
    run_step("05. Validate search functionality in extracted results", home.validate_search_functionality)
+
+
def test_content_processing_collapsible_panels(login_logout, request):
    """
    Content Processing - Collapsible section for each panel

    Validates that each panel (Processing Queue, Output Review, Source Document) can be
    collapsed and expanded correctly, ensuring the UI controls work as expected.
    """
    # Report the business scenario name instead of the function path.
    request.node._nodeid = "Content Processing - Collapsible section for each panel"

    home = HomePage(login_logout)

    def run_step(label, step):
        # Log, execute, and re-raise so pytest sees the failing step.
        logger.info(f"Running test step: {label}")
        try:
            step()
            logger.info(f"Step passed: {label}")
        except Exception:  # pylint: disable=broad-exception-caught
            logger.error(f"Step failed: {label}", exc_info=True)
            raise

    run_step("01. Validate home page is loaded", home.validate_home_page)
    run_step("02. Validate collapsible panels functionality", home.validate_collapsible_panels)
+
+
def test_content_processing_api_documentation(login_logout, request):
    """
    Content Processing - API Document

    Validates that the API Documentation link opens correctly in a new page and displays
    all required API documentation sections including contentprocessor, schemavault, and Schemas.
    """
    # Report the business scenario name instead of the function path.
    request.node._nodeid = "Content Processing - API Document"

    home = HomePage(login_logout)

    def run_step(label, step):
        # Log, execute, and re-raise so pytest sees the failing step.
        logger.info(f"Running test step: {label}")
        try:
            step()
            logger.info(f"Step passed: {label}")
        except Exception:  # pylint: disable=broad-exception-caught
            logger.error(f"Step failed: {label}", exc_info=True)
            raise

    run_step("01. Validate home page is loaded", home.validate_home_page)
    run_step("02. Validate API Documentation link and content", home.validate_api_document_link)
+
+
def test_content_processing_expandable_process_steps(login_logout, request):
    """
    Content Processing - Expandable section under each process

    Validates that each process step (extract, map, evaluate) can be expanded and collapsed correctly,
    and displays the expected content and status information.
    """
    # Report the business scenario name instead of the function path.
    request.node._nodeid = "Content Processing - Expandable section under each process"

    home = HomePage(login_logout)

    def run_step(label, step):
        # Log, execute, and re-raise so pytest sees the failing step.
        logger.info(f"Running test step: {label}")
        try:
            step()
            logger.info(f"Step passed: {label}")
        except Exception:  # pylint: disable=broad-exception-caught
            logger.error(f"Step failed: {label}", exc_info=True)
            raise

    run_step("01. Validate home page is loaded", home.validate_home_page)
    run_step("02. Select Invoice Schema", lambda: home.select_schema("Invoice"))
    run_step("03. Upload Invoice documents", lambda: home.upload_files("Invoice"))
    run_step("04. Refresh until file status is Completed", home.refresh)
    run_step("05. Validate expandable process steps functionality", home.validate_process_steps)
From d8e1cf0c2edfcd29b530bc9dff7d92b60239d6f3 Mon Sep 17 00:00:00 2001
From: Vamshi-Microsoft
Date: Mon, 17 Nov 2025 15:36:36 +0530
Subject: [PATCH 2/2] Changed Trigger type as per previous pipeline
---
.github/workflows/deploy-v2.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/deploy-v2.yml b/.github/workflows/deploy-v2.yml
index f4b38b1b..ef7266b5 100644
--- a/.github/workflows/deploy-v2.yml
+++ b/.github/workflows/deploy-v2.yml
@@ -1,6 +1,6 @@
name: Deploy-Test-Cleanup (v2)
on:
- pull_request:
+ push:
branches:
- main
workflow_dispatch: