diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 08e79aa0..48504204 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -2,243 +2,250 @@ name: Validate Deployment
on:
push:
branches:
- - main # Adjust this to the branch you want to trigger the deployment on
+ - main # Adjust this to the branch you want to trigger the deployment on
- dev
- demo
schedule:
- - cron: '0 10,22 * * *' # Runs at 10:00 AM and 10:00 PM GMT
-
+ - cron: "0 10,22 * * *" # Runs at 10:00 AM and 10:00 PM GMT
jobs:
deploy:
- runs-on: windows-latest # Use a Windows runner for PowerShell scripts
+ runs-on: windows-latest # Use a Windows runner for PowerShell scripts
steps:
- - name: Checkout Code
- uses: actions/checkout@v4 # Checks out your repository
- # Install Azure CLI
- - name: Install Azure CLI
- shell: pwsh
- run: |
+ - name: Checkout Code
+ uses: actions/checkout@v4 # Checks out your repository
+ # Install Azure CLI
+ - name: Install Azure CLI
+ shell: pwsh
+ run: |
Invoke-WebRequest -Uri https://aka.ms/installazurecliwindows -OutFile AzureCLI.msi
Start-Process msiexec.exe -ArgumentList '/I AzureCLI.msi /quiet' -Wait
- # Install kubectl (Windows method)
- - name: Install kubectl
- shell: pwsh
- run: |
- Invoke-WebRequest -Uri https://dl.k8s.io/release/v1.28.0/bin/windows/amd64/kubectl.exe -OutFile kubectl.exe
- Move-Item -Path ./kubectl.exe -Destination "C:\kubectl.exe"
- [Environment]::SetEnvironmentVariable('PATH', $env:PATH + ';C:\', [System.EnvironmentVariableTarget]::Machine)
-
+ # Install kubectl (Windows method)
+ - name: Install kubectl
+ shell: pwsh
+ run: |
+ Invoke-WebRequest -Uri https://dl.k8s.io/release/v1.28.0/bin/windows/amd64/kubectl.exe -OutFile kubectl.exe
+ Move-Item -Path ./kubectl.exe -Destination "C:\kubectl.exe"
+ [Environment]::SetEnvironmentVariable('PATH', $env:PATH + ';C:\', [System.EnvironmentVariableTarget]::Machine)
- # Install Helm (Windows method)
- - name: Install Helm
- shell: pwsh
- run: |
+ # Install Helm (Windows method)
+ - name: Install Helm
+ shell: pwsh
+ run: |
Invoke-WebRequest -Uri https://get.helm.sh/helm-v3.13.0-windows-amd64.zip -OutFile helm.zip
Expand-Archive helm.zip -DestinationPath helm
Move-Item -Path ./helm/windows-amd64/helm.exe -Destination "C:\helm.exe"
[Environment]::SetEnvironmentVariable('PATH', $env:PATH + ';C:\', [System.EnvironmentVariableTarget]::Machine)
-
- - name: Set Docker environment variables
- run: echo "DOCKER_BUILDKIT=0" >> $GITHUB_ENV
-
- # Set up Docker
- - name: Set up Docker
- uses: docker/setup-buildx-action@v3
- with:
- driver: docker
-
- - name: Setup PowerShell
- shell: pwsh
- run: |
- $PSVersionTable.PSVersion
-
- # Run Quota Check Script
- - name: Run Quota Check
- id: quota-check
- shell: pwsh
- run: |
- $ErrorActionPreference = "Stop" # Ensure that any error stops the pipeline
+ - name: Set Docker environment variables
+ run: echo "DOCKER_BUILDKIT=0" >> $GITHUB_ENV
+
+ # Set up Docker
+ - name: Set up Docker
+ uses: docker/setup-buildx-action@v3
+ with:
+ driver: docker
+
+ - name: Setup PowerShell
+ shell: pwsh
+ run: |
+ $PSVersionTable.PSVersion
+
+ # Run Quota Check Script
+ - name: Run Quota Check
+ id: quota-check
+ shell: pwsh
+ run: |
+ $ErrorActionPreference = "Stop" # Ensure that any error stops the pipeline
+
+ # Path to the PowerShell script for quota check
+ $quotaCheckScript = "Deployment/checkquota.ps1"
+
+            # Check that the script file exists (PowerShell scripts need no executable bit, so there is no chmod step)
+ if (-not (Test-Path $quotaCheckScript)) {
+ Write-Host "❌ Error: Quota check script not found."
+ exit 1
+ }
+
+ # Run the script
+ .\Deployment\checkquota.ps1
+
+            # Look for the quota-failure message (NOTE(review): Get-Content below reads the script SOURCE, not its output — verify this detection actually works)
+ $quotaFailedMessage = "No region with sufficient quota found"
+ $output = Get-Content "Deployment/checkquota.ps1"
+
+ if ($output -contains $quotaFailedMessage) {
+ echo "QUOTA_FAILED=true" >> $GITHUB_ENV
+ }
+ env:
+ AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+ AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
+ AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
+ AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
+ GPT_MIN_CAPACITY: "10"
+ TEXT_EMBEDDING_MIN_CAPACITY: "10"
+ AZURE_REGIONS: "${{ vars.AZURE_REGIONS }}"
+
+ # Send Notification on Quota Failure
+ - name: Send Notification on Quota Failure
+ if: env.QUOTA_FAILED == 'true'
+ shell: pwsh
+ run: |
+ $RUN_URL = "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+
+ # Construct the email body
+ $EMAIL_BODY = @"
+ {
+ "body": "
Dear Team,
The quota check has failed, and the pipeline cannot proceed.
Build URL: $RUN_URL
Please take necessary action.
Best regards,
Your Automation Team
"
+ }
+ "@
+
+ # Send the notification
+ try {
+ $response = Invoke-RestMethod -Uri "${{ secrets.LOGIC_APP_URL }}" -Method Post -ContentType "application/json" -Body $EMAIL_BODY
+ Write-Host "Notification sent successfully."
+ } catch {
+ Write-Host "❌ Failed to send notification."
+ }
+
+ - name: Fail Pipeline if Quota Check Fails
+ if: env.QUOTA_FAILED == 'true'
+ run: exit 1
- # Path to the PowerShell script for quota check
- $quotaCheckScript = "Deployment/checkquota.ps1"
-
- # Check if the script exists and is executable (not needed for PowerShell like chmod)
- if (-not (Test-Path $quotaCheckScript)) {
- Write-Host "❌ Error: Quota check script not found."
- exit 1
- }
-
- # Run the script
- .\Deployment\checkquota.ps1
-
- # If the script fails, check for the failure message
- $quotaFailedMessage = "No region with sufficient quota found"
- $output = Get-Content "Deployment/checkquota.ps1"
-
- if ($output -contains $quotaFailedMessage) {
- echo "QUOTA_FAILED=true" >> $GITHUB_ENV
- }
- env:
- AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
- AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
- AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
- GPT_MIN_CAPACITY: '10'
- TEXT_EMBEDDING_MIN_CAPACITY: '10'
- AZURE_REGIONS: "${{ vars.AZURE_REGIONS }}"
-
-
- # Send Notification on Quota Failure
- - name: Send Notification on Quota Failure
- if: env.QUOTA_FAILED == 'true'
- shell: pwsh
- run: |
- $RUN_URL = "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
-
- # Construct the email body
- $EMAIL_BODY = @"
- {
- "body": "Dear Team,
The quota check has failed, and the pipeline cannot proceed.
Build URL: $RUN_URL
Please take necessary action.
Best regards,
Your Automation Team
"
- }
- "@
-
- # Send the notification
- try {
- $response = Invoke-RestMethod -Uri "${{ secrets.LOGIC_APP_URL }}" -Method Post -ContentType "application/json" -Body $EMAIL_BODY
- Write-Host "Notification sent successfully."
- } catch {
- Write-Host "❌ Failed to send notification."
- }
-
- - name: Fail Pipeline if Quota Check Fails
- if: env.QUOTA_FAILED == 'true'
- run: exit 1
-
- - name: Run Deployment Script with Input
- shell: pwsh
- run: |
- cd Deployment
- $input = @"
- ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- CanadaCentral
- ${{ env.VALID_REGION }}
- ${{ secrets.EMAIL }}
- yes
- "@
- $input | pwsh ./resourcedeployment.ps1
- echo "Resource Group Name is ${{ env.rg_name }}"
- echo "Kubernetes resource group are ${{ env.krg_name }}"
- env:
- AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
- AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
- AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
- - name: Cleanup Resource Group
- if: always() # Ensures this step runs even if the deployment fails
- shell: pwsh
- run: |
+ - name: Generate Environment Name
+ id: generate_environment_name
+ shell: bash
+ run: |
+ set -e
+ TIMESTAMP_SHORT=$(date +%s | tail -c 5) # Last 4-5 digits of epoch seconds
+ RANDOM_SUFFIX=$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 8) # 8 random alphanum chars
+ UNIQUE_ENV_NAME="${TIMESTAMP_SHORT}${RANDOM_SUFFIX}" # Usually ~12-13 chars
+ echo "ENVIRONMENT_NAME=${UNIQUE_ENV_NAME}" >> $GITHUB_ENV
+ echo "Generated ENVIRONMENT_NAME: ${UNIQUE_ENV_NAME}"
+
+ - name: Run Deployment Script with Input
+ shell: pwsh
+ run: |
+ cd Deployment
+ $input = @"
+ ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+ ${{ env.ENVIRONMENT_NAME }}
+
+ CanadaCentral
+ ${{ env.VALID_REGION }}
+ ${{ secrets.EMAIL }}
+ yes
+ "@
+ $input | pwsh ./resourcedeployment.ps1
+ echo "Resource Group Name is ${{ env.rg_name }}"
+ echo "Kubernetes resource group are ${{ env.krg_name }}"
+ env:
+ AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+ AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
+ AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
+ AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
+ - name: Cleanup Resource Group
+ if: always() # Ensures this step runs even if the deployment fails
+ shell: pwsh
+ run: |
az login --service-principal --username ${{ secrets.AZURE_CLIENT_ID }} --password ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
az group delete --name ${{ env.rg_name }} --yes --no-wait
az group delete --name ${{ env.krg_name }} --yes --no-wait
- env:
+ env:
AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
- - name: Wait for Resource Deletion to Complete
- shell: pwsh
- run: |
- $retries = 0
- $maxRetries = 3
- $sleepIntervals = @(700, 200, 200)
-
- while ($retries -lt $maxRetries) {
- $rgStatus = az group exists --name ${{ env.rg_name }}
- $krgStatus = az group exists --name ${{ env.krg_name }}
-
-
- # if (-not $rgStatus -and -not $krgStatus) {
- # Write-Host "Both resource groups deleted successfully."
- # break
- # }
- if ($rgStatus -eq "false" -and $krgStatus -eq "false") {
- Write-Host "Both resource groups deleted successfully."
- break
+ - name: Wait for Resource Deletion to Complete
+ shell: pwsh
+ run: |
+ $retries = 0
+ $maxRetries = 3
+ $sleepIntervals = @(700, 200, 200)
+
+ while ($retries -lt $maxRetries) {
+ $rgStatus = az group exists --name ${{ env.rg_name }}
+ $krgStatus = az group exists --name ${{ env.krg_name }}
+
+
+ # if (-not $rgStatus -and -not $krgStatus) {
+ # Write-Host "Both resource groups deleted successfully."
+ # break
+ # }
+ if ($rgStatus -eq "false" -and $krgStatus -eq "false") {
+ Write-Host "Both resource groups deleted successfully."
+ break
+ }
+
+ $retries++
+ if ($retries -eq $maxRetries) {
+ Write-Host "Resource groups deletion not confirmed after $maxRetries attempts. Exiting."
+ exit 1
+ }
+
+ Write-Host "Resource groups still exist. Retrying in $($sleepIntervals[$retries - 1]) seconds..."
+ Start-Sleep -Seconds $sleepIntervals[$retries - 1]
+ }
+
+ - name: Purging the Resources
+ if: success()
+ shell: pwsh
+ run: |
+ # Set variables using GitHub Actions environment values
+ $solutionPrefix = "${{ env.SOLUTION_PREFIX }}"
+ $subscriptionId = "${{ secrets.AZURE_SUBSCRIPTION_ID }}"
+ $resourceGroupName = "${{ env.rg_name }}"
+
+ $openai_name = "openaiservice-$solutionPrefix"
+ $cognitiveservice_name = "cognitiveservice-$solutionPrefix"
+
+ # Debug: Print resource names
+ Write-Host "Purging OpenAI resource: $openai_name"
+ Write-Host "Purging CognitiveService Account: $cognitiveservice_name"
+
+ # Construct resource IDs
+ $openaiResourceId = "/subscriptions/$subscriptionId/providers/Microsoft.CognitiveServices/locations/${{ env.VALID_REGION }}/resourceGroups/$resourceGroupName/deletedAccounts/$openai_name"
+ $cognitiveResourceId = "/subscriptions/$subscriptionId/providers/Microsoft.CognitiveServices/locations/${{ env.VALID_REGION }}/resourceGroups/$resourceGroupName/deletedAccounts/$cognitiveservice_name"
+
+ # Debug: Print constructed resource IDs
+ Write-Host "Command to purge OpenAI resource: az resource delete --ids `"$openaiResourceId`" --verbose"
+ Write-Host "Command to purge CognitiveService Account: az resource delete --ids `"$cognitiveResourceId`" --verbose"
+ # Purge OpenAI Resource
+ az resource delete --ids $openaiResourceId --verbose
+ if (-not $?) {
+ Write-Host "Failed to purge OpenAI resource: $openaiResourceId"
}
-
- $retries++
- if ($retries -eq $maxRetries) {
- Write-Host "Resource groups deletion not confirmed after $maxRetries attempts. Exiting."
- exit 1
+
+ # Purge CognitiveService Account
+
+
+ az resource delete --ids $cognitiveResourceId --verbose
+ if (-not $?) {
+ Write-Host "Failed to purge CognitiveService Account."
}
-
- Write-Host "Resource groups still exist. Retrying in $($sleepIntervals[$retries - 1]) seconds..."
- Start-Sleep -Seconds $sleepIntervals[$retries - 1]
- }
-
- - name: Purging the Resources
- if: success()
- shell: pwsh
- run: |
- # Set variables using GitHub Actions environment values
- $solutionPrefix = "${{ env.SOLUTION_PREFIX }}"
- $subscriptionId = "${{ secrets.AZURE_SUBSCRIPTION_ID }}"
- $resourceGroupName = "${{ env.rg_name }}"
-
- $openai_name = "openaiservice-$solutionPrefix"
- $cognitiveservice_name = "cognitiveservice-$solutionPrefix"
-
- # Debug: Print resource names
- Write-Host "Purging OpenAI resource: $openai_name"
- Write-Host "Purging CognitiveService Account: $cognitiveservice_name"
-
- # Construct resource IDs
- $openaiResourceId = "/subscriptions/$subscriptionId/providers/Microsoft.CognitiveServices/locations/${{ env.VALID_REGION }}/resourceGroups/$resourceGroupName/deletedAccounts/$openai_name"
- $cognitiveResourceId = "/subscriptions/$subscriptionId/providers/Microsoft.CognitiveServices/locations/${{ env.VALID_REGION }}/resourceGroups/$resourceGroupName/deletedAccounts/$cognitiveservice_name"
-
- # Debug: Print constructed resource IDs
- Write-Host "Command to purge OpenAI resource: az resource delete --ids `"$openaiResourceId`" --verbose"
- Write-Host "Command to purge CognitiveService Account: az resource delete --ids `"$cognitiveResourceId`" --verbose"
- # Purge OpenAI Resource
- az resource delete --ids $openaiResourceId --verbose
- if (-not $?) {
- Write-Host "Failed to purge OpenAI resource: $openaiResourceId"
- }
-
- # Purge CognitiveService Account
-
-
- az resource delete --ids $cognitiveResourceId --verbose
- if (-not $?) {
- Write-Host "Failed to purge CognitiveService Account."
- }
-
-
- - name: Send Notification on Failure
- if: failure()
- shell: pwsh
- run: |
- # Define the RUN_URL variable
- $RUN_URL = "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
-
- # Construct the email body using a Here-String
- $EMAIL_BODY = @"
- {
- "body": "Dear Team,
The Document Knowledge Mining Automation process encountered an issue.
Build URL: $RUN_URL
Please investigate promptly.
Best regards,
Your Automation Team
"
- }
- "@
-
- # Send the notification with error handling
- try {
- curl -X POST "${{ secrets.LOGIC_APP_URL }}" `
- -H "Content-Type: application/json" `
- -d "$EMAIL_BODY"
- } catch {
- Write-Output "Failed to send notification."
- }
-
+
+ - name: Send Notification on Failure
+ if: failure()
+ shell: pwsh
+ run: |
+ # Define the RUN_URL variable
+ $RUN_URL = "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+
+ # Construct the email body using a Here-String
+ $EMAIL_BODY = @"
+ {
+ "body": "Dear Team,
The Document Knowledge Mining Automation process encountered an issue.
Build URL: $RUN_URL
Please investigate promptly.
Best regards,
Your Automation Team
"
+ }
+ "@
+
+ # Send the notification with error handling
+ try {
+ curl -X POST "${{ secrets.LOGIC_APP_URL }}" `
+ -H "Content-Type: application/json" `
+ -d "$EMAIL_BODY"
+ } catch {
+ Write-Output "Failed to send notification."
+ }
diff --git a/.github/workflows/test-automation.yml b/.github/workflows/test-automation.yml
new file mode 100644
index 00000000..5383d57b
--- /dev/null
+++ b/.github/workflows/test-automation.yml
@@ -0,0 +1,130 @@
+name: Test Automation DKM
+
+on:
+ push:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'tests/e2e-test/**'
+ schedule:
+ - cron: '0 13 * * *' # Runs at 1 PM UTC
+ workflow_dispatch:
+
+env:
+ url: ${{ vars.DKM_URL }}
+ accelerator_name: "DKM"
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.13'
+
+ - name: Azure CLI Login
+ uses: azure/login@v2
+ with:
+ creds: '{"clientId":"${{ secrets.AZURE_CLIENT_ID }}","clientSecret":"${{ secrets.AZURE_CLIENT_SECRET }}","subscriptionId":"${{ secrets.AZURE_SUBSCRIPTION_ID }}","tenantId":"${{ secrets.AZURE_TENANT_ID }}"}'
+
+ - name: Start AKS
+ id: start-aks
+ uses: azure/cli@v2
+ with:
+ azcliversion: 'latest'
+ inlineScript: |
+ az aks install-cli
+ if [ "$(az aks show --resource-group ${{ vars.DKM_RG }} --name ${{ vars.DKM_AKS_NAME }} --query "powerState.code" -o tsv)" = "Running" ]; then echo "AKS is running"; else az aks start --resource-group ${{ vars.DKM_RG }} --name ${{ vars.DKM_AKS_NAME }}; fi
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r tests/e2e-test/requirements.txt
+
+ - name: Ensure browsers are installed
+ run: python -m playwright install --with-deps chromium
+
+ - name: Run tests(1)
+ id: test1
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: tests/e2e-test
+ continue-on-error: true
+
+ - name: Sleep for 30 seconds
+ if: ${{ steps.test1.outcome == 'failure' }}
+ run: sleep 30s
+ shell: bash
+
+ - name: Run tests(2)
+ id: test2
+ if: ${{ steps.test1.outcome == 'failure' }}
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: tests/e2e-test
+ continue-on-error: true
+
+ - name: Sleep for 60 seconds
+ if: ${{ steps.test2.outcome == 'failure' }}
+ run: sleep 60s
+ shell: bash
+
+ - name: Run tests(3)
+ id: test3
+ if: ${{ steps.test2.outcome == 'failure' }}
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: tests/e2e-test
+
+ - name: Upload test report
+ id: upload_report
+ uses: actions/upload-artifact@v4
+ if: ${{ !cancelled() }}
+ with:
+ name: test-report
+ path: tests/e2e-test/report/*
+
+ - name: Send Notification
+ if: always()
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+ REPORT_URL=${{ steps.upload_report.outputs.artifact-url }}
+ IS_SUCCESS=${{ steps.test1.outcome == 'success' || steps.test2.outcome == 'success' || steps.test3.outcome == 'success' }}
+ # Construct the email body
+ if [ "$IS_SUCCESS" = "true" ]; then
+ EMAIL_BODY=$(cat <Dear Team,We would like to inform you that the ${{ env.accelerator_name }} Test Automation process has completed successfully.
Run URL: ${RUN_URL}
Test Report: ${REPORT_URL}
Best regards,
Your Automation Team
",
+ "subject": "${{ env.accelerator_name }} Test Automation - Success"
+ }
+ EOF
+ )
+ else
+ EMAIL_BODY=$(cat <Dear Team,We would like to inform you that the ${{ env.accelerator_name }} Test Automation process has encountered an issue and has failed to complete successfully.
Run URL: ${RUN_URL}
Test Report: ${REPORT_URL}
Please investigate the matter at your earliest convenience.
Best regards,
Your Automation Team
",
+ "subject": "${{ env.accelerator_name }} Test Automation - Failure"
+ }
+ EOF
+ )
+ fi
+
+ # Send the notification
+ curl -X POST "${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA }}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send notification"
+
+ - name: Stop AKS
+ if: always()
+ uses: azure/cli@v2
+ with:
+ azcliversion: 'latest'
+ inlineScript: |
+ az aks install-cli
+ if [ "$(az aks show --resource-group ${{ vars.DKM_RG }} --name ${{ vars.DKM_AKS_NAME }} --query "powerState.code" -o tsv)" = "Running" ]; then az aks stop --resource-group ${{ vars.DKM_RG }} --name ${{ vars.DKM_AKS_NAME }}; else echo "AKS is already stopped"; fi
+ az logout
\ No newline at end of file
diff --git a/Deployment/main.bicep b/Deployment/main.bicep
index 30e04d4a..a7edfdb6 100644
--- a/Deployment/main.bicep
+++ b/Deployment/main.bicep
@@ -1,36 +1,46 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
+
+targetScope = 'resourceGroup'
+
+@minLength(3)
+@maxLength(20)
+@description('A unique prefix for all resources in this deployment. This should be 3-20 characters long:')
+param environmentName string
+
@description('The Data Center where the model is deployed.')
param modeldatacenter string
-var abbrs = loadJsonContent('./abbreviations.json')
-targetScope = 'subscription'
-var resourceprefix = padLeft(take(uniqueString(deployment().name), 5), 5, '0')
+@description('Azure data center region where resources will be deployed. This should be a valid Azure region, e.g., eastus, westus, etc.')
+param location string
+
+var uniqueId = toLower(uniqueString(subscription().id, environmentName, location))
+var resourceprefix = padLeft(take(uniqueId, 10), 10, '0')
var resourceprefix_name = 'kmgs'
-// Create a resource group
-resource gs_resourcegroup 'Microsoft.Resources/resourceGroups@2021-04-01' = {
- name: '${abbrs.managementGovernance.resourceGroup}${resourceprefix_name}${resourceprefix}'
- location: deployment().location
-}
+var resourceGroupLocation = resourceGroup().location
+
+// Load the abbreviations file required to name the Azure resources.
+var abbrs = loadJsonContent('./abbreviations.json')
+
// Create a storage account
module gs_storageaccount 'bicep/azurestorageaccount.bicep' = {
name: '${abbrs.storage.storageAccount}${resourceprefix_name}${resourceprefix}'
- scope: gs_resourcegroup
+ scope: resourceGroup()
params: {
storageAccountName: '${abbrs.storage.storageAccount}${resourceprefix}'
- location: deployment().location
+ location: resourceGroupLocation
}
}
// Create a Azure Search Service
module gs_azsearch 'bicep/azuresearch.bicep' = {
name: '${abbrs.ai.aiSearch}${resourceprefix_name}${resourceprefix}'
- scope: gs_resourcegroup
+ scope: resourceGroup()
params: {
searchServiceName: '${abbrs.ai.aiSearch}${resourceprefix}'
- location: deployment().location
+ location: resourceGroupLocation
}
}
@@ -38,20 +48,20 @@ module gs_azsearch 'bicep/azuresearch.bicep' = {
// Create Container Registry
module gs_containerregistry 'bicep/azurecontainerregistry.bicep' = {
name: '${abbrs.containers.containerRegistry}${resourceprefix_name}${resourceprefix}'
- scope: gs_resourcegroup
+ scope: resourceGroup()
params: {
acrName: '${abbrs.containers.containerRegistry}${resourceprefix_name}${resourceprefix}'
- location: deployment().location
+ location: resourceGroupLocation
}
}
// Create AKS Cluster
module gs_aks 'bicep/azurekubernetesservice.bicep' = {
name: '${abbrs.compute.arcEnabledKubernetesCluster}${resourceprefix_name}${resourceprefix}'
- scope: gs_resourcegroup
+ scope: resourceGroup()
params: {
aksName: '${abbrs.compute.arcEnabledKubernetesCluster}${resourceprefix_name}${resourceprefix}'
- location: deployment().location
+ location: resourceGroupLocation
}
dependsOn: [
gs_containerregistry
@@ -76,7 +86,7 @@ module gs_aks 'bicep/azurekubernetesservice.bicep' = {
// Create Azure Cognitive Service
module gs_azcognitiveservice 'bicep/azurecognitiveservice.bicep' = {
name: '${abbrs.ai.documentIntelligence}${resourceprefix_name}${resourceprefix}'
- scope: gs_resourcegroup
+ scope: resourceGroup()
params: {
cognitiveServiceName: '${abbrs.ai.documentIntelligence}${resourceprefix_name}${resourceprefix}'
location: 'eastus'
@@ -86,7 +96,7 @@ module gs_azcognitiveservice 'bicep/azurecognitiveservice.bicep' = {
// Create Azure Open AI Service
module gs_openaiservice 'bicep/azureopenaiservice.bicep' = {
name: '${abbrs.ai.openAIService}${resourceprefix_name}${resourceprefix}'
- scope: gs_resourcegroup
+ scope: resourceGroup()
params: {
openAIServiceName: '${abbrs.ai.openAIService}${resourceprefix_name}${resourceprefix}'
// GPT-4-32K model & GPT-4o available Data center information.
@@ -99,7 +109,7 @@ module gs_openaiservice 'bicep/azureopenaiservice.bicep' = {
// Set the minimum capacity of each model
// Based on customer's Model capacity, it needs to be updated in Azure Portal.
module gs_openaiservicemodels_gpt4o 'bicep/azureopenaiservicemodel.bicep' = {
- scope: gs_resourcegroup
+ scope: resourceGroup()
name: 'gpt-4o-mini'
params: {
parentResourceName: gs_openaiservice.outputs.openAIServiceName
@@ -119,7 +129,7 @@ module gs_openaiservicemodels_gpt4o 'bicep/azureopenaiservicemodel.bicep' = {
}
module gs_openaiservicemodels_text_embedding 'bicep/azureopenaiservicemodel.bicep' = {
- scope: gs_resourcegroup
+ scope: resourceGroup()
name: 'text-embedding-large'
params: {
parentResourceName: gs_openaiservice.outputs.openAIServiceName
@@ -140,25 +150,26 @@ module gs_openaiservicemodels_text_embedding 'bicep/azureopenaiservicemodel.bice
// Create Azure Cosmos DB Mongo
module gs_cosmosdb 'bicep/azurecosmosdb.bicep' = {
name: '${abbrs.databases.cosmosDBDatabase}${resourceprefix_name}${resourceprefix}'
- scope: gs_resourcegroup
+ scope: resourceGroup()
params: {
cosmosDbAccountName: '${abbrs.databases.cosmosDBDatabase}${resourceprefix_name}${resourceprefix}'
- location: deployment().location
+ location: resourceGroupLocation
}
}
// Create Azure App Configuration
module gs_appconfig 'bicep/azureappconfigservice.bicep' = {
name: 'appconfig-${resourceprefix_name}${resourceprefix}'
- scope: gs_resourcegroup
+ scope: resourceGroup()
params: {
appConfigName: 'appconfig-${resourceprefix_name}${resourceprefix}'
- location: deployment().location
+ location: resourceGroupLocation
}
}
// return all resource names as a output
-output gs_resourcegroup_name string = '${abbrs.managementGovernance.resourceGroup}${resourceprefix_name}${resourceprefix}'
+// output gs_resourcegroup_name string = '${abbrs.managementGovernance.resourceGroup}${resourceprefix_name}${resourceprefix}'
+output gs_resourcegroup_name string = resourceGroup().name
output gs_solution_prefix string = '${resourceprefix_name}${resourceprefix}'
output gs_storageaccount_name string = gs_storageaccount.outputs.storageAccountName
output gs_azsearch_name string = gs_azsearch.outputs.searchServiceName
@@ -189,5 +200,5 @@ output gs_appconfig_endpoint string = gs_appconfig.outputs.appConfigEndpoint
output gs_containerregistry_endpoint string = gs_containerregistry.outputs.acrEndpoint
//return resourcegroup resource ID
-output gs_resourcegroup_id string = gs_resourcegroup.id
+output gs_resourcegroup_id string = resourceGroup().id
diff --git a/Deployment/resourcePrefix.bicep b/Deployment/resourcePrefix.bicep
new file mode 100644
index 00000000..62fca590
--- /dev/null
+++ b/Deployment/resourcePrefix.bicep
@@ -0,0 +1,9 @@
+targetScope = 'subscription'
+
+param environmentName string
+param location string
+
+var uniqueId = toLower(uniqueString(subscription().id, environmentName, location))
+var resourceprefix = padLeft(take(uniqueId, 10), 10, '0')
+
+output resourcePrefix string = resourceprefix
diff --git a/Deployment/resourcedeployment.ps1 b/Deployment/resourcedeployment.ps1
index 9c906047..577e09ae 100644
--- a/Deployment/resourcedeployment.ps1
+++ b/Deployment/resourcedeployment.ps1
@@ -74,6 +74,8 @@ function ValidateVariableIsNullOrEmpty {
function PromptForParameters {
param(
[string]$subscriptionID,
+ [string]$environmentName,
+ [string]$resourceGroupName,
[string]$location,
[string]$modelLocation,
[string]$email
@@ -105,6 +107,16 @@ function PromptForParameters {
$subscriptionID = Read-Host -Prompt '> '
}
+ if (-not $environmentName) {
+ Write-Host "Please enter Environment name" -ForegroundColor Cyan
+ $environmentName = Read-Host -Prompt '> '
+ }
+
+ if (-not $resourceGroupName) {
+ Write-Host "Please enter your Azure Resource Group Name to deploy your resources (leave blank to auto-generate one)" -ForegroundColor Cyan
+ $resourceGroupName = Read-Host -Prompt '> '
+ }
+
if (-not $location) {
Write-Host "Please enter the Azure Data Center Region to deploy your resources" -ForegroundColor Cyan
Write-Host "Available regions are:" -ForegroundColor Cyan
@@ -125,37 +137,41 @@ function PromptForParameters {
}
return @{
- subscriptionID = $subscriptionID
- location = $location
- modelLocation = $modelLocation
- email = $email
+ subscriptionID = $subscriptionID
+ environmentName = $environmentName
+ resourceGroupName = $resourceGroupName
+ location = $location
+ modelLocation = $modelLocation
+ email = $email
}
}
# Prompt for parameters with kind messages
-$params = PromptForParameters -subscriptionID $subscriptionID -location $location -modelLocation $modelLocation -email $email
+$params = PromptForParameters -subscriptionID $subscriptionID -environmentName $environmentName -resourceGroupName $resourceGroupName -location $location -modelLocation $modelLocation -email $email
# Assign the parameters to variables
$subscriptionID = $params.subscriptionID
+$environmentName = $params.environmentName
+$resourceGroupName = $params.resourceGroupName
$location = $params.location
$modelLocation = $params.modelLocation
$email = $params.email
function LoginAzure([string]$subscriptionID) {
- Write-Host "Log in to Azure.....`r`n" -ForegroundColor Yellow
+ Write-Host "Log in to Azure.....`r`n" -ForegroundColor Yellow
if ($env:CI -eq "true"){
az login --service-principal `
- --username $env:AZURE_CLIENT_ID `
- --password $env:AZURE_CLIENT_SECRET `
- --tenant $env:AZURE_TENANT_ID
+ --username $env:AZURE_CLIENT_ID `
+ --password $env:AZURE_CLIENT_SECRET `
+ --tenant $env:AZURE_TENANT_ID
write-host "CI deployment mode"
- }
+ }
else{
- az login
+ az login
write-host "manual deployment mode"
- }
- az account set --subscription $subscriptionID
- Write-Host "Switched subscription to '$subscriptionID' `r`n" -ForegroundColor Yellow
+ }
+ az account set --subscription $subscriptionID
+ Write-Host "Switched subscription to '$subscriptionID' `r`n" -ForegroundColor Yellow
}
function DeployAzureResources([string]$location, [string]$modelLocation) {
@@ -169,9 +185,53 @@ function DeployAzureResources([string]$location, [string]$modelLocation) {
# Make deployment name unique by appending random number
$deploymentName = "KM_SA_Deployment$randomNumberPadded"
+
+ if (-not $resourceGroupName) {
+ # Generate a new RG name using your existing logic
+
+ # Load abbreviation from abbreviations.json (optional)
+ $abbrs = Get-Content -Raw -Path "./abbreviations.json" | ConvertFrom-Json
+ if (-not $abbrs -or -not $abbrs.managementGovernance.resourceGroup) {
+ Write-Host "abbreviations.json is missing or malformed."
+ failureBanner
+ exit 1
+ }
+ $rgPrefix = $abbrs.managementGovernance.resourceGroup # e.g., "rg-"
+
+ # Constants
+ $resourceprefix_name = "kmgs"
+
+ # Call Bicep to generate resourcePrefix
+ $resourcePrefix = az deployment sub create `
+ --location $location `
+ --name $deploymentName `
+ --template-file ./resourcePrefix.bicep `
+ --parameters environmentName=$environmentName location=$location `
+ --query "properties.outputs.resourcePrefix.value" `
+ -o tsv
+
+ # Final Resource Group Name
+ $resourceGroupName = "$rgPrefix$resourceprefix_name$resourcePrefix"
+
+ Write-Host "Generated Resource Group Name: $resourceGroupName"
+
+ Write-Host "No RG provided. Creating new RG: $resourceGroupName" -ForegroundColor Yellow
+ az group create --name $resourceGroupName --location $location --tags EnvironmentName=$environmentName | Out-Null
+ }
+ else {
+ $exists = az group exists --name $resourceGroupName | ConvertFrom-Json
+ if (-not $exists) {
+ Write-Host "Specified RG does not exist. Creating RG: $resourceGroupName" -ForegroundColor Yellow
+ az group create --name $resourceGroupName --location $location --tags EnvironmentName=$environmentName | Out-Null
+ }
+ else {
+ Write-Host "Using existing RG: $resourceGroupName" -ForegroundColor Green
+ }
+ }
+
# Perform a what-if deployment to preview changes
Write-Host "Evaluating Deployment resource availabilities to preview changes..." -ForegroundColor Yellow
- $whatIfResult = az deployment sub what-if --template-file .\main.bicep --location $location --name $deploymentName --parameters modeldatacenter=$modelLocation
+ $whatIfResult = az deployment group what-if --resource-group $resourceGroupName --template-file .\main.bicep --name $deploymentName --parameters modeldatacenter=$modelLocation location=$location environmentName=$environmentName
if ($LASTEXITCODE -ne 0) {
Write-Host "There might be something wrong with your deployment." -ForegroundColor Red
@@ -181,7 +241,8 @@ function DeployAzureResources([string]$location, [string]$modelLocation) {
}
# Proceed with the actual deployment
Write-Host "Proceeding with Deployment..." -ForegroundColor Yellow
- $deploymentResult = az deployment sub create --template-file .\main.bicep --location $location --name $deploymentName --parameters modeldatacenter=$modelLocation
+ Write-Host "Resource Group Name: $resourceGroupName" -ForegroundColor Yellow
+ $deploymentResult = az deployment group create --resource-group $resourceGroupName --template-file .\main.bicep --name $deploymentName --parameters modeldatacenter=$modelLocation location=$location environmentName=$environmentName
# Check if deploymentResult is valid
ValidateVariableIsNullOrEmpty -variableValue $deploymentResult -variableName "Deployment Result"
if ($LASTEXITCODE -ne 0) {
@@ -391,7 +452,7 @@ class DeploymentResult {
# Azure App Configuration
$this.AzAppConfigEndpoint = $jsonString.properties.outputs.gs_appconfig_endpoint.value
# App Config Name
- $this.AzAppConfigName = "appconfig" + $this.ResourceGroupName
+ $this.AzAppConfigName = "appconfig-" + $jsonString.properties.outputs.gs_solution_prefix.value
}
}
@@ -438,6 +499,9 @@ try {
Write-Host "Deploying Azure resources in $location region.....`r`n" -ForegroundColor Yellow
$resultJson = DeployAzureResources -location $location -modelLocation $modelLocation
+
+ # Ensure ResourceGroupName is set correctly
+ $deploymentResult.ResourceGroupName = $resourceGroupName
# Map the deployment result to DeploymentResult object
$deploymentResult.MapResult($resultJson)
# Display the deployment result
@@ -530,7 +594,7 @@ try {
'{gpt-4o-modelname}' = $deploymentResult.AzGPT4oModelName
'{azureopenaiembedding-deployment}' = $deploymentResult.AzGPTEmbeddingModelName
'{kernelmemory-endpoint}' = "http://kernelmemory-service"
- }
+ }
## Load and update the AI service configuration template
$aiServiceConfigTemplate = Get-Content -Path .\appconfig\aiservice\appconfig.jsonl -Raw
@@ -550,7 +614,7 @@ try {
$filePath = Join-Path $scriptDirectory ".\appconfig\aiservice\appsettings.dev.jsonl"
## Other variables
- $appConfigName = $deploymentResult.AzAppConfigName -replace "rg-", "-"
+ $appConfigName = $deploymentResult.AzAppConfigName
## Output the file path for verification
#write-host "Using file path: $filePath"
@@ -731,7 +795,7 @@ try {
# Validate if System Assigned Identity is null or empty
ValidateVariableIsNullOrEmpty -variableValue $systemAssignedIdentity -variableName "System-assigned managed identity"
- # Validate if ResourceGroupId is null or empty
+ # Validate if ResourceGroupId is null or empty
ValidateVariableIsNullOrEmpty -variableValue $deploymentResult.ResourceGroupId -variableName "ResourceGroupId"
# Assign the role for aks system assigned managed identity to App Configuration Data Reader role with the scope of Resourcegroup
@@ -848,7 +912,7 @@ try {
Wait-ForCertManager
-#======================================================================================================================================================================
+ #======================================================================================================================================================================
# Validate AzAppConfigEndpoint IsNull Or Empty.
ValidateVariableIsNullOrEmpty -variableValue $deploymentResult.AzAppConfigEndpoint -variableName "Azure App Configuration Endpoint"
# App Deployment after finishing the AKS infrastructure setup
@@ -933,7 +997,7 @@ try {
docker build "../App/frontend-app/." --no-cache -t $acrFrontAppTag
docker push $acrFrontAppTag
-#======================================================================================================================================================================
+ #======================================================================================================================================================================
# 7.2. Deploy ClusterIssuer in Kubernetes for SSL/TLS certificate
kubectl apply -f "./kubernetes/deploy.certclusterissuer.yaml"
@@ -962,12 +1026,12 @@ try {
successBanner
$messageString = "Please find the deployment details below: `r`n" +
- "1. Check Front Web Application with this URL - https://${fqdn} `n`r" +
- "2. Check GPT Model's TPM rate in your resource group - $($deploymentResult.ResourceGroupName) `n`r" +
- "Please set each value high as much as you can set`n`r" +
- "`t- Open AI Resource Name - $($deploymentResult.AzOpenAiServiceName) `n`r" +
- "`t- GPT4o Model - $($deploymentResult.AzGPT4oModelName) `n`r" +
- "`t- GPT Embedding Model - $($deploymentResult.AzGPTEmbeddingModelName) `n`r"
+ "1. Check Front Web Application with this URL - https://${fqdn} `n`r" +
+ "2. Check GPT Model's TPM rate in your resource group - $($deploymentResult.ResourceGroupName) `n`r" +
+ "Please set each value high as much as you can set`n`r" +
+ "`t- Open AI Resource Name - $($deploymentResult.AzOpenAiServiceName) `n`r" +
+ "`t- GPT4o Model - $($deploymentResult.AzGPT4oModelName) `n`r" +
+ "`t- GPT Embedding Model - $($deploymentResult.AzGPTEmbeddingModelName) `n`r"
Write-Host $messageString -ForegroundColor Yellow
Write-Host "Don't forget to control the TPM rate for your GPT and Embedding Model in Azure Open AI Studio Deployments section." -ForegroundColor Red
Write-Host "After controlling the TPM rate for your GPT and Embedding Model, let's start Data file import process with this command." -ForegroundColor Yellow
diff --git a/tests/e2e-test/.gitignore b/tests/e2e-test/.gitignore
new file mode 100644
index 00000000..6f792d69
--- /dev/null
+++ b/tests/e2e-test/.gitignore
@@ -0,0 +1,167 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+microsoft/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+.idea/
+archive/
+report/
+screenshots/
+report.html
diff --git a/tests/e2e-test/README.md b/tests/e2e-test/README.md
new file mode 100644
index 00000000..1957a0d2
--- /dev/null
+++ b/tests/e2e-test/README.md
@@ -0,0 +1,34 @@
+# Test Automation for Document Knowledge Mining Accelerator
+
+Write end-to-end tests for your web apps with [Playwright](https://github.com/microsoft/playwright-python) and [pytest](https://docs.pytest.org/en/stable/).
+
+- Support for **all modern browsers** including Chromium, WebKit and Firefox.
+- Support for **headless and headed** execution.
+- **Built-in fixtures** that provide browser primitives to test functions.
+
+Pre-Requisites:
+
+- Install Visual Studio Code: Download and install Visual Studio Code (VSCode).
+- Install Node.js: Download and install Node.js.
+
+Create and Activate Python Virtual Environment
+
+- From your project directory, open a command prompt and run: "python -m venv microsoft".
+This will create a virtual environment directory named "microsoft" inside your current directory.
+- To activate the virtual environment, run "microsoft\Scripts\activate.bat" from the command prompt.
+
+Installing Playwright Pytest from Virtual Environment
+
+- To install the required libraries, run "pip install -r requirements.txt".
+
+Run test cases
+
+- To run test cases from e2e-test: "pytest --html=report.html --self-contained-html"
+
+Create a .env file at the project root level with the web app URL and client credentials
+
+- Create a .env file at the project root level and add the application URL. Please refer to the 'sample_dotenv_file.txt' file.
+
+## Documentation
+
+See [playwright.dev](https://playwright.dev/python/docs/test-runners) for examples and more detailed information.
diff --git a/tests/e2e-test/base/__init__.py b/tests/e2e-test/base/__init__.py
new file mode 100644
index 00000000..cf50d1cc
--- /dev/null
+++ b/tests/e2e-test/base/__init__.py
@@ -0,0 +1 @@
+from . import base
\ No newline at end of file
diff --git a/tests/e2e-test/base/base.py b/tests/e2e-test/base/base.py
new file mode 100644
index 00000000..759c7701
--- /dev/null
+++ b/tests/e2e-test/base/base.py
@@ -0,0 +1,36 @@
+from config.constants import *
+import requests
+import json
+from dotenv import load_dotenv
+import os
+
+class BasePage:
+ def __init__(self, page):
+ self.page = page
+
+ def scroll_into_view(self,locator):
+ reference_list = locator
+ locator.nth(reference_list.count()-1).scroll_into_view_if_needed()
+
+ def is_visible(self,locator):
+ locator.is_visible()
+
+ def validate_response_status(self, question_api):
+ load_dotenv()
+ # The URL of the API endpoint you want to access
+ url = f"{URL}/backend/chat"
+
+ headers = {
+ "Content-Type": "application/json",
+ "Accept": "*/*",
+ }
+ payload = {
+ "Question": question_api, # This is your example question, you can modify it as needed
+ }
+ # Make the POST request
+ response = self.page.request.post(url, headers=headers, data=json.dumps(payload), timeout=200000)
+
+ # Check the response status code
+ assert response.status == 200, "Response code is " + str(response.status) + " " + str(response.json())
+
+
\ No newline at end of file
diff --git a/tests/e2e-test/config/constants.py b/tests/e2e-test/config/constants.py
new file mode 100644
index 00000000..7d9c8a91
--- /dev/null
+++ b/tests/e2e-test/config/constants.py
@@ -0,0 +1,16 @@
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+URL = os.getenv('url')
+if URL.endswith('/'):
+ URL = URL[:-1]
+
+# DKM input data
+chat_question1 = "What are the main factors contributing to the current housing affordability issues?"
+chat_question2 = "Analyze the two annual reports and compare the positive and negative outcomes YoY. Show the results in a table."
+house_10_11_question ="Can you summarize and compare the tables on page 10 and 11?"
+handwritten_question1 ="Analyze these forms and create a table with all buyers, sellers, and corresponding purchase prices."
+search_1= "Housing Report"
+search_2= "Contracts"
+contract_details_question = "What liabilities is the buyer responsible for within the contract?"
diff --git a/tests/e2e-test/pages/__init__.py b/tests/e2e-test/pages/__init__.py
new file mode 100644
index 00000000..3363f3e4
--- /dev/null
+++ b/tests/e2e-test/pages/__init__.py
@@ -0,0 +1,2 @@
+from. import loginPage
+from. import dkmPage
\ No newline at end of file
diff --git a/tests/e2e-test/pages/dkmPage.py b/tests/e2e-test/pages/dkmPage.py
new file mode 100644
index 00000000..c434d19f
--- /dev/null
+++ b/tests/e2e-test/pages/dkmPage.py
@@ -0,0 +1,147 @@
+from base.base import BasePage
+from playwright.sync_api import expect
+import time
+from playwright.sync_api import TimeoutError as PlaywrightTimeoutError
+
+class DkmPage(BasePage):
+ WELCOME_PAGE_TITLE = "(//div[@class='order-5 my-auto pb-3 text-lg font-semibold leading-tight text-white mt-3'])[1]"
+ NEWTOPIC = "//button[normalize-space()='New Topic']"
+ Suggested_follow_up_questions="body > div:nth-child(3) > div:nth-child(1) > main:nth-child(2) > div:nth-child(1) > div:nth-child(3) > div:nth-child(1) > div:nth-child(1) > div:nth-child(1) > div:nth-child(1) > div:nth-child(3) > div:nth-child(1) > div:nth-child(6) > div:nth-child(3) > button:nth-child(2)"
+ SCROLL_DOWN = "//div[10]//div[2]//div[2]//i[1]//img[1]"
+ ASK_QUESTION ="//textarea[@placeholder='Ask a question or request (ctrl + enter to submit)']"
+ SEARCH_BOX="//input[@type='search']"
+ HOUSING_2022 ="//body[1]/div[2]/div[1]/main[1]/div[1]/div[2]/div[4]/div[1]/div[1]/div[4]/div[2]/div[2]/span[1]"
+ HOUSING_2023 ="//body[1]/div[2]/div[1]/main[1]/div[1]/div[2]/div[4]/div[1]/div[1]/div[3]/div[2]/div[2]/span[1]"
+ CONTRACTS_DETAILS_PAGE = "body > div:nth-child(3) > div:nth-child(1) > main:nth-child(2) > div:nth-child(1) > div:nth-child(2) > div:nth-child(4) > div:nth-child(1) > div:nth-child(1) > div:nth-child(6) > div:nth-child(2) > div:nth-child(2) > div:nth-child(3) > button:nth-child(2)"
+ DETAILS_PAGE ="body > div:nth-child(3) > div:nth-child(1) > main:nth-child(2) > div:nth-child(1) > div:nth-child(2) > div:nth-child(4) > div:nth-child(1) > div:nth-child(1) > div:nth-child(3) > div:nth-child(2) > div:nth-child(2) > div:nth-child(3) > button:nth-child(2)"
+ POP_UP_CHAT="//button[@value='Chat Room']"
+ CLOSE_POP_UP ="//button[@aria-label='close']"
+ CLLEAR_ALL_POP_UP ="//button[normalize-space()='Clear all']"
+ HANDWRITTEN_DOC1="//body[1]/div[2]/div[1]/main[1]/div[1]/div[2]/div[4]/div[1]/div[1]/div[6]/div[2]/div[2]/span[1]"
+ HANDWRITTEN_DOC2="//body[1]/div[2]/div[1]/main[1]/div[1]/div[2]/div[4]/div[1]/div[1]/div[1]/div[2]/div[2]/span[1]"
+ HANDWRITTEN_DOC3="//body[1]/div[2]/div[1]/main[1]/div[1]/div[2]/div[4]/div[1]/div[1]/div[5]/div[2]/div[2]/span[1]"
+ SEND_BUTTON = "//button[@aria-label='Send']"
+ POP_UP_CHAT_SEARCH = "(//textarea[@placeholder='Ask a question or request (ctrl + enter to submit)'])[2]"
+ POP_UP_CHAT_SEND = "(//button[@type='submit'])[2]"
+ DOCUMENT_FILTER = "//button[normalize-space()='Accessibility Features']"
+ HEADING_TITLE = "//div[.='Document Knowledge Mining']"
+
+
+ def __init__(self, page):
+ self.page = page
+
+
+
+ def validate_home_page(self):
+ self.page.wait_for_timeout(5000)
+ expect(self.page.locator(self.DOCUMENT_FILTER)).to_be_visible()
+ expect(self.page.locator(self.HEADING_TITLE)).to_be_visible()
+ self.page.wait_for_timeout(2000)
+
+
+ def enter_a_question(self,text):
+ self.page.locator(self.ASK_QUESTION).fill(text)
+ self.page.wait_for_timeout(5000)
+
+ def enter_in_search(self,text):
+ self.page.locator(self.SEARCH_BOX).fill(text)
+ self.page.wait_for_timeout(5000)
+
+ def enter_in_popup_search(self,text):
+ self.page.locator(self.POP_UP_CHAT_SEARCH).fill(text)
+ self.page.wait_for_timeout(5000)
+ self.page.locator(self.POP_UP_CHAT_SEND).click()
+ # self.page.wait_for_load_state('networkidle')
+
+ def select_housing_checkbox(self):
+ self.page.locator(self.HOUSING_2022).click()
+ self.page.locator(self.HOUSING_2023).click()
+ self.page.wait_for_timeout(5000)
+
+ def click_on_details(self):
+ self.page.wait_for_timeout(5000)
+ self.page.locator(self.DETAILS_PAGE).click()
+ self.page.wait_for_timeout(13000)
+
+ def click_on_popup_chat(self):
+ self.page.locator(self.POP_UP_CHAT).click()
+ self.page.wait_for_timeout(5000)
+
+ def close_pop_up(self):
+ self.page.locator(self.CLOSE_POP_UP).click()
+ self.page.wait_for_timeout(2000)
+ self.page.locator(self.CLLEAR_ALL_POP_UP).click()
+ self.page.wait_for_timeout(2000)
+
+ def select_handwritten_doc(self):
+ self.page.locator(self.HANDWRITTEN_DOC1).click()
+ self.page.locator(self.HANDWRITTEN_DOC2).click()
+ self.page.locator(self.HANDWRITTEN_DOC3).click()
+ self.page.wait_for_timeout(2000)
+
+ def click_send_button(self):
+ # Click on send button in question area
+ self.page.locator(self.SEND_BUTTON).click()
+ self.page.wait_for_timeout(5000)
+
+ #self.page.wait_for_load_state('networkidle')
+
+ def wait_until_response_loaded(self,timeout=200000):
+ start_time = time.time()
+ interval = 0.1
+ end_time = start_time + timeout / 1000
+ locator = self.page.locator(self.ASK_QUESTION)
+
+ while time.time() < end_time:
+ if locator.is_enabled():
+ return
+ time.sleep(interval)
+
+ raise PlaywrightTimeoutError("Response is not generated and it has been timed out.")
+ # try:
+ # # Wait for it to appear in the DOM and be visible
+ # locator = self.page.locator(self.ASK_QUESTION)
+ # locator.wait_for(state="enabled", timeout=200000) # adjust timeout as needed
+ # except PlaywrightTimeoutError:
+ # raise Exception("Response is not generated and it has been timed out.")
+
+
+ def wait_until_chat_details_response_loaded(self,timeout=200000):
+
+ start_time = time.time()
+ interval = 0.1
+ end_time = start_time + timeout / 1000
+ locator = self.page.locator(self.POP_UP_CHAT_SEARCH)
+
+ while time.time() < end_time:
+ if locator.is_enabled():
+ return
+ time.sleep(interval)
+
+ raise PlaywrightTimeoutError("Response is not generated and it has been timed out.")
+
+
+
+ def click_new_topic(self):
+ self.page.locator(self.NEWTOPIC).click()
+ self.page.wait_for_timeout(2000)
+ self.page.wait_for_load_state('networkidle')
+
+ def get_follow_ques_text(self):
+ follow_up_question = self.page.locator(self.Suggested_follow_up_questions).text_content()
+ return follow_up_question
+
+ def click_suggested_question(self):
+ self.page.locator(self.Suggested_follow_up_questions).click()
+ self.page.wait_for_timeout(2000)
+ self.page.wait_for_load_state('networkidle')
+
+
+
+ def click_on_contract_details(self):
+ self.page.locator(self.CONTRACTS_DETAILS_PAGE).click()
+ self.page.wait_for_timeout(12000)
+
+
+
+
\ No newline at end of file
diff --git a/tests/e2e-test/pages/loginPage.py b/tests/e2e-test/pages/loginPage.py
new file mode 100644
index 00000000..0ee59f77
--- /dev/null
+++ b/tests/e2e-test/pages/loginPage.py
@@ -0,0 +1,36 @@
+from base.base import BasePage
+
+
+class LoginPage(BasePage):
+
+ EMAIL_TEXT_BOX = "//input[@type='email']"
+ NEXT_BUTTON = "//input[@type='submit']"
+ PASSWORD_TEXT_BOX = "//input[@type='password']"
+ SIGNIN_BUTTON = "//input[@id='idSIButton9']"
+ YES_BUTTON = "//input[@id='idSIButton9']"
+ PERMISSION_ACCEPT_BUTTON = "//input[@type='submit']"
+
+ def __init__(self, page):
+ self.page = page
+
+ def authenticate(self, username,password):
+ # login with username and password in web url
+ self.page.locator(self.EMAIL_TEXT_BOX).fill(username)
+ self.page.locator(self.NEXT_BUTTON).click()
+ # Wait for the password input field to be available and fill it
+ self.page.wait_for_load_state('networkidle')
+ # Enter password
+ self.page.locator(self.PASSWORD_TEXT_BOX).fill(password)
+ # Click on SignIn button
+ self.page.locator(self.SIGNIN_BUTTON).click()
+ # Wait for 5 seconds to ensure the login process completes
+ self.page.wait_for_timeout(20000) # Wait for 20 seconds
+ if self.page.locator(self.PERMISSION_ACCEPT_BUTTON).is_visible():
+ self.page.locator(self.PERMISSION_ACCEPT_BUTTON).click()
+ self.page.wait_for_timeout(10000)
+ else:
+ # Click on YES button
+ self.page.locator(self.YES_BUTTON).click()
+ self.page.wait_for_timeout(10000)
+ # Wait for the "Articles" button to be available and click it
+ self.page.wait_for_load_state('networkidle')
diff --git a/tests/e2e-test/pytest.ini b/tests/e2e-test/pytest.ini
new file mode 100644
index 00000000..76eb64fc
--- /dev/null
+++ b/tests/e2e-test/pytest.ini
@@ -0,0 +1,6 @@
+[pytest]
+log_cli = true
+log_cli_level = INFO
+log_file = logs/tests.log
+log_file_level = INFO
+addopts = -p no:warnings
diff --git a/tests/e2e-test/requirements.txt b/tests/e2e-test/requirements.txt
new file mode 100644
index 00000000..37159fb1
--- /dev/null
+++ b/tests/e2e-test/requirements.txt
@@ -0,0 +1,7 @@
+pytest-playwright
+pytest-reporter-html1
+python-dotenv
+pytest-check
+pytest-html
+py
+beautifulsoup4
diff --git a/tests/e2e-test/sample_dotenv_file.txt b/tests/e2e-test/sample_dotenv_file.txt
new file mode 100644
index 00000000..bee18d2f
--- /dev/null
+++ b/tests/e2e-test/sample_dotenv_file.txt
@@ -0,0 +1 @@
+url = 'web app url'
\ No newline at end of file
diff --git a/tests/e2e-test/tests/__init__.py b/tests/e2e-test/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/e2e-test/tests/conftest.py b/tests/e2e-test/tests/conftest.py
new file mode 100644
index 00000000..28771989
--- /dev/null
+++ b/tests/e2e-test/tests/conftest.py
@@ -0,0 +1,113 @@
+from pathlib import Path
+import pytest
+from playwright.sync_api import sync_playwright
+from config.constants import *
+from slugify import slugify
+from pages.loginPage import LoginPage
+from dotenv import load_dotenv
+import os
+from py.xml import html # type: ignore
+import io
+import logging
+from bs4 import BeautifulSoup
+import atexit
+
+
+@pytest.fixture(scope="session")
+def login_logout():
+ # perform login and browser close once in a session
+ with sync_playwright() as p:
+ browser = p.chromium.launch(headless=False, args=["--start-maximized"])
+ context = browser.new_context(no_viewport=True)
+ context.set_default_timeout(120000)
+ page = context.new_page()
+ # Navigate to the login URL
+ page.goto(URL)
+ # Wait for the login form to appear
+ page.wait_for_load_state('networkidle')
+ # login to web url with username and password
+ #login_page = LoginPage(page)
+ #load_dotenv()
+ #login_page.authenticate(os.getenv('user_name'),os.getenv('pass_word'))
+ yield page
+
+ # perform close the browser
+ browser.close()
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_html_report_title(report):
+ report.title = "Test Automation DKM"
+
+
+log_streams = {}
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_runtest_setup(item):
+ # Prepare StringIO for capturing logs
+ stream = io.StringIO()
+ handler = logging.StreamHandler(stream)
+ handler.setLevel(logging.INFO)
+
+ logger = logging.getLogger()
+ logger.addHandler(handler)
+
+ # Save handler and stream
+ log_streams[item.nodeid] = (handler, stream)
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+ outcome = yield
+ report = outcome.get_result()
+
+ handler, stream = log_streams.get(item.nodeid, (None, None))
+
+ if handler and stream:
+ # Make sure logs are flushed
+ handler.flush()
+ log_output = stream.getvalue()
+
+ # Only remove the handler, don't close the stream yet
+ logger = logging.getLogger()
+ logger.removeHandler(handler)
+
+ # Store the log output on the report object for HTML reporting
+ report.description = f"{log_output.strip()}"
+
+ # Clean up references
+ log_streams.pop(item.nodeid, None)
+ else:
+ report.description = ""
+
+def pytest_collection_modifyitems(items):
+ for item in items:
+ if hasattr(item, 'callspec'):
+ prompt = item.callspec.params.get("prompt")
+ if prompt:
+ item._nodeid = prompt # This controls how the test name appears in the report
+
+def rename_duration_column():
+ report_path = os.path.abspath("report.html") # or your report filename
+ if not os.path.exists(report_path):
+ print("Report file not found, skipping column rename.")
+ return
+
+ with open(report_path, 'r', encoding='utf-8') as f:
+ soup = BeautifulSoup(f, 'html.parser')
+
+ # Find and rename the header
+ headers = soup.select('table#results-table thead th')
+ for th in headers:
+ if th.text.strip() == 'Duration':
+ th.string = 'Execution Time'
+ #print("Renamed 'Duration' to 'Execution Time'")
+ break
+ else:
+ print("'Duration' column not found in report.")
+
+ with open(report_path, 'w', encoding='utf-8') as f:
+ f.write(str(soup))
+
+# Register this function to run after everything is done
+atexit.register(rename_duration_column)
\ No newline at end of file
diff --git a/tests/e2e-test/tests/test_poc_dkm.py b/tests/e2e-test/tests/test_poc_dkm.py
new file mode 100644
index 00000000..64c6fb66
--- /dev/null
+++ b/tests/e2e-test/tests/test_poc_dkm.py
@@ -0,0 +1,92 @@
+import logging
+import time
+import pytest
+from pages.dkmPage import DkmPage
+from config.constants import *
+
+logger = logging.getLogger(__name__)
+
+def _store_follow_up_question(dkm):
+ """Helper to store follow-up question text as an attribute on the DkmPage object."""
+ dkm.follow_up_question = dkm.get_follow_ques_text()
+
+
+# Define test steps and prompts
+test_cases = [
+ ("Validate home page is loaded", lambda dkm: dkm.validate_home_page()),
+ (f"Ask first chat question: {chat_question1}", lambda dkm: (
+ dkm.enter_a_question(chat_question1),
+ dkm.click_send_button(),
+ dkm.validate_response_status(chat_question1),
+ dkm.wait_until_response_loaded()
+ )),
+ ("Click on suggested follow-up question", lambda dkm: (
+ _store_follow_up_question(dkm),
+ dkm.click_suggested_question(),
+ dkm.validate_response_status(dkm.follow_up_question),
+ dkm.wait_until_response_loaded()
+ )),
+ ("Start new topic", lambda dkm: dkm.click_new_topic()),
+ ("Search for 'Housing Report'", lambda dkm: dkm.enter_in_search(search_1)),
+ ("Select housing docs", lambda dkm: dkm.select_housing_checkbox()),
+ (f"Ask housing chat question: {chat_question2}", lambda dkm: (
+ dkm.enter_a_question(chat_question2),
+ dkm.click_send_button(),
+ dkm.validate_response_status(chat_question2),
+ dkm.wait_until_response_loaded()
+ )),
+ ("View details of housing report", lambda dkm: dkm.click_on_details()),
+ (f"Ask question in housing report popup: {house_10_11_question}", lambda dkm: (
+ dkm.click_on_popup_chat(),
+ dkm.enter_in_popup_search(house_10_11_question),
+ dkm.validate_response_status(house_10_11_question),
+ dkm.wait_until_chat_details_response_loaded(),
+ dkm.close_pop_up()
+ )),
+ ("Search for 'Contracts'", lambda dkm: dkm.enter_in_search(search_2)),
+ ("Select handwritten contract docs", lambda dkm: dkm.select_handwritten_doc()),
+ (f"Ask question about handwritten contracts: {handwritten_question1}", lambda dkm: (
+ dkm.enter_a_question(handwritten_question1),
+ dkm.click_send_button(),
+ dkm.validate_response_status(handwritten_question1),
+ dkm.wait_until_response_loaded()
+ )),
+ (f"Ask question in contract details popup: {contract_details_question}", lambda dkm: (
+ dkm.click_on_contract_details(),
+ dkm.click_on_popup_chat(),
+ dkm.enter_in_popup_search(contract_details_question),
+ dkm.validate_response_status(contract_details_question),
+ dkm.wait_until_chat_details_response_loaded(),
+ dkm.close_pop_up()
+ )),
+]
+
+# Create custom readable test IDs with step numbers
+test_ids = [f"{i+1:02d}. {case[0]}" for i, case in enumerate(test_cases)]
+
+@pytest.mark.parametrize("prompt, action", test_cases, ids=test_ids)
+def test_dkm_prompt_case(login_logout, prompt, action, request):
+ """
+ Executes each DKM user interaction step as an independent test case,
+ logs execution time, and attaches it to the test report.
+ """
+ page = login_logout
+ dkm_page = DkmPage(page)
+ logger.info(f"Running test step: {prompt}")
+
+ start = time.time()
+ if isinstance(action, tuple):
+ for step in action:
+ if callable(step):
+ step()
+ else:
+ action(dkm_page)
+ end = time.time()
+
+ duration = end - start
+ logger.info(f"Execution Time for '{prompt}': {duration:.2f}s")
+
+ # Attach to report
+ request.node._report_sections.append((
+ "call", "log", f"Execution time: {duration:.2f}s"
+ ))
\ No newline at end of file