diff --git a/.github/workflows/azure-dev.yml b/.github/workflows/azure-dev.yml index 2b887434..6feb285b 100644 --- a/.github/workflows/azure-dev.yml +++ b/.github/workflows/azure-dev.yml @@ -18,7 +18,7 @@ jobs: AZURE_ENV_NAME: ${{ secrets.AZURE_ENV_NAME }} AZURE_LOCATION: ${{ secrets.AZURE_LOCATION }} AZURE_AI_DEPLOYMENT_LOCATION: ${{ secrets.AZURE_AI_DEPLOYMENT_LOCATION || secrets.AZURE_LOCATION }} - AZURE_ENV_MODEL_CAPACITY: 1 + AZURE_ENV_GPT_MODEL_CAPACITY: 1 AZURE_DEV_COLLECT_TELEMETRY: ${{ vars.AZURE_DEV_COLLECT_TELEMETRY }} steps: - name: Checkout code @@ -60,6 +60,6 @@ jobs: azd env set AZURE_SUBSCRIPTION_ID "$AZURE_SUBSCRIPTION_ID" azd env set AZURE_LOCATION "$AZURE_LOCATION" azd env set AZURE_ENV_AI_SERVICE_LOCATION "${AZURE_AI_DEPLOYMENT_LOCATION:-$AZURE_LOCATION}" - azd env set AZURE_ENV_MODEL_CAPACITY "$AZURE_ENV_MODEL_CAPACITY" + azd env set AZURE_ENV_GPT_MODEL_CAPACITY "$AZURE_ENV_GPT_MODEL_CAPACITY" azd up --no-prompt diff --git a/.github/workflows/deploy-orchestrator.yml b/.github/workflows/deploy-orchestrator.yml index ab68ee98..30c3af07 100644 --- a/.github/workflows/deploy-orchestrator.yml +++ b/.github/workflows/deploy-orchestrator.yml @@ -42,13 +42,13 @@ on: required: false default: 'GoldenPath-Testing' type: string - AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: - description: 'Log Analytics Workspace ID (Optional)' + AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: + description: 'Log Analytics Workspace Resource ID (Optional)' required: false default: '' type: string - AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: - description: 'AI Project Resource ID (Optional)' + AZURE_EXISTING_AIPROJECT_RESOURCE_ID: + description: 'Foundry Project Resource ID (Optional)' required: false default: '' type: string @@ -86,8 +86,8 @@ jobs: EXP: ${{ inputs.EXP }} build_docker_image: ${{ inputs.build_docker_image }} existing_webapp_url: ${{ inputs.existing_webapp_url }} - AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: ${{ inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }} - 
AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }} + AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ inputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }} + AZURE_EXISTING_AIPROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID }} docker_image_tag: ${{ needs.docker-build.outputs.IMAGE_TAG }} run_e2e_tests: ${{ inputs.run_e2e_tests }} cleanup_resources: ${{ inputs.cleanup_resources }} diff --git a/.github/workflows/deploy-v2.yml b/.github/workflows/deploy-v2.yml index 2f59ee82..e5cdd35c 100644 --- a/.github/workflows/deploy-v2.yml +++ b/.github/workflows/deploy-v2.yml @@ -70,13 +70,13 @@ on: - 'GoldenPath-Testing' - 'Smoke-Testing' - 'None' - AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: - description: 'Log Analytics Workspace ID (Optional)' + AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: + description: 'Log Analytics Workspace Resource ID (Optional)' required: false default: '' type: string - AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: - description: 'AI Project Resource ID (Optional)' + AZURE_EXISTING_AIPROJECT_RESOURCE_ID: + description: 'Foundry Project Resource ID (Optional)' required: false default: '' type: string @@ -102,8 +102,8 @@ jobs: build_docker_image: ${{ steps.validate.outputs.build_docker_image }} cleanup_resources: ${{ steps.validate.outputs.cleanup_resources }} run_e2e_tests: ${{ steps.validate.outputs.run_e2e_tests }} - azure_env_log_analytics_workspace_id: ${{ steps.validate.outputs.azure_env_log_analytics_workspace_id }} - azure_existing_ai_project_resource_id: ${{ steps.validate.outputs.azure_existing_ai_project_resource_id }} + AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ steps.validate.outputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }} + AZURE_EXISTING_AIPROJECT_RESOURCE_ID: ${{ steps.validate.outputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID }} existing_webapp_url: ${{ steps.validate.outputs.existing_webapp_url }} steps: - name: Validate Workflow Input Parameters @@ -118,8 +118,8 
@@ jobs: INPUT_BUILD_DOCKER_IMAGE: ${{ github.event.inputs.build_docker_image }} INPUT_CLEANUP_RESOURCES: ${{ github.event.inputs.cleanup_resources }} INPUT_RUN_E2E_TESTS: ${{ github.event.inputs.run_e2e_tests }} - INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: ${{ github.event.inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }} - INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: ${{ github.event.inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }} + INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ github.event.inputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }} + INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID: ${{ github.event.inputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID }} INPUT_EXISTING_WEBAPP_URL: ${{ github.event.inputs.existing_webapp_url }} run: | echo "šŸ” Validating workflow input parameters..." @@ -209,32 +209,32 @@ jobs: echo "āœ… run_e2e_tests: '$TEST_OPTION' is valid" fi - # Validate AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID (optional, Azure Resource ID format) - if [[ -n "$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" ]]; then - if [[ ! "$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/[Mm]icrosoft\.[Oo]perational[Ii]nsights/[Ww]orkspaces/[^/]+$ ]]; then - echo "āŒ ERROR: AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID is invalid. Must be a valid Azure Resource ID format:" + # Validate AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID (optional, Azure Resource ID format) + if [[ -n "$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" ]]; then + if [[ ! "$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/[Mm]icrosoft\.[Oo]perational[Ii]nsights/[Ww]orkspaces/[^/]+$ ]]; then + echo "āŒ ERROR: AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID is invalid. 
Must be a valid Azure Resource ID format:" echo " /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}" - echo " Got: '$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID'" + echo " Got: '$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID'" VALIDATION_FAILED=true else - echo "āœ… AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: Valid Resource ID format" + echo "āœ… AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: Valid Resource ID format" fi else - echo "āœ… AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: Not provided (optional)" + echo "āœ… AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: Not provided (optional)" fi - # Validate AZURE_EXISTING_AI_PROJECT_RESOURCE_ID (optional, Azure Resource ID format) - if [[ -n "$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" ]]; then - if [[ ! "$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/([Mm]icrosoft\.[Mm]achine[Ll]earning[Ss]ervices/([Ww]orkspaces|[Pp]rojects)/[^/]+|[Mm]icrosoft\.[Cc]ognitive[Ss]ervices/[Aa]ccounts/[^/]+/[Pp]rojects/[^/]+)$ ]]; then - echo "āŒ ERROR: AZURE_EXISTING_AI_PROJECT_RESOURCE_ID is invalid. Must be a valid Azure Resource ID format:" + # Validate AZURE_EXISTING_AIPROJECT_RESOURCE_ID (optional, Azure Resource ID format) + if [[ -n "$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" ]]; then + if [[ ! "$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/([Mm]icrosoft\.[Mm]achine[Ll]earning[Ss]ervices/([Ww]orkspaces|[Pp]rojects)/[^/]+|[Mm]icrosoft\.[Cc]ognitive[Ss]ervices/[Aa]ccounts/[^/]+/[Pp]rojects/[^/]+)$ ]]; then + echo "āŒ ERROR: AZURE_EXISTING_AIPROJECT_RESOURCE_ID is invalid. 
Must be a valid Azure Resource ID format:" echo " /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}/projects/{projectName}" - echo " Got: '$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID'" + echo " Got: '$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID'" VALIDATION_FAILED=true else - echo "āœ… AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: Valid Resource ID format" + echo "āœ… AZURE_EXISTING_AIPROJECT_RESOURCE_ID: Valid Resource ID format" fi else - echo "āœ… AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: Not provided (optional)" + echo "āœ… AZURE_EXISTING_AIPROJECT_RESOURCE_ID: Not provided (optional)" fi # Validate existing_webapp_url (optional, must start with https) @@ -269,8 +269,8 @@ jobs: echo "build_docker_image=$BUILD_DOCKER" >> $GITHUB_OUTPUT echo "cleanup_resources=$CLEANUP_RESOURCES" >> $GITHUB_OUTPUT echo "run_e2e_tests=$TEST_OPTION" >> $GITHUB_OUTPUT - echo "azure_env_log_analytics_workspace_id=$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" >> $GITHUB_OUTPUT - echo "azure_existing_ai_project_resource_id=$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" >> $GITHUB_OUTPUT + echo "AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID=$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" >> $GITHUB_OUTPUT + echo "AZURE_EXISTING_AIPROJECT_RESOURCE_ID=$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" >> $GITHUB_OUTPUT echo "existing_webapp_url=$INPUT_EXISTING_WEBAPP_URL" >> $GITHUB_OUTPUT Run: @@ -286,8 +286,8 @@ jobs: build_docker_image: ${{ needs.validate-inputs.outputs.build_docker_image == 'true' }} cleanup_resources: ${{ needs.validate-inputs.outputs.cleanup_resources == 'true' }} run_e2e_tests: ${{ needs.validate-inputs.outputs.run_e2e_tests || 'GoldenPath-Testing' }} - AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: ${{ needs.validate-inputs.outputs.azure_env_log_analytics_workspace_id || '' }} - AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: ${{ needs.validate-inputs.outputs.azure_existing_ai_project_resource_id || '' }} + 
AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ needs.validate-inputs.outputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID || '' }} + AZURE_EXISTING_AIPROJECT_RESOURCE_ID: ${{ needs.validate-inputs.outputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID || '' }} existing_webapp_url: ${{ needs.validate-inputs.outputs.existing_webapp_url || '' }} trigger_type: ${{ github.event_name }} secrets: inherit diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index cb0ad3d2..c61e4e8d 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -142,7 +142,7 @@ jobs: --parameters \ solutionName="${{ env.SOLUTION_PREFIX }}" \ azureAiServiceLocation='${{ env.AZURE_LOCATION }}' \ - imageVersion="${IMAGE_TAG}" \ + imageTag="${IMAGE_TAG}" \ createdBy="Pipeline" \ tags="{'Purpose':'Deploying and Cleaning Up Resources for Validation','CreatedDate':'$current_date'}" diff --git a/.github/workflows/job-deploy-linux.yml b/.github/workflows/job-deploy-linux.yml index a7240859..9bb21f44 100644 --- a/.github/workflows/job-deploy-linux.yml +++ b/.github/workflows/job-deploy-linux.yml @@ -28,10 +28,10 @@ on: required: false type: string default: 'false' - AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: + AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: required: false type: string - AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: + AZURE_EXISTING_AIPROJECT_RESOURCE_ID: required: false type: string outputs: @@ -62,8 +62,8 @@ jobs: INPUT_BUILD_DOCKER_IMAGE: ${{ inputs.BUILD_DOCKER_IMAGE }} INPUT_EXP: ${{ inputs.EXP }} INPUT_WAF_ENABLED: ${{ inputs.WAF_ENABLED }} - INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: ${{ inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }} - INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }} + INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ inputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }} + INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID }} run: | echo 
"šŸ” Validating workflow input parameters..." VALIDATION_FAILED=false @@ -150,27 +150,27 @@ jobs: echo "āœ… WAF_ENABLED: '$INPUT_WAF_ENABLED' is valid" fi - # Validate AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID (optional, if provided must be valid Resource ID) - if [[ -n "$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" ]]; then - if [[ ! "$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/[Mm]icrosoft\.[Oo]perational[Ii]nsights/[Ww]orkspaces/[^/]+$ ]]; then - echo "āŒ ERROR: AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID is invalid. Must be a valid Azure Resource ID format:" + # Validate AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID (optional, if provided must be valid Resource ID) + if [[ -n "$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" ]]; then + if [[ ! "$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/[Mm]icrosoft\.[Oo]perational[Ii]nsights/[Ww]orkspaces/[^/]+$ ]]; then + echo "āŒ ERROR: AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID is invalid. Must be a valid Azure Resource ID format:" echo " /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}" - echo " Got: '$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID'" + echo " Got: '$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID'" VALIDATION_FAILED=true else - echo "āœ… AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: Valid Resource ID format" + echo "āœ… AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: Valid Resource ID format" fi fi - # Validate AZURE_EXISTING_AI_PROJECT_RESOURCE_ID (optional, if provided must be valid Resource ID) - if [[ -n "$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" ]]; then - if [[ ! 
"$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/([Mm]icrosoft\.[Mm]achine[Ll]earning[Ss]ervices/([Ww]orkspaces|[Pp]rojects)/[^/]+|[Mm]icrosoft\.[Cc]ognitive[Ss]ervices/[Aa]ccounts/[^/]+/[Pp]rojects/[^/]+)$ ]]; then - echo "āŒ ERROR: AZURE_EXISTING_AI_PROJECT_RESOURCE_ID is invalid. Must be a valid Azure Resource ID format:" + # Validate AZURE_EXISTING_AIPROJECT_RESOURCE_ID (optional, if provided must be valid Resource ID) + if [[ -n "$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" ]]; then + if [[ ! "$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/([Mm]icrosoft\.[Mm]achine[Ll]earning[Ss]ervices/([Ww]orkspaces|[Pp]rojects)/[^/]+|[Mm]icrosoft\.[Cc]ognitive[Ss]ervices/[Aa]ccounts/[^/]+/[Pp]rojects/[^/]+)$ ]]; then + echo "āŒ ERROR: AZURE_EXISTING_AIPROJECT_RESOURCE_ID is invalid. Must be a valid Azure Resource ID format:" echo " /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}/projects/{projectName}" - echo " Got: '$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID'" + echo " Got: '$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID'" VALIDATION_FAILED=true else - echo "āœ… AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: Valid Resource ID format" + echo "āœ… AZURE_EXISTING_AIPROJECT_RESOURCE_ID: Valid Resource ID format" fi fi @@ -223,8 +223,8 @@ jobs: INPUT_IMAGE_TAG: ${{ inputs.IMAGE_TAG }} INPUT_BUILD_DOCKER_IMAGE: ${{ inputs.BUILD_DOCKER_IMAGE }} INPUT_EXP: ${{ inputs.EXP }} - INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: ${{ inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }} - INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }} + INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ inputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }} + INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID }} 
run: | set -e @@ -240,11 +240,11 @@ jobs: azd env set AZURE_ENV_AI_SERVICE_LOCATION="$INPUT_AZURE_ENV_OPENAI_LOCATION" azd env set AZURE_LOCATION="$INPUT_AZURE_LOCATION" azd env set AZURE_RESOURCE_GROUP="$INPUT_RESOURCE_GROUP_NAME" - azd env set AZURE_ENV_IMAGETAG="$INPUT_IMAGE_TAG" + azd env set AZURE_ENV_IMAGE_TAG="$INPUT_IMAGE_TAG" if [[ "$INPUT_BUILD_DOCKER_IMAGE" == "true" ]]; then ACR_NAME="${{ secrets.ACR_TEST_LOGIN_SERVER }}" - azd env set AZURE_ENV_ACR_NAME="$ACR_NAME" + azd env set AZURE_ENV_CONTAINER_REGISTRY_ENDPOINT="$ACR_NAME" echo "Set ACR name to: $ACR_NAME" else echo "Skipping ACR name configuration (using existing image)" @@ -253,22 +253,22 @@ jobs: if [[ "$INPUT_EXP" == "true" ]]; then echo "āœ… EXP ENABLED - Setting EXP parameters..." - if [[ -n "$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" ]]; then - EXP_LOG_ANALYTICS_ID="$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" + if [[ -n "$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" ]]; then + EXP_LOG_ANALYTICS_ID="$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" else EXP_LOG_ANALYTICS_ID="${{ vars.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }}" fi - if [[ -n "$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" ]]; then - EXP_AI_PROJECT_ID="$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" + if [[ -n "$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" ]]; then + EXP_AI_PROJECT_ID="$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" else EXP_AI_PROJECT_ID="${{ vars.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }}" fi - echo "AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: $EXP_LOG_ANALYTICS_ID" - echo "AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: $EXP_AI_PROJECT_ID" - azd env set AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID="$EXP_LOG_ANALYTICS_ID" - azd env set AZURE_EXISTING_AI_PROJECT_RESOURCE_ID="$EXP_AI_PROJECT_ID" + echo "AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: $EXP_LOG_ANALYTICS_ID" + echo "AZURE_EXISTING_AIPROJECT_RESOURCE_ID: $EXP_AI_PROJECT_ID" + azd env set AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID="$EXP_LOG_ANALYTICS_ID" + azd 
env set AZURE_EXISTING_AIPROJECT_RESOURCE_ID="$EXP_AI_PROJECT_ID" else echo "āŒ EXP DISABLED - Skipping EXP parameters" fi diff --git a/.github/workflows/job-deploy-windows.yml b/.github/workflows/job-deploy-windows.yml index e36f1b1e..fe7f51ea 100644 --- a/.github/workflows/job-deploy-windows.yml +++ b/.github/workflows/job-deploy-windows.yml @@ -28,10 +28,10 @@ on: required: false type: string default: 'false' - AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: + AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: required: false type: string - AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: + AZURE_EXISTING_AIPROJECT_RESOURCE_ID: required: false type: string outputs: @@ -62,8 +62,8 @@ jobs: INPUT_BUILD_DOCKER_IMAGE: ${{ inputs.BUILD_DOCKER_IMAGE }} INPUT_EXP: ${{ inputs.EXP }} INPUT_WAF_ENABLED: ${{ inputs.WAF_ENABLED }} - INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: ${{ inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }} - INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }} + INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ inputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }} + INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID }} run: | echo "šŸ” Validating workflow input parameters..." VALIDATION_FAILED=false @@ -150,27 +150,27 @@ jobs: echo "āœ… WAF_ENABLED: '$INPUT_WAF_ENABLED' is valid" fi - # Validate AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID (optional, if provided must be valid Resource ID) - if [[ -n "$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" ]]; then - if [[ ! "$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/[Mm]icrosoft\.[Oo]perational[Ii]nsights/[Ww]orkspaces/[^/]+$ ]]; then - echo "āŒ ERROR: AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID is invalid. 
Must be a valid Azure Resource ID format:" + # Validate AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID (optional, if provided must be valid Resource ID) + if [[ -n "$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" ]]; then + if [[ ! "$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/[Mm]icrosoft\.[Oo]perational[Ii]nsights/[Ww]orkspaces/[^/]+$ ]]; then + echo "āŒ ERROR: AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID is invalid. Must be a valid Azure Resource ID format:" echo " /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}" - echo " Got: '$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID'" + echo " Got: '$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID'" VALIDATION_FAILED=true else - echo "āœ… AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: Valid Resource ID format" + echo "āœ… AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: Valid Resource ID format" fi fi - # Validate AZURE_EXISTING_AI_PROJECT_RESOURCE_ID (optional, if provided must be valid Resource ID) - if [[ -n "$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" ]]; then - if [[ ! "$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/([Mm]icrosoft\.[Mm]achine[Ll]earning[Ss]ervices/([Ww]orkspaces|[Pp]rojects)/[^/]+|[Mm]icrosoft\.[Cc]ognitive[Ss]ervices/[Aa]ccounts/[^/]+/[Pp]rojects/[^/]+)$ ]]; then - echo "āŒ ERROR: AZURE_EXISTING_AI_PROJECT_RESOURCE_ID is invalid. Must be a valid Azure Resource ID format:" + # Validate AZURE_EXISTING_AIPROJECT_RESOURCE_ID (optional, if provided must be valid Resource ID) + if [[ -n "$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" ]]; then + if [[ ! 
"$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/([Mm]icrosoft\.[Mm]achine[Ll]earning[Ss]ervices/([Ww]orkspaces|[Pp]rojects)/[^/]+|[Mm]icrosoft\.[Cc]ognitive[Ss]ervices/[Aa]ccounts/[^/]+/[Pp]rojects/[^/]+)$ ]]; then + echo "āŒ ERROR: AZURE_EXISTING_AIPROJECT_RESOURCE_ID is invalid. Must be a valid Azure Resource ID format:" echo " /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}/projects/{projectName}" - echo " Got: '$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID'" + echo " Got: '$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID'" VALIDATION_FAILED=true else - echo "āœ… AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: Valid Resource ID format" + echo "āœ… AZURE_EXISTING_AIPROJECT_RESOURCE_ID: Valid Resource ID format" fi fi @@ -224,8 +224,8 @@ jobs: INPUT_IMAGE_TAG: ${{ inputs.IMAGE_TAG }} INPUT_BUILD_DOCKER_IMAGE: ${{ inputs.BUILD_DOCKER_IMAGE }} INPUT_EXP: ${{ inputs.EXP }} - INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: ${{ inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }} - INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }} + INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ inputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }} + INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID }} run: | $ErrorActionPreference = "Stop" Write-Host "Starting azd deployment..." 
@@ -244,12 +244,12 @@ jobs: azd env set AZURE_ENV_AI_SERVICE_LOCATION="$env:INPUT_AZURE_ENV_OPENAI_LOCATION" azd env set AZURE_LOCATION="$env:INPUT_AZURE_LOCATION" azd env set AZURE_RESOURCE_GROUP="$env:INPUT_RESOURCE_GROUP_NAME" - azd env set AZURE_ENV_IMAGETAG="$env:INPUT_IMAGE_TAG" + azd env set AZURE_ENV_IMAGE_TAG="$env:INPUT_IMAGE_TAG" # Set ACR name only when building Docker image if ("$env:INPUT_BUILD_DOCKER_IMAGE" -eq "true") { $ACR_NAME = "${{ secrets.ACR_TEST_LOGIN_SERVER }}" - azd env set AZURE_ENV_ACR_NAME="$ACR_NAME" + azd env set AZURE_ENV_CONTAINER_REGISTRY_ENDPOINT="$ACR_NAME" Write-Host "Set ACR name to: $ACR_NAME" } else { Write-Host "Skipping ACR name configuration (using existing image)" @@ -259,22 +259,22 @@ jobs: Write-Host "EXP ENABLED āœ… - Setting EXP parameters..." # Set EXP variables dynamically - if ("$env:INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" -ne "") { - $EXP_LOG_ANALYTICS_ID = "$env:INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" + if ("$env:INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" -ne "") { + $EXP_LOG_ANALYTICS_ID = "$env:INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" } else { $EXP_LOG_ANALYTICS_ID = "${{ vars.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }}" } - if ("$env:INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" -ne "") { - $EXP_AI_PROJECT_ID = "$env:INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" + if ("$env:INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" -ne "") { + $EXP_AI_PROJECT_ID = "$env:INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" } else { $EXP_AI_PROJECT_ID = "${{ vars.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }}" } - Write-Host "AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: $EXP_LOG_ANALYTICS_ID" - Write-Host "AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: $EXP_AI_PROJECT_ID" - azd env set AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID="$EXP_LOG_ANALYTICS_ID" - azd env set AZURE_EXISTING_AI_PROJECT_RESOURCE_ID="$EXP_AI_PROJECT_ID" + Write-Host "AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: $EXP_LOG_ANALYTICS_ID" + Write-Host 
"AZURE_EXISTING_AIPROJECT_RESOURCE_ID: $EXP_AI_PROJECT_ID" + azd env set AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID="$EXP_LOG_ANALYTICS_ID" + azd env set AZURE_EXISTING_AIPROJECT_RESOURCE_ID="$EXP_AI_PROJECT_ID" } else { Write-Host "EXP DISABLED - Skipping EXP parameters" } diff --git a/.github/workflows/job-deploy.yml b/.github/workflows/job-deploy.yml index ddaee746..94d7e964 100644 --- a/.github/workflows/job-deploy.yml +++ b/.github/workflows/job-deploy.yml @@ -51,13 +51,13 @@ on: required: false default: '' type: string - AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: - description: 'Log Analytics Workspace ID (Optional)' + AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: + description: 'Log Analytics Workspace Resource ID (Optional)' required: false default: '' type: string - AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: - description: 'AI Project Resource ID (Optional)' + AZURE_EXISTING_AIPROJECT_RESOURCE_ID: + description: 'Foundry Project Resource ID (Optional)' required: false default: '' type: string @@ -112,6 +112,7 @@ jobs: IMAGE_TAG: ${{ steps.determine_image_tag.outputs.IMAGE_TAG }} QUOTA_FAILED: ${{ steps.quota_failure_output.outputs.QUOTA_FAILED }} EXP_ENABLED: ${{ steps.configure_exp.outputs.EXP_ENABLED }} + RG_TAGS: ${{ vars.RG_TAGS }} steps: - name: Validate Workflow Input Parameters @@ -123,8 +124,8 @@ jobs: INPUT_WAF_ENABLED: ${{ inputs.waf_enabled }} INPUT_CLEANUP_RESOURCES: ${{ inputs.cleanup_resources }} INPUT_RUN_E2E_TESTS: ${{ inputs.run_e2e_tests }} - INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: ${{ inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }} - INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }} + INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ inputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }} + INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID }} INPUT_DOCKER_IMAGE_TAG: ${{ inputs.docker_image_tag }} run: | echo "šŸ” Validating workflow input 
parameters..." @@ -188,27 +189,27 @@ jobs: fi fi - # Validate AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID (Azure Resource ID format) - if [[ -n "$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" ]]; then - if [[ ! "$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/[Mm]icrosoft\.[Oo]perational[Ii]nsights/[Ww]orkspaces/[^/]+$ ]]; then - echo "āŒ ERROR: AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID is invalid. Must be a valid Azure Resource ID format:" + # Validate AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID (Azure Resource ID format) + if [[ -n "$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" ]]; then + if [[ ! "$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/[Mm]icrosoft\.[Oo]perational[Ii]nsights/[Ww]orkspaces/[^/]+$ ]]; then + echo "āŒ ERROR: AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID is invalid. Must be a valid Azure Resource ID format:" echo " /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}" - echo " Got: '$INPUT_AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID'" + echo " Got: '$INPUT_AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID'" VALIDATION_FAILED=true else - echo "āœ… AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: Valid Resource ID format" + echo "āœ… AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: Valid Resource ID format" fi fi - # Validate AZURE_EXISTING_AI_PROJECT_RESOURCE_ID (Azure Resource ID format) - if [[ -n "$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" ]]; then - if [[ ! "$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/([Mm]icrosoft\.[Mm]achine[Ll]earning[Ss]ervices/([Ww]orkspaces|[Pp]rojects)/[^/]+|[Mm]icrosoft\.[Cc]ognitive[Ss]ervices/[Aa]ccounts/[^/]+/[Pp]rojects/[^/]+)$ ]]; then - echo "āŒ ERROR: AZURE_EXISTING_AI_PROJECT_RESOURCE_ID is invalid. 
Must be a valid Azure Resource ID format:" + # Validate AZURE_EXISTING_AIPROJECT_RESOURCE_ID (Azure Resource ID format) + if [[ -n "$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" ]]; then + if [[ ! "$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID" =~ ^/subscriptions/[a-fA-F0-9-]+/[Rr]esource[Gg]roups/[^/]+/providers/([Mm]icrosoft\.[Mm]achine[Ll]earning[Ss]ervices/([Ww]orkspaces|[Pp]rojects)/[^/]+|[Mm]icrosoft\.[Cc]ognitive[Ss]ervices/[Aa]ccounts/[^/]+/[Pp]rojects/[^/]+)$ ]]; then + echo "āŒ ERROR: AZURE_EXISTING_AIPROJECT_RESOURCE_ID is invalid. Must be a valid Azure Resource ID format:" echo " /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}/projects/{projectName}" - echo " Got: '$INPUT_AZURE_EXISTING_AI_PROJECT_RESOURCE_ID'" + echo " Got: '$INPUT_AZURE_EXISTING_AIPROJECT_RESOURCE_ID'" VALIDATION_FAILED=true else - echo "āœ… AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: Valid Resource ID format" + echo "āœ… AZURE_EXISTING_AIPROJECT_RESOURCE_ID: Valid Resource ID format" fi fi @@ -242,8 +243,8 @@ jobs: shell: bash env: INPUT_EXP: ${{ inputs.EXP }} - INPUT_LOG_ANALYTICS_WORKSPACE_ID: ${{ inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }} - INPUT_AI_PROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }} + INPUT_LOG_ANALYTICS_WORKSPACE_RID: ${{ inputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }} + INPUT_FOUNDRY_PROJECT_RID: ${{ inputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID }} run: | echo "šŸ” Validating EXP configuration..." @@ -252,12 +253,12 @@ jobs: if [[ "$INPUT_EXP" == "true" ]]; then EXP_ENABLED="true" echo "āœ… EXP explicitly enabled by user input" - elif [[ -n "$INPUT_LOG_ANALYTICS_WORKSPACE_ID" ]] || [[ -n "$INPUT_AI_PROJECT_RESOURCE_ID" ]]; then + elif [[ -n "$INPUT_LOG_ANALYTICS_WORKSPACE_RID" ]] || [[ -n "$INPUT_FOUNDRY_PROJECT_RID" ]]; then echo "šŸ”§ AUTO-ENABLING EXP: EXP parameter values were provided but EXP was not explicitly enabled." 
echo "" echo "You provided values for:" - [[ -n "$INPUT_LOG_ANALYTICS_WORKSPACE_ID" ]] && echo " - Azure Log Analytics Workspace ID: '$INPUT_LOG_ANALYTICS_WORKSPACE_ID'" - [[ -n "$INPUT_AI_PROJECT_RESOURCE_ID" ]] && echo " - Azure AI Project Resource ID: '$INPUT_AI_PROJECT_RESOURCE_ID'" + [[ -n "$INPUT_LOG_ANALYTICS_WORKSPACE_RID" ]] && echo " - Azure Log Analytics Workspace RID: '$INPUT_LOG_ANALYTICS_WORKSPACE_RID'" + [[ -n "$INPUT_FOUNDRY_PROJECT_RID" ]] && echo " - Azure Foundry Project RID: '$INPUT_FOUNDRY_PROJECT_RID'" echo "" echo "āœ… Automatically enabling EXP to use these values." EXP_ENABLED="true" @@ -361,7 +362,12 @@ jobs: rg_exists=$(az group exists --name $RESOURCE_GROUP_NAME) if [ "$rg_exists" = "false" ]; then echo "šŸ“¦ Resource group does not exist. Creating new resource group '$RESOURCE_GROUP_NAME' in location '$AZURE_LOCATION'..." - az group create --name $RESOURCE_GROUP_NAME --location $AZURE_LOCATION || { echo "āŒ Error creating resource group"; exit 1; } + RG_TAGS="${{ env.RG_TAGS }}" + if [ -n "$RG_TAGS" ]; then + az group create --name $RESOURCE_GROUP_NAME --location $AZURE_LOCATION --tags "$RG_TAGS" || { echo "āŒ Error creating resource group"; exit 1; } + else + az group create --name $RESOURCE_GROUP_NAME --location $AZURE_LOCATION || { echo "āŒ Error creating resource group"; exit 1; } + fi echo "āœ… Resource group '$RESOURCE_GROUP_NAME' created successfully." else echo "āœ… Resource group '$RESOURCE_GROUP_NAME' already exists. Deploying to existing resource group." 
@@ -478,8 +484,8 @@ jobs: BUILD_DOCKER_IMAGE: ${{ inputs.build_docker_image || 'false' }} EXP: ${{ needs.azure-setup.outputs.EXP_ENABLED }} WAF_ENABLED: ${{ inputs.waf_enabled == true && 'true' || 'false' }} - AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: ${{ inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }} - AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }} + AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ inputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }} + AZURE_EXISTING_AIPROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID }} secrets: inherit deploy-windows: @@ -496,6 +502,6 @@ jobs: BUILD_DOCKER_IMAGE: ${{ inputs.build_docker_image || 'false' }} EXP: ${{ needs.azure-setup.outputs.EXP_ENABLED }} WAF_ENABLED: ${{ inputs.waf_enabled == true && 'true' || 'false' }} - AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID: ${{ inputs.AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID }} - AZURE_EXISTING_AI_PROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AI_PROJECT_RESOURCE_ID }} + AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID: ${{ inputs.AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID }} + AZURE_EXISTING_AIPROJECT_RESOURCE_ID: ${{ inputs.AZURE_EXISTING_AIPROJECT_RESOURCE_ID }} secrets: inherit diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml index 35b997af..f9c4d858 100644 --- a/.github/workflows/pylint.yml +++ b/.github/workflows/pylint.yml @@ -5,6 +5,16 @@ permissions: on: push: + branches: [main, dev, demo] + paths: + - '**/*.py' + - '**/requirements.txt' + - '**/pyproject.toml' + - '.flake8' + - '.github/workflows/pylint.yml' + pull_request: + branches: [main, dev, demo] + types: [opened, ready_for_review, reopened, synchronize] paths: - '**/*.py' - '**/requirements.txt' diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d05fffa8..f1ef5755 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -10,7 +10,7 @@ on: - demo paths: - 'src/backend/**/*.py' - - 
'src/tests/backend/**' + - 'src/tests/**' - '.github/workflows/test.yml' - 'src/backend/requirements.txt' - 'src/frontend/requirements.txt' @@ -27,7 +27,7 @@ on: - demo paths: - 'src/backend/**/*.py' - - 'src/tests/backend/**' + - 'src/tests/**' - '.github/workflows/test.yml' - 'src/backend/requirements.txt' - 'src/frontend/requirements.txt' diff --git a/.github/workflows/validate-bicep-params.yml b/.github/workflows/validate-bicep-params.yml new file mode 100644 index 00000000..a8722725 --- /dev/null +++ b/.github/workflows/validate-bicep-params.yml @@ -0,0 +1,107 @@ +name: Validate Bicep Parameters + +permissions: + contents: read + +on: + schedule: + - cron: '30 6 * * 3' # Wednesday 12:00 PM IST (6:30 AM UTC) + pull_request: + branches: + - main + - dev + paths: + - 'infra/**/*.bicep' + - 'infra/**/*.parameters.json' + workflow_dispatch: + +env: + accelerator_name: "CodeMod" + +jobs: + validate: + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Validate infra/ parameters + id: validate_infra + continue-on-error: true + run: | + set +e + python scripts/validate_bicep_params.py --dir infra --strict --no-color --json-output infra_results.json 2>&1 | tee infra_output.txt + EXIT_CODE=${PIPESTATUS[0]} + set -e + echo "## Infra Param Validation" >> "$GITHUB_STEP_SUMMARY" + echo '```' >> "$GITHUB_STEP_SUMMARY" + cat infra_output.txt >> "$GITHUB_STEP_SUMMARY" + echo '```' >> "$GITHUB_STEP_SUMMARY" + exit $EXIT_CODE + + - name: Set overall result + id: result + run: | + if [[ "${{ steps.validate_infra.outcome }}" == "failure" ]]; then + echo "status=failure" >> "$GITHUB_OUTPUT" + else + echo "status=success" >> "$GITHUB_OUTPUT" + fi + + - name: Upload validation results + if: always() + uses: actions/upload-artifact@v4 + with: + name: bicep-validation-results + path: | + infra_results.json + retention-days: 30 + + - name: Send schedule 
notification on failure + if: github.event_name == 'schedule' && steps.result.outputs.status == 'failure' + env: + LOGICAPP_URL: ${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA }} + GITHUB_REPOSITORY: ${{ github.repository }} + GITHUB_RUN_ID: ${{ github.run_id }} + ACCELERATOR_NAME: ${{ env.accelerator_name }} + run: | + RUN_URL="https://github.com/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}" + INFRA_OUTPUT=$(sed 's/&/\&amp;/g; s/</\&lt;/g; s/>/\&gt;/g' infra_output.txt) + + jq -n \ + --arg name "${ACCELERATOR_NAME}" \ + --arg infra "$INFRA_OUTPUT" \ + --arg url "$RUN_URL" \ + '{subject: ("Bicep Parameter Validation Report - " + $name + " - Issues Detected"), body: ("

Dear Team,

The scheduled Bicep Parameter Validation for " + $name + " has detected parameter mapping errors.

infra/ Results:

" + $infra + "

Run URL: " + $url + "

Please fix the parameter mapping issues at your earliest convenience.

Best regards,
Your Automation Team

")}' \ + | curl -X POST "${LOGICAPP_URL}" \ + -H "Content-Type: application/json" \ + -d @- || echo "Failed to send notification" + + - name: Send schedule notification on success + if: github.event_name == 'schedule' && steps.result.outputs.status == 'success' + env: + LOGICAPP_URL: ${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA }} + GITHUB_REPOSITORY: ${{ github.repository }} + GITHUB_RUN_ID: ${{ github.run_id }} + ACCELERATOR_NAME: ${{ env.accelerator_name }} + run: | + RUN_URL="https://github.com/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}" + INFRA_OUTPUT=$(sed 's/&/\&amp;/g; s/</\&lt;/g; s/>/\&gt;/g' infra_output.txt) + + jq -n \ + --arg name "${ACCELERATOR_NAME}" \ + --arg infra "$INFRA_OUTPUT" \ + --arg url "$RUN_URL" \ + '{subject: ("Bicep Parameter Validation Report - " + $name + " - Passed"), body: ("

Dear Team,

The scheduled Bicep Parameter Validation for " + $name + " has completed successfully. All parameter mappings are valid.

infra/ Results:

" + $infra + "

Run URL: " + $url + "

Best regards,
Your Automation Team

")}' \ + | curl -X POST "${LOGICAPP_URL}" \ + -H "Content-Type: application/json" \ + -d @- || echo "Failed to send notification" + + - name: Fail if errors found + if: steps.result.outputs.status == 'failure' + run: exit 1 diff --git a/README.md b/README.md index 2f848e68..749582cd 100644 --- a/README.md +++ b/README.md @@ -88,6 +88,8 @@ Follow the quick deploy steps on the deployment guide to deploy this solution to
+> **Note**: Some tenants may have additional security restrictions that run periodically and could impact the application (e.g., blocking public network access). If you experience issues or the application stops working, check if these restrictions are the cause. In such cases, consider deploying the WAF-supported version to ensure compliance. To configure, [Click here](./docs/DeploymentGuide.md#31-choose-deployment-type-optional). + > āš ļø **Important: Check Azure OpenAI Quota Availability**
To ensure sufficient quota is available in your subscription, please follow [quota check instructions guide](./docs/quota_check.md) before you deploy the solution. diff --git a/azure.yaml b/azure.yaml index 89774bcd..48c2f13a 100644 --- a/azure.yaml +++ b/azure.yaml @@ -4,6 +4,7 @@ metadata: requiredVersions: azd: '>= 1.18.0 != 1.23.9' + bicep: '>= 0.33.0' parameters: AzureAiServiceLocation: diff --git a/docs/CustomizingAzdParameters.md b/docs/CustomizingAzdParameters.md index a5d6799e..05652672 100644 --- a/docs/CustomizingAzdParameters.md +++ b/docs/CustomizingAzdParameters.md @@ -12,17 +12,17 @@ By default this template will use the environment name as the prefix to prevent | `AZURE_LOCATION` | string | `` | Location of the Azure resources. Controls where the infrastructure will be deployed. | | `AZURE_ENV_AI_SERVICE_LOCATION` | string | `` | Location of the Azure resources. Controls where the Azure AI Services will be deployed. | | `AZURE_ENV_MODEL_DEPLOYMENT_TYPE` | string | `GlobalStandard` | Change the Model Deployment Type (allowed values: Standard, GlobalStandard). | -| `AZURE_ENV_MODEL_NAME` | string | `gpt-4o` | Set the Model Name (allowed values: gpt-4o). | -| `AZURE_ENV_MODEL_VERSION` | string | `2024-08-06` | Set the Azure model version (allowed values: 2024-08-06) | -| `AZURE_ENV_MODEL_CAPACITY` | integer | `150` | Set the Model Capacity (choose a number based on available GPT model capacity in your subscription). | -| `AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID` | string | Guide to get your [Existing Workspace ID](/docs/re-use-log-analytics.md) | Set this if you want to reuse an existing Log Analytics Workspace instead of creating a new one. | -| `AZURE_ENV_IMAGETAG` | string | `latest` | Set the Image tag Like (allowed values: latest, dev, hotfix) | -| `AZURE_ENV_VM_SIZE` | string | `Standard_D2s_v5` | Specifies the size of the Jumpbox Virtual Machine (e.g., `Standard_D2s_v5`, `Standard_D2s_v4`). Set a custom value if `enablePrivateNetworking` is `true`. 
| +| `AZURE_ENV_GPT_MODEL_NAME` | string | `gpt-4o` | Set the Model Name (allowed values: gpt-4o). | +| `AZURE_ENV_GPT_MODEL_VERSION` | string | `2024-08-06` | Set the Azure model version (allowed values: 2024-08-06) | +| `AZURE_ENV_GPT_MODEL_CAPACITY` | integer | `150` | Set the Model Capacity (choose a number based on available GPT model capacity in your subscription). | +| `AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID`| string | Guide to get your [Existing Workspace ID](/docs/re-use-log-analytics.md) | Set this if you want to reuse an existing Log Analytics Workspace instead of creating a new one. | +| `AZURE_ENV_IMAGE_TAG` | string | `latest` | Set the Image tag Like (allowed values: latest, dev, hotfix) | +| `AZURE_ENV_VM_SIZE` | string | `Standard_D2s_v5` | Specifies the size of the Jumpbox Virtual Machine (e.g., `Standard_D2s_v5`, `Standard_D2s_v4`). Set a custom value if `enablePrivateNetworking` is `true`. | | `AZURE_ENV_JUMPBOX_ADMIN_USERNAME` | string | `JumpboxAdminUser` | Specifies the administrator username for the Jumpbox Virtual Machine. | | `AZURE_ENV_JUMPBOX_ADMIN_PASSWORD` | string | `JumpboxAdminP@ssw0rd1234!` | Specifies the administrator password for the Jumpbox Virtual Machine. | | `AZURE_ENV_COSMOS_SECONDARY_LOCATION` | string | *(not set by default)* | Specifies the secondary region for Cosmos DB. Required if `enableRedundancy` is `true`. | -| `AZURE_EXISTING_AI_PROJECT_RESOURCE_ID` | string | *(not set by default)* | Specifies the existing AI Foundry Project Resource ID if it needs to be reused. | -| `AZURE_ENV_ACR_NAME` | string | `cmsacontainerreg.azurecr.io` | Specifies the Azure Container Registry name to use for container images. | +| `AZURE_EXISTING_AIPROJECT_RESOURCE_ID` | string | *(not set by default)* | Specifies the existing AI Foundry Project Resource ID if it needs to be reused. 
| +| `AZURE_ENV_CONTAINER_REGISTRY_ENDPOINT`| string | *(not set by default)* | Specifies the Azure Container Registry endpoint to use for container images. | --- @@ -36,12 +36,12 @@ azd env set Set the Log Analytics Workspace Id if you need to reuse the existing workspace ```shell -azd env set AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID '/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.OperationalInsights/workspaces/<workspace-name>' +azd env set AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID '/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.OperationalInsights/workspaces/<workspace-name>' ``` Set the Azure Existing AI Foundry Project Resource ID if you need to reuse the existing AI Foundry Project ```shell -azd env set AZURE_EXISTING_AI_PROJECT_RESOURCE_ID '/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.CognitiveServices/accounts/<account-name>/projects/<project-name>' +azd env set AZURE_EXISTING_AIPROJECT_RESOURCE_ID '/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.CognitiveServices/accounts/<account-name>/projects/<project-name>' ``` **Example:** diff --git a/docs/DeploymentGuide.md b/docs/DeploymentGuide.md index 1225a8aa..69e87196 100644 --- a/docs/DeploymentGuide.md +++ b/docs/DeploymentGuide.md @@ -6,6 +6,8 @@ This guide walks you through deploying the Modernize Your Code Solution Accelera šŸ†˜ **Need Help?** If you encounter any issues during deployment, check our [Troubleshooting Guide](./TroubleShootingSteps.md) for solutions to common problems. +> **Note**: Some tenants may have additional security restrictions that run periodically and could impact the application (e.g., blocking public network access). If you experience issues or the application stops working, check if these restrictions are the cause. In such cases, consider deploying the WAF-supported version to ensure compliance. To configure, [Click here](#31-choose-deployment-type-optional). 
+ ## Step 1: Prerequisites & Setup ### 1.1 Azure Account Requirements diff --git a/infra/main.bicep b/infra/main.bicep index 1c02d6c9..8933fb94 100644 --- a/infra/main.bicep +++ b/infra/main.bicep @@ -57,13 +57,13 @@ var replicaRegionPairs = { var replicaLocation = replicaRegionPairs[resourceGroup().location] @description('Optional. AI model deployment token capacity. Defaults to 150K tokens per minute.') -param gptModelCapacity int = 150 +param gptDeploymentCapacity int = 150 @description('Optional. Enable monitoring for the resources. This will enable Application Insights and Log Analytics. Defaults to false.') param enableMonitoring bool = false @description('Optional. Enable scaling for the container apps. Defaults to false.') -param enableScaling bool = false +param enableScalability bool = false @description('Optional. Enable redundancy for applicable resources. Defaults to false.') param enableRedundancy bool = false @@ -95,7 +95,7 @@ param enableTelemetry bool = true @minLength(1) @description('Optional. GPT model deployment type. Defaults to GlobalStandard.') -param gptModelDeploymentType string = 'GlobalStandard' +param deploymentType string = 'GlobalStandard' @minLength(1) @description('Optional. Name of the GPT model to deploy. Defaults to gpt-4o.') @@ -103,21 +103,24 @@ param gptModelName string = 'gpt-4o' @minLength(1) @description('Optional. Set the Image tag. Defaults to latest_2025-11-10_599.') -param imageVersion string = 'latest_2025-11-10_599' +param imageTag string = 'latest_2025-11-10_599' -@description('Optional. Azure Container Registry name. Defaults to cmsacontainerreg.azurecr.io') -param acrName string = 'cmsacontainerreg.azurecr.io' +@description('Optional. Azure Container Registry endpoint. Defaults to cmsacontainerreg.azurecr.io') +param containerRegistryEndpoint string = 'cmsacontainerreg.azurecr.io' @minLength(1) @description('Optional. Version of the GPT model to deploy. 
Defaults to 2024-08-06.') param gptModelVersion string = '2024-08-06' @description('Optional. Use this parameter to use an existing AI project resource ID. Defaults to empty string.') -param azureExistingAIProjectResourceId string = '' +param existingFoundryProjectResourceId string = '' @description('Optional. Use this parameter to use an existing Log Analytics workspace resource ID. Defaults to empty string.') param existingLogAnalyticsWorkspaceId string = '' +@description('Optional. AI model deployments array for quota validation scripts. Not used directly by the template.') +param aiModelDeployments array = [] + var existingTags = resourceGroup().tags ?? {} var allTags = union( @@ -145,8 +148,8 @@ var modelDeployment = { version: gptModelVersion } sku: { - name: gptModelDeploymentType - capacity: gptModelCapacity + name: deploymentType + capacity: gptDeploymentCapacity } raiPolicyName: 'Microsoft.Default' } @@ -742,7 +745,7 @@ module aiServices 'modules/ai-foundry/aifoundry.bicep' = { projectDescription: 'proj-${solutionSuffix}' logAnalyticsWorkspaceResourceId: enableMonitoring ? 
logAnalyticsWorkspaceResourceId : '' privateNetworking: null // Private endpoint is handled by the standalone aiFoundryPrivateEndpoint module - existingFoundryProjectResourceId: azureExistingAIProjectResourceId + existingFoundryProjectResourceId: existingFoundryProjectResourceId disableLocalAuth: true //Should be set to true for WAF aligned configuration customSubDomainName: 'aif-${solutionSuffix}' apiProperties: { @@ -781,7 +784,7 @@ module aiServices 'modules/ai-foundry/aifoundry.bicep' = { } var aiFoundryAiServicesResourceName = 'aif-${solutionSuffix}' -var useExistingAiFoundryAiProject = !empty(azureExistingAIProjectResourceId) +var useExistingAiFoundryAiProject = !empty(existingFoundryProjectResourceId) module aiFoundryPrivateEndpoint 'br/public:avm/res/network/private-endpoint:0.8.1' = if (enablePrivateNetworking && !useExistingAiFoundryAiProject) { name: take('pep-${aiFoundryAiServicesResourceName}-deployment', 64) @@ -967,7 +970,7 @@ module containerAppBackend 'br/public:avm/res/app/container-app:0.19.0' = { containers: [ { name: 'cmsabackend' - image: '${acrName}/cmsabackend:${imageVersion}' + image: '${containerRegistryEndpoint}/cmsabackend:${imageTag}' env: concat( [ { @@ -1111,10 +1114,10 @@ module containerAppBackend 'br/public:avm/res/app/container-app:0.19.0' = { ingressTargetPort: 8000 ingressExternal: true scaleSettings: { - // maxReplicas: enableScaling ? 3 : 1 + // maxReplicas: enableScalability ? 3 : 1 maxReplicas: 1 // maxReplicas set to 1 (not 3) due to multiple agents created per type during WAF deployment minReplicas: 1 - rules: enableScaling + rules: enableScalability ? 
[ { name: 'http-scaler' @@ -1155,7 +1158,7 @@ module containerAppFrontend 'br/public:avm/res/app/container-app:0.19.0' = { value: 'prod' } ] - image: '${acrName}/cmsafrontend:${imageVersion}' + image: '${containerRegistryEndpoint}/cmsafrontend:${imageTag}' name: 'cmsafrontend' resources: { cpu: 1 @@ -1166,9 +1169,9 @@ module containerAppFrontend 'br/public:avm/res/app/container-app:0.19.0' = { ingressTargetPort: 3000 ingressExternal: true scaleSettings: { - maxReplicas: enableScaling ? 3 : 1 + maxReplicas: enableScalability ? 3 : 1 minReplicas: 1 - rules: enableScaling + rules: enableScalability ? [ { name: 'http-scaler' diff --git a/infra/main.json b/infra/main.json index 09e37656..2eb52454 100644 --- a/infra/main.json +++ b/infra/main.json @@ -5,8 +5,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "7222423000870488333" + "version": "0.40.2.10011", + "templateHash": "13589960712112840698" }, "name": "Modernize Your Code Solution Accelerator", "description": "CSA CTO Gold Standard Solution Accelerator for Modernize Your Code. \r\n" @@ -64,7 +64,7 @@ "description": "Required. Location for all AI service resources. This location can be different from the resource group location." } }, - "gptModelCapacity": { + "gptDeploymentCapacity": { "type": "int", "defaultValue": 150, "metadata": { @@ -78,7 +78,7 @@ "description": "Optional. Enable monitoring for the resources. This will enable Application Insights and Log Analytics. Defaults to false." } }, - "enableScaling": { + "enableScalability": { "type": "bool", "defaultValue": false, "metadata": { @@ -141,7 +141,7 @@ "description": "Optional. Enable/Disable usage telemetry for module." } }, - "gptModelDeploymentType": { + "deploymentType": { "type": "string", "defaultValue": "GlobalStandard", "minLength": 1, @@ -157,7 +157,7 @@ "description": "Optional. Name of the GPT model to deploy. Defaults to gpt-4o." 
} }, - "imageVersion": { + "imageTag": { "type": "string", "defaultValue": "latest_2025-11-10_599", "minLength": 1, @@ -165,11 +165,11 @@ "description": "Optional. Set the Image tag. Defaults to latest_2025-11-10_599." } }, - "acrName": { + "containerRegistryEndpoint": { "type": "string", "defaultValue": "cmsacontainerreg.azurecr.io", "metadata": { - "description": "Optional. Azure Container Registry name. Defaults to cmsacontainerreg.azurecr.io" + "description": "Optional. Azure Container Registry endpoint. Defaults to cmsacontainerreg.azurecr.io" } }, "gptModelVersion": { @@ -180,7 +180,7 @@ "description": "Optional. Version of the GPT model to deploy. Defaults to 2024-08-06." } }, - "azureExistingAIProjectResourceId": { + "existingFoundryProjectResourceId": { "type": "string", "defaultValue": "", "metadata": { @@ -227,8 +227,8 @@ "version": "[parameters('gptModelVersion')]" }, "sku": { - "name": "[parameters('gptModelDeploymentType')]", - "capacity": "[parameters('gptModelCapacity')]" + "name": "[parameters('deploymentType')]", + "capacity": "[parameters('gptDeploymentCapacity')]" }, "raiPolicyName": "Microsoft.Default" }, @@ -268,7 +268,7 @@ "proximityPlacementGroupResourceName": "[format('ppg-{0}', variables('solutionSuffix'))]", "virtualMachineResourceName": "[take(format('vm-{0}', variables('solutionSuffix')), 15)]", "aiFoundryAiServicesResourceName": "[format('aif-{0}', variables('solutionSuffix'))]", - "useExistingAiFoundryAiProject": "[not(empty(parameters('azureExistingAIProjectResourceId')))]", + "useExistingAiFoundryAiProject": "[not(empty(parameters('existingFoundryProjectResourceId')))]", "appStorageContainerName": "appstorage", "containerAppsEnvironmentName": "[format('cae-{0}', variables('solutionSuffix'))]" }, @@ -5052,8 +5052,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "3406526791248457038" + "version": "0.40.2.10011", + "templateHash": "4892991135758906801" } }, "definitions": { @@ -12895,10 
+12895,10 @@ }, "dependsOn": [ "applicationInsights", - "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').ods)]", "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').monitor)]", "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').oms)]", "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').storageBlob)]", + "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').ods)]", "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').agentSvc)]", "dataCollectionEndpoint", "logAnalyticsWorkspace", @@ -25549,7 +25549,7 @@ "value": null }, "existingFoundryProjectResourceId": { - "value": "[parameters('azureExistingAIProjectResourceId')]" + "value": "[parameters('existingFoundryProjectResourceId')]" }, "disableLocalAuth": { "value": true @@ -25611,8 +25611,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "16969185198334420434" + "version": "0.40.2.10011", + "templateHash": "665208465907096971" }, "name": "AI Services and Project Module", "description": "This module creates an AI Services resource and an AI Foundry project within it. It supports private networking, OpenAI deployments, and role assignments." 
@@ -26952,8 +26952,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "4140498216793917924" + "version": "0.40.2.10011", + "templateHash": "7604365129625921085" } }, "definitions": { @@ -28667,8 +28667,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "2422737205646151487" + "version": "0.40.2.10011", + "templateHash": "14939823368517410024" } }, "definitions": { @@ -28821,8 +28821,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "11911242767938607365" + "version": "0.40.2.10011", + "templateHash": "13151306134286549002" } }, "definitions": { @@ -29039,8 +29039,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "4140498216793917924" + "version": "0.40.2.10011", + "templateHash": "7604365129625921085" } }, "definitions": { @@ -30754,8 +30754,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "2422737205646151487" + "version": "0.40.2.10011", + "templateHash": "14939823368517410024" } }, "definitions": { @@ -30908,8 +30908,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "11911242767938607365" + "version": "0.40.2.10011", + "templateHash": "13151306134286549002" } }, "definitions": { @@ -31923,8 +31923,8 @@ "dependsOn": [ "aiServices", "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').openAI)]", - "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').aiServices)]", "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').cognitiveServices)]", + "[format('avmPrivateDnsZones[{0}]', variables('dnsZoneIndex').aiServices)]", "virtualNetwork" ] }, @@ -31980,8 +31980,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "522477461329004641" + "version": "0.40.2.10011", + "templateHash": "9525047811797133596" } }, 
"definitions": { @@ -40225,8 +40225,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "15355322017409205910" + "version": "0.40.2.10011", + "templateHash": "9495092499292590311" } }, "definitions": { @@ -44088,8 +44088,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.42.1.51946", - "templateHash": "4242598725709304634" + "version": "0.40.2.10011", + "templateHash": "17583156542522410309" } }, "definitions": { @@ -51258,7 +51258,7 @@ "value": [ { "name": "cmsabackend", - "image": "[format('{0}/cmsabackend:{1}', parameters('acrName'), parameters('imageVersion'))]", + "image": "[format('{0}/cmsabackend:{1}', parameters('containerRegistryEndpoint'), parameters('imageTag'))]", "env": "[concat(createArray(createObject('name', 'COSMOSDB_ENDPOINT', 'value', reference('cosmosDb').outputs.endpoint.value), createObject('name', 'COSMOSDB_DATABASE', 'value', reference('cosmosDb').outputs.databaseName.value), createObject('name', 'COSMOSDB_BATCH_CONTAINER', 'value', reference('cosmosDb').outputs.containerNames.value.batch), createObject('name', 'COSMOSDB_FILE_CONTAINER', 'value', reference('cosmosDb').outputs.containerNames.value.file), createObject('name', 'COSMOSDB_LOG_CONTAINER', 'value', reference('cosmosDb').outputs.containerNames.value.log), createObject('name', 'AZURE_BLOB_ACCOUNT_NAME', 'value', reference('storageAccount').outputs.name.value), createObject('name', 'AZURE_BLOB_CONTAINER_NAME', 'value', variables('appStorageContainerName')), createObject('name', 'MIGRATOR_AGENT_MODEL_DEPLOY', 'value', variables('modelDeployment').name), createObject('name', 'PICKER_AGENT_MODEL_DEPLOY', 'value', variables('modelDeployment').name), createObject('name', 'FIXER_AGENT_MODEL_DEPLOY', 'value', variables('modelDeployment').name), createObject('name', 'SEMANTIC_VERIFIER_AGENT_MODEL_DEPLOY', 'value', variables('modelDeployment').name), createObject('name', 'SYNTAX_CHECKER_AGENT_MODEL_DEPLOY', 'value', 
variables('modelDeployment').name), createObject('name', 'SELECTION_MODEL_DEPLOY', 'value', variables('modelDeployment').name), createObject('name', 'TERMINATION_MODEL_DEPLOY', 'value', variables('modelDeployment').name), createObject('name', 'AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME', 'value', variables('modelDeployment').name), createObject('name', 'AI_PROJECT_ENDPOINT', 'value', reference('aiServices').outputs.aiProjectInfo.value.apiEndpoint), createObject('name', 'AZURE_AI_AGENT_PROJECT_CONNECTION_STRING', 'value', reference('aiServices').outputs.aiProjectInfo.value.apiEndpoint), createObject('name', 'AZURE_AI_AGENT_PROJECT_NAME', 'value', reference('aiServices').outputs.aiProjectInfo.value.name), createObject('name', 'AZURE_AI_AGENT_RESOURCE_GROUP_NAME', 'value', resourceGroup().name), createObject('name', 'AZURE_AI_AGENT_SUBSCRIPTION_ID', 'value', subscription().subscriptionId), createObject('name', 'AZURE_AI_AGENT_ENDPOINT', 'value', reference('aiServices').outputs.aiProjectInfo.value.apiEndpoint), createObject('name', 'AZURE_CLIENT_ID', 'value', reference('appIdentity').outputs.clientId.value), createObject('name', 'APP_ENV', 'value', 'prod'), createObject('name', 'AZURE_BASIC_LOGGING_LEVEL', 'value', 'INFO'), createObject('name', 'AZURE_PACKAGE_LOGGING_LEVEL', 'value', 'WARNING'), createObject('name', 'AZURE_LOGGING_PACKAGES', 'value', '')), if(parameters('enableMonitoring'), createArray(createObject('name', 'APPLICATIONINSIGHTS_INSTRUMENTATION_KEY', 'value', reference('applicationInsights').outputs.instrumentationKey.value), createObject('name', 'APPLICATIONINSIGHTS_CONNECTION_STRING', 'value', reference('applicationInsights').outputs.connectionString.value)), createArray()))]", "resources": { "cpu": 1, @@ -51278,7 +51278,7 @@ "value": { "maxReplicas": 1, "minReplicas": 1, - "rules": "[if(parameters('enableScaling'), createArray(createObject('name', 'http-scaler', 'http', createObject('metadata', createObject('concurrentRequests', 100)))), createArray())]" + 
"rules": "[if(parameters('enableScalability'), createArray(createObject('name', 'http-scaler', 'http', createObject('metadata', createObject('concurrentRequests', 100)))), createArray())]" } }, "tags": { @@ -52851,7 +52851,7 @@ "value": "prod" } ], - "image": "[format('{0}/cmsafrontend:{1}', parameters('acrName'), parameters('imageVersion'))]", + "image": "[format('{0}/cmsafrontend:{1}', parameters('containerRegistryEndpoint'), parameters('imageTag'))]", "name": "cmsafrontend", "resources": { "cpu": 1, @@ -52868,9 +52868,9 @@ }, "scaleSettings": { "value": { - "maxReplicas": "[if(parameters('enableScaling'), 3, 1)]", + "maxReplicas": "[if(parameters('enableScalability'), 3, 1)]", "minReplicas": 1, - "rules": "[if(parameters('enableScaling'), createArray(createObject('name', 'http-scaler', 'http', createObject('metadata', createObject('concurrentRequests', 100)))), createArray())]" + "rules": "[if(parameters('enableScalability'), createArray(createObject('name', 'http-scaler', 'http', createObject('metadata', createObject('concurrentRequests', 100)))), createArray())]" } }, "tags": { diff --git a/infra/main.parameters.json b/infra/main.parameters.json index 43bdc3da..ca5d1cd2 100644 --- a/infra/main.parameters.json +++ b/infra/main.parameters.json @@ -8,98 +8,59 @@ "location": { "value": "${AZURE_LOCATION}" }, - "gptModelDeploymentType": { + "deploymentType": { "value": "${AZURE_ENV_MODEL_DEPLOYMENT_TYPE}" }, "gptModelName": { - "value": "${AZURE_ENV_MODEL_NAME}" + "value": "${AZURE_ENV_GPT_MODEL_NAME}" }, - "gptModelCapacity": { - "value": "${AZURE_ENV_MODEL_CAPACITY}" + "gptDeploymentCapacity": { + "value": "${AZURE_ENV_GPT_MODEL_CAPACITY}" }, "gptModelVersion": { - "value": "${AZURE_ENV_MODEL_VERSION}" + "value": "${AZURE_ENV_GPT_MODEL_VERSION}" }, - "imageVersion": { - "value": "${AZURE_ENV_IMAGETAG=latest}" + "imageTag": { + "value": "${AZURE_ENV_IMAGE_TAG=latest}" }, - "acrName": { - "value": "${AZURE_ENV_ACR_NAME=cmsacontainerreg.azurecr.io}" + 
"containerRegistryEndpoint": { + "value": "${AZURE_ENV_CONTAINER_REGISTRY_ENDPOINT=cmsacontainerreg.azurecr.io}" }, "existingLogAnalyticsWorkspaceId": { - "value": "${AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID}" + "value": "${AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID}" }, - "azureExistingAIProjectResourceId": { - "value": "${AZURE_EXISTING_AI_PROJECT_RESOURCE_ID}" + "existingFoundryProjectResourceId": { + "value": "${AZURE_EXISTING_AIPROJECT_RESOURCE_ID}" }, "secondaryLocation": { - "value": "${AZURE_ENV_COSMOS_SECONDARY_LOCATION}" + "value": "${AZURE_ENV_SECONDARY_LOCATION}" }, "azureAiServiceLocation": { "value": "${AZURE_ENV_AI_SERVICE_LOCATION}" }, - "backendExists": { - "value": "${SERVICE_BACKEND_RESOURCE_EXISTS=false}" + "vmSize": { + "value": "${AZURE_ENV_VM_SIZE}" }, - "backendDefinition": { - "value": { - "settings": [ - { - "name": "", - "value": "${VAR}", - "_comment_name": "The name of the environment variable when running in Azure. If empty, ignored.", - "_comment_value": "The value to provide. This can be a fixed literal, or an expression like ${VAR} to use the value of 'VAR' from the current environment." - }, - { - "name": "", - "value": "${VAR_S}", - "secret": true, - "_comment_name": "The name of the environment variable when running in Azure. If empty, ignored.", - "_comment_value": "The value to provide. This can be a fixed literal, or an expression like ${VAR_S} to use the value of 'VAR_S' from the current environment." - } - ] - } - }, - "frontendExists": { - "value": "${SERVICE_FRONTEND_RESOURCE_EXISTS=false}" - }, - "frontendDefinition": { - "value": { - "settings": [ - { - "name": "", - "value": "${VAR}", - "_comment_name": "The name of the environment variable when running in Azure. If empty, ignored.", - "_comment_value": "The value to provide. This can be a fixed literal, or an expression like ${VAR} to use the value of 'VAR' from the current environment." 
- }, - { - "name": "", - "value": "${VAR_S}", - "secret": true, - "_comment_name": "The name of the environment variable when running in Azure. If empty, ignored.", - "_comment_value": "The value to provide. This can be a fixed literal, or an expression like ${VAR_S} to use the value of 'VAR_S' from the current environment." - } - ] - } + "vmAdminUsername": { + "value": "${AZURE_ENV_VM_ADMIN_USERNAME}" }, - "principalId": { - "value": "${AZURE_PRINCIPAL_ID}" + "vmAdminPassword": { + "value": "${AZURE_ENV_VM_ADMIN_PASSWORD}" }, "aiModelDeployments": { "value": [ { - "name": "gpt-4o", + "name": "${AZURE_ENV_GPT_MODEL_NAME}", "model": { - "name": "gpt-4o", - "version": "2024-08-06", - "format": "OpenAI" + "name": "${AZURE_ENV_GPT_MODEL_NAME}", + "version": "${AZURE_ENV_GPT_MODEL_VERSION}" }, "sku": { - "name": "GlobalStandard", - "capacity": 50 + "name": "${AZURE_ENV_MODEL_DEPLOYMENT_TYPE}", + "capacity": "${AZURE_ENV_GPT_MODEL_CAPACITY}" } } ] } } -} +} \ No newline at end of file diff --git a/infra/main.waf.parameters.json b/infra/main.waf.parameters.json index 5b32c51e..e5ac4968 100644 --- a/infra/main.waf.parameters.json +++ b/infra/main.waf.parameters.json @@ -8,47 +8,44 @@ "location": { "value": "${AZURE_LOCATION}" }, - "gptModelDeploymentType": { + "deploymentType": { "value": "${AZURE_ENV_MODEL_DEPLOYMENT_TYPE}" }, "gptModelName": { - "value": "${AZURE_ENV_MODEL_NAME}" + "value": "${AZURE_ENV_GPT_MODEL_NAME}" }, - "gptModelCapacity": { - "value": "${AZURE_ENV_MODEL_CAPACITY}" + "gptDeploymentCapacity": { + "value": "${AZURE_ENV_GPT_MODEL_CAPACITY}" }, "gptModelVersion": { - "value": "${AZURE_ENV_MODEL_VERSION}" + "value": "${AZURE_ENV_GPT_MODEL_VERSION}" }, - "imageVersion": { - "value": "${AZURE_ENV_IMAGETAG=latest}" + "imageTag": { + "value": "${AZURE_ENV_IMAGE_TAG=latest}" }, - "acrName": { - "value": "${AZURE_ENV_ACR_NAME=cmsacontainerreg.azurecr.io}" + "containerRegistryEndpoint": { + "value": 
"${AZURE_ENV_CONTAINER_REGISTRY_ENDPOINT=cmsacontainerreg.azurecr.io}" }, "existingLogAnalyticsWorkspaceId": { - "value": "${AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID}" + "value": "${AZURE_ENV_EXISTING_LOG_ANALYTICS_WORKSPACE_RID}" }, - "azureExistingAIProjectResourceId": { - "value": "${AZURE_EXISTING_AI_PROJECT_RESOURCE_ID}" + "existingFoundryProjectResourceId": { + "value": "${AZURE_EXISTING_AIPROJECT_RESOURCE_ID}" }, "secondaryLocation": { - "value": "${AZURE_ENV_COSMOS_SECONDARY_LOCATION}" + "value": "${AZURE_ENV_SECONDARY_LOCATION}" }, "azureAiServiceLocation": { "value": "${AZURE_ENV_AI_SERVICE_LOCATION}" }, - "vmAdminUsername": { - "value": "${AZURE_ENV_JUMPBOX_ADMIN_USERNAME}" - }, - "vmAdminPassword": { - "value": "${AZURE_ENV_JUMPBOX_ADMIN_PASSWORD}" - }, "vmSize": { "value": "${AZURE_ENV_VM_SIZE}" }, - "backendExists": { - "value": "${SERVICE_BACKEND_RESOURCE_EXISTS=false}" + "vmAdminUsername": { + "value": "${AZURE_ENV_VM_ADMIN_USERNAME}" + }, + "vmAdminPassword": { + "value": "${AZURE_ENV_VM_ADMIN_PASSWORD}" }, "enableMonitoring": { "value": true @@ -56,68 +53,23 @@ "enablePrivateNetworking": { "value": true }, - "enableScaling": { + "enableScalability": { "value": true }, - "backendDefinition": { - "value": { - "settings": [ - { - "name": "", - "value": "${VAR}", - "_comment_name": "The name of the environment variable when running in Azure. If empty, ignored.", - "_comment_value": "The value to provide. This can be a fixed literal, or an expression like ${VAR} to use the value of 'VAR' from the current environment." - }, - { - "name": "", - "value": "${VAR_S}", - "secret": true, - "_comment_name": "The name of the environment variable when running in Azure. If empty, ignored.", - "_comment_value": "The value to provide. This can be a fixed literal, or an expression like ${VAR_S} to use the value of 'VAR_S' from the current environment." 
- } - ] - } - }, - "frontendExists": { - "value": "${SERVICE_FRONTEND_RESOURCE_EXISTS=false}" - }, - "frontendDefinition": { - "value": { - "settings": [ - { - "name": "", - "value": "${VAR}", - "_comment_name": "The name of the environment variable when running in Azure. If empty, ignored.", - "_comment_value": "The value to provide. This can be a fixed literal, or an expression like ${VAR} to use the value of 'VAR' from the current environment." - }, - { - "name": "", - "value": "${VAR_S}", - "secret": true, - "_comment_name": "The name of the environment variable when running in Azure. If empty, ignored.", - "_comment_value": "The value to provide. This can be a fixed literal, or an expression like ${VAR_S} to use the value of 'VAR_S' from the current environment." - } - ] - } - }, - "principalId": { - "value": "${AZURE_PRINCIPAL_ID}" - }, "aiModelDeployments": { "value": [ { - "name": "gpt-4o", + "name": "${AZURE_ENV_GPT_MODEL_NAME}", "model": { - "name": "gpt-4o", - "version": "2024-08-06", - "format": "OpenAI" + "name": "${AZURE_ENV_GPT_MODEL_NAME}", + "version": "${AZURE_ENV_GPT_MODEL_VERSION}" }, "sku": { - "name": "GlobalStandard", - "capacity": 50 + "name": "${AZURE_ENV_MODEL_DEPLOYMENT_TYPE}", + "capacity": "${AZURE_ENV_GPT_MODEL_CAPACITY}" } } ] } } -} +} \ No newline at end of file diff --git a/infra/main_custom.bicep b/infra/main_custom.bicep index b6998587..398378fd 100644 --- a/infra/main_custom.bicep +++ b/infra/main_custom.bicep @@ -57,13 +57,13 @@ var replicaRegionPairs = { var replicaLocation = replicaRegionPairs[resourceGroup().location] @description('Optional. AI model deployment token capacity. Defaults to 150K tokens per minute.') -param gptModelCapacity int = 150 +param gptDeploymentCapacity int = 150 @description('Optional. Enable monitoring for the resources. This will enable Application Insights and Log Analytics. Defaults to false.') param enableMonitoring bool = false @description('Optional. Enable scaling for the container apps. 
Defaults to false.') -param enableScaling bool = false +param enableScalability bool = false @description('Optional. Enable redundancy for applicable resources. Defaults to false.') param enableRedundancy bool = false @@ -95,7 +95,7 @@ param enableTelemetry bool = true @minLength(1) @description('Optional. GPT model deployment type. Defaults to GlobalStandard.') -param gptModelDeploymentType string = 'GlobalStandard' +param deploymentType string = 'GlobalStandard' @minLength(1) @description('Optional. Name of the GPT model to deploy. Defaults to gpt-4o.') @@ -108,14 +108,14 @@ param frontendImageName string = '' @minLength(1) @description('Optional. Set the Image tag. Defaults to latest') -param imageVersion string = 'latest' +param imageTag string = 'latest' @minLength(1) @description('Optional. Version of the GPT model to deploy. Defaults to 2024-08-06.') param gptModelVersion string = '2024-08-06' @description('Optional. Use this parameter to use an existing AI project resource ID. Defaults to empty string.') -param azureExistingAIProjectResourceId string = '' +param existingFoundryProjectResourceId string = '' @description('Optional. Use this parameter to use an existing Log Analytics workspace resource ID. Defaults to empty string.') param existingLogAnalyticsWorkspaceId string = '' @@ -147,8 +147,8 @@ var modelDeployment = { version: gptModelVersion } sku: { - name: gptModelDeploymentType - capacity: gptModelCapacity + name: deploymentType + capacity: gptDeploymentCapacity } raiPolicyName: 'Microsoft.Default' } @@ -671,7 +671,7 @@ module aiServices 'modules/ai-foundry/aifoundry.bicep' = { projectDescription: 'proj-${solutionSuffix}' logAnalyticsWorkspaceResourceId: enableMonitoring ? 
logAnalyticsWorkspaceResourceId : '' privateNetworking: null // Private endpoint is handled by the standalone aiFoundryPrivateEndpoint module - existingFoundryProjectResourceId: azureExistingAIProjectResourceId + existingFoundryProjectResourceId: existingFoundryProjectResourceId disableLocalAuth: true //Should be set to true for WAF aligned configuration customSubDomainName: 'aif-${solutionSuffix}' apiProperties: { @@ -710,7 +710,7 @@ module aiServices 'modules/ai-foundry/aifoundry.bicep' = { } var aiFoundryAiServicesResourceName = 'aif-${solutionSuffix}' -var useExistingAiFoundryAiProject = !empty(azureExistingAIProjectResourceId) +var useExistingAiFoundryAiProject = !empty(existingFoundryProjectResourceId) module aiFoundryPrivateEndpoint 'br/public:avm/res/network/private-endpoint:0.8.1' = if (enablePrivateNetworking && !useExistingAiFoundryAiProject) { name: take('pep-${aiFoundryAiServicesResourceName}-deployment', 64) @@ -925,7 +925,7 @@ module containerAppBackend 'br/public:avm/res/app/container-app:0.19.0' = { containers: [ { name: 'cmsabackend' - image: !empty(backendImageName) ? backendImageName : 'cmsacontainerreg.azurecr.io/cmsabackend:${imageVersion}' + image: !empty(backendImageName) ? backendImageName : 'cmsacontainerreg.azurecr.io/cmsabackend:${imageTag}' env: concat( [ { @@ -1065,10 +1065,10 @@ module containerAppBackend 'br/public:avm/res/app/container-app:0.19.0' = { ingressTargetPort: 8000 ingressExternal: true scaleSettings: { - // maxReplicas: enableScaling ? 3 : 1 + // maxReplicas: enableScalability ? 3 : 1 maxReplicas: 1 // maxReplicas set to 1 (not 3) due to multiple agents created per type during WAF deployment minReplicas: 1 - rules: enableScaling + rules: enableScalability ? [ { name: 'http-scaler' @@ -1117,7 +1117,7 @@ module containerAppFrontend 'br/public:avm/res/app/container-app:0.19.0' = { value: 'prod' } ] - image: !empty(frontendImageName) ? 
frontendImageName : 'cmsacontainerreg.azurecr.io/cmsafrontend:${imageVersion}' + image: !empty(frontendImageName) ? frontendImageName : 'cmsacontainerreg.azurecr.io/cmsafrontend:${imageTag}' name: 'cmsafrontend' resources: { cpu: 1 @@ -1128,9 +1128,9 @@ module containerAppFrontend 'br/public:avm/res/app/container-app:0.19.0' = { ingressTargetPort: 3000 ingressExternal: true scaleSettings: { - maxReplicas: enableScaling ? 3 : 1 + maxReplicas: enableScalability ? 3 : 1 minReplicas: 1 - rules: enableScaling + rules: enableScalability ? [ { name: 'http-scaler' diff --git a/scripts/validate_bicep_params.py b/scripts/validate_bicep_params.py new file mode 100644 index 00000000..ec627dae --- /dev/null +++ b/scripts/validate_bicep_params.py @@ -0,0 +1,423 @@ +""" +Bicep Parameter Mapping Validator +================================= +Validates that parameter names in *.parameters.json files exactly match +the param declarations in their corresponding Bicep templates. + +Checks performed: + 1. Whitespace – parameter names must have no leading/trailing spaces. + 2. Existence – every JSON parameter must map to a `param` in the Bicep file. + 3. Casing – names must match exactly (case-sensitive). + 4. Orphaned – required Bicep params (no default) missing from the JSON file. + 5. Env vars – parameter values bound to environment variables must use the + AZURE_ENV_* naming convention, except for explicitly allowed + names (for example, AZURE_LOCATION, AZURE_EXISTING_AIPROJECT_RESOURCE_ID). + +Usage: + # Validate a specific pair + python validate_bicep_params.py --bicep main.bicep --params main.parameters.json + + # Auto-discover all *.parameters.json files under infra/ + python validate_bicep_params.py --dir infra + + # CI mode – exit code 1 on any error + python validate_bicep_params.py --dir infra --strict + +Returns exit-code 0 when no errors are found, 1 when errors are found (in --strict mode). 
+""" + +from __future__ import annotations + +import argparse +import json +import re +import sys +from dataclasses import dataclass, field +from pathlib import Path + +# Environment variables exempt from the AZURE_ENV_ naming convention. +_ENV_VAR_EXCEPTIONS = {"AZURE_LOCATION", "AZURE_EXISTING_AIPROJECT_RESOURCE_ID"} + +# --------------------------------------------------------------------------- +# Bicep param parser +# --------------------------------------------------------------------------- + +# Matches lines like: param environmentName string +# param tags resourceInput<...> +# param gptDeploymentCapacity int = 150 +# Ignores commented-out lines (// param ...). +# Captures the type token and the rest of the line so we can detect defaults. +_PARAM_RE = re.compile( + r"^(?!//)[ \t]*param\s+(?P[A-Za-z_]\w*)\s+(?P\S+)(?P.*)", + re.MULTILINE, +) + + +@dataclass +class BicepParam: + name: str + has_default: bool + + +def parse_bicep_params(bicep_path: Path) -> list[BicepParam]: + """Extract all `param` declarations from a Bicep file.""" + text = bicep_path.read_text(encoding="utf-8-sig") + params: list[BicepParam] = [] + for match in _PARAM_RE.finditer(text): + name = match.group("name") + param_type = match.group("type") + rest = match.group("rest") + # A param is optional if it has a default value (= ...) or is nullable (type ends with ?) + has_default = "=" in rest or param_type.endswith("?") + params.append(BicepParam(name=name, has_default=has_default)) + return params + + +# --------------------------------------------------------------------------- +# Parameters JSON parser +# --------------------------------------------------------------------------- + + +def parse_parameters_json(json_path: Path) -> list[str]: + """Return the raw parameter key names (preserving whitespace) from a + parameters JSON file. 
+ """ + text = json_path.read_text(encoding="utf-8-sig") + # azd parameter files may include ${VAR} or ${VAR=default} placeholders inside + # string values. These are valid JSON strings, but we sanitize them so that + # json.loads remains resilient to azd-specific placeholders and any unusual + # default formats. + sanitized = re.sub(r'"\$\{[^}]+\}"', '"__placeholder__"', text) + try: + data = json.loads(sanitized) + except json.JSONDecodeError: + # Fallback: extract keys with regex for resilience. + return _extract_keys_regex(text) + return list(data.get("parameters", {}).keys()) + + +def parse_parameters_env_vars(json_path: Path) -> dict[str, list[str]]: + """Return a mapping of parameter name → list of azd env var names + referenced in its value (e.g. ``${AZURE_ENV_NAME}``). + """ + text = json_path.read_text(encoding="utf-8-sig") + result: dict[str, list[str]] = {} + params = {} + + # Parse the JSON to get the proper parameter structure. + sanitized = re.sub(r'"\$\{([^}]+)\}"', r'"__azd_\1__"', text) + try: + data = json.loads(sanitized) + params = data.get("parameters", {}) + except json.JSONDecodeError: + pass + + # Walk each top-level parameter and scan its entire serialized value + # for ${VAR} references from the original text. + for param_name, param_obj in params.items(): + # Find the raw text block for this parameter in the original file + # by scanning for all ${VAR} patterns in the original value section. 
+ raw_value = json.dumps(param_obj) + # Restore original var references from the sanitized placeholders + for m in re.finditer(r'__azd_([^_].*?)__', raw_value): + var_ref = m.group(1) + # var_ref may contain "=default", extract just the var name + var_name = var_ref.split("=")[0].strip() + if re.match(r'^[A-Za-z_][A-Za-z0-9_]*$', var_name): + result.setdefault(param_name, []).append(var_name) + + return result + + +def _extract_keys_regex(text: str) -> list[str]: + """Fallback key extraction via regex when JSON is non-standard.""" + # Matches the key inside "parameters": { "key": ... } + keys: list[str] = [] + in_params = False + for line in text.splitlines(): + if '"parameters"' in line: + in_params = True + continue + if in_params: + m = re.match(r'\s*"([^"]+)"\s*:', line) + if m: + keys.append(m.group(1)) + return keys + + +# --------------------------------------------------------------------------- +# Validation logic +# --------------------------------------------------------------------------- + +@dataclass +class ValidationIssue: + severity: str # "ERROR" or "WARNING" + param_file: str + bicep_file: str + param_name: str + message: str + + +@dataclass +class ValidationResult: + pair: str + issues: list[ValidationIssue] = field(default_factory=list) + + @property + def has_errors(self) -> bool: + return any(i.severity == "ERROR" for i in self.issues) + + +def validate_pair( + bicep_path: Path, + params_path: Path, +) -> ValidationResult: + """Validate a single (bicep, parameters.json) pair.""" + result = ValidationResult( + pair=f"{params_path.name} -> {bicep_path.name}" + ) + + bicep_params = parse_bicep_params(bicep_path) + bicep_names = {p.name for p in bicep_params} + bicep_names_lower = {p.name.lower(): p.name for p in bicep_params} + required_bicep = {p.name for p in bicep_params if not p.has_default} + + json_keys = parse_parameters_json(params_path) + + seen_json_keys: set[str] = set() + + for raw_key in json_keys: + stripped = raw_key.strip() + + # 
1. Whitespace check + if raw_key != stripped: + result.issues.append(ValidationIssue( + severity="ERROR", + param_file=str(params_path), + bicep_file=str(bicep_path), + param_name=repr(raw_key), + message=( + f"Parameter name has leading/trailing whitespace. " + f"Raw key: {repr(raw_key)}, expected: {repr(stripped)}" + ), + )) + + # 2. Exact match check + if stripped not in bicep_names: + # 3. Case-insensitive near-match + suggestion = bicep_names_lower.get(stripped.lower()) + if suggestion: + result.issues.append(ValidationIssue( + severity="ERROR", + param_file=str(params_path), + bicep_file=str(bicep_path), + param_name=stripped, + message=( + f"Case mismatch: JSON has '{stripped}', " + f"Bicep declares '{suggestion}'." + ), + )) + else: + result.issues.append(ValidationIssue( + severity="ERROR", + param_file=str(params_path), + bicep_file=str(bicep_path), + param_name=stripped, + message=( + f"Parameter '{stripped}' exists in JSON but has no " + f"matching param in the Bicep template." + ), + )) + seen_json_keys.add(stripped) + + # 4. Required Bicep params missing from JSON + for req in sorted(required_bicep - seen_json_keys): + result.issues.append(ValidationIssue( + severity="WARNING", + param_file=str(params_path), + bicep_file=str(bicep_path), + param_name=req, + message=( + f"Required Bicep param '{req}' (no default value) is not " + f"supplied in the parameters file." + ), + )) + + # 5. Env var naming convention – all azd vars should start with AZURE_ENV_ + env_vars = parse_parameters_env_vars(params_path) + for param_name, var_names in sorted(env_vars.items()): + for var in var_names: + if not var.startswith("AZURE_ENV_") and var not in _ENV_VAR_EXCEPTIONS: + result.issues.append(ValidationIssue( + severity="WARNING", + param_file=str(params_path), + bicep_file=str(bicep_path), + param_name=param_name, + message=( + f"Env var '${{{var}}}' does not follow the " + f"AZURE_ENV_ naming convention." 
+ ), + )) + + return result + + +# --------------------------------------------------------------------------- +# Discovery – find (bicep, params) pairs automatically +# --------------------------------------------------------------------------- + +def discover_pairs(infra_dir: Path) -> list[tuple[Path, Path]]: + """For each *.parameters.json, find the matching Bicep file. + + Naming convention: a file like ``main.waf.parameters.json`` is a + variant of ``main.parameters.json`` — the user copies its contents + into ``main.parameters.json`` before running ``azd up``. Both + files should therefore be validated against ``main.bicep``. + + Resolution order: + 1. Exact stem match (e.g. ``foo.parameters.json`` → ``foo.bicep``). + 2. Base-stem match (e.g. ``main.waf.parameters.json`` → ``main.bicep``). + """ + pairs: list[tuple[Path, Path]] = [] + for pf in sorted(infra_dir.rglob("*.parameters.json")): + stem = pf.name.replace(".parameters.json", "") + bicep_candidate = pf.parent / f"{stem}.bicep" + if bicep_candidate.exists(): + pairs.append((bicep_candidate, pf)) + else: + # Try the base stem (first segment before the first dot). 
+ base_stem = stem.split(".")[0] + base_candidate = pf.parent / f"{base_stem}.bicep" + if base_candidate.exists(): + pairs.append((base_candidate, pf)) + else: + print(f" [SKIP] No matching Bicep file for {pf.name}") + return pairs + + +# --------------------------------------------------------------------------- +# Reporting +# --------------------------------------------------------------------------- + +_COLORS = { + "ERROR": "\033[91m", # red + "WARNING": "\033[93m", # yellow + "OK": "\033[92m", # green + "RESET": "\033[0m", +} + + +def print_report(results: list[ValidationResult], *, use_color: bool = True) -> None: + c = _COLORS if use_color else {k: "" for k in _COLORS} + total_errors = 0 + total_warnings = 0 + + for r in results: + errors = [i for i in r.issues if i.severity == "ERROR"] + warnings = [i for i in r.issues if i.severity == "WARNING"] + total_errors += len(errors) + total_warnings += len(warnings) + + if not r.issues: + print(f"\n{c['OK']}[PASS]{c['RESET']} {r.pair}") + elif errors: + print(f"\n{c['ERROR']}[FAIL]{c['RESET']} {r.pair}") + else: + print(f"\n{c['WARNING']}[WARN]{c['RESET']} {r.pair}") + + for issue in r.issues: + tag = ( + f"{c['ERROR']}ERROR{c['RESET']}" + if issue.severity == "ERROR" + else f"{c['WARNING']}WARN {c['RESET']}" + ) + print(f" {tag} {issue.param_name}: {issue.message}") + + print(f"\n{'='*60}") + print(f"Total: {total_errors} error(s), {total_warnings} warning(s)") + if total_errors == 0: + print(f"{c['OK']}All parameter mappings are valid.{c['RESET']}") + else: + print(f"{c['ERROR']}Parameter mapping issues detected!{c['RESET']}") + + +# --------------------------------------------------------------------------- +# CLI +# --------------------------------------------------------------------------- + +def main() -> int: + parser = argparse.ArgumentParser( + description="Validate Bicep ↔ parameters.json parameter mappings.", + ) + parser.add_argument( + "--bicep", + type=Path, + help="Path to a specific Bicep 
template.", + ) + parser.add_argument( + "--params", + type=Path, + help="Path to a specific parameters JSON file.", + ) + parser.add_argument( + "--dir", + type=Path, + help="Directory to scan for *.parameters.json files (auto-discovers pairs).", + ) + parser.add_argument( + "--strict", + action="store_true", + help="Exit with code 1 if any errors are found.", + ) + parser.add_argument( + "--no-color", + action="store_true", + help="Disable colored output (useful for CI logs).", + ) + parser.add_argument( + "--json-output", + type=Path, + help="Write results as JSON to the given file path.", + ) + args = parser.parse_args() + + results: list[ValidationResult] = [] + + if args.bicep and args.params: + results.append(validate_pair(args.bicep, args.params)) + elif args.dir: + pairs = discover_pairs(args.dir) + if not pairs: + print(f"No (bicep, parameters.json) pairs found under {args.dir}") + return 0 + for bicep_path, params_path in pairs: + results.append(validate_pair(bicep_path, params_path)) + else: + parser.error("Provide either --bicep/--params or --dir.") + + print_report(results, use_color=not args.no_color) + + # Optional JSON output for CI artifact consumption + if args.json_output: + json_data = [] + for r in results: + for issue in r.issues: + json_data.append({ + "severity": issue.severity, + "paramFile": issue.param_file, + "bicepFile": issue.bicep_file, + "paramName": issue.param_name, + "message": issue.message, + }) + args.json_output.parent.mkdir(parents=True, exist_ok=True) + args.json_output.write_text( + json.dumps(json_data, indent=2), encoding="utf-8" + ) + print(f"\nJSON report written to {args.json_output}") + + has_errors = any(r.has_errors for r in results) + return 1 if args.strict and has_errors else 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/validate_model_deployment_quota.ps1 b/scripts/validate_model_deployment_quota.ps1 index 08773d1b..3db32e33 100644 --- a/scripts/validate_model_deployment_quota.ps1 
+++ b/scripts/validate_model_deployment_quota.ps1 @@ -77,10 +77,10 @@ Write-Host "šŸŽÆ Active Subscription: $(az account show --query '[name, id]' --o $QuotaAvailable = $true foreach ($deployment in $aiModelDeployments) { - $name = if ($env:AZURE_ENV_MODEL_NAME) { $env:AZURE_ENV_MODEL_NAME } else { $deployment.name } - $model = if ($env:AZURE_ENV_MODEL_NAME) { $env:AZURE_ENV_MODEL_NAME } else { $deployment.model.name } + $name = if ($env:AZURE_ENV_GPT_MODEL_NAME) { $env:AZURE_ENV_GPT_MODEL_NAME } else { $deployment.name } + $model = if ($env:AZURE_ENV_GPT_MODEL_NAME) { $env:AZURE_ENV_GPT_MODEL_NAME } else { $deployment.model.name } $type = if ($env:AZURE_ENV_MODEL_DEPLOYMENT_TYPE) { $env:AZURE_ENV_MODEL_DEPLOYMENT_TYPE } else { $deployment.sku.name } - $capacity = if ($env:AZURE_ENV_MODEL_CAPACITY) { $env:AZURE_ENV_MODEL_CAPACITY } else { $deployment.sku.capacity } + $capacity = if ($env:AZURE_ENV_GPT_MODEL_CAPACITY) { $env:AZURE_ENV_GPT_MODEL_CAPACITY } else { $deployment.sku.capacity } Write-Host "`nšŸ” Validating model deployment: $name ..." 
& .\scripts\validate_model_quota.ps1 -Location $Location -Model $model -Capacity $capacity -DeploymentType $type diff --git a/scripts/validate_model_deployment_quota.sh b/scripts/validate_model_deployment_quota.sh index bd098bdf..323244ae 100644 --- a/scripts/validate_model_deployment_quota.sh +++ b/scripts/validate_model_deployment_quota.sh @@ -88,10 +88,10 @@ echo "šŸŽÆ Active Subscription: $(az account show --query '[name, id]' --output quotaAvailable=true while IFS= read -r deployment; do - name=${AZURE_ENV_MODEL_NAME:-$(echo "$deployment" | jq -r '.name')} - model=${AZURE_ENV_MODEL_NAME:-$(echo "$deployment" | jq -r '.model.name')} + name=${AZURE_ENV_GPT_MODEL_NAME:-$(echo "$deployment" | jq -r '.name')} + model=${AZURE_ENV_GPT_MODEL_NAME:-$(echo "$deployment" | jq -r '.model.name')} type=${AZURE_ENV_MODEL_DEPLOYMENT_TYPE:-$(echo "$deployment" | jq -r '.sku.name')} - capacity=${AZURE_ENV_MODEL_CAPACITY:-$(echo "$deployment" | jq -r '.sku.capacity')} + capacity=${AZURE_ENV_GPT_MODEL_CAPACITY:-$(echo "$deployment" | jq -r '.sku.capacity')} echo "šŸ” Validating model deployment: $name ..." 
./scripts/validate_model_quota.sh --location "$LOCATION" --model "$model" --capacity "$capacity" --deployment-type "$type" diff --git a/src/backend/sql_agents/convert_script.py b/src/backend/sql_agents/convert_script.py index 10b8dbae..b6cb7ec8 100644 --- a/src/backend/sql_agents/convert_script.py +++ b/src/backend/sql_agents/convert_script.py @@ -65,7 +65,8 @@ async def convert_script( # orchestrate the chat current_migration = "No migration" - while True: + is_complete: bool = False + while not is_complete: await comms_manager.group_chat.add_chat_message( ChatMessageContent(role=AuthorRole.USER, content=source_script) ) @@ -99,6 +100,7 @@ async def convert_script( AuthorRole(response.role), ) current_migration = None + is_complete = True break case AgentType.SYNTAX_CHECKER.value: result = SyntaxCheckerResponse.model_validate_json( @@ -269,10 +271,11 @@ async def convert_script( FileResult.ERROR, ), ) + is_complete = True break if comms_manager.group_chat.is_complete: - break + is_complete = True migrated_query = current_migration diff --git a/src/frontend/package-lock.json b/src/frontend/package-lock.json index 0b4e4668..1d9855cd 100644 --- a/src/frontend/package-lock.json +++ b/src/frontend/package-lock.json @@ -49,7 +49,7 @@ "globals": "^17.3.0", "rollup": "^4.59.0", "rollup-plugin-dts": "^6.3.0", - "vite": "^7.3.1", + "vite": "^7.3.2", "vite-plugin-svgr": "^4.5.0" } }, @@ -7284,9 +7284,9 @@ "license": "ISC" }, "node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", "license": "MIT", "engines": { "node": ">=12" @@ -8441,9 +8441,9 @@ } }, "node_modules/vite": { - "version": 
"7.3.1", - "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", - "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.2.tgz", + "integrity": "sha512-Bby3NOsna2jsjfLVOHKes8sGwgl4TT0E6vvpYgnAYDIF/tie7MRaFthmKuHx1NSXjiTueXH3do80FMQgvEktRg==", "license": "MIT", "dependencies": { "esbuild": "^0.27.0", diff --git a/src/frontend/package.json b/src/frontend/package.json index a0b5ba49..88f6c95f 100644 --- a/src/frontend/package.json +++ b/src/frontend/package.json @@ -51,12 +51,13 @@ "globals": "^17.3.0", "rollup": "^4.59.0", "rollup-plugin-dts": "^6.3.0", - "vite": "^7.3.1", + "vite": "^7.3.2", "vite-plugin-svgr": "^4.5.0" }, "overrides": { "flatted": "3.4.2", "minimatch": "3.1.3", - "js-yaml": "4.1.1" + "js-yaml": "4.1.1", + "picomatch": "4.0.4" } } diff --git a/src/frontend/src/pages/modernizationPage.tsx b/src/frontend/src/pages/modernizationPage.tsx index 53f128b5..eb22ed07 100644 --- a/src/frontend/src/pages/modernizationPage.tsx +++ b/src/frontend/src/pages/modernizationPage.tsx @@ -497,10 +497,11 @@ const ModernizationPage = () => { const [fileId, setFileId] = React.useState(""); const [expandedSections, setExpandedSections] = React.useState([]); const [allFilesCompleted, setAllFilesCompleted] = useState(false); + const [progressPercentage, setProgressPercentage] = useState(0); const [isZipButtonDisabled, setIsZipButtonDisabled] = useState(true); const [fileLoading, setFileLoading] = useState(false); const [lastActivityTime, setLastActivityTime] = useState(Date.now()); - const [pageLoadTime] = useState(Date.now()); + //const [pageLoadTime] = useState(Date.now()); // Fetch file content when a file is selected useEffect(() => { @@ -514,18 +515,9 @@ const ModernizationPage = () => { if (!selectedFile || !selectedFile.translatedCode) { setFileLoading(true); const newFileUpdate = await 
fetchFileFromAPI(selectedFile?.fileId || ""); - setFiles((prevFiles) => - prevFiles.map((file) => - file.fileId === selectedFile?.fileId - ? { - ...file, - code: newFileUpdate.content, - translatedCode: newFileUpdate.translated_content, - } - : file - ) - ); setFileLoading(false); + } else { + } } catch (err) { @@ -603,18 +595,9 @@ const ModernizationPage = () => { fetchBatchData(batchId); }, [batchId]); - // Listen for startProcessing completion and navigate to batch view - useEffect(() => { - if (batchState && !batchState.loading && batchState.status === "Processing completed") { - console.log("Start processing API completed successfully - processing is done!"); - - // Check if we have the response with batch_id that matches current batchId - if (batchState.batchId === batchId) { - console.log("Processing completed for current batch, navigating to batch view page"); - navigate(`/batch-view/${batchId}`); - } - } - }, [batchState.loading, batchState.status, batchState.batchId, batchId, navigate]); + // Do NOT navigate based on Redux startProcessing state. + // The start-processing API may return 504 even if backend work is ongoing. + // Navigation is ONLY triggered by actual file completion via WebSocket/polling. const handleDownloadZip = async () => { if (batchId) { @@ -811,37 +794,17 @@ const ModernizationPage = () => { const latestBatch = await fetchBatchSummary(batchId!); setBatchSummary(latestBatch); - // Check if all files are in terminal states OR if the batch itself is marked as completed + // Only complete when all files reach terminal states. 
const allFilesDone = latestBatch.files.every(file => ["completed", "failed", "error"].includes(file.status?.toLowerCase() || "") ); - - // Also check if batch status indicates completion (for cases where some files remain queued) - const batchCompleted = latestBatch.status?.toLowerCase() === "completed" || - latestBatch.status?.toLowerCase() === "failed"; - - // Special handling for stuck processing files - if no completed files and long time passed - const hasProcessingFiles = latestBatch.files.some(file => - file.status?.toLowerCase() === "in_process" - ); - const hasCompletedFiles = latestBatch.files.some(file => - file.status?.toLowerCase() === "completed" - ); - const timeSinceLastActivity = Date.now() - lastActivityTime; - const likelyStuckProcessing = hasProcessingFiles && - !hasCompletedFiles && - timeSinceLastActivity > 60000; // 60 seconds of no activity - - // Consider processing done if either all files are terminal OR batch is marked complete OR files appear stuck - const processingComplete = allFilesDone || batchCompleted || likelyStuckProcessing; + + const processingComplete = allFilesDone; if (processingComplete) { console.log("Processing complete detected:", { allFilesDone, - batchCompleted, - likelyStuckProcessing, - batchStatus: latestBatch.status, - timeSinceActivity: timeSinceLastActivity + batchStatus: latestBatch.status }); setAllFilesCompleted(true); const hasUsableFile = latestBatch.files.some(file => @@ -868,8 +831,8 @@ const ModernizationPage = () => { return updated; }); - // Navigate to batch view page when processing is complete - console.log("Processing complete (either all files done or batch completed), navigating to batch view page"); + // Navigate only after all files have reached terminal states. 
+ console.log("Processing complete (all files done), navigating to batch view page"); navigate(`/batch-view/${batchId}`); } } catch (err) { @@ -950,17 +913,8 @@ useEffect(() => { file.id === "summary" || // skip summary ["completed", "failed", "error"].includes(file.status?.toLowerCase() || "") ); - - // Also check if we have at least one completed file and no files currently processing - const hasCompletedFiles = files.some(file => - file.id !== "summary" && file.status === "completed" - ); - const hasProcessingFiles = files.some(file => - file.id !== "summary" && file.status === "in_process" - ); - - // Consider done if all terminal OR (has completed files and no processing files) - const effectivelyDone = areAllFilesTerminal || (hasCompletedFiles && !hasProcessingFiles); + + const effectivelyDone = areAllFilesTerminal; if (files.length > 1 && effectivelyDone && !allFilesCompleted) { console.log("Files processing appears complete, checking batch status"); @@ -1013,55 +967,34 @@ useEffect(() => { }; }, [handleWebSocketMessage]); - // Set a timeout for initial loading - if still loading after 30 seconds, show a warning message + // Set a timeout for initial loading - if no progress after 30 seconds, show error useEffect(() => { const loadingTimeout = setTimeout(() => { - if (showLoading) { + if (progressPercentage < 5 && showLoading) { setLoadingError('Processing is taking longer than expected. You can continue waiting or try again later.'); } }, 30000); return () => clearTimeout(loadingTimeout); - }, [showLoading]); + }, [progressPercentage, showLoading]); - // Add timeout mechanism to navigate if no activity for 30 seconds + // Poll summary status during inactivity, but do not force completion/navigation by timeout. 
useEffect(() => { const checkInactivity = setInterval(() => { const timeSinceLastActivity = Date.now() - lastActivityTime; const hasCompletedFiles = files.some(file => file.id !== "summary" && file.status === "completed" ); - const hasProcessingFiles = files.some(file => - file.id !== "summary" && file.status === "in_process" - ); - const nonSummaryFiles = files.filter(f => f.id !== "summary"); - // If we have completed files and no activity for 30 seconds, check if we should navigate + // If we have completed files and no activity for 30 seconds, refresh status. if (hasCompletedFiles && timeSinceLastActivity > 30000 && !allFilesCompleted) { console.log("No activity for 30 seconds with completed files, checking final status"); updateSummaryStatus(); } - - // Special case: If only harmful files that are stuck in processing for 60+ seconds - if (nonSummaryFiles.length > 0 && - hasProcessingFiles && - !hasCompletedFiles && - timeSinceLastActivity > 60000 && - !allFilesCompleted) { - console.log("Files stuck in processing for 60+ seconds, likely failed - checking batch status"); - updateSummaryStatus(); - } - - // Ultimate fallback: If on page for 2+ minutes with no completion, force navigation - const timeSincePageLoad = Date.now() - pageLoadTime; - if (timeSincePageLoad > 120000 && !allFilesCompleted && nonSummaryFiles.length > 0) { - console.log("Page loaded for 2+ minutes without completion, forcing navigation to batch view"); - navigate(`/batch-view/${batchId}`); - } }, 5000); // Check every 5 seconds return () => clearInterval(checkInactivity); - }, [lastActivityTime, files, allFilesCompleted, updateSummaryStatus, pageLoadTime, navigate, batchId]); + }, [lastActivityTime, files, allFilesCompleted, updateSummaryStatus, navigate, batchId]); useEffect(() => {