---
name: Test Automation Content Processing-v2

on:
  workflow_call:
    inputs:
      CP_WEB_URL:
        required: true
        type: string
        description: "Web URL for Content Processing"
      TEST_SUITE:
        required: false
        type: string
        # No leading space inside the quotes: the run steps compare this value
        # against the literal "GoldenPath-Testing", so a default of
        # " GoldenPath-Testing" would never match and every run would silently
        # fall back to the smoke suite.
        default: "GoldenPath-Testing"
        description: "Test suite to run: 'Smoke-Testing', 'GoldenPath-Testing'"
    secrets:
      EMAILNOTIFICATION_LOGICAPP_URL_TA:
        required: false
        description: "Logic App URL for email notifications"
      # NOTE(review): the "Login to Azure" step also reads AZURE_CLIENT_ID,
      # AZURE_CLIENT_SECRET, AZURE_TENANT_ID and AZURE_SUBSCRIPTION_ID.
      # A reusable workflow only receives secrets that are declared here or
      # passed by the caller via `secrets: inherit` — confirm callers inherit,
      # or declare those secrets explicitly in this section.
    outputs:
      TEST_SUCCESS:
        description: "Whether tests passed"
        value: ${{ jobs.test.outputs.TEST_SUCCESS }}
      TEST_REPORT_URL:
        description: "URL to test report artifact"
        value: ${{ jobs.test.outputs.TEST_REPORT_URL }}

# Workflow-level shortcuts used by the steps below.
env:
  url: ${{ inputs.CP_WEB_URL }}
  accelerator_name: "Content Processing"
  test_suite: ${{ inputs.TEST_SUITE }}
31+
jobs:
  # Runs the Playwright/pytest e2e suite against the deployed app, with up to
  # three attempts (30s and 60s back-off between retries).
  test:
    runs-on: ubuntu-latest
    outputs:
      # Overall pass: any one of the (up to) three attempts succeeded.
      TEST_SUCCESS: ${{ steps.test1.outcome == 'success' || steps.test2.outcome == 'success' || steps.test3.outcome == 'success' }}
      TEST_REPORT_URL: ${{ steps.upload_report.outputs.artifact-url }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5

      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          # Quoted so YAML does not read the version as a float, and with no
          # leading space — setup-python cannot resolve " 3.13".
          python-version: "3.13"

      # NOTE(review): passing the client secret with `-p` puts it on the
      # process command line; prefer azure/login with OIDC federated
      # credentials. These secrets must be declared under workflow_call
      # `secrets:` or passed by the caller via `secrets: inherit`.
      - name: Login to Azure
        run: |
          az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
          az account set --subscription ${{ secrets.AZURE_SUBSCRIPTION_ID }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r tests/e2e-test/requirements.txt

      - name: Ensure browsers are installed
        run: python -m playwright install --with-deps chromium

      # Fail fast if the caller passed an empty URL.
      - name: Validate URL
        run: |
          if [ -z "${{ env.url }}" ]; then
            echo "ERROR: No URL provided for testing"
            exit 1
          fi
          echo "Testing URL: ${{ env.url }}"
          echo "Test Suite: ${{ env.test_suite }}"

      # Poll the target URL up to 10 times, 30 seconds apart, before testing;
      # exits 1 (failing the job) if the app never answers.
      - name: Wait for Application to be Ready
        run: |
          echo "Waiting for application to be ready at ${{ env.url }}"
          max_attempts=10
          attempt=1

          while [ $attempt -le $max_attempts ]; do
            echo "Attempt $attempt: Checking if application is ready..."
            if curl -f -s "${{ env.url }}" > /dev/null; then
              echo "Application is ready!"
              break
            fi

            if [ $attempt -eq $max_attempts ]; then
              echo "Application is not ready after $max_attempts attempts"
              exit 1
            fi

            echo "Application not ready, waiting 30 seconds..."
            sleep 30
            attempt=$((attempt + 1))
          done

      # Attempt 1 of up to 3; continue-on-error lets the retry steps run.
      - name: Run tests(1)
        id: test1
        run: |
          if [ "${{ env.test_suite }}" == "GoldenPath-Testing" ]; then
            xvfb-run pytest -m gp --headed --html=report/report.html --self-contained-html
          else
            xvfb-run pytest --headed --html=report/report.html --self-contained-html
          fi
        working-directory: tests/e2e-test
        continue-on-error: true

      - name: Sleep for 30 seconds
        if: ${{ steps.test1.outcome == 'failure' }}
        run: sleep 30s
        shell: bash

      # Attempt 2, only after a first failure.
      - name: Run tests(2)
        id: test2
        if: ${{ steps.test1.outcome == 'failure' }}
        run: |
          if [ "${{ env.test_suite }}" == "GoldenPath-Testing" ]; then
            xvfb-run pytest -m gp --headed --html=report/report.html --self-contained-html
          else
            xvfb-run pytest --headed --html=report/report.html --self-contained-html
          fi
        working-directory: tests/e2e-test
        continue-on-error: true

      - name: Sleep for 60 seconds
        if: ${{ steps.test2.outcome == 'failure' }}
        run: sleep 60s
        shell: bash

      # Final attempt: no continue-on-error, so a third failure fails the job.
      - name: Run tests(3)
        id: test3
        if: ${{ steps.test2.outcome == 'failure' }}
        run: |
          if [ "${{ env.test_suite }}" == "GoldenPath-Testing" ]; then
            xvfb-run pytest -m gp --headed --html=report/report.html --self-contained-html
          else
            xvfb-run pytest --headed --html=report/report.html --self-contained-html
          fi
        working-directory: tests/e2e-test

      # Upload the HTML report even when tests failed (but not when cancelled).
      - name: Upload test report
        id: upload_report
        uses: actions/upload-artifact@v4
        if: ${{ !cancelled() }}
        with:
          name: test-report
          path: tests/e2e-test/report/*

      - name: Generate E2E Test Summary
        if: always()
        run: |
          # Determine test suite type for title
          if [ "${{ env.test_suite }}" == "GoldenPath-Testing" ]; then
            echo "## 🧪 E2E Test Job Summary : Golden Path Testing" >> $GITHUB_STEP_SUMMARY
          else
            echo "## 🧪 E2E Test Job Summary : Smoke Testing" >> $GITHUB_STEP_SUMMARY
          fi
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "| Field | Value |" >> $GITHUB_STEP_SUMMARY
          echo "|-------|--------|" >> $GITHUB_STEP_SUMMARY

          # Determine overall test result
          OVERALL_SUCCESS="${{ steps.test1.outcome == 'success' || steps.test2.outcome == 'success' || steps.test3.outcome == 'success' }}"
          if [[ "$OVERALL_SUCCESS" == "true" ]]; then
            echo "| **Job Status** | ✅ Success |" >> $GITHUB_STEP_SUMMARY
          else
            echo "| **Job Status** | ❌ Failed |" >> $GITHUB_STEP_SUMMARY
          fi

          echo "| **Target URL** | [${{ env.url }}](${{ env.url }}) |" >> $GITHUB_STEP_SUMMARY
          echo "| **Test Suite** | \`${{ env.test_suite }}\` |" >> $GITHUB_STEP_SUMMARY
          echo "| **Test Report** | [Download Artifact](${{ steps.upload_report.outputs.artifact-url }}) |" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          echo "### 📋 Test Execution Details" >> $GITHUB_STEP_SUMMARY
          echo "| Attempt | Status | Notes |" >> $GITHUB_STEP_SUMMARY
          echo "|---------|--------|-------|" >> $GITHUB_STEP_SUMMARY
          echo "| **Test Run 1** | ${{ steps.test1.outcome == 'success' && '✅ Passed' || '❌ Failed' }} | Initial test execution |" >> $GITHUB_STEP_SUMMARY

          # Retry rows are only shown when the preceding attempt failed;
          # skipped retries render as ⏸️ Skipped.
          if [[ "${{ steps.test1.outcome }}" == "failure" ]]; then
            echo "| **Test Run 2** | ${{ steps.test2.outcome == 'success' && '✅ Passed' || steps.test2.outcome == 'failure' && '❌ Failed' || '⏸️ Skipped' }} | Retry after 30s delay |" >> $GITHUB_STEP_SUMMARY
          fi

          if [[ "${{ steps.test2.outcome }}" == "failure" ]]; then
            echo "| **Test Run 3** | ${{ steps.test3.outcome == 'success' && '✅ Passed' || steps.test3.outcome == 'failure' && '❌ Failed' || '⏸️ Skipped' }} | Final retry after 60s delay |" >> $GITHUB_STEP_SUMMARY
          fi

          echo "" >> $GITHUB_STEP_SUMMARY

          if [[ "$OVERALL_SUCCESS" == "true" ]]; then
            echo "### ✅ Test Results" >> $GITHUB_STEP_SUMMARY
            echo "- End-to-end tests completed successfully" >> $GITHUB_STEP_SUMMARY
            echo "- Application is functioning as expected" >> $GITHUB_STEP_SUMMARY
          else
            echo "### ❌ Test Results" >> $GITHUB_STEP_SUMMARY
            echo "- All test attempts failed" >> $GITHUB_STEP_SUMMARY
            echo "- Check the e2e-test/test job for detailed error information" >> $GITHUB_STEP_SUMMARY
          fi