diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 5a74771d..f7ce875e 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -2,4 +2,4 @@
# Each line is a file pattern followed by one or more owners.
# These owners will be the default owners for everything in the repo.
-* @Avijit-Microsoft @Roopan-Microsoft @Prajwal-Microsoft @dongbumlee @Vinay-Microsoft @aniaroramsft @toherman-msft @nchandhi
+* @Avijit-Microsoft @Roopan-Microsoft @Prajwal-Microsoft @dongbumlee @Vinay-Microsoft @aniaroramsft @toherman-msft @nchandhi @dgp10801
diff --git a/Deployment/appconfig/kernelmemory/appsettings.Development.json.template b/Deployment/appconfig/kernelmemory/appsettings.Development.json.template
index 51ab3977..5cbcceb7 100644
--- a/Deployment/appconfig/kernelmemory/appsettings.Development.json.template
+++ b/Deployment/appconfig/kernelmemory/appsettings.Development.json.template
@@ -117,28 +117,18 @@
"BucketName": ""
},
"AzureAISearch": {
- "Auth": "ApiKey",
- "Endpoint": "",
- "APIKey": "",
+ // Auth and Endpoint are configured in Azure App Configuration
"UseHybridSearch": true
},
"AzureAIDocIntel": {
- "Auth": "ApiKey",
- "APIKey": "",
- "Endpoint": ""
+ // Auth and Endpoint are configured in Azure App Configuration
},
"AzureBlobs": {
- "Auth": "AzureIdentity",
- "Account": "",
- "Container": "",
- "ConnectionString": "",
+ // Auth, Account, and Container are configured in Azure App Configuration
"EndpointSuffix": "core.windows.net"
},
"AzureOpenAIEmbedding": {
- "Auth": "ApiKey",
- "Endpoint": "",
- "APIKey": "",
- "Deployment": "",
+ // Auth, Endpoint, and Deployment are configured in Azure App Configuration
"MaxTokenTotal": 8191,
"EmbeddingDimensions": null,
"MaxEmbeddingBatchSize": 1,
@@ -146,18 +136,13 @@
"APIType": "EmbeddingGeneration"
},
"AzureOpenAIText": {
- "Auth": "ApiKey",
- "Endpoint": "",
- "APIKey": "",
- "Deployment": "",
+ // Auth, Endpoint, and Deployment are configured in Azure App Configuration
"MaxTokenTotal": 128000,
"APIType": "ChatCompletion",
"MaxRetries": 10
},
"AzureQueues": {
- "Auth": "AzureIdentity",
- "Account": "",
- "ConnectionString": "",
+ // Auth and Account are configured in Azure App Configuration
"EndpointSuffix": "core.windows.net",
"PollDelayMsecs": 100,
"FetchBatchSize": 3,
diff --git a/azure.yaml b/azure.yaml
index 9db890d4..2cd94ddf 100644
--- a/azure.yaml
+++ b/azure.yaml
@@ -5,6 +5,6 @@ name: document-knowledge-mining-solution-accelerator
requiredVersions:
azd: '>= 1.18.0'
-metadata:
- template: document-knowledge-mining-solution-accelerator@1.0
- name: document-knowledge-mining-solution-accelerator@1.0
+# metadata:
+# template: document-knowledge-mining-solution-accelerator@1.0
+# name: document-knowledge-mining-solution-accelerator@1.0
diff --git a/docs/AzureAIModelQuotaSettings.md b/docs/AzureAIModelQuotaSettings.md
new file mode 100644
index 00000000..73bf6bce
--- /dev/null
+++ b/docs/AzureAIModelQuotaSettings.md
@@ -0,0 +1,9 @@
+# How to Check & Update AI Model Quota
+
+Please follow [quota check instructions guide](./QuotaCheck.md) to check quota availability by region.
+
+1. **Navigate** to the [Azure AI Foundry portal](https://ai.azure.com/).
+2. **Select** the AI Project associated with this accelerator.
+3. **Go to** the `Management Center` from the bottom-left navigation menu.
+4. Select `Request Quota`
+5. Request more quota by filling out the 'Request for Quota Increase' form, or delete any unused model deployments as needed.
diff --git a/docs/DeleteResourceGroup.md b/docs/DeleteResourceGroup.md
new file mode 100644
index 00000000..ac0f932d
--- /dev/null
+++ b/docs/DeleteResourceGroup.md
@@ -0,0 +1,56 @@
+# Deleting Resources After a Failed Deployment in Azure Portal
+
+If your deployment fails and you need to clean up the resources manually, follow these steps in the Azure Portal.
+
+---
+
+## **1. Navigate to the Azure Portal**
+
+1. Open [Azure Portal](https://portal.azure.com/).
+2. Sign in with your Azure account.
+
+---
+
+## **2. Find the Resource Group**
+
+1. In the search bar at the top, type **"Resource groups"** and select it.
+2. Locate the **resource group** associated with the failed deployment.
+
+
+
+
+
+---
+
+## **3. Delete the Resource Group**
+
+1. Click on the **resource group name** to open it.
+2. Click the **Delete resource group** button at the top.
+
+
+
+3. Type the resource group name in the confirmation box and click **Delete**.
+
+📌 **Note:** Deleting a resource group will remove all resources inside it.
+
+---
+
+## **4. Delete Individual Resources (If Needed)**
+
+If you don't want to delete the entire resource group, follow these steps:
+
+1. Open **Azure Portal** and go to the **Resource groups** section.
+2. Click on the specific **resource group**.
+3. Select the **resource** you want to delete (e.g., App Service, Storage Account).
+4. Click **Delete** at the top.
+
+
+
+---
+
+## **5. Verify Deletion**
+
+- After a few minutes, refresh the **Resource groups** page.
+- Ensure the deleted resource or group no longer appears.
+
+📌 **Tip:** If a resource fails to delete, check if it's **locked** under the **Locks** section and remove the lock.
diff --git a/docs/DeploymentGuide.md b/docs/DeploymentGuide.md
index e0622e95..24ca629b 100644
--- a/docs/DeploymentGuide.md
+++ b/docs/DeploymentGuide.md
@@ -1,181 +1,244 @@
# Deployment Guide
-> This repository presents a solution and reference architecture for the Knowledge Mining solution accelerator. Please note that the **provided code serves as a demonstration and is not an officially supported Microsoft offering**.
->
-> For additional security, please review how to [use Azure API Management with microservices deployed in Azure Kubernetes Service](https://learn.microsoft.com/en-us/azure/api-management/api-management-kubernetes).
+## Overview
-## Contents
-* [Prerequisites](#prerequisites)
-* [Deployment Options](#deployment-options--steps)
-* [Deployment](#deployment-steps)
-* [Next Steps](#next-steps)
+This guide walks you through deploying the Document Knowledge Mining Solution Accelerator to Azure. The deployment process takes approximately 8-10 minutes for the default Development/Testing configuration and includes both infrastructure provisioning and application setup.
-## Prerequisites
+🆘 **Need Help?** If you encounter any issues during deployment, check our [Troubleshooting Guide](./TroubleShootingSteps.md) for solutions to common problems.
-1. **[PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell?view=powershell-7.4)** (v5.1+) - available for Windows, macOS, and Linux.
+## Step 1: Prerequisites & Setup
-1. **[Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli-windows?tabs=azure-cli)** (v1.18.0+) - command-line tool for managing Azure resources.
+### 1.1 Azure Account Requirements
- 2a. **kubectl** - command-line tool for interacting with Kubernetes clusters.
- In PowerShell, run the following command:
+Ensure you have access to an [Azure subscription](https://azure.microsoft.com/free/) with the following permissions:
- az aks install-cli
+| **Required Permission/Role** | **Scope** | **Purpose** |
+|------------------------------|-----------|-------------|
+| **Contributor** | Subscription level | Create and manage Azure resources |
+| **User Access Administrator** | Subscription level | Manage user access and role assignments |
+| **Role Based Access Control** | Subscription/Resource Group level | Configure RBAC permissions |
+| **App Registration Creation** | Azure Active Directory | Create and configure authentication |
+**🔍 How to Check Your Permissions:**
- 2b. **aks-preview** - extension for Azure CLI to manage Azure Kubernetes Service.
- In PowerShell, run the following command:
+1. Go to [Azure Portal](https://portal.azure.com/)
+2. Navigate to **Subscriptions** (search for "subscriptions" in the top search bar)
+3. Click on your target subscription
+4. In the left menu, click **Access control (IAM)**
+5. Scroll down to see the table with your assigned roles - you should see:
+ - **Contributor**
+ - **User Access Administrator**
+ - **Role Based Access Control Administrator** (or similar RBAC role)
-
- az extension add --name aks-preview
-
-1. [Helm](https://helm.sh/docs/intro/install/) - package manager for Kubernetes
+**For App Registration permissions:**
+1. Go to **Microsoft Entra ID** → **Manage** → **App registrations**
+2. Try clicking **New registration**
+3. If you can access this page, you have the required permissions
+4. Cancel without creating an app registration
-1. [Docker Desktop](https://docs.docker.com/get-docker/): service to containerize and publish into Azure Container Registry. Please make sure Docker desktop is running before executing Deployment script.
+📖 **Detailed Setup:** Follow [Azure Account Set Up](./AzureAccountSetUp.md) for complete configuration.
-1. **Azure Access** - subscription-level `Owner` or `User Access Administrator` role required.
+### 1.2 Check Service Availability & Quota
-1. **Microsoft.Compute Registration** - Ensure that **Microsoft.Compute** is registered in your Azure subscription by following these steps:
- 1. Log in to your **Azure Portal**.
- 2. Navigate to your **active Azure subscription**.
- 3. Go to **Settings** and select **Resource Providers**.
- 4. Check for Microsoft.Compute and click Register if it is not already registered.
-
-
+⚠️ **CRITICAL:** Before proceeding, ensure your chosen region has all required services available:
+
+**Required Azure Services:**
+- [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/ai-services/openai/)
+- [Azure AI Search](https://learn.microsoft.com/en-us/azure/search/)
+- [Azure AI Document Intelligence](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/)
+- [Azure Container Registry](https://learn.microsoft.com/en-us/azure/container-registry/)
+- [Azure Kubernetes Service](https://learn.microsoft.com/en-us/azure/aks/)
+- [Azure App Service](https://learn.microsoft.com/en-us/azure/app-service/)
+- [Azure Blob Storage](https://learn.microsoft.com/en-us/azure/storage/blobs/)
+- [Azure Queue Storage](https://learn.microsoft.com/en-us/azure/storage/queues/)
+- [Azure Cosmos DB](https://learn.microsoft.com/en-us/azure/cosmos-db/)
+
+**Recommended Regions:** Central US, Australia East, UK South, Japan East
+
+🔍 **Check Availability:** Use [Azure Products by Region](https://azure.microsoft.com/en-us/explore/global-infrastructure/products-by-region/) to verify service availability.
+
+### 1.3 Quota Check (Optional)
+
+💡 **RECOMMENDED:** Check your Azure OpenAI quota availability before deployment for optimal planning.
+
+📖 **Follow:** [Quota Check Instructions](./QuotaCheck.md) to ensure sufficient capacity.
-## Deployment Options & Steps
+**Recommended Configuration:**
+- **Default:** 200k tokens (minimum)
+- **Optimal:** 500k tokens (recommended for best performance)
-### Sandbox or WAF Aligned Deployment Options
+> **Note:** When you run `azd up`, the deployment will automatically show you regions with available quota, so this pre-check is optional but helpful for planning purposes. You can customize these settings later in [Step 3.3: Advanced Configuration](#33-advanced-configuration-optional).
-The [`infra`](../infra) folder of the Multi Agent Solution Accelerator contains the [`main.bicep`](../infra/main.bicep) Bicep script, which defines all Azure infrastructure components for this solution.
+📖 **Adjust Quota:** Follow [Azure AI Model Quota Settings](./AzureAIModelQuotaSettings.md) if needed.
+
+## Step 2: Deployment Environment
+
+### Local Environment
+
+**Required Tools:**
+- [PowerShell 7.0+](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell)
+- [Azure CLI (az)](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli-windows?tabs=azure-cli)
+- [Azure Developer CLI (azd) 1.18.0+](https://aka.ms/install-azd)
+- [Helm](https://helm.sh/docs/intro/install/)
+- [Docker Desktop](https://www.docker.com/products/docker-desktop/)
+- [Git](https://git-scm.com/downloads)
+- [Microsoft.Compute Registration](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#azure-portal)
+ Ensure that **Microsoft.Compute** is registered in your Azure subscription by following these steps:
+ - Log in to your **Azure Portal**.
+ - Navigate to your **active Azure subscription**.
+ - Go to **Settings** and select **Resource Providers**.
+ - Check for Microsoft.Compute and click Register if it is not already registered.
+
+
+
+**Setup Steps:**
+1. Install all required deployment tools listed above
+2. Clone the repository:
+ ```shell
+ azd init -t microsoft/Document-Knowledge-Mining-Solution-Accelerator
+ ```
+3. Open the project folder in your terminal
+4. Proceed to [Step 3: Configure Deployment Settings](#step-3-configure-deployment-settings)
+
+**PowerShell Users:** If you encounter script execution issues, run:
+```powershell
+Set-ExecutionPolicy -Scope Process -ExecutionPolicy Bypass
+```
-By default, the `azd up` command uses the [`main.parameters.json`](../infra/main.parameters.json) file to deploy the solution. This file is pre-configured for a **sandbox environment** — ideal for development and proof-of-concept scenarios, with minimal security and cost controls for rapid iteration.
+## Step 3: Configure Deployment Settings
-For **production deployments**, the repository also provides [`main.waf.parameters.json`](../infra/main.waf.parameters.json), which applies a [Well-Architected Framework (WAF) aligned](https://learn.microsoft.com/en-us/azure/well-architected/) configuration. This option enables additional Azure best practices for reliability, security, cost optimization, operational excellence, and performance efficiency, such as:
+Review the configuration options below. You can customize any settings that meet your needs, or leave them as defaults to proceed with a standard deployment.
- - Enhanced network security (e.g., Network protection with private endpoints)
- - Stricter access controls and managed identities
- - Logging, monitoring, and diagnostics enabled by default
- - Resource tagging and cost management recommendations
+### 3.1 Choose Deployment Type (Optional)
-**How to choose your deployment configuration:**
+| **Aspect** | **Development/Testing (Default)** | **Production** |
+|------------|-----------------------------------|----------------|
+| **Configuration File** | `main.parameters.json` (sandbox) | Copy `main.waf.parameters.json` to `main.parameters.json` |
+| **Security Controls** | Minimal (for rapid iteration) | Enhanced (production best practices) |
+| **Cost** | Lower costs | Cost optimized |
+| **Use Case** | POCs, development, testing | Production workloads |
+| **Framework** | Basic configuration | [Well-Architected Framework](https://learn.microsoft.com/en-us/azure/well-architected/) |
+| **Features** | Core functionality | Reliability, security, operational excellence |
-* Use the default `main.parameters.json` file for a **sandbox/dev environment**
-* For a **WAF-aligned, production-ready deployment**, copy the contents of `main.waf.parameters.json` into `main.parameters.json` before running `azd up`
+**To use production configuration:**
----
+Copy the contents from the production configuration file to your main parameters file:
-### VM Credentials Configuration
+1. Navigate to the `infra` folder in your project
+2. Open `main.waf.parameters.json` in a text editor (like Notepad, VS Code, etc.)
+3. Select all content (Ctrl+A) and copy it (Ctrl+C)
+4. Open `main.parameters.json` in the same text editor
+5. Select all existing content (Ctrl+A) and paste the copied content (Ctrl+V)
+6. Save the file (Ctrl+S)
-By default, the solution sets the VM administrator username and password from environment variables.
-If you do not configure these values, a randomly generated GUID will be used for both the username and password.
+### 3.2 Set VM Credentials (Optional - Production Deployment Only)
-To set your own VM credentials before deployment, use:
+> **Note:** This section only applies if you selected **Production** deployment type in section 3.1. VMs are not deployed in the default Development/Testing configuration.
-```sh
+By default, random GUIDs are generated for VM credentials. To set custom credentials:
+
+```shell
azd env set AZURE_ENV_VM_ADMIN_USERNAME
azd env set AZURE_ENV_VM_ADMIN_PASSWORD
```
-> [!TIP]
-> Always review and adjust parameter values (such as region, capacity, security settings and log analytics workspace configuration) to match your organization’s requirements before deploying. For production, ensure you have sufficient quota and follow the principle of least privilege for all identities and role assignments.
+### 3.3 Advanced Configuration (Optional)
+
+Configurable Parameters
-> [!IMPORTANT]
-> The WAF-aligned configuration is under active development. More Azure Well-Architected recommendations will be added in future updates.
+You can customize various deployment settings before running `azd up`, including Azure regions, AI model configurations (deployment type, version, capacity), container registry settings, and resource names.
-## Deployment Steps
+📖 **Complete Guide:** See [Parameter Customization Guide](./CustomizingAzdParameters.md) for the full list of available parameters and their usage.
-Consider the following settings during your deployment to modify specific settings:
+
- Configurable Deployment Settings
-
-When you start the deployment, most parameters will have **default values**, but you can update the following settings [here](../docs/CustomizingAzdParameters.md):
-
-| **Setting** | **Description** | **Default value** |
-| ------------------------------ | ------------------------------------------------------------------------------------ | ----------------- |
-| **Environment Name** | Used as a prefix for all resource names to ensure uniqueness across environments. | dkm |
-| **Azure Region** | Location of the Azure resources. Controls where the infrastructure will be deployed. | australiaeast |
-| **Model Deployment Type** | Defines the deployment type for the AI model (e.g., Standard, GlobalStandard). | GlobalStandard |
-| **GPT Model Name** | Specifies the name of the GPT model to be deployed. | gpt-4.1 |
-| **GPT Model Version** | Version of the GPT model to be used for deployment. | 2024-08-06 |
-| **GPT Model Capacity** | Sets the GPT model capacity. | 100K |
-| **Embedding Model** | Sets the embedding model. | text-embedding-3-large |
-| **Embedding Model Capacity** | Set the capacity for **embedding models** (in thousands). | 100k |
-| **Enable Telemetry** | Enables telemetry for monitoring and diagnostics. | true |
-| **Existing Log Analytics Workspace** | To reuse an existing Log Analytics Workspace ID instead of creating a new one. | *(none)* |
+Reuse Existing Resources
-
+To optimize costs and integrate with your existing Azure infrastructure, you can configure the solution to reuse compatible resources already deployed in your subscription.
-### Deploying with AZD
+**Supported Resources for Reuse:**
-Once you've opened the project [locally](#local-environment), you can deploy it to Azure by following these steps:
+- **Log Analytics Workspace:** Integrate with your existing monitoring infrastructure by reusing an established Log Analytics workspace for centralized logging and monitoring. [Configuration Guide](./re-use-log-analytics.md)
-1. Clone the repository or download the project code via command-line:
+**Key Benefits:**
+- **Cost Optimization:** Eliminate duplicate resource charges
+- **Operational Consistency:** Maintain unified monitoring and AI infrastructure
+- **Faster Deployment:** Skip resource creation for existing compatible services
+- **Simplified Management:** Reduce the number of resources to manage and monitor
- ```cmd
- git clone https://github.com/microsoft/Document-Knowledge-Mining-Solution-Accelerator
- ```
+**Important Considerations:**
+- Ensure existing resources meet the solution's requirements and are in compatible regions
+- Review access permissions and configurations before reusing resources
+- Consider the impact on existing workloads when sharing resources
- Open the cloned repository in Visual Studio Code and connect to the development container.
+
- ```cmd
- code .
- ```
+## Step 4: Deploy the Solution
-2. Login to Azure:
+💡 **Before You Start:** If you encounter any issues during deployment, check our [Troubleshooting Guide](./TroubleShootingSteps.md) for common solutions.
- ```shell
- azd auth login
- ```
+### 4.1 Authenticate with Azure
- #### To authenticate with Azure Developer CLI (`azd`), use the following command with your **Tenant ID**:
+```shell
+azd auth login
+```
- ```sh
- azd auth login --tenant-id
- ```
+**For specific tenants:**
+```shell
+azd auth login --tenant-id
+```
-3. Provision and deploy all the resources:
+> **Finding Tenant ID:**
+ > 1. Open the [Azure Portal](https://portal.azure.com/).
+ > 2. Navigate to **Microsoft Entra ID** from the left-hand menu.
+ > 3. Under the **Overview** section, locate the **Tenant ID** field. Copy the value displayed.
- ```shell
- azd up
- ```
- > **Note:** This solution accelerator requires **Azure Developer CLI (azd) version 1.18.0 or higher**. Please ensure you have the latest version installed before proceeding with deployment. [Download azd here](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd).
+### 4.2 Start Deployment
-4. Provide an `azd` environment name (e.g., "ckmapp").
-5. Select a subscription from your Azure account and choose a location that has quota for all the resources.
- -- This deployment will take *7-10 minutes* to provision the resources in your account and set up the solution with sample data.
- - If you encounter an error or timeout during deployment, changing the location may help, as there could be availability constraints for the resources.
+```shell
+azd up
+```
+
+**During deployment, you'll be prompted for:**
+1. **Environment name** (e.g., "dkmsa") - Must be 3-16 characters long, alphanumeric only
+2. **Azure subscription** selection
+3. **Azure AI Deployment Location** - Select a region with available Azure OpenAI Service quota for GPT-4.1-mini and text-embedding-3-large models
+4. **Primary location** - Select the region where your infrastructure resources will be deployed
+5. **Resource group** selection (create new or use existing)
-6. If you are done trying out the application, you can delete the resources by running `azd down`.
- > **Note:** If you deployed with `enableRedundancy=true` and Log Analytics workspace replication is enabled, you must first disable replication before running `azd down` else resource group delete will fail. Follow the steps in [Handling Log Analytics Workspace Deletion with Replication Enabled](./LogAnalyticsReplicationDisable.md), wait until replication returns `false`, then run `azd down`.
+**Expected Duration:** 8-10 minutes for default configuration
-### Post Deployment Script:
+**⚠️ Deployment Issues:** If you encounter errors or timeouts, try a different region as there may be capacity constraints. For detailed error solutions, see our [Troubleshooting Guide](./TroubleShootingSteps.md).
+## Step 5: Post Deployment Configuration
The post deployment process is very straightforward and simplified via a single [deployment script](../Deployment/resourcedeployment.ps1) that completes in approximately 20-30 minutes:
### Automated Deployment Steps:
-1. Configure Kubernetes Infrastructure.
-2. Update Kubernetes configuration files with the FQDN, Container Image Path and Email address for the certificate management.
-3. Configure AKS (deploy Cert Manager, Ingress Controller) and Deploy Images on the kubernetes cluster.
-4. Docker build and push container images to Azure Container Registry.
-5. Display the deployment result and following instructions.
+- Configure Kubernetes Infrastructure.
+- Update Kubernetes configuration files with the FQDN, Container Image Path and Email address for the certificate management.
+- Configure AKS (deploy Cert Manager, Ingress Controller) and Deploy Images on the kubernetes cluster.
+- Docker build and push container images to Azure Container Registry.
+- Display the deployment result and following instructions.
+
+### 5.1 Execute the Script
-Open PowerShell, change directory where you code cloned, then run the deploy script:
+#### 5.1.1 Open PowerShell, change to the directory where you cloned the code, then run the deploy script:
+```shell
+cd .\Deployment\
```
-cd .\Deployment\
-```
-#### Choose the appropriate command based on your deployment method:
+#### 5.1.2 Choose the appropriate command based on your deployment method:
**If you deployed using `azd up` command:**
-```
+```shell
.\resourcedeployment.ps1
```
**If you deployed using custom templates, ARM/Bicep deployments, or `az deployment group` commands:**
-```
+```shell
.\resourcedeployment.ps1 -ResourceGroupName ""
```
@@ -185,22 +248,32 @@ cd .\Deployment\
If you run into issue with PowerShell script file not being digitally signed, you can execute below command:
-```
+```shell
powershell.exe -ExecutionPolicy Bypass -File ".\resourcedeployment.ps1"
```
-You will be prompted for the following parameters with this Screen :
+#### 5.1.3 You will be prompted for the following parameters with this Screen:
+
-1. **Email** - used for issuing certificates in Kubernetes clusters from the [Let's Encrypt](https://letsencrypt.org/) service. Email address should be valid.
+##### 5.1.3.1 **Email** - used for issuing certificates in Kubernetes clusters from the [Let's Encrypt](https://letsencrypt.org/) service. Email address should be valid.
-2. You will be prompted to Login, Select a account and proceed to Login.
+##### 5.1.3.2 You will be prompted to login. Select an account and proceed to login.
+
+##### 5.1.3.3 **GO!** - The post-deployment script configures the Azure infrastructure, then compiles the application code and publishes it to the Kubernetes cluster.
+
+##### 5.1.3.4 Deployment Complete
+
+#### 🥳🎉 First, congrats on finishing Deployment!
+Review the final console message, then raise your models' TPM rate to get better performance.
+You can check the Application URL from the final console message.
+Don't miss this URL information. This is the application's endpoint URL and should be used for your data importing process.
-3. **GO !** - Post Deployment Script executes Azure Infrastructure configuration, Application code compile and publish into Kubernetes Cluster.
+
-### Manual Deployment Steps:
+### 5.2 Manual Deployment Steps:
**Create Content Filter** - Please follow below steps
> * Navigate to project in Azure OpenAI, then go to Azure AI Foundry, select Safety + security
> * Click on Create Content Filter and set the filters to a high threshold for the following categories:
@@ -210,58 +283,175 @@ You will be prompted for the following parameters with this Screen :
> * Please select the checkbox of profanity
> * Leave all other configurations at their default settings and click on create
-### Deployment Complete
-#### 🥳🎉 First, congrats on finishing Deployment!
-Let's check the message and configure your model's TPM rate higher to get better performance.
-You can check the Application URL from the final console message.
-Don't miss this Url information. This is the application's endpoint URL and it should be used for your data importing process.
-
-
-
-## Next Steps
-
-### 1. Configure Azure OpenAI Rate Limits
+### 5.3 Configure Azure OpenAI Rate Limits
> **Capacity Note:**
> * The deployment script creates models with a setting of 1 token per minute (TPM) rate limit.
> * Faster performance can be achieved by increasing the TPM limit with Azure AI Foundry.
> * Capacity varies for [regional quota limits](https://learn.microsoft.com/en-us/azure/ai-services/openai/quotas-limits#regional-quota-limits) as well as for [provisioned throughput](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/provisioned-throughput).
-> * As a starting point, we recommend the following quota threshold be set up for this service run.
+> * As a starting point, we recommend the following quota threshold be set up for this service run.
| Model Name | TPM Threshold |
|------------------------|---------------|
| GPT-4.1-mini | 100K TPM |
| text-embedding-3-large | 200K TPM |
-
> **⚠️ Warning:** **Insufficient quota can cause failures during the upload process.** Please ensure you have the recommended capacity or request for additional capacity before start uploading the files.
+#### 5.3.1 Browse to the project in Azure AI Foundry, and select **each of the 2 models** within the `Deployments` menu:
-1. Browse to the project in Azure AI Foundry, and select **each of the 2 models** within the `Deployments` menu:
-2. Increase the TPM value for **each model** for faster report generation:
+#### 5.3.2 Increase the TPM value for **each model** for faster report generation:
+
-### 2. Data Uploading and Processing
+### 5.4 Data Uploading and Processing
+
After increasing the TPM limit for each model, let's upload and process the sample documents.
+
+Execute this command:
+
+
+
+### 5.5 Verify Deployment
+
+1. Access your application using the URL from [Step 5.1.3.4](#5134-deployment-complete)
+2. Confirm the application loads successfully
+3. Verify you can sign in with your authenticated account
+
+### 5.6 Test the Application
+
+Follow the detailed workflow to test the solution's document chat functionality:
+
+**Quick Test Steps:**
+1. Login to the application using the URL from [Step 5.1.3.4](#5134-deployment-complete)
+2. In "Chat with documents" options, ask any questions related to uploaded documents
+3. Click on "Details" of any document and go to "chat" option
+4. Ask questions according to document content
+5. Review the responses
+
+📖 **Sample Questions:** For a comprehensive list of test scenarios and example questions, see [Sample Questions Guide](./SampleQuestions.md)
+
+## Step 6: Clean Up (Optional)
+
+### Remove All Resources
+```shell
+azd down
```
-cd .\Deployment\
+> **Note:** If you deployed with `enableRedundancy=true` and Log Analytics workspace replication is enabled, you must first disable replication before running `azd down` else resource group delete will fail. Follow the steps in [Handling Log Analytics Workspace Deletion with Replication Enabled](./LogAnalyticsReplicationDisable.md), wait until replication returns `false`, then run `azd down`.
+
+### Manual Cleanup (if needed)
+If deployment fails or you need to clean up manually:
+- Follow [Delete Resource Group Guide](./DeleteResourceGroup.md)
+
+## Managing Multiple Environments
+
+### Recover from Failed Deployment
+
+If your deployment failed or encountered errors, here are the steps to recover:
+
+
+Recover from Failed Deployment
+
+**If your deployment failed or encountered errors:**
+
+1. **Try a different region:** Create a new environment and select a different Azure region during deployment
+2. **Clean up and retry:** Use `azd down` to remove failed resources, then `azd up` to redeploy
+3. **Check troubleshooting:** Review [Troubleshooting Guide](./TroubleShootingSteps.md) for specific error solutions
+4. **Fresh start:** Create a completely new environment with a different name
+
+**Example Recovery Workflow:**
+```shell
+# Remove failed deployment (optional)
+azd down
+
+# Create new environment (3-16 chars, alphanumeric only)
+azd env new dkmsaretry
+
+# Deploy with different settings/region
+azd up
```
-Execute uploadfiles.ps1 file with **-EndpointUrl** parameter as URL in console message.
+
+### Creating a New Environment
+
+If you need to deploy to a different region, test different configurations, or create additional environments:
+
+
+Create a New Environment
+
+**Create Environment Explicitly:**
+```shell
+# Create a new named environment (3-16 characters, alphanumeric only)
+azd env new
+
+# Select the new environment
+azd env select
+
+# Deploy to the new environment
+azd up
```
-.\uploadfiles.ps1 -EndpointUrl https://kmgs..cloudapp.azure.com
+
+**Example:**
+```shell
+# Create a new environment for production (valid: 3-16 chars)
+azd env new dkmsaprod
+
+# Switch to the new environment
+azd env select dkmsaprod
+
+# Deploy with fresh settings
+azd up
```
-If you run into issue with PowerShell script file not being digitally signed, you can execute below command:
+> **Environment Name Requirements:**
+> - **Length:** 3-16 characters
+> - **Characters:** Alphanumeric only (letters and numbers)
+> - **Valid examples:** `dkmsa`, `test123`, `myappdev`, `prod2024`
+> - **Invalid examples:** `co` (too short), `my-very-long-environment-name` (too long), `test_env` (underscore not allowed), `myapp-dev` (hyphen not allowed)
+
+
+
+Switch Between Environments
+
+**List Available Environments:**
+```shell
+azd env list
+```
+
+**Switch to Different Environment:**
+```shell
+azd env select
```
-powershell.exe -ExecutionPolicy Bypass -File ".\uploadfiles.ps1" -EndpointUrl https://kmgs..cloudapp.azure.com
+
+**View Current Environment:**
+```shell
+azd env get-values
```
+
+
+### Best Practices for Multiple Environments
+
+- **Use descriptive names:** `dkmsadev`, `dkmsaprod`, `dkmsatest` (remember: 3-16 chars, alphanumeric only)
+- **Different regions:** Deploy to multiple regions for testing quota availability
+- **Separate configurations:** Each environment can have different parameter settings
+- **Clean up unused environments:** Use `azd down` to remove environments you no longer need
+
## Next Steps
-Now that you've completed your deployment, you can start using the solution.
-To help you get started, here are some [Sample Questions](./SampleQuestions.md) you can follow to try it out.
+Now that your deployment is complete and tested, explore these resources to enhance your experience:
+
+📚 **Learn More:**
+- [Technical Architecture](./TechnicalArchitecture.md) - Understand the system design and components
+- [Local Development Setup](./LocalDevelopmentSetup.md) - Set up your local development environment
+- [Sample Questions](./SampleQuestions.md) - Example prompts to test the solution
+
+## Need Help?
+
+- 🐛 **Issues:** Check [Troubleshooting Guide](./TroubleShootingSteps.md)
+- 💬 **Support:** Review [Support Guidelines](../SUPPORT.md)
+- 🔧 **Development:** See [Contributing Guide](../CONTRIBUTING.md)
\ No newline at end of file
diff --git a/docs/LocalDevelopmentSetup.md b/docs/LocalDevelopmentSetup.md
new file mode 100644
index 00000000..b92c0fbc
--- /dev/null
+++ b/docs/LocalDevelopmentSetup.md
@@ -0,0 +1,514 @@
+# Local Development Setup Guide
+
+This guide provides comprehensive instructions for setting up the Document Knowledge Mining Solution Accelerator for local development on Windows.
+
+## Important Setup Notes
+
+### Multi-Service Architecture
+
+This application consists of **three separate services** that run independently:
+
+1. **Kernel Memory** - Document processing and knowledge mining service
+2. **Backend API** - REST API server for the frontend
+3. **Frontend** - React-based user interface
+
+> **⚠️ Critical: Each service must run in its own terminal/console window**
+>
+> - **Do NOT close terminals/windows** while services are running
+> - Open **Kernel Memory** and **Backend API** in Visual Studio.
+> - Open **Frontend** in Visual Studio Code.
+> - Each service will occupy its terminal and show live logs
+>
+> **Terminal/Window Organization:**
+> - **Visual Studio window 1**: Kernel Memory - Service runs on port 9001
+> - **Visual Studio window 2**: Backend API - HTTP server runs on port 52190
+> - **Visual Studio Code Terminal**: Frontend - Development server on port 5900
+
+### Path Conventions
+
+**All paths in this guide are relative to the repository root directory:**
+
+```bash
+Document-Knowledge-Mining-Solution-Accelerator/ ← Repository root (start here)
+├── App/
+│ ├── backend-api/
+│ │ ├── Microsoft.GS.DPS.sln ← Backend solution file
+│ │ └── Microsoft.GS.DPS.Host/
+│ │ └── appsettings.Development.json ← Backend API config
+│ ├── kernel-memory/
+│ │ ├── KernelMemory.sln ← Kernel Memory solution file
+│ │ └── service/
+│ │ └── Service/
+│ │ └── appsettings.Development.json ← Kernel Memory config
+│ └── frontend-app/
+│ ├── src/ ← React/TypeScript source
+│ ├── package.json ← Frontend dependencies
+│ └── .env ← Frontend config file
+├── Deployment/
+│ └── appconfig/ ← Configuration templates location
+│ ├── aiservice/
+│ │ └── appsettings.Development.json.template ← Backend API template
+│ ├── frontapp/
+│ │ └── .env.template ← Frontend template
+│ └── kernelmemory/
+│ └── appsettings.Development.json.template ← Kernel Memory template
+├── infra/
+│ ├── main.bicep ← Main infrastructure template
+│ └── main.parameters.json ← Deployment parameters
+└── docs/ ← Documentation (you are here)
+```
+
+**Before starting any step, ensure you are in the repository root directory:**
+
+```bash
+# Verify you're in the correct location
+Get-Location # Windows PowerShell - should show: ...\Document-Knowledge-Mining-Solution-Accelerator
+
+# If not, navigate to repository root
+cd path\to\Document-Knowledge-Mining-Solution-Accelerator
+```
+
+### Configuration Files
+
+This project uses two separate `appsettings.Development.json` files and one `.env` file with different configuration requirements:
+
+- **Kernel Memory**: `App/kernel-memory/service/Service/appsettings.Development.json` - Azure App Configuration URL
+- **Backend API**: `App/backend-api/Microsoft.GS.DPS.Host/appsettings.Development.json` - Azure App Configuration URL
+- **Frontend**: `App/frontend-app/.env` - Frontend API endpoint configuration
+
+Configuration templates are located in the `Deployment/appconfig/` directory.
+
+## Step 1: Prerequisites Install Required Tools
+Install these tools before you start:
+- [Visual Studio](https://visualstudio.microsoft.com/)
+- [Visual Studio Code](https://code.visualstudio.com/)
+
+### Windows Development
+
+#### Option 1: Native Windows (PowerShell)
+```powershell
+# .NET SDK (LTS .NET 8)
+winget install Microsoft.DotNet.SDK.8
+
+# Azure CLI (required for authentication and resource management)
+winget install Microsoft.AzureCLI
+
+# Yarn (via Corepack) – install Node.js LTS first
+winget install OpenJS.NodeJS.LTS
+corepack enable
+corepack prepare yarn@stable --activate
+
+# Verify
+dotnet --version
+az --version
+yarn --version
+```
+
+#### Option 2: Windows with WSL2 (Recommended)
+
+```powershell
+# Install WSL2 with Ubuntu (run in PowerShell as Administrator)
+wsl --install -d Ubuntu
+
+# Once inside Ubuntu, install .NET SDK, Azure CLI, and Node.js LTS
+# (use apt or Microsoft package repos depending on preference)
+
+# Install Azure CLI in Ubuntu
+curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+
+# Verify installations
+dotnet --version
+az --version
+node -v
+yarn --version
+```
+### Clone the Repository
+
+```bash
+git clone https://github.com/microsoft/Document-Knowledge-Mining-Solution-Accelerator.git
+cd Document-Knowledge-Mining-Solution-Accelerator
+```
+
+---
+
+## Step 2: Azure Authentication Setup
+
+Before configuring services, authenticate with Azure:
+
+```bash
+# Login to Azure CLI
+az login
+
+# Set your subscription
+az account set --subscription "your-subscription-id"
+
+# Verify authentication
+az account show
+```
+
+### Get Azure App Configuration URL
+
+Navigate to your resource group and select the resource with prefix `appcs-` to get the configuration URL:
+
+```bash
+APP_CONFIGURATION_URL=https://[Your app configuration service name].azconfig.io
+```
+
+For reference, see the image below:
+
+
+### Required Azure RBAC Permissions
+
+To run the application locally, your Azure account needs the following role assignments on the deployed resources:
+
+> **Note:**
+> These roles are required only for local debugging and development. For production, ensure proper RBAC policies are applied.
+
+You can assign these roles using either Azure CLI (Option 1) or Azure Portal (Option 2).
+
+#### Option 1: Assign Roles via Azure CLI
+
+```bash
+# Get your principal ID
+PRINCIPAL_ID=$(az ad signed-in-user show --query id -o tsv)
+```
+
+**App Configuration Data Reader** – Required for reading application configuration
+
+```bash
+# Assign App Configuration Data Reader role
+az role assignment create \
+ --assignee $PRINCIPAL_ID \
+ --role "App Configuration Data Reader" \
+  --scope "/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.AppConfiguration/configurationStores/<app-configuration-name>"
+```
+
+#### Other Required Roles
+Depending on the features you use, you may also need:
+
+**Storage Blob Data Contributor** – For Azure Storage operations
+
+```bash
+# Assign Storage Blob Data Contributor role
+az role assignment create \
+ --assignee $PRINCIPAL_ID \
+ --role "Storage Blob Data Contributor" \
+  --scope "/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.Storage/storageAccounts/<storage-account-name>"
+```
+
+**Storage Queue Data Contributor** – For queue-based processing
+
+```bash
+# Assign Storage Queue Data Contributor role
+az role assignment create \
+ --assignee $PRINCIPAL_ID \
+ --role "Storage Queue Data Contributor" \
+  --scope "/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.Storage/storageAccounts/<storage-account-name>"
+```
+
+**Search Index Data Contributor** – For Azure AI Search operations
+
+```bash
+# Assign Search Index Data Contributor role
+az role assignment create \
+ --assignee $PRINCIPAL_ID \
+ --role "Search Index Data Contributor" \
+  --scope "/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.Search/searchServices/<search-service-name>"
+```
+
+**Search Service Contributor** – For managing Azure AI Search service
+
+```bash
+# Assign Search Service Contributor role
+az role assignment create \
+ --assignee $PRINCIPAL_ID \
+ --role "Search Service Contributor" \
+  --scope "/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.Search/searchServices/<search-service-name>"
+```
+
+**Cognitive Services OpenAI User** – For Azure OpenAI access
+
+```bash
+# Assign Cognitive Services OpenAI User role
+az role assignment create \
+ --assignee $PRINCIPAL_ID \
+ --role "Cognitive Services OpenAI User" \
+  --scope "/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.CognitiveServices/accounts/<openai-account-name>"
+```
+
+**Cognitive Services User** – For Azure AI Document Intelligence access
+
+```bash
+# Assign Cognitive Services User role
+az role assignment create \
+ --assignee $PRINCIPAL_ID \
+ --role "Cognitive Services User" \
+  --scope "/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.CognitiveServices/accounts/<document-intelligence-account-name>"
+```
+
+#### Option 2: Assign Roles via Azure Portal
+
+If you prefer or need to use the Azure Portal instead of CLI commands:
+
+1. Sign in to the [Azure Portal](https://portal.azure.com).
+2. Navigate to your **Resource Group** where services are deployed.
+3. For each resource, assign the required roles:
+
+**App Configuration**
+ - Go to **Access control (IAM)** → **Add role assignment**
+ - Assign role: `App Configuration Data Reader`
+ - Assign to: Your user account
+
+**Storage Account**
+ - Go to **Access control (IAM)** → **Add role assignment**
+ - Assign the following roles to your user account:
+ - `Storage Blob Data Contributor`
+ - `Storage Queue Data Contributor`
+
+**Azure AI Search**
+ - Go to **Access control (IAM)** → **Add role assignment**
+ - Assign the following roles to your user account:
+ - `Search Index Data Contributor`
+ - `Search Service Contributor`
+
+**Azure OpenAI**
+ - Go to **Access control (IAM)** → **Add role assignment**
+ - Assign role: `Cognitive Services OpenAI User`
+ - Assign to: Your user account
+
+**Azure AI Document Intelligence**
+ - Go to **Access control (IAM)** → **Add role assignment**
+ - Assign role: `Cognitive Services User`
+ - Assign to: Your user account
+
+**Note**: RBAC permission changes can take 5-10 minutes to propagate. If you encounter "Forbidden" errors after assigning roles, wait a few minutes and try again.
+
+## Step 3: Backend Setup & Run Instructions
+
+### 3.1. Open Solutions in Visual Studio
+
+Navigate to the cloned repository and open the following solution files from Visual Studio:
+
+- **KernelMemory** path: `Document-Knowledge-Mining-Solution-Accelerator/App/kernel-memory/KernelMemory.sln`
+
+- **Microsoft.GS.DPS** path: `Document-Knowledge-Mining-Solution-Accelerator/App/backend-api/Microsoft.GS.DPS.sln`
+
+**Sign in to Visual Studio** using your tenant account with the required permissions.
+
+---
+
+### 3.2. Create/Verify `appsettings.Development.json` Files
+
+**After deploying the accelerator**, the `appsettings.Development.json` file should be created automatically. If you are using a deployed resource group that was **not deployed from your machine**, you will need to create these files manually.
+
+#### KernelMemory Solution
+
+1. In the **Service** project (inside the `service` folder), expand the `appsettings.json` file.
+2. Confirm that `appsettings.Development.json` exists.
+3. If it does not exist, create it manually by copying the template file:
+
+```bash
+# From repository root
+cd "Document-Knowledge-Mining-Solution-Accelerator"
+
+# Copy the template file
+Copy-Item Deployment\appconfig\kernelmemory\appsettings.Development.json.template App\kernel-memory\service\Service\appsettings.Development.json # Windows PowerShell
+```
+
+4. Edit the `appsettings.Development.json` file with your Azure App Configuration URL:
+
+```json
+{
+ "ConnectionStrings": {
+ "AppConfig": "{{ appconfig-url }}"
+ }
+}
+```
+#### Microsoft.GS.DPS Solution
+
+1. In the **Microsoft.GS.DPS.Host** project, expand the `appsettings.json` file.
+2. Confirm that `appsettings.Development.json` exists.
+3. If it does not exist, create it manually by copying the template file:
+
+```bash
+# From repository root
+cd "Document-Knowledge-Mining-Solution-Accelerator"
+
+# Copy the template file
+Copy-Item Deployment\appconfig\aiservice\appsettings.Development.json.template App\backend-api\Microsoft.GS.DPS.Host\appsettings.Development.json
+```
+
+4. Edit the `appsettings.Development.json` file with your Azure App Configuration URL:
+
+```json
+{
+ "ConnectionStrings": {
+ "AppConfig": "{{ appconfig-url }}"
+ }
+}
+```
+
+---
+
+## Step 4: Run Backend Services
+
+### 4.1. Set Startup Projects
+
+- **KernelMemory Solution:**
+ Set **Service** (located inside the `service` folder) as the startup project to run the Kernel Memory service.
+
+- **Microsoft.GS.DPS Solution:**
+ Set **Microsoft.GS.DPS.Host** as the startup project to run the API.
+
+### 4.2. Update Kernel Memory Endpoint in Azure App Configuration
+
+> **Important:**
+> The following change is only for local development and debugging.
+> For production or Azure deployment, ensure the endpoint is set to `http://kernelmemory-service` to avoid misconfiguration.
+
+1. Sign in to the [Azure Portal](https://portal.azure.com).
+2. Navigate to your **App Configuration** resource within your deployed resource group.
+3. Go to **Operations → Configuration Explorer**.
+4. Search for the key:
+ `Application:Services:KernelMemory:Endpoint`
+5. For local development, update its value from:
+ ```
+ http://kernelmemory-service
+ ```
+ to
+ ```
+ http://localhost:9001
+ ```
+6. Apply the changes.
+
+> **Note:**
+> Always revert the Kernel Memory endpoint value back to `http://kernelmemory-service` before running the application in Azure.
+
+### 4.3. Run the Backend Services
+
+1. In Visual Studio, run both solutions (KernelMemory and Microsoft.GS.DPS) by pressing **F5** or clicking the **Start** button.
+2. Two terminal windows will appear showing the service logs.
+3. Once both services start successfully:
+ - **Kernel Memory Service** will be available at: http://localhost:9001
+ - **Backend API** will be available at: https://localhost:52190
+ - **Swagger UI** will open automatically at http://localhost:52190 for API validation
+
+> **⚠️ Important:** Keep both terminal windows open while the services are running. Do not close them until you're done with development.
+
+---
+
+## Step 5: Frontend Setup & Run Instructions
+
+### 5.1. Open the repo in **VS Code**.
+
+### 5.2. Create `.env` file from template
+
+Navigate to the `App/frontend-app` folder and create the `.env` file:
+
+```bash
+# From repository root
+cd "Document-Knowledge-Mining-Solution-Accelerator"
+
+# Copy the template file
+Copy-Item Deployment\appconfig\frontapp\.env.template App\frontend-app\.env
+```
+
+### 5.3. Configure the `.env` file
+
+Update the `VITE_API_ENDPOINT` value with your local Backend API URL, e.g.:
+
+```env
+VITE_API_ENDPOINT=https://localhost:52190
+DISABLE_AUTH=true
+VITE_ENABLE_UPLOAD_BUTTON=true
+```
+### 5.4. Verify Node.js and Yarn Installation
+
+Before installing dependencies, verify that Node.js (LTS) and Yarn are already installed from Step 1:
+
+```powershell
+# Verify installations
+node -v
+yarn -v
+```
+> **Note:** If Yarn is not installed, go back to Step 1 and complete the prerequisites, or use the following commands to install it:
+> ```powershell
+> corepack enable
+> corepack prepare yarn@stable --activate
+> ```
+
+### 5.5. Install frontend dependencies
+
+```powershell
+# From repository root, navigate to frontend directory
+cd App\frontend-app
+
+# Install dependencies
+yarn install
+```
+
+### 5.6. Start the application
+
+```powershell
+yarn start
+```
+
+---
+
+**Services will be available at:**
+- **Kernel Memory Service**: http://localhost:9001
+- **Backend API**: https://localhost:52190
+- **Frontend Application**: http://localhost:5900
+
+You're now ready to run and debug the application locally!
+
+---
+
+## Troubleshooting
+
+### Common Issues
+
+#### Connection Issues
+
+- While running the Kernel solution, if you encounter an error such as ``server not responded`` or ``server not found``, it usually indicates that the required resource is not responding.
+- Ensure that the necessary **Kubernetes services** are running. If not, start the Kubernetes service and then run the Kernel solution again.
+
+#### Windows-Specific Issues
+
+```powershell
+# PowerShell execution policy
+Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
+
+# Long path support (Windows 10 1607+, run as Administrator)
+New-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1 -PropertyType DWORD -Force
+```
+
+### Azure Authentication Issues
+
+```bash
+# Login to Azure CLI
+az login
+
+# Set subscription
+az account set --subscription "your-subscription-id"
+
+# Test authentication
+az account show
+```
+
+### Environment Variable Issues
+
+```bash
+# Check environment variables are loaded
+Get-ChildItem Env:AZURE* # Windows PowerShell
+
+# Validate .env file format
+cat .env | grep -v '^#' | grep '=' # Should show key=value pairs
+```
+
+## Related Documentation
+
+- [Deployment Guide](DeploymentGuide.md) - Instructions for production deployment.
+- [Delete Resource Group](DeleteResourceGroup.md) - Steps to safely delete the Azure resource group created for the solution.
+- [PowerShell Setup](PowershellSetup.md) - Instructions for setting up PowerShell and required scripts.
+- [Quota Check](QuotaCheck.md) - Steps to verify Azure quotas and ensure required limits before deployment.
\ No newline at end of file
diff --git a/docs/LocalSetupGuide.md b/docs/LocalSetupGuide.md
deleted file mode 100644
index 6a348f2d..00000000
--- a/docs/LocalSetupGuide.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# Local Setup Guide
-
-Follow these steps to set up and debug the application locally.
-
----
-
-## Backend Setup
-
-### 1. Clone the Repository
-
-```powershell
-git clone https://github.com/microsoft/Document-Knowledge-Mining-Solution-Accelerator.git
-```
-
----
-
-### 2. Sign In to Visual Studio
-
-- Open the **KernelMemory** and **Microsoft.GS.DPS** solutions in Visual Studio.
-- Sign in using your tenant account with the required permissions.
-
----
-
-### 3. Verify `appsettings.Development.json`
-
-After deploying the accelerator, the `appsettings.Development.json` file will be created automatically.
-
-- **KernelMemory Solution:**
- Expand the `appsettings.json` file under the **Service** project (inside the `service` folder) and confirm that `appsettings.Development.json` exists.
-
-- **Microsoft.GS.DPS Solution:**
- Expand the `appsettings.json` file under the **Microsoft.GS.DPS.Host** project and confirm that `appsettings.Development.json` exists.
-
----
-
-### 4. Set Startup Projects
-
-- **KernelMemory Solution:**
- Set **Service** (located inside the `service` folder) as the startup project to run the Kernel Memory service.
-
-- **Microsoft.GS.DPS Solution:**
- Set **Microsoft.GS.DPS.Host** as the startup project to run the API.
-
----
-
-### 5. Assign Required Azure Roles
-
-To enable local debugging and ensure your application can access necessary Azure resources, assign the following roles to your Microsoft Entra ID in the respective services within your deployed resource group in the Azure portal:
-
-- **App Configuration**
- - App Configuration Data Reader
-- **Storage Account**
- - Storage Blob Data Contributor
- - Storage Queue Data Contributor
- - Storage Blob Data Reader
-
----
-
-### 6. Update Kernel Memory Endpoint in Azure App Configuration
-
-> **Important:**
-> The following change is only for local development and debugging.
-> For production or Azure deployment, ensure the endpoint is set to `http://kernelmemory-service` to avoid misconfiguration.
-
-1. Sign in to the [Azure Portal](https://portal.azure.com).
-2. Navigate to your **App Configuration** resource within/from your deployed resource group.
-3. Go to **Operations → Configuration Explorer**.
-4. Search for the key:
- `Application:Services:KernelMemory:Endpoint`
-5. For local development, update its value from:
- ```
- http://kernelmemory-service
- ```
- to
- ```
- http://localhost:9001
- ```
-6. Apply the changes.
-
-> **Note:**
-> Always revert this value back to `http://kernelmemory-service` before running the application in Azure.
-
----
-
-## Frontend Setup
-
-1. Open the repo in **VS Code**.
-2. Navigate to the `App/frontend-app` folder and locate the `.env` file.
-3. In the `.env` file, update the `VITE_API_ENDPOINT` value with your local API URL, e.g.:
- ```
- VITE_API_ENDPOINT=https://localhost:52190
- ```
-4. Before installing dependencies, ensure Node.js (LTS) and Yarn are installed on your machine:
- - Recommended: install Node.js LTS (18.x or later) from https://nodejs.org
- - Install Yarn if it's not already available:
- ```powershell
- npm install -g yarn
- ```
- - Verify the installations:
- ```powershell
- node -v
- npm -v
- yarn -v
- ```
-5. Install dependencies:
- ```powershell
- yarn install
- ```
-6. Start the application:
- ```powershell
- yarn start
- ```
-
----
-
-**You're now ready to run and debug the application locally!**
\ No newline at end of file
diff --git a/docs/PowershellSetup.md b/docs/PowershellSetup.md
new file mode 100644
index 00000000..af5a3291
--- /dev/null
+++ b/docs/PowershellSetup.md
@@ -0,0 +1,47 @@
+# Add PowerShell 7 to PATH in Windows
+
+This guide will help you add **PowerShell 7** (PowerShell Core) to your system’s PATH variable on Windows, so you can easily run it from any Command Prompt or Run dialog.
+
+## Prerequisites
+
+- You should have **PowerShell 7** installed on your machine. If you haven’t installed it yet, you can download it following the guide here: [Installing PowerShell on Windows | Microsoft Learn](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.5).
+- **Administrative privileges are not required** unless you're modifying system-wide environment variables. You can modify your **user-specific PATH** without admin rights.
+
+## Steps to Add PowerShell 7 to PATH
+
+### 1. Open **System Properties**
+ - Press `Win + X` and choose **System**.
+ - Click on **Advanced system settings** on the left sidebar. This will open the **System Properties** window.
+ - In the **System Properties** window, click on the **Environment Variables** button at the bottom.
+
+### 2. Edit User Environment Variables
+ - In the **Environment Variables** window, under **User variables**, find the `Path` variable.
+ - Select the `Path` variable and click **Edit**. (If the `Path` variable doesn’t exist, click **New** and name it `Path`.)
+
+### 3. Check if PowerShell 7 Path is Already in PATH
+ - Before adding the path, make sure the following path is not already present in the list:
+ ```
+ C:\Program Files\PowerShell\7\
+ ```
+ - If the path is already there, you don't need to add it again.
+
+### 4. Add PowerShell 7 Path
+ - If the path is not already in the list, click **New** in the **Edit Environment Variable** window.
+ - Add the following path to the list:
+ ```
+ C:\Program Files\PowerShell\7\
+ ```
+ > **Note:** If you installed PowerShell 7 in a custom location, replace the above path with the correct one.
+
+### 5. Save Changes
+ - After adding the path, click **OK** to close the **Edit Environment Variable** window.
+ - Click **OK** again to close the **Environment Variables** window.
+ - Finally, click **OK** to exit the **System Properties** window.
+
+### 6. Verify PowerShell 7 in PATH
+ - Open **Command Prompt** or **Run** (press `Win + R`).
+ - Type `pwsh` and press Enter. If PowerShell 7 opens, you've successfully added it to your PATH!
+---
+## Troubleshooting
+- **PowerShell 7 not opening:** Ensure the path to PowerShell 7 is entered correctly. If you're using a custom installation folder, check that the correct path is added to the `Path` variable.
+- **Changes not taking effect:** Try restarting your computer or logging out and logging back in for the changes to apply.
\ No newline at end of file
diff --git a/docs/SampleQuestions.md b/docs/SampleQuestions.md
index 0db06c42..d7b94c7e 100644
--- a/docs/SampleQuestions.md
+++ b/docs/SampleQuestions.md
@@ -1,45 +1,65 @@
# Sample Questions
-To help you get started, here are some **Sample Prompts** you can ask in the app:
+To help you get started, here are some **Sample Prompts** you can test in the application:
-## **Housing Affordability & Report Analysis**
+## **1. Housing Affordability & Report Analysis**
Prompts focus on housing issues, document filtering, and comparing annual report outcomes.
### **Overview**
-_Sample Workflow:_
-- Question: What are the main factors contributing to the current housing affordability issues?
-- Task: Click one of the **suggested follow-up question** and related response is generated.
-- Task: Click **New topic** button to clear the chat conversation.
-
-### **Housing Report**
+**Sample Workflow:**
+
+1. Open the web experience interface
+2. Navigate to the **document list** and browse available documents
+3. Ask in chat: **"What are the main factors contributing to the current housing affordability issues?"**
+
+
+
+4. Review the AI response for relevant insights
+5. Click one of the **suggested follow-up questions** and check the response
+6. Reset the chat by clicking **[New topic]**
+
+---
+
+## **2. Housing Report Search & Comparison**
Explore key findings, compare annual data, and review detailed housing insights.
-_Sample Workflow:_
+**Sample Workflow:**
-- Task: Search for the keyword **Housing Report** to filter the document list and display only relevant results.
-- Task: Select two documents **(Annual Housing Report 2022 & 2023)** right top panel is switched to **2 Selected** tab.
-
-- Question: Analyze the two annual reports and compare the positive and negative outcomes YoY. Show the results in a table.
-- Task: Click **Details** on the **Annual Housing Report 2023** document to display the pop-up viewer.
-- Task: Review the Extractive Summary for accuracy.
-- Task: Scroll through the pages of the document until page **10** & **11**.
-- Task: Click on **Chat** tab
-- Question: Can you summarize and compare the tables on page **10** and **11**?
+1. Search for: **"Housing Report"** to filter the document list
+2. Select **Annual Housing Report 2022** and **Annual Housing Report 2023**
+ - Confirm the top panel shows **"2 Selected"**
-### **Contracts**
-Review, analyze, and extract key details from handwritten contract documents.
+
+
+3. Ask in chat: **"Analyze the two annual reports and compare the positive and negative outcomes YoY. Show the results in a table."**
+4. Review the generated table for clarity and accuracy
+5. Click **DETAILS** on **Annual Housing Report 2023**
+6. Review the **Extractive Summary** for accuracy
+7. Scroll to **pages 10 & 11**
+8. Click on **Chat** tab in the pop-up viewer
+9. Ask: **"Can you summarize and compare the tables on page 10 and 11?"**
+10. Review the summarized comparison
+11. Close the pop-up viewer
-_Sample Workflow:_
+---
+
+## **3. Contracts Search & Analysis**
+Review, analyze, and extract key details from handwritten contract documents.
-- Task: Search for the keyword **Contracts** to filter the document list and display only relevant results.
-- Task: Select **3** to **4**, **handwritten contract documents**.
-- Question: Analyze these forms and create a table with all buyers, sellers, and corresponding purchase prices.
-- Task: Click **Details** button on one of the handwritten contracts to display the pop-up viewer.
-- Task: Click on **Chat** tab
-- Question: What liabilities is the buyer responsible for within the contract?
+**Sample Workflow:**
+1. Search for: **"Contracts"** to filter the document list
+2. Select **3–4 handwritten contract documents**
+3. Ask in chat: **"Analyze these forms and create a table with all buyers, sellers, and corresponding purchase prices."**
+4. Review the table for correct buyer/seller names and purchase prices
+5. Click **DETAILS** on one of the handwritten contracts
+6. Click on **Chat** tab in the pop-up viewer
+7. Ask: **"What liabilities is the buyer responsible for within the contract?"**
+8. Review the response for specific obligations (e.g., fees, taxes, maintenance, contingencies)
-Enables faster insight discovery and smarter data analysis through automated content extraction and chat-based guidance.
+---
+## Summary
+These workflows demonstrate how the solution enables faster insight discovery and smarter data analysis through automated content extraction and chat-based guidance.
diff --git a/docs/TroubleShootingSteps.md b/docs/TroubleShootingSteps.md
new file mode 100644
index 00000000..effd48cd
--- /dev/null
+++ b/docs/TroubleShootingSteps.md
@@ -0,0 +1,609 @@
+# 🛠️ Troubleshooting
+
+When deploying Azure resources, you may come across different error codes that stop or delay the deployment process. This section lists some of the most common errors along with possible causes and step-by-step resolutions.
+
+Use these as quick reference guides to unblock your deployments.
+
+> **💡 Need deployment recovery help?** If your deployment failed and you need to start over, see the [Recover from Failed Deployment](./DeploymentGuide.md#recover-from-failed-deployment) section in the deployment guide.
+
+## Error Codes
+
+
+ReadOnlyDisabledSubscription
+
+- Check if you have an active subscription before starting the deployment.
+
+
+
+ MissingSubscriptionRegistration/ AllowBringYourOwnPublicIpAddress/ InvalidAuthenticationToken
+
+Enable `AllowBringYourOwnPublicIpAddress` Feature
+
+Before deploying the resources, you may need to enable the **Bring Your Own Public IP Address** feature in Azure. This is required only once per subscription.
+
+### Steps
+
+1. **Run the following command to register the feature:**
+
+ ```bash
+ az feature register --namespace Microsoft.Network --name AllowBringYourOwnPublicIpAddress
+ ```
+
+2. **Wait for the registration to complete.**
+ You can check the status using:
+
+ ```bash
+ az feature show --namespace Microsoft.Network --name AllowBringYourOwnPublicIpAddress --query properties.state
+ ```
+
+3. **The output should show:**
+ "Registered"
+
+4. **Once the feature is registered, refresh the provider:**
+
+ ```bash
+ az provider register --namespace Microsoft.Network
+ ```
+
+ 💡 Note: Feature registration may take several minutes to complete. This needs to be done only once per Azure subscription.
+
+
+
+ResourceGroupNotFound
+
+## Option 1
+
+### Steps
+
+1. Go to [Azure Portal](https://portal.azure.com/#home).
+
+2. Click on the **"Resource groups"** option available on the Azure portal home page.
+ 
+
+3. In the Resource Groups search bar, search for the resource group you intend to target for deployment. If it exists, you can proceed with using it.
+ 
+
+## Option 2
+
+- This error can occur if you deploy the template using the same .env file - from a previous deployment.
+- To avoid this issue, create a new environment before redeploying.
+- You can use the following command to create a new environment:
+
+```
+azd env new
+```
+
+
+
+
+ResourceGroupBeingDeleted
+
+To prevent this issue, please ensure that the resource group you are targeting for deployment is not currently being deleted. You can follow the steps below to verify whether the resource group is being deleted.
+
+### Steps:
+
+1. Go to [Azure Portal](https://portal.azure.com/#home)
+2. Go to resource group option and search for targeted resource group
+3. If the targeted resource group exists and its deletion is in progress, you cannot use it; create a new resource group or use a different one
+
+
+
+
+InternalSubscriptionIsOverQuotaForSku/ManagedEnvironmentProvisioningError
+
+Quotas are applied per resource group, subscriptions, accounts, and other scopes. For example, your subscription might be configured to limit the number of vCPUs for a region. If you attempt to deploy a virtual machine with more vCPUs than the permitted amount, you receive an error that the quota was exceeded.
+For PowerShell, use the `Get-AzVMUsage` cmdlet to find virtual machine quotas.
+
+```ps
+Get-AzVMUsage -Location "West US"
+```
+
+Based on the available quota, you can deploy the application; otherwise, you can request more quota.
+
+
+
+InsufficientQuota
+
+- Check if you have sufficient quota available in your subscription before deployment.
+- To verify, refer to the [Quota Check documentation](./QuotaCheck.md) for details.
+
+
+
+
+LinkedInvalidPropertyId/ ResourceNotFound/DeploymentOutputEvaluationFailed/ CanNotRestoreANonExistingResource
+
+- Before using any resource ID, ensure it follows the correct format.
+- Verify that the resource ID you are passing actually exists.
+- Make sure there are no typos in the resource ID.
+- Verify that the provisioning state of the existing resource is `Succeeded` by running the following command to avoid this error while deployment or restoring the resource.
+
+ ```
+  az resource show --ids <resource-id> --query "properties.provisioningState"
+ ```
+
+- Sample Resource IDs format
+ - Log Analytics Workspace Resource ID
+ ```
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}
+ ```
+ - Azure AI Foundry Project Resource ID
+ ```
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{name}
+ ```
+- For more information refer [Resource Not Found errors solutions](https://learn.microsoft.com/en-us/azure/azure-resource-manager/troubleshooting/error-not-found?tabs=bicep)
+
+
+
+
+ResourceNameInvalid
+
+- Ensure the resource name is within the allowed length and naming rules defined for that specific resource type, you can refer [Resource Naming Convention](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-name-rules) document.
+
+
+
+
+ServiceUnavailable/ResourceNotFound
+
+- Regions are restricted to guarantee compatibility with paired regions and replica locations for data redundancy and failover scenarios based on articles [Azure regions list](https://learn.microsoft.com/en-us/azure/reliability/regions-list) and [Reliability in Azure Cosmos DB for NoSQL](https://learn.microsoft.com/en-us/azure/reliability/reliability-cosmos-db-nosql).
+
+- You can request more quota for Cosmos DB, refer [Quota Request](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/create-support-request-quota-increase) Documentation
+
+
+
+
+Workspace Name - InvalidParameter
+
+To avoid these errors in the workspace name, follow the rules below.
+
+1. Must start and end with an alphanumeric character (letter or number).
+2. Allowed characters:
+ `a–z`
+ `0–9`
+ `- (hyphen)`
+3. Cannot start or end with a hyphen -.
+4. No spaces, underscores (\_), periods (.), or special characters.
+5. Must be unique within the Azure region & subscription.
+6. Length: 3–33 characters (for AML workspaces).
+
+
+
+BadRequest: Dns record under zone Document is already taken
+
+This error can occur only when the user hardcodes the CosmosDB service name. To avoid this, you can try the suggestions below.
+
+- Verify resource names are globally unique.
+- If you already created an account/resource with same name in another subscription or resource group, check and delete it before reusing the name.
+- By default in this template we are using a unique prefix with every resource/account name to avoid this kind of error.
+
+
+
+NetcfgSubnetRangeOutsideVnet
+
+- Ensure the subnet’s IP address range falls within the virtual network’s address space.
+- Always validate that the subnet CIDR block is a subset of the VNet range.
+- For Azure Bastion, the AzureBastionSubnet must be at least /27.
+- Confirm that the AzureBastionSubnet is deployed inside the VNet.
+
+
+
+DisableExport_PublicNetworkAccessMustBeDisabled
+
+- Check container source: Confirm whether the deployment is using a Docker image or Azure Container Registry (ACR).
+- Verify ACR configuration: If ACR is included, review its settings to ensure they comply with Azure requirements.
+- Check export settings: If export is disabled in ACR, make sure public network access is also disabled.
+- Redeploy after fix: Correct the configuration and redeploy. This will prevent the Conflict error during deployment.
+- For more information refer [ACR Data Loss Prevention](https://learn.microsoft.com/en-us/azure/container-registry/data-loss-prevention) document.
+
+
+
+AccountProvisioningStateInvalid
+
+- The AccountProvisioningStateInvalid error occurs when you try to use resources while they are still in the Accepted provisioning state.
+- This means the deployment has not yet fully completed.
+- To avoid this error, wait until the provisioning state changes to Succeeded.
+- Only use the resources once the deployment is fully completed.
+
+
+
+VaultNameNotValid
+
+In this template the vault name will be unique every time, but if you are trying to hard-code the name then please make sure of the points below.
+
+1. Check name length
+ - Ensure the Key Vault name is between 3 and 24 characters.
+2. Validate allowed characters
+ - The name can only contain letters (a–z, A–Z) and numbers (0–9).
+ - Hyphens are allowed, but not at the beginning or end, and not consecutive (--).
+3. Ensure proper start and end
+ - The name must start with a letter.
+ - The name must end with a letter or digit (not a hyphen).
+4. Test with a new name
+
+ - Example of a valid vault name:
+ ✅ cartersaikeyvault1 ✅ securevaultdemo ✅ kv-project123
+
+
+
+
+DeploymentCanceled
+
+There might be multiple reasons for this error. You can follow the steps below to troubleshoot.
+
+1. Check deployment history
+ - Go to Azure Portal → Resource Group → Deployments.
+ - Look at the detailed error message for the deployment that was canceled — this will show which resource failed and why.
+2. Identify the root cause
+ - A DeploymentCanceled usually means:
+ - A dependent resource failed to deploy.
+ - A validation error occurred earlier.
+ - A manual cancellation was triggered.
+ - Expand the failed deployment logs for inner error messages.
+3. Validate your template (ARM/Bicep)
+ Run:
+ ```
+   az deployment group validate --resource-group <resource-group-name> --template-file main.bicep
+ ```
+4. Check resource limits/quotas
+ - Ensure you have not exceeded quotas (vCPUs, IPs, storage accounts, etc.), which can silently cause cancellation.
+5. Fix the failed dependency
+ - If a specific resource shows BadRequest, Conflict, or ValidationError, resolve that first.
+ - Re-run the deployment after fixing the root cause.
+6. Retry deployment
+Once corrected, redeploy with:
+`az deployment group create --resource-group <resource-group-name> --template-file main.bicep`
+Essentially: DeploymentCanceled itself is just a wrapper error — you need to check inner errors in the deployment logs to find the actual failure.
+
+
+
+LocationNotAvailableForResourceType
+
+- You may encounter a LocationNotAvailableForResourceType error if you set the secondary location to 'Australia Central' in the main.bicep file.
+- This happens because 'Australia Central' is not a supported region for that resource type.
+- Always refer to the README file or Azure documentation to check the list of supported regions.
+- Update the deployment with a valid supported region to resolve the issue.
+
+
+
+
+InvalidResourceLocation
+
+- You may encounter an InvalidResourceLocation error if you change the region for Cosmos DB or the Storage Account (secondary location) multiple times in the main.bicep file and redeploy.
+- Azure resources like Cosmos DB and Storage Accounts do not support changing regions after deployment.
+- If you need to change the region again, first delete the existing deployment.
+- Then redeploy the resources with the updated region configuration.
+
+
+
+
+DeploymentActive
+
+- This issue occurs when a deployment is already in progress and another deployment is triggered in the same resource group, causing a DeploymentActive error.
+- Cancel the ongoing deployment before starting a new one.
+- Do not initiate a new deployment in the same resource group until the previous one is completed.
+
+
+
+ResourceOperationFailure/ProvisioningDisabled
+
+- This error occurs when provisioning of a resource is restricted in the selected region.
+ It usually happens because the service is not available in that region or provisioning has been temporarily disabled.
+
+- Regions are restricted to guarantee compatibility with paired regions and replica locations for data redundancy and failover scenarios based on articles [Azure regions list](https://learn.microsoft.com/en-us/azure/reliability/regions-list) and [Reliability in Azure Cosmos DB for NoSQL](https://learn.microsoft.com/en-us/azure/reliability/reliability-cosmos-db-nosql).
+
+- If you need to use the same region, you can request a quota or provisioning exception.
+ Refer to [Quota Request](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/create-support-request-quota-increase) for more details.
+
+
+
+
+MaxNumberOfRegionalEnvironmentsInSubExceeded
+
+- This error occurs when you try to create more than the allowed number of **Azure Container App Environments (ACA Environments)** in the same region for a subscription.
+- For example, in **Sweden Central**, only **1 Container App Environment** is allowed per subscription.
+
+The subscription 'xxxx-xxxx' cannot have more than 1 Container App Environments in Sweden Central.
+
+- To fix this, you can:
+ - Deploy the Container App Environment in a **different region**, OR
+ - Request a quota increase via Azure Support → [Quota Increase Request](https://go.microsoft.com/fwlink/?linkid=2208872)
+
+
+
+
+Unauthorized - Operation cannot be completed without additional quota
+
+- You can check your quota usage using `az vm list-usage`.
+
+ ```
+  az vm list-usage --location "<location>" -o table
+ ```
+
+- To Request more quota refer [VM Quota Request](https://techcommunity.microsoft.com/blog/startupsatmicrosoftblog/how-to-increase-quota-for-specific-types-of-azure-virtual-machines/3792394).
+
+
+
+ParentResourceNotfound
+
+
+- You can refer to the [Parent Resource Not found](https://learn.microsoft.com/en-us/azure/azure-resource-manager/troubleshooting/error-parent-resource?tabs=bicep) documentation if you encounter this error.
+
+
+
+ResourceProviderError
+
+- This error occurs when the resource provider is not registered in your subscription.
+- To register it, refer to [Register Resource Provider](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types) documentation.
+
+
+
+
+Conflict - Cannot use the SKU Basic with File Change Audit for site.
+
+- This error happens because File Change Audit logs aren’t supported on Basic SKU App Service Plans.
+
+- Upgrading to Premium/Isolated SKU (supports File Change Audit), or
+
+- Disabling File Change Audit in Diagnostic Settings if you must stay on Basic.
+- Always cross-check the [supported log types](https://aka.ms/supported-log-types)
+ before adding diagnostic logs to your Bicep templates.
+
+
+
+
+
+AccountPropertyCannotBeUpdated
+
+- The property **`isHnsEnabled`** (Hierarchical Namespace for Data Lake Gen2) is **read-only** and can only be set during **storage account creation**.
+- Once a storage account is created, this property **cannot be updated**.
+- Trying to update it via ARM template, Bicep, CLI, or Portal will fail.
+
+- **Resolution**
+- Create a **new storage account** with `isHnsEnabled=true` if you require hierarchical namespace.
+- Migration may be needed if you already have data.
+- Refer to [Storage Account Update Restrictions](https://aka.ms/storageaccountupdate) for more details.
+
+
+
+
+
+InvalidRequestContent
+
+- The deployment values either include values that aren't recognized, or required values are missing.
+- Confirm the values for your resource type.
+- You can refer to the [Invalid Request Content error documentation](https://learn.microsoft.com/en-us/azure/azure-resource-manager/troubleshooting/error-invalid-request-content).
+
+
+
+
+ReadOnlyDisabledSubscription
+
+- Depending on the type of the Azure Subscription, the expiration date might have been reached.
+
+- You have to activate the Azure Subscription before creating any Azure resource.
+
+- You can refer to the Reactivate a disabled Azure subscription documentation.
+
+
+
+
+
+SkuNotAvailable
+
+- You receive this error in the following scenarios:
+ - When the resource SKU you've selected, such as VM size, isn't available for a location or zone.
+ - If you're deploying an Azure Spot VM or Spot scale set instance, there isn't any capacity for Azure Spot in this location. For more information, see Spot error messages.
+
+
+
+
+CrossTenantDeploymentNotPermitted
+
+- **Check tenant match:**
+ Ensure your deployment identity (user/SP) and the target resource group are in the same tenant.
+
+ ```bash
+ az account show
+  az group show --name <resource-group-name>
+ ```
+
+- **Verify pipeline/service principal:**
+ If using CI/CD, confirm that the service principal belongs to the same tenant and has permissions on the resource group.
+
+- **Avoid cross-tenant references:**
+ Make sure your Bicep doesn’t reference subscriptions, resource groups, or resources in another tenant.
+
+- **Test minimal deployment:**
+ Deploy a simple resource to the same resource group to confirm that identity and tenant are correct.
+
+- **Guest/external accounts:**
+ Avoid using guest users from other tenants; use native accounts or SPs in the tenant.
+
+
+
+
+RequestDisallowedByPolicy
+
+- This typically indicates that an Azure Policy is preventing the requested action due to policy restrictions in your subscription.
+- For more details and guidance on resolving this issue, please refer to the official Microsoft documentation: [RequestDisallowedByPolicy](https://learn.microsoft.com/en-us/troubleshoot/azure/azure-kubernetes/create-upgrade-delete/error-code-requestdisallowedbypolicy)
+
+
+
+
+FlagMustBeSetForRestore/NameUnavailable/CustomDomainInUse
+
+- This error occurs when you try to deploy a Cognitive Services resource that was soft-deleted earlier.
+- Azure requires you to explicitly set the `restore` flag to `true` if you want to recover the soft-deleted resource.
+- If you don’t want to restore the resource, you must purge the deleted resource first before redeploying.
+
+**Example causes:**
+
+- Trying to redeploy a Cognitive Services account with the same name as a previously deleted one.
+- The deleted resource still exists in a soft-delete retention state.
+
+**How to fix:**
+
+1. If you want to restore → add `"restore": true` in your template properties.
+2. If you want a fresh deployment → purge the resource using:
+ ```bash
+   az cognitiveservices account purge \
+     --name <account-name> \
+     --resource-group <resource-group-name> \
+     --location <location>
+ ```
+
+- For more details, refer to [Soft delete and resource restore.](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/delete-resource-group?tabs=azure-powershell)
+
+
+
+
+PrincipalNotFound
+
+- This error occurs when the principal ID (Service Principal, User, or Group) specified in a role assignment or deployment does not exist in the Azure Active Directory tenant.
+- It can also happen due to replication delays right after creating a new principal.
+
+**Example causes:**
+
+- The specified Object ID is invalid or belongs to another tenant.
+- The principal was recently created, but Azure AD has not yet replicated it.
+- Attempting to assign a role to a non-existing or deleted Service Principal/User/Group.
+
+**How to fix:**
+
+1. Verify that the principal ID is correct and exists in the same directory/tenant.
+ ```bash
+   az ad sp show --id <object-id>
+ ```
+2. If the principal was just created, wait a few minutes and retry.
+3. Explicitly set the principalType property (ServicePrincipal, User, or Group) in your ARM/Bicep template to avoid replication delays.
+4. If the principal does not exist, create it again before assigning roles.
+
+- For more details, see [Azure PrincipalType documentation](https://learn.microsoft.com/en-us/azure/role-based-access-control/troubleshooting?tabs=bicep)
+
+
+
+
+
+RedundancyConfigurationNotAvailableInRegion
+
+- This issue happens when you try to create a Storage Account with a redundancy configuration (e.g., Standard_GRS) that is not supported in the selected Azure region.
+
+- Example: Creating a storage account with GRS in italynorth will fail with this error.
+
+ ```
+ az storage account create -n mystorageacct123 -g myResourceGroup -l italynorth --sku Standard_GRS --kind StorageV2
+
+ ```
+
+- To check supported SKUs for your region:
+ ```
+ az storage account list-skus -l italynorth -o table
+ ```
+- Use a supported redundancy option (e.g., Standard_LRS) in the same region, or deploy the Storage Account in a region that supports your chosen redundancy. For more details, refer to the [Azure Storage redundancy documentation.](https://learn.microsoft.com/en-us/azure/storage/common/storage-redundancy)
+
+
+
+
+
+DeploymentNotFound
+
+- This issue occurs when the user deletes a previous deployment along with the resource group (RG), and then redeploys the same RG with the same environment name but in a different location.
+
+- To avoid the DeploymentNotFound error, do not change the location when redeploying a deleted RG, or Use new names for the RG and environment during redeployment.
+
+
+
+
+
+DeploymentCanceled(user.canceled)
+- Indicates that the deployment was manually canceled by the user (Portal, CLI, or pipeline).
+
+- Check deployment history and logs to confirm who/when it was canceled.
+
+- If accidental, retry the deployment.
+
+- For pipelines, ensure no automation or timeout is triggering cancellation.
+
+- Use deployment locks or retry logic to prevent accidental cancellations.
+
+
+
+
+
+ResourceGroupDeletionTimeout
+
+- Some resources in the resource group may be stuck deleting or have dependencies; check RG resources and status.
+
+- Ensure no resource locks or Azure Policies are blocking deletion.
+
+- Retry deletion via CLI/PowerShell (```az group delete --name <resource-group-name> --yes --no-wait```).
+
+- Check Activity Log to identify failing resources; escalate to Azure Support if deletion is stuck.
+
+
+
+
+
+BadRequest - DatabaseAccount is in a failed provisioning state because the previous attempt to create it was not successful
+
+- This error occurs when a user attempts to redeploy a resource that previously failed to provision.
+
+- To resolve the issue, delete the failed deployment first, then start a new deployment.
+
+- For guidance on deleting a resource from a Resource Group, refer to the following link: [Delete an Azure Cosmos DB account](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/manage-with-powershell#delete-account:~:text=%3A%24enableMultiMaster-,Delete%20an%20Azure%20Cosmos%20DB%20account,-This%20command%20deletes)
+
+
+
+
+
+SpecialFeatureOrQuotaIdRequired
+
+- This error occurs when your subscription does not have access to certain Azure OpenAI models.
+- Example error message:
+  - SpecialFeatureOrQuotaIdRequired: The current subscription does not have access to this model 'Format:OpenAI,Name:o3,Version:2025-04-16'.
+- Resolution:
+To gain access, submit a request using the official form:
+  - [👉 Azure OpenAI Model Access Request](https://customervoice.microsoft.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR7en2Ais5pxKtso_Pz4b1_xUQ1VGQUEzRlBIMVU2UFlHSFpSNkpOR0paRSQlQCN0PWcu)
+
+  - You’ll need to use this form if you require access to the following restricted models:
+ - gpt-5
+ - o3
+ - o3-pro
+ - deep research
+ - reasoning summary
+ - gpt-image-1
+ - Once your request is approved, redeploy your resources.
+
+
+
+
+
+Error During deployment
+
+- Attempt 1 (EXP deployment) — Error: 503 Service Temporarily Unavailable: If you encounter this error during EXP deployment, first verify whether your deployment completed successfully. If the deployment failed, review the activity logs or error messages for more details about the failure. Address any identified issues, then start a fresh deployment.
+
+
+
+Begin a new deployment attempt:
+
+- Attempts 2 and 3 (EXP deployment): If none of the files were uploaded after running the sample command and all uploads failed, follow the troubleshooting steps below.
+
+
+
+
+
+
+
+- Troubleshooting steps:
+
+ - Review the error messages to identify the cause of the upload failures.
+ - Check the status of the resource group and confirm whether AKS is running or stopped.
+ - If AKS is stopped, try restarting the AKS service.
+ - Attempt the file upload process again using your script.
+ - If uploads continue to fail after these steps, proceed to start a completely new deployment.
+
+ 
+
+
+
+💡 Note: If you encounter any other issues, you can refer to the [Common Deployment Errors](https://learn.microsoft.com/en-us/azure/azure-resource-manager/troubleshooting/common-deployment-errors) documentation.
+If the problem persists, you can also raise a bug in our [Github Issues](https://github.com/microsoft/Document-Knowledge-Mining-Solution-Accelerator/issues) for further support.
\ No newline at end of file
diff --git a/docs/images/Sample_Qustion.png b/docs/images/Sample_Qustion.png
new file mode 100644
index 00000000..dfcc630f
Binary files /dev/null and b/docs/images/Sample_Qustion.png differ
diff --git a/docs/images/deployment/Deployment_last_step.png b/docs/images/deployment/Deployment_last_step.png
new file mode 100644
index 00000000..55152f1c
Binary files /dev/null and b/docs/images/deployment/Deployment_last_step.png differ
diff --git a/docs/images/local_development_setup_1.png b/docs/images/local_development_setup_1.png
new file mode 100644
index 00000000..72d84412
Binary files /dev/null and b/docs/images/local_development_setup_1.png differ
diff --git a/docs/images/re_use_log/logAnalytics.png b/docs/images/re_use_log/logAnalytics.png
new file mode 100644
index 00000000..95402f8d
Binary files /dev/null and b/docs/images/re_use_log/logAnalytics.png differ
diff --git a/docs/images/re_use_log/logAnalyticsJson.png b/docs/images/re_use_log/logAnalyticsJson.png
new file mode 100644
index 00000000..3a4093bf
Binary files /dev/null and b/docs/images/re_use_log/logAnalyticsJson.png differ
diff --git a/docs/images/re_use_log/logAnalyticsList.png b/docs/images/re_use_log/logAnalyticsList.png
new file mode 100644
index 00000000..6dcf4640
Binary files /dev/null and b/docs/images/re_use_log/logAnalyticsList.png differ
diff --git a/docs/images/readme/portal_resource_group_delete.png b/docs/images/readme/portal_resource_group_delete.png
new file mode 100644
index 00000000..c435ecf1
Binary files /dev/null and b/docs/images/readme/portal_resource_group_delete.png differ
diff --git a/docs/images/readme/portal_resource_groups_search.png b/docs/images/readme/portal_resource_groups_search.png
new file mode 100644
index 00000000..d3a245df
Binary files /dev/null and b/docs/images/readme/portal_resource_groups_search.png differ
diff --git a/docs/images/readme/portal_services_resource_groups.png b/docs/images/readme/portal_services_resource_groups.png
new file mode 100644
index 00000000..67b058bc
Binary files /dev/null and b/docs/images/readme/portal_services_resource_groups.png differ
diff --git a/docs/images/readme/portal_web_app_delete.png b/docs/images/readme/portal_web_app_delete.png
new file mode 100644
index 00000000..24bf593d
Binary files /dev/null and b/docs/images/readme/portal_web_app_delete.png differ
diff --git a/docs/images/readme/solution-architecture.png b/docs/images/readme/solution-architecture.png
index 1d5a60d3..5802aa78 100644
Binary files a/docs/images/readme/solution-architecture.png and b/docs/images/readme/solution-architecture.png differ
diff --git a/docs/images/troubleshooting/503.png b/docs/images/troubleshooting/503.png
new file mode 100644
index 00000000..cb6bd973
Binary files /dev/null and b/docs/images/troubleshooting/503.png differ
diff --git a/docs/images/troubleshooting/503_1.png b/docs/images/troubleshooting/503_1.png
new file mode 100644
index 00000000..f63057d9
Binary files /dev/null and b/docs/images/troubleshooting/503_1.png differ
diff --git a/docs/images/troubleshooting/503_2.png b/docs/images/troubleshooting/503_2.png
new file mode 100644
index 00000000..94ab8f2d
Binary files /dev/null and b/docs/images/troubleshooting/503_2.png differ
diff --git a/docs/images/troubleshooting/503_3.png b/docs/images/troubleshooting/503_3.png
new file mode 100644
index 00000000..4d8484de
Binary files /dev/null and b/docs/images/troubleshooting/503_3.png differ
diff --git a/docs/images/troubleshooting/503_4.png b/docs/images/troubleshooting/503_4.png
new file mode 100644
index 00000000..f80fac2b
Binary files /dev/null and b/docs/images/troubleshooting/503_4.png differ
diff --git a/docs/images/troubleshooting/rg_not_found.png b/docs/images/troubleshooting/rg_not_found.png
new file mode 100644
index 00000000..84bc0092
Binary files /dev/null and b/docs/images/troubleshooting/rg_not_found.png differ
diff --git a/docs/images/troubleshooting/rg_not_found0.png b/docs/images/troubleshooting/rg_not_found0.png
new file mode 100644
index 00000000..ae9a09ae
Binary files /dev/null and b/docs/images/troubleshooting/rg_not_found0.png differ
diff --git a/docs/re-use-log-analytics.md b/docs/re-use-log-analytics.md
new file mode 100644
index 00000000..e928d16b
--- /dev/null
+++ b/docs/re-use-log-analytics.md
@@ -0,0 +1,31 @@
+[← Back to *DEPLOYMENT* guide](DeploymentGuide.md)
+
+# Reusing an Existing Log Analytics Workspace
+To configure your environment to use an existing Log Analytics Workspace, follow these steps:
+---
+### 1. Go to Azure Portal
+Go to https://portal.azure.com
+
+### 2. Search for Log Analytics
+In the search bar at the top, type "Log Analytics workspaces" and click on it and click on the workspace you want to use.
+
+
+
+### 3. Copy Resource ID
+In the Overview pane, Click on JSON View
+
+
+
+Copy Resource ID that is your Workspace ID
+
+
+
+### 4. Set the Workspace ID in Your Environment
+Run the following command in your terminal
+```bash
+azd env set AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID '<workspace-resource-id>'
+```
+Replace `<workspace-resource-id>` with the value obtained from Step 3.
+
+### 5. Continue Deployment
+Proceed with the next steps in the [deployment guide](DeploymentGuide.md).