Skip to content

Commit bac0fff

Browse files
Refactor services for improved readability and maintainability
- Cleaned up whitespace and formatting in blob_service.py, cosmos_service.py, search_service.py, and settings.py.
- Ensured consistent spacing and indentation throughout the codebase.
- Enhanced code clarity by removing unnecessary blank lines and aligning comments.
- No functional changes were made; this commit focuses solely on code style improvements.
1 parent 0f6e367 commit bac0fff

9 files changed

Lines changed: 699 additions & 703 deletions

File tree

content-gen/src/backend/agents/image_content_agent.py

Lines changed: 50 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -18,24 +18,24 @@ def _truncate_for_dalle(product_description: str, max_chars: int = 1500) -> str:
1818
"""
1919
Truncate product descriptions to fit DALL-E's 4000 character limit.
2020
Extracts the most visually relevant information (colors, hex codes, finishes).
21-
21+
2222
Args:
2323
product_description: The full product description(s)
2424
max_chars: Maximum characters to allow for product context
25-
25+
2626
Returns:
2727
Truncated description with essential visual details
2828
"""
2929
if not product_description or len(product_description) <= max_chars:
3030
return product_description
31-
31+
3232
import re
33-
33+
3434
# Extract essential visual info: product names, hex codes, color descriptions
3535
lines = product_description.split('\n')
3636
essential_parts = []
3737
current_product = ""
38-
38+
3939
for line in lines:
4040
# Keep product name headers
4141
if line.startswith('### '):
@@ -54,13 +54,13 @@ def _truncate_for_dalle(product_description: str, max_chars: int = 1500) -> str:
5454
# Keep finish descriptions
5555
elif 'finish' in line.lower() or 'matte' in line.lower() or 'eggshell' in line.lower():
5656
essential_parts.append(line.strip()[:200])
57-
57+
5858
result = '\n'.join(essential_parts)
59-
59+
6060
# If still too long, just truncate with ellipsis
6161
if len(result) > max_chars:
62-
result = result[:max_chars-50] + '\n\n[Additional details truncated for DALL-E]'
63-
62+
result = result[:max_chars - 50] + '\n\n[Additional details truncated for DALL-E]'
63+
6464
return result
6565

6666

@@ -73,9 +73,9 @@ async def generate_dalle_image(
7373
) -> dict:
7474
"""
7575
Generate a marketing image using DALL-E 3, gpt-image-1, or gpt-image-1.5.
76-
76+
7777
The model used is determined by AZURE_OPENAI_IMAGE_MODEL setting.
78-
78+
7979
Args:
8080
prompt: The main image generation prompt
8181
product_description: Auto-generated description of product image (for context)
@@ -86,14 +86,14 @@ async def generate_dalle_image(
8686
quality: Image quality (model-specific, uses settings default if not provided)
8787
- dall-e-3: standard, hd
8888
- gpt-image-1/1.5: low, medium, high, auto
89-
89+
9090
Returns:
9191
Dictionary containing generated image data and metadata
9292
"""
9393
# Determine which model to use
9494
image_model = app_settings.azure_openai.effective_image_model
9595
logger.info(f"Using image generation model: {image_model}")
96-
96+
9797
# Use appropriate generator based on model
9898
if image_model in ["gpt-image-1", "gpt-image-1.5"]:
9999
return await _generate_gpt_image(prompt, product_description, scene_description, size, quality)
@@ -110,31 +110,31 @@ async def _generate_dalle_image(
110110
) -> dict:
111111
"""
112112
Generate a marketing image using DALL-E 3.
113-
113+
114114
Args:
115115
prompt: The main image generation prompt
116116
product_description: Auto-generated description of product image (for context)
117117
scene_description: Scene/setting description from creative brief
118118
size: Image size (1024x1024, 1024x1792, 1792x1024)
119119
quality: Image quality (standard, hd)
120-
120+
121121
Returns:
122122
Dictionary containing generated image data and metadata
123123
"""
124124
brand = app_settings.brand_guidelines
125-
125+
126126
# Use defaults from settings if not provided
127127
size = size or app_settings.azure_openai.image_size
128128
quality = quality or app_settings.azure_openai.image_quality
129-
129+
130130
# DALL-E 3 has a 4000 character limit for prompts
131131
# Truncate product descriptions to essential visual info
132132
truncated_product_desc = _truncate_for_dalle(product_description, max_chars=1500)
133-
133+
134134
# Also truncate the main prompt if it's too long
135135
main_prompt = prompt[:1000] if len(prompt) > 1000 else prompt
136136
scene_desc = scene_description[:500] if scene_description and len(scene_description) > 500 else scene_description
137-
137+
138138
# Build the full prompt with product context and brand guidelines
139139
full_prompt = f"""⚠️ ABSOLUTE RULE: THIS IMAGE MUST CONTAIN ZERO TEXT. NO WORDS. NO LETTERS. NO PRODUCT NAMES. NO LABELS.
140140
@@ -153,7 +153,7 @@ async def _generate_dalle_image(
153153
154154
MANDATORY FINAL CHECKLIST:
155155
✗ NO product names in the image
156-
✗ NO color names in the image
156+
✗ NO color names in the image
157157
✗ NO text overlays or labels
158158
✗ NO typography or lettering of any kind
159159
✗ NO watermarks or logos
@@ -162,7 +162,7 @@ async def _generate_dalle_image(
162162
✓ Accurately reproduce product colors using exact hex codes
163163
✓ Professional, polished marketing image
164164
"""
165-
165+
166166
# Final safety check - DALL-E 3 has 4000 char limit
167167
if len(full_prompt) > 3900:
168168
logger.warning(f"Prompt too long ({len(full_prompt)} chars), truncating...")
@@ -190,20 +190,20 @@ async def _generate_dalle_image(
190190
credential = ManagedIdentityCredential(client_id=client_id)
191191
else:
192192
credential = DefaultAzureCredential()
193-
193+
194194
# Get token for Azure OpenAI
195195
token = await credential.get_token("https://cognitiveservices.azure.com/.default")
196-
196+
197197
# Use the dedicated DALL-E endpoint if configured, otherwise fall back to main endpoint
198198
dalle_endpoint = app_settings.azure_openai.dalle_endpoint or app_settings.azure_openai.endpoint
199199
logger.info(f"Using DALL-E endpoint: {dalle_endpoint}")
200-
200+
201201
client = AsyncAzureOpenAI(
202202
azure_endpoint=dalle_endpoint,
203203
azure_ad_token=token.token,
204204
api_version=app_settings.azure_openai.preview_api_version,
205205
)
206-
206+
207207
try:
208208
response = await client.images.generate(
209209
model=app_settings.azure_openai.dalle_model,
@@ -213,9 +213,9 @@ async def _generate_dalle_image(
213213
n=1,
214214
response_format="b64_json"
215215
)
216-
216+
217217
image_data = response.data[0]
218-
218+
219219
return {
220220
"success": True,
221221
"image_base64": image_data.b64_json,
@@ -226,7 +226,7 @@ async def _generate_dalle_image(
226226
finally:
227227
# Properly close the async client to avoid unclosed session warnings
228228
await client.close()
229-
229+
230230
except Exception as e:
231231
logger.exception(f"Error generating DALL-E image: {e}")
232232
return {
@@ -246,50 +246,50 @@ async def _generate_gpt_image(
246246
) -> dict:
247247
"""
248248
Generate a marketing image using gpt-image-1 or gpt-image-1.5.
249-
249+
250250
gpt-image models have different capabilities than DALL-E 3:
251251
- Supports larger prompt sizes
252252
- Different size options: 1024x1024, 1536x1024, 1024x1536, auto
253253
- Different quality options: low, medium, high, auto
254254
- May have better instruction following
255-
255+
256256
Args:
257257
prompt: The main image generation prompt
258258
product_description: Auto-generated description of product image (for context)
259259
scene_description: Scene/setting description from creative brief
260260
size: Image size (1024x1024, 1536x1024, 1024x1536, auto)
261261
quality: Image quality (low, medium, high, auto)
262-
262+
263263
Returns:
264264
Dictionary containing generated image data and metadata
265265
"""
266266
brand = app_settings.brand_guidelines
267-
267+
268268
# Use defaults from settings if not provided
269269
# Map DALL-E quality settings to gpt-image-1 or gpt-image-1.5 equivalents if needed
270270
size = size or app_settings.azure_openai.image_size
271271
quality = quality or app_settings.azure_openai.image_quality
272-
272+
273273
# Map DALL-E quality values to gpt-image-1 or gpt-image-1.5 equivalents
274274
quality_mapping = {
275275
"standard": "medium",
276276
"hd": "high",
277277
}
278278
quality = quality_mapping.get(quality, quality)
279-
279+
280280
# Map DALL-E sizes to gpt-image-1 or gpt-image-1.5 equivalents if needed
281281
size_mapping = {
282282
"1024x1792": "1024x1536", # Closest equivalent
283283
"1792x1024": "1536x1024", # Closest equivalent
284284
}
285285
size = size_mapping.get(size, size)
286-
286+
287287
# gpt-image-1 can handle larger prompts, so we can include more context
288288
truncated_product_desc = _truncate_for_dalle(product_description, max_chars=3000)
289-
289+
290290
main_prompt = prompt[:2000] if len(prompt) > 2000 else prompt
291291
scene_desc = scene_description[:1000] if scene_description and len(scene_description) > 1000 else scene_description
292-
292+
293293
# Build the full prompt with product context and brand guidelines
294294
full_prompt = f"""⚠️ ABSOLUTE RULE: THIS IMAGE MUST CONTAIN ZERO TEXT. NO WORDS. NO LETTERS. NO PRODUCT NAMES. NO COLOR NAMES. NO LABELS.
295295
@@ -326,25 +326,23 @@ async def _generate_gpt_image(
326326
credential = ManagedIdentityCredential(client_id=client_id)
327327
else:
328328
credential = DefaultAzureCredential()
329-
329+
330330
# Get token for Azure OpenAI
331331
token = await credential.get_token("https://cognitiveservices.azure.com/.default")
332-
332+
333333
# Use gpt-image-1 specific endpoint if configured, otherwise DALL-E endpoint, otherwise main endpoint
334-
image_endpoint = (
335-
app_settings.azure_openai.gpt_image_endpoint or
336-
app_settings.azure_openai.dalle_endpoint or
337-
app_settings.azure_openai.endpoint
338-
)
334+
image_endpoint = (app_settings.azure_openai.gpt_image_endpoint
335+
or app_settings.azure_openai.dalle_endpoint
336+
or app_settings.azure_openai.endpoint)
339337
logger.info(f"Using gpt-image-1 endpoint: {image_endpoint}")
340-
338+
341339
# Use the image-specific API version for gpt-image-1 (requires 2025-04-01-preview or newer)
342340
client = AsyncAzureOpenAI(
343341
azure_endpoint=image_endpoint,
344342
azure_ad_token=token.token,
345343
api_version=app_settings.azure_openai.image_api_version,
346344
)
347-
345+
348346
try:
349347
# gpt-image-1/1.5 API call - note: gpt-image doesn't support response_format parameter
350348
# It returns base64 data directly in the response
@@ -355,12 +353,12 @@ async def _generate_gpt_image(
355353
quality=quality,
356354
n=1,
357355
)
358-
356+
359357
image_data = response.data[0]
360-
358+
361359
# gpt-image-1 returns b64_json directly without needing response_format parameter
362360
image_base64 = getattr(image_data, 'b64_json', None)
363-
361+
364362
# If no b64_json, try to get URL and fetch the image
365363
if not image_base64 and hasattr(image_data, 'url') and image_data.url:
366364
import aiohttp
@@ -370,15 +368,15 @@ async def _generate_gpt_image(
370368
import base64
371369
image_bytes = await resp.read()
372370
image_base64 = base64.b64encode(image_bytes).decode('utf-8')
373-
371+
374372
if not image_base64:
375373
return {
376374
"success": False,
377375
"error": "No image data returned from gpt-image-1",
378376
"prompt_used": full_prompt,
379377
"model": "gpt-image-1",
380378
}
381-
379+
382380
return {
383381
"success": True,
384382
"image_base64": image_base64,
@@ -389,7 +387,7 @@ async def _generate_gpt_image(
389387
finally:
390388
# Properly close the async client to avoid unclosed session warnings
391389
await client.close()
392-
390+
393391
except Exception as e:
394392
logger.exception(f"Error generating gpt-image-1 image: {e}")
395393
return {

0 commit comments

Comments (0)