@@ -1,3 +1,4 @@
+import logging
 from typing import List
 import json
 
@@ -10,6 +11,8 @@
 from ..parser.OutputParserTool import OutputParserTool
 from ..common.Answer import Answer
 
+logger = logging.getLogger(__name__)
+
 
 class OpenAIFunctionsOrchestrator(OrchestratorBase):
     def __init__(self) -> None:
@@ -57,12 +60,14 @@ def orchestrate(
 
         # Call Content Safety tool
         if self.config.prompts.enable_content_safety:
+            logger.debug("Calling content safety with question")
             filtered_user_message = (
                 self.content_safety_checker.validate_input_and_replace_if_harmful(
                     user_message
                 )
             )
             if user_message != filtered_user_message:
+                logger.warning("Content safety detected harmful content in question")
                 messages = output_formatter.parse(
                     question=user_message,
                     answer=filtered_user_message,
@@ -95,7 +100,9 @@ def orchestrate(
         # TODO: call content safety if needed
 
         if result.choices[0].finish_reason == "function_call":
+            logger.info("Function call detected")
             if result.choices[0].message.function_call.name == "search_documents":
+                logger.info("search_documents function detected")
                 question = json.loads(
                     result.choices[0].message.function_call.arguments
                 )["question"]
@@ -110,13 +117,15 @@ def orchestrate(
 
                 # Run post prompt if needed
                 if self.config.prompts.enable_post_answering_prompt:
+                    logger.debug("Running post answering prompt")
                     post_prompt_tool = PostPromptTool()
                     answer = post_prompt_tool.validate_answer(answer)
                     self.log_tokens(
                         prompt_tokens=answer.prompt_tokens,
                         completion_tokens=answer.completion_tokens,
                     )
             elif result.choices[0].message.function_call.name == "text_processing":
+                logger.info("text_processing function detected")
                 text = json.loads(result.choices[0].message.function_call.arguments)[
                     "text"
                 ]
@@ -132,17 +141,20 @@ def orchestrate(
                     completion_tokens=answer.completion_tokens,
                 )
         else:
+            logger.info("No function call detected")
            text = result.choices[0].message.content
            answer = Answer(question=user_message, answer=text)
 
        # Call Content Safety tool
        if self.config.prompts.enable_content_safety:
+            logger.debug("Calling content safety with answer")
            filtered_answer = (
                self.content_safety_checker.validate_output_and_replace_if_harmful(
                    answer.answer
                )
            )
            if answer.answer != filtered_answer:
+                logger.warning("Content safety detected harmful content in answer")
                messages = output_formatter.parse(
                    question=user_message, answer=filtered_answer, source_documents=[]
                )
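The change follows the standard library's module-level logger pattern: one logging.getLogger(__name__) created at import time, then debug/info/warning calls at each decision point in orchestrate. A minimal sketch of how a host application might surface these new messages is below; the basicConfig format string and the dotted module path are illustrative assumptions, not taken from this repository:

import logging

# Illustrative setup only; the accelerator's real entry point may wire up
# handlers differently (e.g. an Azure Application Insights handler).
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)

# Because the orchestrator logger is named after its module via __name__,
# its verbosity can be raised on its own so the logger.debug(...) calls
# added in this diff become visible. The dotted path is hypothetical.
logging.getLogger("utilities.orchestrator.open_ai_functions").setLevel(logging.DEBUG)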