Skip to content

Commit 9194f84

Browse files
committed
[Deploy] Downgrade noisy per-request info logs to debug level.
1 parent 64e8c77 commit 9194f84

2 files changed

Lines changed: 7 additions & 7 deletions

File tree

python/fedml/computing/scheduler/model_scheduler/device_model_cache.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -369,7 +369,7 @@ def get_idle_device(self,
369369
if "model_status" in result_payload and result_payload["model_status"] == "DEPLOYED":
370370
idle_device_list.append({"device_id": device_id, "end_point_id": end_point_id})
371371

372-
logging.info(f"{len(idle_device_list)} devices this model has on it: {idle_device_list}")
372+
logging.debug(f"{len(idle_device_list)} devices this model has on it: {idle_device_list}")
373373

374374
if len(idle_device_list) <= 0:
375375
return None, None
@@ -398,7 +398,7 @@ def get_idle_device(self,
398398
logging.info("Inference Device selection Failed:")
399399
logging.info(e)
400400

401-
logging.info(f"Using Round Robin, the device index is {selected_device_index}")
401+
logging.debug(f"Using Round Robin, the device index is {selected_device_index}")
402402
idle_device_dict = idle_device_list[selected_device_index]
403403

404404
# Note that within the same endpoint_id, there could be one device with multiple same models
@@ -411,7 +411,7 @@ def get_idle_device(self,
411411
# Find deployment result from the target idle device.
412412
try:
413413
for result_item in result_list:
414-
logging.info("enter the for loop")
414+
logging.debug("enter the for loop")
415415
device_id, _, result_payload = self.get_result_item_info(result_item)
416416
found_end_point_id = result_payload["end_point_id"]
417417
found_end_point_name = result_payload["end_point_name"]
@@ -425,7 +425,7 @@ def get_idle_device(self,
425425
if same_model_device_rank > 0:
426426
same_model_device_rank -= 1
427427
continue
428-
logging.info(f"The chosen device is {device_id}")
428+
logging.debug(f"The chosen device is {device_id}")
429429
return result_payload, device_id
430430
except Exception as e:
431431
logging.info(str(e))

python/fedml/computing/scheduler/model_scheduler/device_model_inference.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -230,7 +230,7 @@ async def _predict(
230230
model_metrics.set_start_time(start_time)
231231

232232
# Send inference request to idle device
233-
logging.info("inference url {}.".format(inference_output_url))
233+
logging.debug("inference url {}.".format(inference_output_url))
234234
if inference_output_url != "":
235235
input_list = input_json.get("inputs", input_json)
236236
stream_flag = input_json.get("stream", False)
@@ -329,7 +329,7 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
329329

330330
res = (idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url,
331331
connectivity_type)
332-
logging.info(f"found idle device with metrics: {res}")
332+
logging.debug(f"found idle device with metrics: {res}")
333333

334334
return res
335335

@@ -352,7 +352,7 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input
352352
output_list,
353353
inference_type=inference_type,
354354
timeout=request_timeout_sec)
355-
logging.info(f"Use http inference. return {response_ok}")
355+
logging.debug(f"Use http inference. return {response_ok}")
356356
return inference_response
357357
elif connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY:
358358
logging.warning("Use http proxy inference.")

0 commit comments

Comments (0)