@@ -115,7 +115,6 @@ def run_impl(
115115
116116 # start unified inference server
117117 self .start_device_inference_gateway (
118- run_id , end_point_name , model_id , model_name , model_version ,
119118 agent_config = self .agent_config , inference_port = inference_port )
120119
121120 # start inference monitor server
@@ -464,20 +463,16 @@ def process_deployment_result_message(self, topic=None, payload=None):
464463
465464 @staticmethod
466465 def start_device_inference_gateway (
467- run_id , end_point_name , model_id ,
468- model_name , model_version , inference_port = ServerConstants .MODEL_INFERENCE_DEFAULT_PORT ,
466+ inference_port = ServerConstants .MODEL_INFERENCE_DEFAULT_PORT ,
469467 agent_config = None , redis_addr = "localhost" , redis_port = 6379 , redis_password = "fedml_default"
470468 ):
471469 # start unified inference server
472- running_model_name = ServerConstants .get_running_model_name (end_point_name ,
473- model_name , model_version , run_id , model_id )
474470 python_program = get_python_program ()
475471 master_port = os .getenv ("FEDML_MASTER_PORT" , None )
476472 if master_port is not None :
477473 inference_port = int (master_port )
478474 if not ServerConstants .is_running_on_k8s ():
479- logging .info (f"start the model inference gateway, end point { run_id } , "
480- f"model name { model_name } at port { inference_port } ..." )
475+ logging .info (f"start the model inference gateway..." )
481476 use_mqtt_inference = os .getenv ("FEDML_USE_MQTT_INFERENCE" , "False" )
482477 use_mqtt_inference = True if use_mqtt_inference .lower () == 'true' else False
483478 use_worker_gateway = os .getenv ("FEDML_USE_WORKER_GATEWAY" , "False" )
@@ -501,8 +496,8 @@ def start_device_inference_gateway(
501496 "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
502497 "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
503498 "--log-level critical" .format (
504- redis_addr , redis_port , redis_password , end_point_name ,
505- model_name , model_version , "" , fedml .get_env_version (), use_mqtt_inference ,
499+ redis_addr , str ( redis_port ) , redis_password , "" ,
500+ "" , "" , "" , fedml .get_env_version (), use_mqtt_inference ,
506501 use_worker_gateway , ext_info , python_program , inference_gw_cmd , str (inference_port ),
507502 fedml_base_dir ),
508503 should_capture_stdout = False , should_capture_stderr = False )
@@ -570,8 +565,7 @@ def recover_inference_and_monitor():
570565 pass
571566
572567 FedMLDeployMasterJobRunner .start_device_inference_gateway (
573- run_id , end_point_name , model_id , model_name , model_version , inference_port = inference_port ,
574- agent_config = agent_config )
568+ inference_port = inference_port , agent_config = agent_config )
575569
576570 FedMLDeployMasterJobRunner .stop_device_inference_monitor (
577571 run_id , end_point_name , model_id , model_name , model_version )