@@ -115,8 +115,7 @@ def run_impl(
 
         # start unified inference server
         self.start_device_inference_gateway(
-            run_id, end_point_name, model_id, model_name, model_version,
-            agent_config=self.agent_config, inference_port=inference_port)
+            inference_port=inference_port, agent_config=self.agent_config)
 
         # start inference monitor server
         self.stop_device_inference_monitor(
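This call-site change works because every parameter left on `start_device_inference_gateway` has a default (see the signature hunk below), so the unified gateway can now be started without any run or endpoint context. A minimal usage sketch; the literal values are illustrative assumptions, not taken from the project:

```python
# Valid after this change: no run/endpoint identifiers are required.
FedMLDeployMasterJobRunner.start_device_inference_gateway()

# Or with only the pieces a caller actually knows (illustrative values):
FedMLDeployMasterJobRunner.start_device_inference_gateway(
    inference_port=5001, agent_config={"mqtt_config": {}})
```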
@@ -464,20 +463,16 @@ def process_deployment_result_message(self, topic=None, payload=None):
 
     @staticmethod
     def start_device_inference_gateway(
-            run_id, end_point_name, model_id,
-            model_name, model_version, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
+            inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
             agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
     ):
         # start unified inference server
-        running_model_name = ServerConstants.get_running_model_name(end_point_name,
-                                                                    model_name, model_version, run_id, model_id)
         python_program = get_python_program()
         master_port = os.getenv("FEDML_MASTER_PORT", None)
         if master_port is not None:
             inference_port = int(master_port)
         if not ServerConstants.is_running_on_k8s():
-            logging.info(f"start the model inference gateway, end point {run_id}, "
-                         f"model name {model_name} at port {inference_port}...")
+            logging.info(f"start the model inference gateway...")
             use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False")
             use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False
             use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False")
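The hunk above keeps the pre-existing port-override rule: when `FEDML_MASTER_PORT` is set in the environment, it wins over the `inference_port` argument. A minimal standalone sketch of that rule, assuming a stand-in value for `ServerConstants.MODEL_INFERENCE_DEFAULT_PORT`:

```python
import os

# Stand-in for ServerConstants.MODEL_INFERENCE_DEFAULT_PORT; the real
# constant's value is not shown in this diff.
MODEL_INFERENCE_DEFAULT_PORT = 5001

def resolve_inference_port(inference_port: int = MODEL_INFERENCE_DEFAULT_PORT) -> int:
    # FEDML_MASTER_PORT, when present, overrides the caller's requested port,
    # mirroring the two context lines kept in the hunk above.
    master_port = os.getenv("FEDML_MASTER_PORT", None)
    return int(master_port) if master_port is not None else inference_port
```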
@@ -501,8 +496,8 @@ def start_device_inference_gateway(
                 "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
                 "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
                 "--log-level critical".format(
-                    redis_addr, redis_port, redis_password, end_point_name,
-                    model_name, model_version, "", fedml.get_env_version(), use_mqtt_inference,
+                    redis_addr, str(redis_port), redis_password, "",
+                    "", "", "", fedml.get_env_version(), use_mqtt_inference,
                     use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
                     fedml_base_dir),
                 should_capture_stdout=False, should_capture_stderr=False)
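For context on the hunk above: the command string exports configuration to the gateway process through environment variables and then runs uvicorn; the endpoint/model slots are now deliberately blank because the unified gateway no longer binds to a single model. A conceptual sketch of the same launch pattern, not the project's exact helper (the module path and environment variable names here are assumptions):

```python
import os
import subprocess
import sys

# subprocess env values must be strings; the diff similarly passes
# redis_port through str() when formatting the command line.
env = dict(os.environ)
env["REDIS_PORT"] = "6379"      # assumed variable name; illustrative value
env["END_POINT_NAME"] = ""      # identity slots intentionally blank now

# "gateway_app:api" is a hypothetical module path, not the project's real one.
subprocess.Popen(
    [sys.executable, "-m", "uvicorn", "gateway_app:api",
     "--host", "0.0.0.0", "--port", "5001", "--log-level", "critical"],
    env=env)
```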
@@ -545,6 +540,14 @@ def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name,
     def recover_inference_and_monitor():
         # noinspection PyBroadException
         try:
+            agent_config = dict()
+            try:
+                agent_config["mqtt_config"], _, _, _ = MLOpsConfigs.fetch_all_configs()
+            except Exception as e:
+                pass
+
+            FedMLDeployMasterJobRunner.start_device_inference_gateway(agent_config=agent_config)
+
             history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs()
             for job in history_jobs.job_list:
                 if job.running_json is None:
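Together with the removal below, the hunk above moves gateway startup out of the per-job recovery loop: the MQTT config is fetched on a best-effort basis and the unified gateway starts exactly once, even when there are no history jobs to recover. A sketch of the resulting control flow, with hypothetical stand-in names for the calls shown in the diff:

```python
# Shape of the recovery path after this change (all names are stand-ins).
def recover(history_jobs):
    config = {}
    try:
        config["mqtt_config"] = fetch_mqtt_config()  # best effort
    except Exception:
        pass  # the gateway is started regardless of config-fetch failures
    start_gateway(agent_config=config)  # exactly once, before the loop
    for job in history_jobs:
        restart_monitor(job)  # per-job work stays inside the loop
```

A side effect of the move is that the gateway call no longer depends on loop variables such as `run_id` or `model_name`, which the old in-loop call had to thread through.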
@@ -563,16 +566,6 @@ def recover_inference_and_monitor():
                 if not is_activated:
                     continue
 
-                agent_config = dict()
-                try:
-                    agent_config["mqtt_config"], _, _, _ = MLOpsConfigs.fetch_all_configs()
-                except Exception as e:
-                    pass
-
-                FedMLDeployMasterJobRunner.start_device_inference_gateway(
-                    run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port,
-                    agent_config=agent_config)
-
                 FedMLDeployMasterJobRunner.stop_device_inference_monitor(
                     run_id, end_point_name, model_id, model_name, model_version)
                 FedMLDeployMasterJobRunner.start_device_inference_monitor(