Skip to content

Commit 21a8a4c

Browse files
committed
[Deploy] Change a few more places related to the gateway port.
1 parent f0dd29e commit 21a8a4c

1 file changed

Lines changed: 6 additions & 7 deletions

File tree

python/fedml/computing/scheduler/model_scheduler/master_job_runner.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,8 @@ def run_impl(
144144
# No device is added, updated or removed
145145
logging.info("No device is added, updated or removed. No action needed for reconciliation.")
146146
ip = GeneralConstants.get_ip_address(self.request_json)
147-
master_port = os.getenv("FEDML_MASTER_PORT", None)
147+
master_port = os.environ.get(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
148+
ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
148149
if master_port is not None:
149150
inference_port = int(master_port)
150151
model_inference_port = inference_port
@@ -299,9 +300,8 @@ def process_deployment_result_message(self, topic=None, payload=None):
299300
else:
300301
# This is the last worker that failed, so we should continue to "ABORTED" status
301302
model_config_parameters = self.request_json["parameters"]
302-
inference_port = model_config_parameters.get("server_internal_port",
303-
ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
304-
inference_port_external = model_config_parameters.get("server_external_port", inference_port)
303+
inference_port_external = os.environ.get(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
304+
ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
305305
ip = GeneralConstants.get_ip_address(self.request_json)
306306
if ip.startswith("http://") or ip.startswith("https://"):
307307
model_inference_url = "{}/inference/{}".format(ip, end_point_id)
@@ -753,9 +753,8 @@ def parse_model_run_params(running_json):
753753
model_version = model_config["model_version"]
754754
model_config_parameters = running_json.get("parameters", {})
755755

756-
inference_port = model_config_parameters.get("server_internal_port", # Internal port is for the gateway
757-
ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
758-
inference_port_external = model_config_parameters.get("server_external_port", inference_port)
756+
inference_port = int(os.environ.get(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
757+
ServerConstants.MODEL_INFERENCE_DEFAULT_PORT))
759758

760759
return run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
761760
model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \

0 commit comments

Comments (0)