github.com/alwaysproblem/mlserving-tutorial@v0.0.0-20221124033215-121cfddbfbf4/TFserving/ClientAPI/python/grpc_model_status.py (about)

     1  """Grpc request for model status"""
import grpc
import numpy as np

from tensorflow_serving.apis import get_model_status_pb2
from tensorflow_serving.apis import model_service_pb2_grpc
from tensorflow_serving.apis import predict_pb2
     7  
     8  host = "0.0.0.0"
     9  port = 8500
    10  server = host + ":" + str(port)
    11  timeout_req = 30.0
    12  
    13  req_data = np.array([[1., 2.], [1., 3.]])
    14  
    15  if __name__ == "__main__":
    16  
    17    import argparse
    18  
    19    parse = argparse.ArgumentParser(prog="the tensorflow client for python.")
    20    parse.add_argument(
    21        "-m", "--model", type=str, action="store", dest="model", default="Toy"
    22    )
    23    parse.add_argument(
    24        "-v", "--version", type=int, action="store", dest="version", default=-1
    25    )
    26  
    27    args = parse.parse_args()
    28  
    29    channel = grpc.insecure_channel(server)
    30  
    31    # for output tensor
    32    request = predict_pb2.PredictRequest()
    33    request.model_spec.name = args.model
    34  
    35    if args.version > -1:
    36      request.model_spec.version.value = args.version
    37  
    38    request.model_spec.signature_name = "serving_default"
    39  
    40    # this HandleReloadConfigRequest is for the reload API of the model specified
    41    modelstub = model_service_pb2_grpc.ModelServiceStub(channel)
    42  
    43    # this can get the status for model served.
    44    model_status = modelstub.GetModelStatus(request, timeout_req)
    45    print(model_status)
    46  
    47    # # for output filter out (you can also check the grpc api `predict.proto` )
    48    # print(resp.outputs["output_1"].float_val)