# REST API के साथ उपयोग करें

Serverless Hosted API में सभी मॉडल और Workflows के लिए एक ही endpoint है:

```
https://serverless.roboflow.com
```

## HTTP endpoints

## Legacy Infer From Request

> Legacy inference endpoint for object detection, instance segmentation, and classification.\
> \
> Args:\
> &#x20;   background\_tasks: (BackgroundTasks) pool of fastapi background tasks\
> &#x20;   dataset\_id (str): ID of a Roboflow dataset corresponding to the model to use for inference OR workspace ID\
> &#x20;   version\_id (str): ID of a Roboflow dataset version corresponding to the model to use for inference OR model ID\
> &#x20;   api\_key (Optional\[str], default None): Roboflow API Key passed to the model during initialization for artifact retrieval.\
> &#x20;   \# Other parameters described in the function signature...\
> \
> Returns:\
> &#x20;   Union\[InstanceSegmentationInferenceResponse, KeypointsDetectionInferenceResponse, ObjectDetectionInferenceResponse, ClassificationInferenceResponse, MultiLabelClassificationInferenceResponse, SemanticSegmentationInferenceResponse, Any]: The response containing the inference results.

```json
{"openapi":"3.1.0","info":{"title":"Roboflow Inference Server","version":"1.2.3"},"paths":{"/{dataset_id}/{version_id}":{"post":{"summary":"Legacy Infer From Request","description":"Legacy inference endpoint for object detection, instance segmentation, and classification.\n\nArgs:\n    background_tasks: (BackgroundTasks) pool of fastapi background tasks\n    dataset_id (str): ID of a Roboflow dataset corresponding to the model to use for inference OR workspace ID\n    version_id (str): ID of a Roboflow dataset version corresponding to the model to use for inference OR model ID\n    api_key (Optional[str], default None): Roboflow API Key passed to the model during initialization for artifact retrieval.\n    # Other parameters described in the function signature...\n\nReturns:\n    Union[InstanceSegmentationInferenceResponse, KeypointsDetectionInferenceRequest, ObjectDetectionInferenceResponse, ClassificationInferenceResponse, MultiLabelClassificationInferenceResponse, SemanticSegmentationInferenceResponse, Any]: The response containing the inference results.","operationId":"legacy_infer_from_request__dataset_id___version_id__post","parameters":[{"name":"dataset_id","in":"path","required":true,"schema":{"type":"string","description":"ID of a Roboflow dataset corresponding to the model to use for inference OR workspace ID","title":"Dataset Id"},"description":"ID of a Roboflow dataset corresponding to the model to use for inference OR workspace ID"},{"name":"version_id","in":"path","required":true,"schema":{"type":"string","description":"ID of a Roboflow dataset version corresponding to the model to use for inference OR model ID","title":"Version Id"},"description":"ID of a Roboflow dataset version corresponding to the model to use for inference OR model ID"},{"name":"api_key","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"Roboflow API Key that will be passed to the model during initialization for artifact 
retrieval","title":"Api Key"},"description":"Roboflow API Key that will be passed to the model during initialization for artifact retrieval"},{"name":"confidence","in":"query","required":false,"schema":{"type":"number","description":"The confidence threshold used to filter out predictions","default":0.4,"title":"Confidence"},"description":"The confidence threshold used to filter out predictions"},{"name":"keypoint_confidence","in":"query","required":false,"schema":{"type":"number","description":"The confidence threshold used to filter out keypoints that are not visible based on model confidence","default":0,"title":"Keypoint Confidence"},"description":"The confidence threshold used to filter out keypoints that are not visible based on model confidence"},{"name":"format","in":"query","required":false,"schema":{"type":"string","description":"One of 'json' or 'image'. If 'json' prediction data is return as a JSON string. If 'image' prediction data is visualized and overlayed on the original input image.","default":"json","title":"Format"},"description":"One of 'json' or 'image'. If 'json' prediction data is return as a JSON string. If 'image' prediction data is visualized and overlayed on the original input image."},{"name":"image","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"The publically accessible URL of an image to use for inference.","title":"Image"},"description":"The publically accessible URL of an image to use for inference."},{"name":"image_type","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"One of base64 or numpy. Note, numpy input is not supported for Roboflow Hosted Inference.","default":"base64","title":"Image Type"},"description":"One of base64 or numpy. 
Note, numpy input is not supported for Roboflow Hosted Inference."},{"name":"labels","in":"query","required":false,"schema":{"anyOf":[{"type":"boolean"},{"type":"null"}],"description":"If true, labels will be include in any inference visualization.","default":false,"title":"Labels"},"description":"If true, labels will be include in any inference visualization."},{"name":"mask_decode_mode","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"One of 'accurate' or 'fast'. If 'accurate' the mask will be decoded using the original image size. If 'fast' the mask will be decoded using the original mask size. 'accurate' is slower but more accurate.","default":"accurate","title":"Mask Decode Mode"},"description":"One of 'accurate' or 'fast'. If 'accurate' the mask will be decoded using the original image size. If 'fast' the mask will be decoded using the original mask size. 'accurate' is slower but more accurate."},{"name":"tradeoff_factor","in":"query","required":false,"schema":{"anyOf":[{"type":"number"},{"type":"null"}],"description":"The amount to tradeoff between 0='fast' and 1='accurate'","default":0,"title":"Tradeoff Factor"},"description":"The amount to tradeoff between 0='fast' and 1='accurate'"},{"name":"max_detections","in":"query","required":false,"schema":{"type":"integer","description":"The maximum number of detections to return. This is used to limit the number of predictions returned by the model. The model may return more predictions than this number, but only the top `max_detections` predictions will be returned.","default":300,"title":"Max Detections"},"description":"The maximum number of detections to return. This is used to limit the number of predictions returned by the model. 
The model may return more predictions than this number, but only the top `max_detections` predictions will be returned."},{"name":"overlap","in":"query","required":false,"schema":{"type":"number","description":"The IoU threhsold that must be met for a box pair to be considered duplicate during NMS","default":0.3,"title":"Overlap"},"description":"The IoU threhsold that must be met for a box pair to be considered duplicate during NMS"},{"name":"stroke","in":"query","required":false,"schema":{"type":"integer","description":"The stroke width used when visualizing predictions","default":1,"title":"Stroke"},"description":"The stroke width used when visualizing predictions"},{"name":"disable_preproc_auto_orient","in":"query","required":false,"schema":{"anyOf":[{"type":"boolean"},{"type":"null"}],"description":"If true, disables automatic image orientation","default":false,"title":"Disable Preproc Auto Orient"},"description":"If true, disables automatic image orientation"},{"name":"disable_preproc_contrast","in":"query","required":false,"schema":{"anyOf":[{"type":"boolean"},{"type":"null"}],"description":"If true, disables automatic contrast adjustment","default":false,"title":"Disable Preproc Contrast"},"description":"If true, disables automatic contrast adjustment"},{"name":"disable_preproc_grayscale","in":"query","required":false,"schema":{"anyOf":[{"type":"boolean"},{"type":"null"}],"description":"If true, disables automatic grayscale conversion","default":false,"title":"Disable Preproc Grayscale"},"description":"If true, disables automatic grayscale conversion"},{"name":"disable_preproc_static_crop","in":"query","required":false,"schema":{"anyOf":[{"type":"boolean"},{"type":"null"}],"description":"If true, disables automatic static crop","default":false,"title":"Disable Preproc Static Crop"},"description":"If true, disables automatic static 
crop"},{"name":"disable_active_learning","in":"query","required":false,"schema":{"anyOf":[{"type":"boolean"},{"type":"null"}],"description":"If true, the predictions will be prevented from registration by Active Learning (if the functionality is enabled)","default":false,"title":"Disable Active Learning"},"description":"If true, the predictions will be prevented from registration by Active Learning (if the functionality is enabled)"},{"name":"active_learning_target_dataset","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"Parameter to be used when Active Learning data registration should happen against different dataset than the one pointed by model_id","title":"Active Learning Target Dataset"},"description":"Parameter to be used when Active Learning data registration should happen against different dataset than the one pointed by model_id"},{"name":"source","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"The source of the inference request","default":"external","title":"Source"},"description":"The source of the inference request"},{"name":"source_info","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"The detailed source information of the inference request","default":"external","title":"Source Info"},"description":"The detailed source information of the inference request"}],"responses":{"200":{"description":"Successful 
Response","content":{"application/json":{"schema":{"anyOf":[{"$ref":"#/components/schemas/InstanceSegmentationInferenceResponse"},{"$ref":"#/components/schemas/KeypointsDetectionInferenceResponse"},{"$ref":"#/components/schemas/ObjectDetectionInferenceResponse"},{"$ref":"#/components/schemas/ClassificationInferenceResponse"},{"$ref":"#/components/schemas/MultiLabelClassificationInferenceResponse"},{"$ref":"#/components/schemas/SemanticSegmentationInferenceResponse"},{"$ref":"#/components/schemas/StubResponse"},{}],"title":"Response Legacy Infer From Request  Dataset Id   Version Id  Post"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}}},"components":{"schemas":{"InstanceSegmentationInferenceResponse":{"properties":{"visualization":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Visualization","description":"Base64 encoded string containing prediction visualization image data"},"inference_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Inference Id","description":"Unique identifier of inference"},"frame_id":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Frame Id","description":"The frame id of the image used in inference if the input was a video"},"time":{"anyOf":[{"type":"number"},{"type":"null"}],"title":"Time","description":"The time in seconds it took to produce the predictions including image preprocessing"},"image":{"anyOf":[{"items":{"$ref":"#/components/schemas/InferenceResponseImage"},"type":"array"},{"$ref":"#/components/schemas/InferenceResponseImage"}],"title":"Image"},"predictions":{"items":{"$ref":"#/components/schemas/InstanceSegmentationPrediction"},"type":"array","title":"Predictions"}},"type":"object","required":["image","predictions"],"title":"InstanceSegmentationInferenceResponse","description":"Instance Segmentation inference response.\n\nAttributes:\n    predictions 
(List[inference.core.entities.responses.inference.InstanceSegmentationPrediction]): List of instance segmentation predictions."},"InferenceResponseImage":{"properties":{"width":{"type":"integer","title":"Width","description":"The original width of the image used in inference"},"height":{"type":"integer","title":"Height","description":"The original height of the image used in inference"}},"type":"object","required":["width","height"],"title":"InferenceResponseImage","description":"Inference response image information.\n\nAttributes:\n    width (int): The original width of the image used in inference.\n    height (int): The original height of the image used in inference."},"InstanceSegmentationPrediction":{"properties":{"x":{"type":"number","title":"X","description":"The center x-axis pixel coordinate of the prediction"},"y":{"type":"number","title":"Y","description":"The center y-axis pixel coordinate of the prediction"},"width":{"type":"number","title":"Width","description":"The width of the prediction bounding box in number of pixels"},"height":{"type":"number","title":"Height","description":"The height of the prediction bounding box in number of pixels"},"confidence":{"type":"number","title":"Confidence","description":"The detection confidence as a fraction between 0 and 1"},"class":{"type":"string","title":"Class","description":"The predicted class label"},"class_id":{"type":"integer","title":"Class Id","description":"The class id of the prediction"},"detection_id":{"type":"string","title":"Detection Id","description":"Unique identifier of detection"},"parent_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Parent Id","description":"Identifier of parent image region"},"class_confidence":{"anyOf":[{"type":"number"},{"type":"null"}],"title":"Class Confidence","description":"The class label confidence as a fraction between 0 and 1"},"points":{"items":{"$ref":"#/components/schemas/Point-Output"},"type":"array","title":"Points","description":"The list of 
points that make up the instance polygon"}},"type":"object","required":["x","y","width","height","confidence","class","class_id","points"],"title":"InstanceSegmentationPrediction"},"Point-Output":{"properties":{"x":{"type":"number","title":"X","description":"The x-axis pixel coordinate of the point"},"y":{"type":"number","title":"Y","description":"The y-axis pixel coordinate of the point"}},"type":"object","required":["x","y"],"title":"Point","description":"Point coordinates.\n\nAttributes:\n    x (float): The x-axis pixel coordinate of the point.\n    y (float): The y-axis pixel coordinate of the point."},"KeypointsDetectionInferenceResponse":{"properties":{"visualization":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Visualization","description":"Base64 encoded string containing prediction visualization image data"},"inference_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Inference Id","description":"Unique identifier of inference"},"frame_id":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Frame Id","description":"The frame id of the image used in inference if the input was a video"},"time":{"anyOf":[{"type":"number"},{"type":"null"}],"title":"Time","description":"The time in seconds it took to produce the predictions including image preprocessing"},"image":{"anyOf":[{"items":{"$ref":"#/components/schemas/InferenceResponseImage"},"type":"array"},{"$ref":"#/components/schemas/InferenceResponseImage"}],"title":"Image"},"predictions":{"items":{"$ref":"#/components/schemas/KeypointsPrediction"},"type":"array","title":"Predictions"}},"type":"object","required":["image","predictions"],"title":"KeypointsDetectionInferenceResponse"},"KeypointsPrediction":{"properties":{"x":{"type":"number","title":"X","description":"The center x-axis pixel coordinate of the prediction"},"y":{"type":"number","title":"Y","description":"The center y-axis pixel coordinate of the prediction"},"width":{"type":"number","title":"Width","description":"The width of the 
prediction bounding box in number of pixels"},"height":{"type":"number","title":"Height","description":"The height of the prediction bounding box in number of pixels"},"confidence":{"type":"number","title":"Confidence","description":"The detection confidence as a fraction between 0 and 1"},"class":{"type":"string","title":"Class","description":"The predicted class label"},"class_confidence":{"anyOf":[{"type":"number"},{"type":"null"}],"title":"Class Confidence","description":"The class label confidence as a fraction between 0 and 1"},"class_id":{"type":"integer","title":"Class Id","description":"The class id of the prediction"},"tracker_id":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Tracker Id","description":"The tracker id of the prediction if tracking is enabled"},"detection_id":{"type":"string","title":"Detection Id","description":"Unique identifier of detection"},"parent_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Parent Id","description":"Identifier of parent image region. 
Useful when stack of detection-models is in use to refer the RoI being the input to inference"},"keypoints":{"items":{"$ref":"#/components/schemas/Keypoint"},"type":"array","title":"Keypoints"}},"type":"object","required":["x","y","width","height","confidence","class","class_id","keypoints"],"title":"KeypointsPrediction"},"Keypoint":{"properties":{"x":{"type":"number","title":"X","description":"The x-axis pixel coordinate of the point"},"y":{"type":"number","title":"Y","description":"The y-axis pixel coordinate of the point"},"confidence":{"type":"number","title":"Confidence","description":"Model confidence regarding keypoint visibility."},"class_id":{"type":"integer","title":"Class Id","description":"Identifier of keypoint."},"class":{"type":"string","title":"Class","description":"Type of keypoint."}},"type":"object","required":["x","y","confidence","class_id","class"],"title":"Keypoint"},"ObjectDetectionInferenceResponse":{"properties":{"visualization":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Visualization","description":"Base64 encoded string containing prediction visualization image data"},"inference_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Inference Id","description":"Unique identifier of inference"},"frame_id":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Frame Id","description":"The frame id of the image used in inference if the input was a video"},"time":{"anyOf":[{"type":"number"},{"type":"null"}],"title":"Time","description":"The time in seconds it took to produce the predictions including image preprocessing"},"image":{"anyOf":[{"items":{"$ref":"#/components/schemas/InferenceResponseImage"},"type":"array"},{"$ref":"#/components/schemas/InferenceResponseImage"}],"title":"Image"},"predictions":{"items":{"$ref":"#/components/schemas/ObjectDetectionPrediction"},"type":"array","title":"Predictions"}},"type":"object","required":["image","predictions"],"title":"ObjectDetectionInferenceResponse","description":"Object 
Detection inference response.\n\nAttributes:\n    predictions (List[inference.core.entities.responses.inference.ObjectDetectionPrediction]): List of object detection predictions."},"ObjectDetectionPrediction":{"properties":{"x":{"type":"number","title":"X","description":"The center x-axis pixel coordinate of the prediction"},"y":{"type":"number","title":"Y","description":"The center y-axis pixel coordinate of the prediction"},"width":{"type":"number","title":"Width","description":"The width of the prediction bounding box in number of pixels"},"height":{"type":"number","title":"Height","description":"The height of the prediction bounding box in number of pixels"},"confidence":{"type":"number","title":"Confidence","description":"The detection confidence as a fraction between 0 and 1"},"class":{"type":"string","title":"Class","description":"The predicted class label"},"class_confidence":{"anyOf":[{"type":"number"},{"type":"null"}],"title":"Class Confidence","description":"The class label confidence as a fraction between 0 and 1"},"class_id":{"type":"integer","title":"Class Id","description":"The class id of the prediction"},"tracker_id":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Tracker Id","description":"The tracker id of the prediction if tracking is enabled"},"detection_id":{"type":"string","title":"Detection Id","description":"Unique identifier of detection"},"parent_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Parent Id","description":"Identifier of parent image region. 
Useful when stack of detection-models is in use to refer the RoI being the input to inference"}},"type":"object","required":["x","y","width","height","confidence","class","class_id"],"title":"ObjectDetectionPrediction","description":"Object Detection prediction.\n\nAttributes:\n    x (float): The center x-axis pixel coordinate of the prediction.\n    y (float): The center y-axis pixel coordinate of the prediction.\n    width (float): The width of the prediction bounding box in number of pixels.\n    height (float): The height of the prediction bounding box in number of pixels.\n    confidence (float): The detection confidence as a fraction between 0 and 1.\n    class_name (str): The predicted class label.\n    class_confidence (Union[float, None]): The class label confidence as a fraction between 0 and 1.\n    class_id (int): The class id of the prediction"},"ClassificationInferenceResponse":{"properties":{"visualization":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Visualization","description":"Base64 encoded string containing prediction visualization image data"},"inference_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Inference Id","description":"Unique identifier of inference"},"frame_id":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Frame Id","description":"The frame id of the image used in inference if the input was a video"},"time":{"anyOf":[{"type":"number"},{"type":"null"}],"title":"Time","description":"The time in seconds it took to produce the predictions including image preprocessing"},"image":{"anyOf":[{"items":{"$ref":"#/components/schemas/InferenceResponseImage"},"type":"array"},{"$ref":"#/components/schemas/InferenceResponseImage"}],"title":"Image"},"predictions":{"items":{"$ref":"#/components/schemas/ClassificationPrediction"},"type":"array","title":"Predictions"},"top":{"type":"string","title":"Top","description":"The top predicted class 
label","default":""},"confidence":{"type":"number","title":"Confidence","description":"The confidence of the top predicted class label","default":0},"parent_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Parent Id","description":"Identifier of parent image region. Useful when stack of detection-models is in use to refer the RoI being the input to inference"}},"type":"object","required":["image","predictions"],"title":"ClassificationInferenceResponse","description":"Classification inference response.\n\nAttributes:\n    predictions (List[inference.core.entities.responses.inference.ClassificationPrediction]): List of classification predictions.\n    top (str): The top predicted class label.\n    confidence (float): The confidence of the top predicted class label."},"ClassificationPrediction":{"properties":{"class":{"type":"string","title":"Class","description":"The predicted class label"},"class_id":{"type":"integer","title":"Class Id","description":"Numeric ID associated with the class label"},"confidence":{"type":"number","title":"Confidence","description":"The class label confidence as a fraction between 0 and 1"}},"type":"object","required":["class","class_id","confidence"],"title":"ClassificationPrediction","description":"Classification prediction.\n\nAttributes:\n    class_name (str): The predicted class label.\n    class_id (int): Numeric ID associated with the class label.\n    confidence (float): The class label confidence as a fraction between 0 and 1."},"MultiLabelClassificationInferenceResponse":{"properties":{"visualization":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Visualization","description":"Base64 encoded string containing prediction visualization image data"},"inference_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Inference Id","description":"Unique identifier of inference"},"frame_id":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Frame Id","description":"The frame id of the image used in inference if 
the input was a video"},"time":{"anyOf":[{"type":"number"},{"type":"null"}],"title":"Time","description":"The time in seconds it took to produce the predictions including image preprocessing"},"image":{"anyOf":[{"items":{"$ref":"#/components/schemas/InferenceResponseImage"},"type":"array"},{"$ref":"#/components/schemas/InferenceResponseImage"}],"title":"Image"},"predictions":{"additionalProperties":{"$ref":"#/components/schemas/MultiLabelClassificationPrediction"},"type":"object","title":"Predictions"},"predicted_classes":{"items":{"type":"string"},"type":"array","title":"Predicted Classes","description":"The list of predicted classes"},"parent_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Parent Id","description":"Identifier of parent image region. Useful when stack of detection-models is in use to refer the RoI being the input to inference"}},"type":"object","required":["image","predictions","predicted_classes"],"title":"MultiLabelClassificationInferenceResponse","description":"Multi-label Classification inference response.\n\nAttributes:\n    predictions (Dict[str, inference.core.entities.responses.inference.MultiLabelClassificationPrediction]): Dictionary of multi-label classification predictions.\n    predicted_classes (List[str]): The list of predicted classes."},"MultiLabelClassificationPrediction":{"properties":{"confidence":{"type":"number","title":"Confidence","description":"The class label confidence as a fraction between 0 and 1"},"class_id":{"type":"integer","title":"Class Id","description":"Numeric ID associated with the class label"}},"type":"object","required":["confidence","class_id"],"title":"MultiLabelClassificationPrediction","description":"Multi-label Classification prediction.\n\nAttributes:\n    confidence (float): The class label confidence as a fraction between 0 and 
1."},"SemanticSegmentationInferenceResponse":{"properties":{"visualization":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Visualization","description":"Base64 encoded string containing prediction visualization image data"},"inference_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Inference Id","description":"Unique identifier of inference"},"frame_id":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Frame Id","description":"The frame id of the image used in inference if the input was a video"},"time":{"anyOf":[{"type":"number"},{"type":"null"}],"title":"Time","description":"The time in seconds it took to produce the predictions including image preprocessing"},"image":{"anyOf":[{"items":{"$ref":"#/components/schemas/InferenceResponseImage"},"type":"array"},{"$ref":"#/components/schemas/InferenceResponseImage"}],"title":"Image"},"predictions":{"$ref":"#/components/schemas/SemanticSegmentationPrediction"}},"type":"object","required":["image","predictions"],"title":"SemanticSegmentationInferenceResponse","description":"Semantic Segmentation inference response.\n\nAttributes:\n    predictions (inference.core.entities.responses.inference.SemanticSegmentationPrediction): Semantic segmentation predictions."},"SemanticSegmentationPrediction":{"properties":{"segmentation_mask":{"type":"string","title":"Segmentation Mask","description":"base64-encoded PNG of predicted class label at each pixel"},"class_map":{"additionalProperties":{"type":"string"},"type":"object","title":"Class Map","description":"Map of pixel intensity value to class label"},"confidence_mask":{"type":"string","title":"Confidence Mask","description":"base64-encoded PNG of predicted class confidence at each pixel"}},"type":"object","required":["segmentation_mask","class_map","confidence_mask"],"title":"SemanticSegmentationPrediction"},"StubResponse":{"properties":{"visualization":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Visualization","description":"Base64 encoded string 
containing prediction visualization image data"},"inference_id":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Inference Id","description":"Unique identifier of inference"},"frame_id":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Frame Id","description":"The frame id of the image used in inference if the input was a video"},"time":{"anyOf":[{"type":"number"},{"type":"null"}],"title":"Time","description":"The time in seconds it took to produce the predictions including image preprocessing"},"is_stub":{"type":"boolean","title":"Is Stub","description":"Field to mark prediction type as stub"},"model_id":{"type":"string","title":"Model Id","description":"Identifier of a model stub that was called"},"task_type":{"type":"string","title":"Task Type","description":"Task type of the project"}},"type":"object","required":["is_stub","model_id","task_type"],"title":"StubResponse"},"HTTPValidationError":{"properties":{"detail":{"items":{"$ref":"#/components/schemas/ValidationError"},"type":"array","title":"Detail"}},"type":"object","title":"HTTPValidationError"},"ValidationError":{"properties":{"loc":{"items":{"anyOf":[{"type":"string"},{"type":"integer"}]},"type":"array","title":"Location"},"msg":{"type":"string","title":"Message"},"type":{"type":"string","title":"Error Type"}},"type":"object","required":["loc","msg","type"],"title":"ValidationError"}}}}
```
