Handle when model not in cache
Signed-off-by: Sicheng Song <sicheng.song@outlook.com>
b4sjoo committed Mar 1, 2025
1 parent 584b824 commit c10ffb3
Showing 1 changed file with 10 additions and 4 deletions.
@@ -88,12 +88,18 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client
         String modelId = getParameterId(request, PARAMETER_MODEL_ID);
         Optional<FunctionName> functionName = modelManager.getOptionalModelFunctionName(modelId);

-        if (userAlgorithm != null && functionName.isPresent()) {
-            MLPredictionTaskRequest mlPredictionTaskRequest = getRequest(modelId, functionName.get().name(), userAlgorithm, request);
-            return channel -> client
-                .execute(MLPredictionTaskAction.INSTANCE, mlPredictionTaskRequest, new RestToXContentListener<>(channel));
+        // check if the model is in cache
+        if (functionName.isPresent()) {
+            MLPredictionTaskRequest predictionRequest = getRequest(
+                modelId,
+                functionName.get().name(),
+                Objects.requireNonNullElse(userAlgorithm, functionName.get().name()),
+                request
+            );
+            return channel -> client.execute(MLPredictionTaskAction.INSTANCE, predictionRequest, new RestToXContentListener<>(channel));
         }

+        // If the model isn't in cache
         return channel -> {
             ActionListener<MLModel> listener = ActionListener.wrap(mlModel -> {
                 String modelType = mlModel.getAlgorithm().name();
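
For context, here is a minimal, self-contained sketch of the algorithm-resolution behavior the new branch introduces. It is not the plugin's actual code; the class and method names are made up for illustration. The point it demonstrates: when the model's function name is in cache, the user-supplied algorithm may now be null and Objects.requireNonNullElse falls back to the cached function name; only on a cache miss does the handler go on to fetch the MLModel and read mlModel.getAlgorithm().name().

import java.util.Objects;
import java.util.Optional;

// Hypothetical stand-in for the resolution logic in the diff above; names are illustrative only.
class AlgorithmResolutionSketch {

    // Returns the algorithm to use for the prediction request, or null when the model
    // is not in cache and the caller must fetch the MLModel from the model index instead.
    static String resolveAlgorithm(String userAlgorithm, Optional<String> cachedFunctionName) {
        if (cachedFunctionName.isPresent()) {
            // Cache hit: the URL algorithm is optional; fall back to the cached function name.
            return Objects.requireNonNullElse(userAlgorithm, cachedFunctionName.get());
        }
        // Cache miss: handled by the async listener path in the diff (mlModel.getAlgorithm().name()).
        return null;
    }

    public static void main(String[] args) {
        System.out.println(resolveAlgorithm(null, Optional.of("TEXT_EMBEDDING")));     // TEXT_EMBEDDING
        System.out.println(resolveAlgorithm("KMEANS", Optional.of("TEXT_EMBEDDING"))); // KMEANS
        System.out.println(resolveAlgorithm(null, Optional.empty()));                  // null -> fetch model
    }
}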
