Skip to content

Commit cd9a706

Browse files
Merge branch 'main' into lens-fix-warning-messages
2 parents dd3f46c + 8f70f1b commit cd9a706

File tree

35 files changed

+1724
-324
lines changed

35 files changed

+1724
-324
lines changed

x-pack/plugins/enterprise_search/common/ml_inference_pipeline/index.test.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ describe('getSetProcessorForInferenceType lib function', () => {
7878
description:
7979
"Copy the predicted_value to 'dest' if the prediction_probability is greater than 0.5",
8080
field: destinationField,
81-
if: 'ml.inference.dest.prediction_probability > 0.5',
81+
if: 'ctx.ml.inference.dest.prediction_probability > 0.5',
8282
value: undefined,
8383
};
8484

@@ -191,7 +191,7 @@ describe('generateMlInferencePipelineBody lib function', () => {
191191
description:
192192
"Copy the predicted_value to 'my-destination-field' if the prediction_probability is greater than 0.5",
193193
field: 'my-destination-field',
194-
if: 'ml.inference.my-destination-field.prediction_probability > 0.5',
194+
if: 'ctx.ml.inference.my-destination-field.prediction_probability > 0.5',
195195
},
196196
}),
197197
]),

x-pack/plugins/enterprise_search/common/ml_inference_pipeline/index.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ export const getSetProcessorForInferenceType = (
118118
copy_from: `${prefixedDestinationField}.predicted_value`,
119119
description: `Copy the predicted_value to '${destinationField}' if the prediction_probability is greater than 0.5`,
120120
field: destinationField,
121-
if: `${prefixedDestinationField}.prediction_probability > 0.5`,
121+
if: `ctx.${prefixedDestinationField}.prediction_probability > 0.5`,
122122
value: undefined,
123123
};
124124
} else if (inferenceType === SUPPORTED_PYTORCH_TASKS.TEXT_EMBEDDING) {

x-pack/plugins/enterprise_search/common/types/error_codes.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ export enum ErrorCode {
1515
INDEX_NOT_FOUND = 'index_not_found',
1616
PIPELINE_ALREADY_EXISTS = 'pipeline_already_exists',
1717
PIPELINE_IS_IN_USE = 'pipeline_is_in_use',
18+
PIPELINE_NOT_FOUND = 'pipeline_not_found',
1819
RESOURCE_NOT_FOUND = 'resource_not_found',
1920
UNAUTHORIZED = 'unauthorized',
2021
UNCAUGHT_EXCEPTION = 'uncaught_exception',
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
/*
2+
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
3+
* or more contributor license agreements. Licensed under the Elastic License
4+
* 2.0; you may not use this file except in compliance with the Elastic License
5+
* 2.0.
6+
*/
7+
8+
import { IngestSimulateResponse } from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
9+
10+
import { createApiLogic } from '../../../shared/api_logic/create_api_logic';
11+
12+
import { HttpLogic } from '../../../shared/http';
13+
14+
export interface SimulateExistingMlInterfacePipelineArgs {
15+
docs: string;
16+
indexName: string;
17+
pipelineName: string;
18+
}
19+
export type SimulateExistingMlInterfacePipelineResponse = IngestSimulateResponse;
20+
21+
export const simulateExistingMlInferencePipeline = async ({
22+
docs,
23+
indexName,
24+
pipelineName,
25+
}: SimulateExistingMlInterfacePipelineArgs) => {
26+
const route = `/internal/enterprise_search/indices/${indexName}/ml_inference/pipeline_processors/simulate/${pipelineName}`;
27+
28+
return await HttpLogic.values.http.post<IngestSimulateResponse>(route, {
29+
body: JSON.stringify({
30+
docs,
31+
}),
32+
});
33+
};
34+
35+
export const SimulateExistingMlInterfacePipelineApiLogic = createApiLogic(
36+
['simulate_existing_ml_inference_pipeline_api_logic'],
37+
simulateExistingMlInferencePipeline
38+
);

x-pack/plugins/enterprise_search/public/applications/enterprise_search_content/components/search_index/pipelines/ml_inference/configure_pipeline.tsx

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ import { IndexViewLogic } from '../../index_view_logic';
3333
import { EMPTY_PIPELINE_CONFIGURATION, MLInferenceLogic } from './ml_inference_logic';
3434
import { MlModelSelectOption } from './model_select_option';
3535
import { PipelineSelectOption } from './pipeline_select_option';
36+
import { MODEL_REDACTED_VALUE, MODEL_SELECT_PLACEHOLDER } from './utils';
3637

3738
const MODEL_SELECT_PLACEHOLDER_VALUE = 'model_placeholder$$';
3839
const PIPELINE_SELECT_PLACEHOLDER_VALUE = 'pipeline_placeholder$$';
@@ -79,17 +80,17 @@ export const ConfigurePipeline: React.FC = () => {
7980
useActions(MLInferenceLogic);
8081
const { ingestionMethod } = useValues(IndexViewLogic);
8182

82-
const { destinationField, modelID, pipelineName, sourceField } = configuration;
83+
const { destinationField, existingPipeline, modelID, pipelineName, sourceField } = configuration;
8384
const nameError = formErrors.pipelineName !== undefined && pipelineName.length > 0;
8485
const emptySourceFields = (sourceFields?.length ?? 0) === 0;
8586

8687
const modelOptions: Array<EuiSuperSelectOption<string>> = [
8788
{
8889
disabled: true,
89-
inputDisplay: i18n.translate(
90-
'xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.configure.model.placeholder',
91-
{ defaultMessage: 'Select a model' }
92-
),
90+
inputDisplay:
91+
existingPipeline && pipelineName.length > 0
92+
? MODEL_REDACTED_VALUE
93+
: MODEL_SELECT_PLACEHOLDER,
9394
value: MODEL_SELECT_PLACEHOLDER_VALUE,
9495
},
9596
...supportedMLModels.map((model) => ({
@@ -212,7 +213,7 @@ export const ConfigurePipeline: React.FC = () => {
212213
'xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.configure.name.helpText',
213214
{
214215
defaultMessage:
215-
'Pipeline names are unique within a deployment and can only contain letters, numbers, underscores, and hyphens. The pipeline name will be automatically prefixed with "ml-inference-".',
216+
'Pipeline names are unique within a deployment and can only contain letters, numbers, underscores, and hyphens.',
216217
}
217218
)
218219
}
@@ -223,6 +224,7 @@ export const ConfigurePipeline: React.FC = () => {
223224
data-telemetry-id={`entSearchContent-${ingestionMethod}-pipelines-configureInferencePipeline-uniqueName`}
224225
disabled={inputsDisabled}
225226
fullWidth
227+
prepend="ml-inference-"
226228
placeholder={i18n.translate(
227229
'xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.configure.namePlaceholder',
228230
{

x-pack/plugins/enterprise_search/public/applications/enterprise_search_content/components/search_index/pipelines/ml_inference/ml_inference_logic.test.ts

Lines changed: 170 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
import { LogicMounter } from '../../../../../__mocks__/kea_logic';
99

1010
import { HttpResponse } from '@kbn/core/public';
11+
import { TrainedModelConfigResponse } from '@kbn/ml-plugin/common/types/trained_models';
1112

1213
import { ErrorResponse, HttpError, Status } from '../../../../../../../common/types/api';
1314
import { TrainedModelState } from '../../../../../../../common/types/pipelines';
@@ -18,6 +19,7 @@ import { AttachMlInferencePipelineApiLogic } from '../../../../api/pipelines/att
1819
import { CreateMlInferencePipelineApiLogic } from '../../../../api/pipelines/create_ml_inference_pipeline';
1920
import { FetchMlInferencePipelineProcessorsApiLogic } from '../../../../api/pipelines/fetch_ml_inference_pipeline_processors';
2021
import { FetchMlInferencePipelinesApiLogic } from '../../../../api/pipelines/fetch_ml_inference_pipelines';
22+
import { SimulateExistingMlInterfacePipelineApiLogic } from '../../../../api/pipelines/simulate_existing_ml_inference_pipeline';
2123
import { SimulateMlInterfacePipelineApiLogic } from '../../../../api/pipelines/simulate_ml_inference_pipeline_processors';
2224

2325
import {
@@ -69,6 +71,8 @@ const DEFAULT_VALUES: MLInferenceProcessorsValues = {
6971
mlInferencePipelinesData: undefined,
7072
mlModelsData: undefined,
7173
mlModelsStatus: 0,
74+
simulateExistingPipelineData: undefined,
75+
simulateExistingPipelineStatus: 0,
7276
simulatePipelineData: undefined,
7377
simulatePipelineErrors: [],
7478
simulatePipelineResult: undefined,
@@ -81,6 +85,9 @@ describe('MlInferenceLogic', () => {
8185
const { mount } = new LogicMounter(MLInferenceLogic);
8286
const { mount: mountMappingApiLogic } = new LogicMounter(MappingsApiLogic);
8387
const { mount: mountMLModelsApiLogic } = new LogicMounter(MLModelsApiLogic);
88+
const { mount: mountSimulateExistingMlInterfacePipelineApiLogic } = new LogicMounter(
89+
SimulateExistingMlInterfacePipelineApiLogic
90+
);
8491
const { mount: mountSimulateMlInterfacePipelineApiLogic } = new LogicMounter(
8592
SimulateMlInterfacePipelineApiLogic
8693
);
@@ -103,6 +110,7 @@ describe('MlInferenceLogic', () => {
103110
mountMLModelsApiLogic();
104111
mountFetchMlInferencePipelineProcessorsApiLogic();
105112
mountFetchMlInferencePipelinesApiLogic();
113+
mountSimulateExistingMlInterfacePipelineApiLogic();
106114
mountSimulateMlInterfacePipelineApiLogic();
107115
mountCreateMlInferencePipelineApiLogic();
108116
mountAttachMlInferencePipelineApiLogic();
@@ -245,6 +253,44 @@ describe('MlInferenceLogic', () => {
245253
};
246254
SimulateMlInterfacePipelineApiLogic.actions.apiSuccess(simulateResponse);
247255

256+
expect(MLInferenceLogic.values.simulatePipelineResult).toEqual(simulateResponse);
257+
});
258+
it('returns existing simulation result when API is successful', () => {
259+
const simulateResponse = {
260+
docs: [
261+
{
262+
doc: {
263+
_id: 'id',
264+
_index: 'index',
265+
_ingest: { timestamp: '2022-10-06T10:28:54.3326245Z' },
266+
_source: {
267+
_ingest: {
268+
inference_errors: [
269+
{
270+
message:
271+
"Processor 'inference' in pipeline 'test' failed with message 'Input field [text_field] does not exist in the source document'",
272+
pipeline: 'guy',
273+
timestamp: '2022-10-06T10:28:54.332624500Z',
274+
},
275+
],
276+
processors: [
277+
{
278+
model_version: '8.6.0',
279+
pipeline: 'guy',
280+
processed_timestamp: '2022-10-06T10:28:54.332624500Z',
281+
types: ['pytorch', 'ner'],
282+
},
283+
],
284+
},
285+
_version: '-3',
286+
foo: 'bar',
287+
},
288+
},
289+
},
290+
],
291+
};
292+
SimulateExistingMlInterfacePipelineApiLogic.actions.apiSuccess(simulateResponse);
293+
248294
expect(MLInferenceLogic.values.simulatePipelineResult).toEqual(simulateResponse);
249295
});
250296
});
@@ -322,7 +368,7 @@ describe('MlInferenceLogic', () => {
322368
},
323369
]);
324370
});
325-
it('returns disabled pipeline option if model is redacted', () => {
371+
it('returns enabled pipeline option if model is redacted', () => {
326372
FetchMlInferencePipelinesApiLogic.actions.apiSuccess({
327373
'unit-test': {
328374
processors: [
@@ -343,8 +389,7 @@ describe('MlInferenceLogic', () => {
343389
expect(MLInferenceLogic.values.existingInferencePipelines).toEqual([
344390
{
345391
destinationField: 'test-field',
346-
disabled: true,
347-
disabledReason: expect.any(String),
392+
disabled: false,
348393
pipelineName: 'unit-test',
349394
modelType: '',
350395
modelId: '',
@@ -518,5 +563,127 @@ describe('MlInferenceLogic', () => {
518563
});
519564
});
520565
});
566+
describe('simulatePipeline', () => {
567+
const mockModelConfiguration = {
568+
...DEFAULT_VALUES.addInferencePipelineModal,
569+
configuration: {
570+
destinationField: '',
571+
modelID: 'mock-model-id',
572+
pipelineName: 'mock-pipeline-name',
573+
sourceField: 'mock_text_field',
574+
},
575+
indexName: 'my-index-123',
576+
};
577+
const mlModelsData: TrainedModelConfigResponse[] = [
578+
{
579+
inference_config: {
580+
text_classification: {},
581+
},
582+
input: {
583+
field_names: ['text_field'],
584+
},
585+
model_id: 'mock-model-id',
586+
model_type: 'pytorch',
587+
tags: ['test_tag'],
588+
version: '1',
589+
},
590+
];
591+
it('does nothing if mlInferencePipeline is undefined', () => {
592+
mount({
593+
...DEFAULT_VALUES,
594+
});
595+
596+
jest.spyOn(MLInferenceLogic.actions, 'setSimulatePipelineErrors');
597+
jest.spyOn(MLInferenceLogic.actions, 'simulateExistingPipelineApiReset');
598+
jest.spyOn(MLInferenceLogic.actions, 'simulatePipelineApiReset');
599+
jest.spyOn(MLInferenceLogic.actions, 'makeSimulateExistingPipelineRequest');
600+
jest.spyOn(MLInferenceLogic.actions, 'makeSimulatePipelineRequest');
601+
602+
MLInferenceLogic.actions.simulatePipeline();
603+
604+
expect(MLInferenceLogic.actions.setSimulatePipelineErrors).toHaveBeenCalledTimes(0);
605+
expect(MLInferenceLogic.actions.simulateExistingPipelineApiReset).toHaveBeenCalledTimes(0);
606+
expect(MLInferenceLogic.actions.simulatePipelineApiReset).toHaveBeenCalledTimes(0);
607+
expect(MLInferenceLogic.actions.makeSimulateExistingPipelineRequest).toHaveBeenCalledTimes(
608+
0
609+
);
610+
expect(MLInferenceLogic.actions.makeSimulatePipelineRequest).toHaveBeenCalledTimes(0);
611+
});
612+
it('clears simulate errors', () => {
613+
mount({
614+
...DEFAULT_VALUES,
615+
addInferencePipelineModal: {
616+
...mockModelConfiguration,
617+
},
618+
});
619+
MLModelsApiLogic.actions.apiSuccess(mlModelsData);
620+
jest.spyOn(MLInferenceLogic.actions, 'setSimulatePipelineErrors');
621+
MLInferenceLogic.actions.simulatePipeline();
622+
expect(MLInferenceLogic.actions.setSimulatePipelineErrors).toHaveBeenCalledWith([]);
623+
});
624+
it('resets API logics', () => {
625+
mount({
626+
...DEFAULT_VALUES,
627+
addInferencePipelineModal: {
628+
...mockModelConfiguration,
629+
},
630+
});
631+
MLModelsApiLogic.actions.apiSuccess(mlModelsData);
632+
633+
jest.spyOn(MLInferenceLogic.actions, 'simulateExistingPipelineApiReset');
634+
jest.spyOn(MLInferenceLogic.actions, 'simulatePipelineApiReset');
635+
636+
MLInferenceLogic.actions.simulatePipeline();
637+
638+
expect(MLInferenceLogic.actions.simulateExistingPipelineApiReset).toHaveBeenCalledTimes(1);
639+
expect(MLInferenceLogic.actions.simulatePipelineApiReset).toHaveBeenCalledTimes(1);
640+
});
641+
it('calls simulate with new pipeline', () => {
642+
mount({
643+
...DEFAULT_VALUES,
644+
addInferencePipelineModal: {
645+
...mockModelConfiguration,
646+
},
647+
});
648+
MLModelsApiLogic.actions.apiSuccess(mlModelsData);
649+
650+
jest.spyOn(MLInferenceLogic.actions, 'makeSimulateExistingPipelineRequest');
651+
jest.spyOn(MLInferenceLogic.actions, 'makeSimulatePipelineRequest');
652+
653+
MLInferenceLogic.actions.simulatePipeline();
654+
655+
expect(MLInferenceLogic.actions.makeSimulatePipelineRequest).toHaveBeenCalledTimes(1);
656+
expect(MLInferenceLogic.actions.makeSimulateExistingPipelineRequest).toHaveBeenCalledTimes(
657+
0
658+
);
659+
});
660+
it('calls simulate existing with existing pipeline', () => {
661+
mount({
662+
...DEFAULT_VALUES,
663+
addInferencePipelineModal: {
664+
...mockModelConfiguration,
665+
configuration: {
666+
...mockModelConfiguration.configuration,
667+
existingPipeline: true,
668+
pipelineName: 'my-test-pipeline',
669+
},
670+
},
671+
});
672+
MLModelsApiLogic.actions.apiSuccess(mlModelsData);
673+
FetchMlInferencePipelinesApiLogic.actions.apiSuccess({
674+
'my-test-pipeline': {},
675+
});
676+
677+
jest.spyOn(MLInferenceLogic.actions, 'makeSimulateExistingPipelineRequest');
678+
jest.spyOn(MLInferenceLogic.actions, 'makeSimulatePipelineRequest');
679+
680+
MLInferenceLogic.actions.simulatePipeline();
681+
682+
expect(MLInferenceLogic.actions.makeSimulateExistingPipelineRequest).toHaveBeenCalledTimes(
683+
1
684+
);
685+
expect(MLInferenceLogic.actions.makeSimulatePipelineRequest).toHaveBeenCalledTimes(0);
686+
});
687+
});
521688
});
522689
});

0 commit comments

Comments (0)