/**
 * @license
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import {LitTypeTypesList, LIT_TYPES_REGISTRY} from '../lib/lit_types';
import {CallConfig, IndexedInput, LitMetadata, Preds, SerializedPyClass} from '../lib/types';
import {createLitType, getTypeNames} from '../lib/utils';

import {LitService} from './lit_service';
import {StatusService} from './status_service';

/** A dictionary of Metrics components to a list of their results. */
export interface MetricsResponse {
  [metricsComponent: string]: MetricsResult[];
}

/** The prediction key, label key, and computed metric values. */
export interface MetricsResult {
  // Using snake_case for parity with the property names in the Python code.
  // tslint:disable-next-line:enforce-name-casing
  pred_key: string;
  // tslint:disable-next-line:enforce-name-casing
  label_key: string;
  metrics: MetricsValues;
}

/** A dictionary of metric names to values, from one metric component. */
export interface MetricsValues {
  [metricName: string]: number;
}
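
// For illustration only, a MetricsResponse might look like the following;
// the component name, keys, and values here are hypothetical:
//
//   {
//     'regression': [{
//       pred_key: 'score',
//       label_key: 'label',
//       metrics: {'mse': 0.12, 'pearsonr': 0.87},
//     }],
//   }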

/**
 * API service singleton, responsible for actually making calls to the server
 * and (as best it can) enforcing type safety on returned values.
 */
export class ApiService extends LitService {
  constructor(private readonly statusService: StatusService) {
    super();
  }

  /**
   * Send a request to the server to get inputs for a dataset.
   * @param dataset name of dataset to load
   */
  async getDataset(dataset: string): Promise<IndexedInput[]> {
    const loadMessage = 'Loading inputs';
    const examples = await this.queryServer<IndexedInput[]>(
        '/get_dataset', {'dataset_name': dataset}, [], loadMessage);
    if (examples == null) {
      const errorText = 'Failed to load dataset (server returned null).';
      this.statusService.addError(errorText);
      throw (new Error(errorText));
    }
    return examples;
  }
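
  // Illustrative usage, assuming an ApiService instance `apiService` is in
  // scope and using a hypothetical dataset name:
  //
  //   const examples = await apiService.getDataset('my_dataset');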

  /**
   * Request the server to create a new dataset.
   * Loads the dataset on the backend, but examples need to be
   * fetched to the frontend separately using getDataset().
   * Returns (updated metadata, name of just-loaded dataset).
   * @param dataset name of (base) dataset to dispatch to load()
   * @param config a dictionary containing the values for parameterizing the
   *    initialization of the Dataset. The keys of this dictionary should align
   *    with and satisfy the requirements of the `Dataset.init_spec()`.
   */
  async createDataset(dataset: string, config: CallConfig):
      Promise<[LitMetadata, string]> {
    const loadMessage = 'Creating new dataset';
    return this.queryServer(
        '/create_dataset',
        {'dataset_name': dataset},
        [], loadMessage, config);
  }
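
  // Illustrative usage; the dataset name and init_spec() keys below are
  // hypothetical and depend on the Dataset class being loaded:
  //
  //   const [metadata, newName] = await apiService.createDataset(
  //       'my_dataset', {'path': '/path/to/data.tsv'});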

  /**
   * Request the server to create a new model.
   * Loads the model on the backend.
   * Returns (updated metadata, names of just-loaded models).
   * @param model name of (base) model to dispatch to load()
   * @param config a dictionary containing the values for parameterizing the
   *    initialization of the Model. The keys of this dictionary should align
   *    with and satisfy the requirements of the `Model.init_spec()`.
   */
  async createModel(model: string, config: CallConfig):
      Promise<[LitMetadata, string[]]> {
    const loadMessage = 'Loading new model';
    return this.queryServer(
        '/create_model',
        {'model_name': model},
        [], loadMessage, config);
  }

  /**
   * Send a request to the server to get the LIT metadata.
   */
  async getInfo(): Promise<LitMetadata> {
    return this.queryServer<LitMetadata>(
        '/get_info', {}, [], 'Loading metadata');
  }

  /**
   * Calls the server to get predictions of the given types.
   * @param inputs inputs to run model on
   * @param model model to query
   * @param datasetName current dataset (for caching)
   * @param requestedTypes datatypes to request
   * @param requestedFields optional fields to request
   * @param loadMessage optional loading message to display in toolbar
   */
  getPreds(
      inputs: IndexedInput[], model: string, datasetName: string,
      requestedTypes: LitTypeTypesList, requestedFields?: string[],
      loadMessage?: string): Promise<Preds[]> {
    loadMessage = loadMessage || 'Fetching predictions';
    requestedFields = requestedFields || [];
    return this.queryServer(
        '/get_preds', {
          'model': model,
          'dataset_name': datasetName,
          'requested_types': getTypeNames(requestedTypes).join(','),
          'requested_fields': requestedFields.join(','),
        },
        inputs, loadMessage);
  }
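
  // Illustrative usage, assuming MulticlassPreds is imported from
  // '../lib/lit_types'; the model and dataset names are hypothetical:
  //
  //   const preds = await apiService.getPreds(
  //       inputs, 'my_model', 'my_dataset', [MulticlassPreds]);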

  /**
   * Calls the server to get newly generated inputs for a set of inputs, for a
   * given generator and model.
   * @param inputs inputs to run on
   * @param modelName model to query
   * @param datasetName current dataset
   * @param generator generator being used
   * @param config configuration to send to backend (optional)
   * @param loadMessage loading message to show to user (optional)
   */
  async getGenerated(
      inputs: IndexedInput[], modelName: string, datasetName: string,
      generator: string, config?: CallConfig,
      loadMessage?: string): Promise<IndexedInput[][]> {
    loadMessage = loadMessage ?? 'Loading generator output';
    return this.queryServer<IndexedInput[][]>(
        '/get_generated', {
          'model': modelName,
          'dataset_name': datasetName,
          'generator': generator,
        },
        inputs, loadMessage, config);
  }
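
  // Illustrative usage; the generator name and config keys are hypothetical
  // and depend on the generators configured on the server:
  //
  //   const generated = await apiService.getGenerated(
  //       inputs, 'my_model', 'my_dataset', 'word_replacer',
  //       {'Substitutions': 'great -> terrible'});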

  /**
   * Calls the server to create and set the IDs and other data for the provided
   * inputs.
   * @param inputs Inputs to get the IDs for.
   * @param datasetName current dataset
   *
   * @return Inputs with the IDs correctly set.
   */
  annotateNewData(
      inputs: IndexedInput[], datasetName: string): Promise<IndexedInput[]> {
    return this.queryServer<IndexedInput[]>(
        '/annotate_new_data', {
          'dataset_name': datasetName,
        },
        inputs);
  }

  /** Fetches previously saved datapoints from the server by their saved ID. */
  fetchNewData(savedDatapointsId: string): Promise<IndexedInput[]> {
    return this.queryServer<IndexedInput[]>(
        '/fetch_new_data', {
          'saved_datapoints_id': savedDatapointsId,
        },
        []);
  }

  /**
   * Calls the server to run an interpretation component.
   * @param inputs inputs to run on
   * @param modelName model to query
   * @param datasetName current dataset (for caching)
   * @param interpreterName interpreter to run
   * @param config configuration to send to backend (optional)
   * @param loadMessage loading message to show to user (optional)
   * @param skipPredict whether to skip the call to _get_preds() before
   *    calling the interpreter component. See app.py, and remove after
   *    b/278586715 is resolved.
   */
  getInterpretations(
      inputs: IndexedInput[], modelName: string, datasetName: string,
      interpreterName: string, config?: CallConfig,
      // tslint:disable-next-line:no-any
      loadMessage?: string, skipPredict = false): Promise<any> {
    loadMessage = loadMessage ?? 'Fetching interpretations';
    return this.queryServer(
        '/get_interpretations', {
          'model': modelName,
          'dataset_name': datasetName,
          'interpreter': interpreterName,
          'do_predict': skipPredict ? '0' : '1'
        },
        inputs, loadMessage, config);
  }

  /**
   * Calls the server to compute metrics for a set of inputs.
   * @param inputs inputs to run on
   * @param modelName model to query
   * @param datasetName current dataset (for caching)
   * @param metrics a comma-separated list of the metrics to run
   * @param config configuration to send to backend (optional)
   * @param loadMessage loading message to show to user (optional)
   * @param skipPredict whether to skip the call to _get_preds() before
   *    calling the Metrics component. See app.py, and remove after
   *    b/278586715 is resolved.
   */
  getMetrics(
      inputs: IndexedInput[], modelName: string, datasetName: string,
      metrics?: string, config?: CallConfig,
      loadMessage?: string, skipPredict = false): Promise<MetricsResponse> {
    loadMessage = loadMessage ?? 'Computing metrics.';
    return this.queryServer<MetricsResponse>(
        '/get_metrics', {
          'model': modelName,
          'dataset_name': datasetName,
          'metrics': metrics ?? '',
          'do_predict': skipPredict ? '0' : '1'
        },
        inputs, loadMessage, config);
  }
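
  // Illustrative usage; the component names are hypothetical and depend on
  // the Metrics components configured on the server:
  //
  //   const response = await apiService.getMetrics(
  //       inputs, 'my_model', 'my_dataset', 'regression,multiclass');
  //   // `response` maps each component to a list of MetricsResult objects.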

  /**
   * Calls the server to save new datapoints.
   * @param inputs Text inputs to persist.
   * @param datasetName dataset being used.
   * @param path path to save to.
   */
  saveDatapoints(inputs: IndexedInput[], datasetName: string, path: string):
      Promise<string> {
    const loadMessage = 'Saving new datapoints';
    return this.queryServer(
        '/save_datapoints', {
          'dataset_name': datasetName,
          'path': path,
        },
        inputs, loadMessage);
  }

  /**
   * Calls the server to load persisted datapoints.
   * @param datasetName dataset being used.
   * @param path path to load from.
   */
  loadDatapoints(datasetName: string, path: string): Promise<IndexedInput[]> {
    const loadMessage = 'Loading new datapoints';
    return this.queryServer(
        '/load_datapoints', {
          'dataset_name': datasetName,
          'path': path,
        },
        [], loadMessage);
  }

  /**
   * Push UI state to the server.
   * @param selection current selection
   * @param datasetName dataset being used.
   * @param config additional params to pass to UIStateTracker.update_state()
   */
  pushState(
      selection: IndexedInput[], datasetName: string, config?: CallConfig) {
    const loadMessage = 'Syncing UI state.';
    return this.queryServer(
        '/push_ui_state', {'dataset_name': datasetName}, selection, loadMessage,
        config);
  }

  /**
   * Send a standard request to the server.
   * @param endpoint server endpoint, like /get_preds
   * @param params query params
   * @param inputs input examples
   * @param loadMessage an optional string to display via the StatusService
   *    describing the API call.
   * @param config an optional dictionary passed as the `config` property of
   *    the POST request's body.
   */
  private async queryServer<T>(
      endpoint: string,
      params: {[key: string]: string},
      inputs: IndexedInput[],
      loadMessage = '',
      config?: CallConfig): Promise<T> {
    const finished = this.statusService.startLoading(loadMessage);

    // For a smaller request, replace known (original) examples with their IDs;
    // we can simply look these up on the server.
    // TODO: consider sending the metadata as well, since this might be changed
    // from the frontend.
    const processedInputs: Array<IndexedInput|string> = inputs.map(input => {
      if (!input.meta.added) {
        return input.id;
      }
      return input;
    });

    const paramsArray =
        Object.keys(params).map((key: string) => `${key}=${params[key]}`);
    const url = encodeURI(`.${endpoint}?${paramsArray.join('&')}`);
    const body = JSON.stringify({inputs: processedInputs, config});

    try {
      const res = await fetch(url, {method: 'POST', body});
      // All responses are parsed as text so that we can use the custom reviver
      // with JSON.parse() below. See more about this parameter at:
      // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/parse#the_reviver_parameter
      const text = await res.text();
      if (!res.ok) {throw (new Error(text));}

      // This reviver converts serialized LitType instances (indicated by the
      // value of __class__) to their parallel TypeScript classes. All other
      // values (e.g., np.ndarrays, dtypes, etc.; see ../../lib/serialize.py)
      // are returned as-is. If the value of __name__ does not correspond to a
      // @registered LitType subclass, an Error is thrown.
      const json = JSON.parse(text, (unusedKey, value) => {
        if (value != null &&
            typeof value === 'object' &&
            Object.hasOwn(value, '__class__') &&
            Object.hasOwn(value, '__name__')) {
          const serialized = value as SerializedPyClass;
          if (serialized.__class__ !== 'LitType') return value;
          const litTypeCls = LIT_TYPES_REGISTRY[serialized.__name__];
          if (litTypeCls == null) {
            throw new Error(`Attempted to revive an unknown LitType '${
                serialized.__name__}' with value:\n\n${
                JSON.stringify(value, null, ' ')}`);
          }
          return createLitType(litTypeCls, value as {});
        } else {
          return value;
        }
      });
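
      // For illustration: a serialized LitType arrives as JSON shaped like
      // {"__class__": "LitType", "__name__": "TextSegment", ...} (field
      // values hypothetical) and is revived above into the registered
      // TextSegment class via createLitType().
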
      finished();
      // When a call finishes, clear any previous error from the same call.
      this.statusService.removeError(url);
      return json as T;
      // tslint:disable-next-line:no-any
    } catch (err: any) {
      finished();
      // Extract the leading error text, which precedes the '\n\nDetails:'
      // block when the server returns a detailed message.
      const found = err.message.match('^.*?(?=\n\nDetails:)');
      if (!found) {
        this.statusService.addError(
            'Unknown error', err.toString(), url);
      } else {
        this.statusService.addError(found[0], err.toString(), url);
      }
      throw (err);
    }
  }
}