| // This is a generated file (see the discoveryapis_generator project). |
| |
| // ignore_for_file: unused_import, unnecessary_cast |
| |
| library googleapis_beta.speech.v2beta1; |
| |
| import 'dart:core' as core; |
| import 'dart:async' as async; |
| |
| import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; |
| import 'package:http/http.dart' as http; |
| |
| export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' |
| show ApiRequestError, DetailedApiRequestError; |
| |
/// User-agent string attached to every request issued by this client.
const core.String USER_AGENT = 'dart-api-client speech/v2beta1';
| |
/// Converts audio to text by applying powerful neural network models.
///
/// Root entry point of the Speech v2beta1 API surface; all calls are issued
/// through a single shared [commons.ApiRequester].
class SpeechApi {
  /// OAuth2 scope: view and manage your data across Google Cloud Platform
  /// services.
  static const CloudPlatformScope =
      "https://www.googleapis.com/auth/cloud-platform";

  final commons.ApiRequester _requester;

  /// The `projects` resource, backed by this client's requester.
  ProjectsResourceApi get projects => ProjectsResourceApi(_requester);

  /// Creates the API client, wiring [client] into a requester that issues
  /// every HTTP call against `rootUrl` + `servicePath` with [USER_AGENT].
  SpeechApi(http.Client client,
      {core.String rootUrl = "https://speech.googleapis.com/",
      core.String servicePath = ""})
      : _requester =
            commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
}
| |
/// Resource handle for `projects`; exposes the nested `locations` resource.
class ProjectsResourceApi {
  final commons.ApiRequester _requester;

  ProjectsResourceApi(commons.ApiRequester client) : _requester = client;

  /// The nested `locations` resource, sharing this resource's requester.
  ProjectsLocationsResourceApi get locations =>
      ProjectsLocationsResourceApi(_requester);
}
| |
/// Resource handle for `projects.locations`; exposes the nested
/// `operations` resource.
class ProjectsLocationsResourceApi {
  final commons.ApiRequester _requester;

  ProjectsLocationsResourceApi(commons.ApiRequester client)
      : _requester = client;

  /// The nested `operations` resource, sharing this resource's requester.
  ProjectsLocationsOperationsResourceApi get operations =>
      ProjectsLocationsOperationsResourceApi(_requester);
}
| |
/// Resource handle for `projects.locations.operations`: polling and listing
/// of long-running operations.
class ProjectsLocationsOperationsResourceApi {
  final commons.ApiRequester _requester;

  ProjectsLocationsOperationsResourceApi(commons.ApiRequester client)
      : _requester = client;

  /// Gets the latest state of a long-running operation.
  ///
  /// Clients can use this method to poll the operation result at intervals as
  /// recommended by the API service.
  ///
  /// Request parameters:
  ///
  /// [name] - The name of the operation resource.
  /// Value must have pattern
  /// "^projects/[^/]+/locations/[^/]+/operations/[^/]+$".
  ///
  /// [$fields] - Selector specifying which fields to include in a partial
  /// response.
  ///
  /// Completes with a [Operation].
  ///
  /// Completes with a [commons.ApiRequestError] if the API endpoint returned
  /// an error.
  ///
  /// If the used [http.Client] completes with an error when making a REST
  /// call, this method will complete with the same error.
  async.Future<Operation> get(core.String name, {core.String $fields}) {
    if (name == null) {
      throw core.ArgumentError("Parameter name is required.");
    }

    // Only non-null parameters become query parameters.
    final queryParams = <core.String, core.List<core.String>>{};
    if ($fields != null) {
      queryParams["fields"] = [$fields];
    }

    // `name` is already a full resource path; escape it but keep reserved
    // characters such as '/'.
    final url = 'v2beta1/' + commons.Escaper.ecapeVariableReserved('$name');

    return _requester
        .request(url, "GET",
            body: null,
            queryParams: queryParams,
            uploadOptions: null,
            uploadMedia: null,
            downloadOptions: commons.DownloadOptions.Metadata)
        .then((data) => Operation.fromJson(data));
  }

  /// Lists operations that match the specified filter in the request.
  ///
  /// If the server doesn't support this method, it returns `UNIMPLEMENTED`.
  /// NOTE: the `name` binding allows API services to override the binding to
  /// use different resource name schemes, such as `users / * /operations`. To
  /// override the binding, API services can add a binding such as
  /// `"/v1/{name=users / * }/operations"` to their service configuration. For
  /// backwards compatibility, the default name includes the operations
  /// collection id, however overriding users must ensure the name binding is
  /// the parent resource, without the operations collection id.
  ///
  /// Request parameters:
  ///
  /// [name] - The name of the operation's parent resource.
  /// Value must have pattern "^projects/[^/]+/locations/[^/]+$".
  ///
  /// [pageSize] - The standard list page size.
  ///
  /// [pageToken] - The standard list page token.
  ///
  /// [filter] - The standard list filter.
  ///
  /// [$fields] - Selector specifying which fields to include in a partial
  /// response.
  ///
  /// Completes with a [ListOperationsResponse].
  ///
  /// Completes with a [commons.ApiRequestError] if the API endpoint returned
  /// an error.
  ///
  /// If the used [http.Client] completes with an error when making a REST
  /// call, this method will complete with the same error.
  async.Future<ListOperationsResponse> list(core.String name,
      {core.int pageSize,
      core.String pageToken,
      core.String filter,
      core.String $fields}) {
    if (name == null) {
      throw core.ArgumentError("Parameter name is required.");
    }

    // Only non-null parameters become query parameters.
    final queryParams = <core.String, core.List<core.String>>{};
    if (pageSize != null) {
      queryParams["pageSize"] = ["${pageSize}"];
    }
    if (pageToken != null) {
      queryParams["pageToken"] = [pageToken];
    }
    if (filter != null) {
      queryParams["filter"] = [filter];
    }
    if ($fields != null) {
      queryParams["fields"] = [$fields];
    }

    final url = 'v2beta1/' +
        commons.Escaper.ecapeVariableReserved('$name') +
        '/operations';

    return _requester
        .request(url, "GET",
            body: null,
            queryParams: queryParams,
            uploadOptions: null,
            uploadMedia: null,
            downloadOptions: commons.DownloadOptions.Metadata)
        .then((data) => ListOperationsResponse.fromJson(data));
  }
}
| |
/// The response message for Operations.ListOperations.
class ListOperationsResponse {
  /// The standard List next-page token.
  core.String nextPageToken;

  /// A list of operations that matches the specified filter in the request.
  core.List<Operation> operations;

  ListOperationsResponse();

  /// Builds an instance from a decoded JSON map; absent keys leave the
  /// corresponding fields null.
  ListOperationsResponse.fromJson(core.Map _json) {
    nextPageToken = _json["nextPageToken"];
    if (_json.containsKey("operations")) {
      operations = (_json["operations"] as core.List)
          .map<Operation>((value) => Operation.fromJson(value))
          .toList();
    }
  }

  /// Serializes to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final _json = <core.String, core.Object>{};
    if (nextPageToken != null) _json["nextPageToken"] = nextPageToken;
    if (operations != null) {
      _json["operations"] = operations.map((value) => value.toJson()).toList();
    }
    return _json;
  }
}
| |
/// Describes the progress of a long-running `LongRunningRecognize` call. It is
/// included in the `metadata` field of the `Operation` returned by the
/// `GetOperation` call of the `google::longrunning::Operations` service.
class LongRunningRecognizeMetadata {
  /// Output only. Time of the most recent processing update.
  core.String lastUpdateTime;

  /// Output only. Approximate percentage of audio processed thus far.
  /// Guaranteed to be 100 when the audio is fully processed and the results
  /// are available.
  core.int progressPercent;

  /// Output only. Time when the request was received.
  core.String startTime;

  /// The URI of the audio file being transcribed. Empty if the audio was sent
  /// as byte content.
  core.String uri;

  LongRunningRecognizeMetadata();

  /// Builds an instance from a decoded JSON map; absent keys leave the
  /// corresponding fields null.
  LongRunningRecognizeMetadata.fromJson(core.Map _json) {
    lastUpdateTime = _json["lastUpdateTime"];
    progressPercent = _json["progressPercent"];
    startTime = _json["startTime"];
    uri = _json["uri"];
  }

  /// Serializes to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final _json = <core.String, core.Object>{};
    if (lastUpdateTime != null) _json["lastUpdateTime"] = lastUpdateTime;
    if (progressPercent != null) _json["progressPercent"] = progressPercent;
    if (startTime != null) _json["startTime"] = startTime;
    if (uri != null) _json["uri"] = uri;
    return _json;
  }
}
| |
/// The only message returned to the client by the `LongRunningRecognize`
/// method. It contains the result as zero or more sequential
/// SpeechRecognitionResult messages. It is included in the `result.response`
/// field of the `Operation` returned by the `GetOperation` call of the
/// `google::longrunning::Operations` service.
class LongRunningRecognizeResponse {
  /// Output only. Sequential list of transcription results corresponding to
  /// sequential portions of audio.
  core.List<SpeechRecognitionResult> results;

  LongRunningRecognizeResponse();

  /// Builds an instance from a decoded JSON map; an absent "results" key
  /// leaves [results] null.
  LongRunningRecognizeResponse.fromJson(core.Map _json) {
    if (_json.containsKey("results")) {
      results = (_json["results"] as core.List)
          .map<SpeechRecognitionResult>(
              (value) => SpeechRecognitionResult.fromJson(value))
          .toList();
    }
  }

  /// Serializes to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final _json = <core.String, core.Object>{};
    if (results != null) {
      _json["results"] = results.map((value) => value.toJson()).toList();
    }
    return _json;
  }
}
| |
/// This resource represents a long-running operation that is the result of a
/// network API call.
class Operation {
  /// If the value is `false`, it means the operation is still in progress. If
  /// `true`, the operation is completed, and either `error` or `response` is
  /// available.
  core.bool done;

  /// The error result of the operation in case of failure or cancellation.
  Status error;

  /// Service-specific metadata associated with the operation. It typically
  /// contains progress information and common metadata such as create time.
  /// Some services might not provide such metadata. Any method that returns a
  /// long-running operation should document the metadata type, if any.
  ///
  /// The values for Object must be JSON objects. It can consist of `num`,
  /// `String`, `bool` and `null` as well as `Map` and `List` values.
  core.Map<core.String, core.Object> metadata;

  /// The server-assigned name, which is only unique within the same service
  /// that originally returns it. If you use the default HTTP mapping, the
  /// `name` should be a resource name ending with `operations/{unique_id}`.
  core.String name;

  /// The normal response of the operation in case of success. If the original
  /// method returns no data on success, such as `Delete`, the response is
  /// `google.protobuf.Empty`. If the original method is standard
  /// `Get`/`Create`/`Update`, the response should be the resource. For other
  /// methods, the response should have the type `XxxResponse`, where `Xxx` is
  /// the original method name. For example, if the original method name is
  /// `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
  ///
  /// The values for Object must be JSON objects. It can consist of `num`,
  /// `String`, `bool` and `null` as well as `Map` and `List` values.
  core.Map<core.String, core.Object> response;

  Operation();

  /// Builds an instance from a decoded JSON map; absent keys leave the
  /// corresponding fields null.
  Operation.fromJson(core.Map _json) {
    done = _json["done"];
    name = _json["name"];
    if (_json.containsKey("error")) {
      error = Status.fromJson(_json["error"]);
    }
    if (_json.containsKey("metadata")) {
      metadata =
          (_json["metadata"] as core.Map).cast<core.String, core.Object>();
    }
    if (_json.containsKey("response")) {
      response =
          (_json["response"] as core.Map).cast<core.String, core.Object>();
    }
  }

  /// Serializes to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final _json = <core.String, core.Object>{};
    if (done != null) _json["done"] = done;
    if (error != null) _json["error"] = error.toJson();
    if (metadata != null) _json["metadata"] = metadata;
    if (name != null) _json["name"] = name;
    if (response != null) _json["response"] = response;
    return _json;
  }
}
| |
/// Alternative hypotheses (a.k.a. n-best list).
class SpeechRecognitionAlternative {
  /// Output only. The confidence estimate between 0.0 and 1.0. A higher
  /// number indicates an estimated greater likelihood that the recognized
  /// words are correct. This field is set only for the top alternative of a
  /// non-streaming result or, of a streaming result where `is_final=true`.
  /// This field is not guaranteed to be accurate and users should not rely on
  /// it to be always provided. The default of 0.0 is a sentinel value
  /// indicating `confidence` was not set.
  core.double confidence;

  /// Output only. Transcript text representing the words that the user spoke.
  core.String transcript;

  /// Output only. A list of word-specific information for each recognized
  /// word. Note: When `enable_speaker_diarization` is true, you will see all
  /// the words from the beginning of the audio.
  core.List<WordInfo> words;

  SpeechRecognitionAlternative();

  /// Builds an instance from a decoded JSON map; absent keys leave the
  /// corresponding fields null.
  SpeechRecognitionAlternative.fromJson(core.Map _json) {
    if (_json.containsKey("confidence")) {
      // JSON numbers may decode as int; normalize to double.
      confidence = _json["confidence"].toDouble();
    }
    transcript = _json["transcript"];
    if (_json.containsKey("words")) {
      words = (_json["words"] as core.List)
          .map<WordInfo>((value) => WordInfo.fromJson(value))
          .toList();
    }
  }

  /// Serializes to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final _json = <core.String, core.Object>{};
    if (confidence != null) _json["confidence"] = confidence;
    if (transcript != null) _json["transcript"] = transcript;
    if (words != null) {
      _json["words"] = words.map((value) => value.toJson()).toList();
    }
    return _json;
  }
}
| |
/// A speech recognition result corresponding to a portion of the audio.
class SpeechRecognitionResult {
  /// Output only. May contain one or more recognition hypotheses (up to the
  /// maximum specified in `max_alternatives`). These alternatives are ordered
  /// in terms of accuracy, with the top (first) alternative being the most
  /// probable, as ranked by the recognizer.
  core.List<SpeechRecognitionAlternative> alternatives;

  /// Output only. For multi-channel audio, this is the channel number
  /// corresponding to the recognized result for the audio from that channel.
  /// For `audio_channel_count` = N, its output values can range from `1` to
  /// `N`.
  core.int channelTag;

  /// Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
  /// language tag of the language in this result. This language code was
  /// detected to have the most likelihood of being spoken in the audio.
  core.String languageCode;

  SpeechRecognitionResult();

  /// Builds an instance from a decoded JSON map; absent keys leave the
  /// corresponding fields null.
  SpeechRecognitionResult.fromJson(core.Map _json) {
    if (_json.containsKey("alternatives")) {
      alternatives = (_json["alternatives"] as core.List)
          .map<SpeechRecognitionAlternative>(
              (value) => SpeechRecognitionAlternative.fromJson(value))
          .toList();
    }
    channelTag = _json["channelTag"];
    languageCode = _json["languageCode"];
  }

  /// Serializes to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final _json = <core.String, core.Object>{};
    if (alternatives != null) {
      _json["alternatives"] =
          alternatives.map((value) => value.toJson()).toList();
    }
    if (channelTag != null) _json["channelTag"] = channelTag;
    if (languageCode != null) _json["languageCode"] = languageCode;
    return _json;
  }
}
| |
/// The `Status` type defines a logical error model that is suitable for
/// different programming environments, including REST APIs and RPC APIs. It
/// is used by [gRPC](https://github.com/grpc). Each `Status` message contains
/// three pieces of data: error code, error message, and error details. You
/// can find out more about this error model and how to work with it in the
/// [API Design Guide](https://cloud.google.com/apis/design/errors).
class Status {
  /// The status code, which should be an enum value of google.rpc.Code.
  core.int code;

  /// A list of messages that carry the error details. There is a common set
  /// of message types for APIs to use.
  ///
  /// The values for Object must be JSON objects. It can consist of `num`,
  /// `String`, `bool` and `null` as well as `Map` and `List` values.
  core.List<core.Map<core.String, core.Object>> details;

  /// A developer-facing error message, which should be in English. Any
  /// user-facing error message should be localized and sent in the
  /// google.rpc.Status.details field, or localized by the client.
  core.String message;

  Status();

  /// Builds an instance from a decoded JSON map; absent keys leave the
  /// corresponding fields null.
  Status.fromJson(core.Map _json) {
    code = _json["code"];
    message = _json["message"];
    if (_json.containsKey("details")) {
      details = (_json["details"] as core.List)
          .map<core.Map<core.String, core.Object>>(
              (value) => (value as core.Map).cast<core.String, core.Object>())
          .toList();
    }
  }

  /// Serializes to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final _json = <core.String, core.Object>{};
    if (code != null) _json["code"] = code;
    if (details != null) _json["details"] = details;
    if (message != null) _json["message"] = message;
    return _json;
  }
}
| |
/// Word-specific information for recognized words.
class WordInfo {
  /// Output only. The confidence estimate between 0.0 and 1.0. A higher
  /// number indicates an estimated greater likelihood that the recognized
  /// words are correct. This field is set only for the top alternative of a
  /// non-streaming result or, of a streaming result where `is_final=true`.
  /// This field is not guaranteed to be accurate and users should not rely on
  /// it to be always provided. The default of 0.0 is a sentinel value
  /// indicating `confidence` was not set.
  core.double confidence;

  /// Output only. Time offset relative to the beginning of the audio, and
  /// corresponding to the end of the spoken word. This field is only set if
  /// `enable_word_time_offsets=true` and only in the top hypothesis. This is
  /// an experimental feature and the accuracy of the time offset can vary.
  core.String endOffset;

  /// Output only. A distinct integer value is assigned for every speaker
  /// within the audio. This field specifies which one of those speakers was
  /// detected to have spoken this word. Value ranges from `1` to
  /// `diarization_config.max_speaker_count` . `speaker_tag` is set if
  /// `diarization_config.enable_speaker_diarization` = `true` and only in the
  /// top alternative.
  core.int speakerTag;

  /// Output only. Time offset relative to the beginning of the audio, and
  /// corresponding to the start of the spoken word. This field is only set if
  /// `enable_word_time_offsets=true` and only in the top hypothesis. This is
  /// an experimental feature and the accuracy of the time offset can vary.
  core.String startOffset;

  /// Output only. The word corresponding to this set of information.
  core.String word;

  WordInfo();

  /// Builds an instance from a decoded JSON map; absent keys leave the
  /// corresponding fields null.
  WordInfo.fromJson(core.Map _json) {
    if (_json.containsKey("confidence")) {
      // JSON numbers may decode as int; normalize to double.
      confidence = _json["confidence"].toDouble();
    }
    endOffset = _json["endOffset"];
    speakerTag = _json["speakerTag"];
    startOffset = _json["startOffset"];
    word = _json["word"];
  }

  /// Serializes to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final _json = <core.String, core.Object>{};
    if (confidence != null) _json["confidence"] = confidence;
    if (endOffset != null) _json["endOffset"] = endOffset;
    if (speakerTag != null) _json["speakerTag"] = speakerTag;
    if (startOffset != null) _json["startOffset"] = startOffset;
    if (word != null) _json["word"] = word;
    return _json;
  }
}