- 0.84.0 (latest)
- 0.82.0
- 0.81.0
- 0.80.0
- 0.79.0
- 0.78.0
- 0.76.0
- 0.74.0
- 0.73.0
- 0.70.0
- 0.69.0
- 0.68.0
- 0.66.0
- 0.65.0
- 0.64.0
- 0.63.0
- 0.62.0
- 0.61.0
- 0.60.0
- 0.59.0
- 0.58.0
- 0.57.0
- 0.55.0
- 0.54.0
- 0.53.0
- 0.52.0
- 0.51.0
- 0.50.0
- 0.49.0
- 0.48.0
- 0.47.0
- 0.46.0
- 0.45.0
- 0.43.0
- 0.42.0
- 0.41.0
- 0.40.0
- 0.39.0
- 0.38.0
- 0.37.0
- 0.36.0
- 0.35.0
- 0.34.0
- 0.33.0
- 0.30.0
- 0.29.0
- 0.28.0
- 0.27.0
- 0.26.0
- 0.25.0
- 0.24.0
- 0.23.0
- 0.22.0
- 0.21.0
- 0.20.0
- 0.19.0
- 0.18.0
- 0.17.0
- 0.15.0
- 0.14.0
- 0.13.0
- 0.12.0
- 0.11.0
- 0.10.0
- 0.9.6
- 0.7.10
public interface StreamingTranslateSpeechRequestOrBuilder extends MessageOrBuilder
Implements
MessageOrBuilder
Methods
getAudioContent()
public abstract ByteString getAudioContent()
The audio data to be translated. Sequential chunks of audio data are sent
in sequential StreamingTranslateSpeechRequest messages. The first
StreamingTranslateSpeechRequest message must not contain
audio_content data and all subsequent StreamingTranslateSpeechRequest
messages must contain audio_content data. The audio bytes must be
encoded as specified in StreamingTranslateSpeechConfig. Note: as with
all bytes fields, protocol buffers use a pure binary representation (not
base64).
bytes audio_content = 2;
| Type | Description |
|---|---|
| ByteString | The audioContent. |
getStreamingConfig()
public abstract StreamingTranslateSpeechConfig getStreamingConfig()
Provides information to the recognizer that specifies how to process the
request. The first StreamingTranslateSpeechRequest message must contain
a streaming_config message.
.google.cloud.mediatranslation.v1beta1.StreamingTranslateSpeechConfig streaming_config = 1;
| Type | Description |
|---|---|
| StreamingTranslateSpeechConfig | The streamingConfig. |
getStreamingConfigOrBuilder()
public abstract StreamingTranslateSpeechConfigOrBuilder getStreamingConfigOrBuilder()
Provides information to the recognizer that specifies how to process the
request. The first StreamingTranslateSpeechRequest message must contain
a streaming_config message.
.google.cloud.mediatranslation.v1beta1.StreamingTranslateSpeechConfig streaming_config = 1;
| Type | Description |
|---|---|
| StreamingTranslateSpeechConfigOrBuilder | |
getStreamingRequestCase()
public abstract StreamingTranslateSpeechRequest.StreamingRequestCase getStreamingRequestCase()
| Type | Description |
|---|---|
| StreamingTranslateSpeechRequest.StreamingRequestCase | |
hasAudioContent()
public abstract boolean hasAudioContent()
The audio data to be translated. Sequential chunks of audio data are sent
in sequential StreamingTranslateSpeechRequest messages. The first
StreamingTranslateSpeechRequest message must not contain
audio_content data and all subsequent StreamingTranslateSpeechRequest
messages must contain audio_content data. The audio bytes must be
encoded as specified in StreamingTranslateSpeechConfig. Note: as with
all bytes fields, protocol buffers use a pure binary representation (not
base64).
bytes audio_content = 2;
| Type | Description |
|---|---|
| boolean | Whether the audioContent field is set. |
hasStreamingConfig()
public abstract boolean hasStreamingConfig()
Provides information to the recognizer that specifies how to process the
request. The first StreamingTranslateSpeechRequest message must contain
a streaming_config message.
.google.cloud.mediatranslation.v1beta1.StreamingTranslateSpeechConfig streaming_config = 1;
| Type | Description |
|---|---|
| boolean | Whether the streamingConfig field is set. |