public static final class SynthesizeSpeechResponse.Builder extends GeneratedMessageV3.Builder<SynthesizeSpeechResponse.Builder> implements SynthesizeSpeechResponseOrBuilder
   
   The message returned to the client by the SynthesizeSpeech method.
 Protobuf type google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse
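
For orientation, the following is a minimal, hypothetical sketch of assembling a SynthesizeSpeechResponse with this builder, for example to stub a response in a test. The wrapper class name, audio bytes, and timepoint values are illustrative placeholders, not part of the library.

```java
import com.google.cloud.texttospeech.v1beta1.AudioConfig;
import com.google.cloud.texttospeech.v1beta1.AudioEncoding;
import com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse;
import com.google.cloud.texttospeech.v1beta1.Timepoint;
import com.google.protobuf.ByteString;

public class SynthesizeSpeechResponseBuilderSketch {
  public static void main(String[] args) {
    // Placeholder audio bytes; a real response carries API-encoded audio.
    ByteString fakeAudio = ByteString.copyFrom(new byte[] {0x52, 0x49, 0x46, 0x46});

    SynthesizeSpeechResponse response =
        SynthesizeSpeechResponse.newBuilder()
            .setAudioContent(fakeAudio)
            .setAudioConfig(
                AudioConfig.newBuilder()
                    .setAudioEncoding(AudioEncoding.LINEAR16)
                    .setSampleRateHertz(16000))
            .addTimepoints(
                Timepoint.newBuilder().setMarkName("word1").setTimeSeconds(0.25))
            .build();

    System.out.println(response.getTimepointsCount()); // 1
  }
}
```
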
Inherited Members

  com.google.protobuf.GeneratedMessageV3.Builder.getUnknownFieldSetBuilder()
  com.google.protobuf.GeneratedMessageV3.Builder.internalGetMapFieldReflection(int)
  com.google.protobuf.GeneratedMessageV3.Builder.internalGetMutableMapFieldReflection(int)
  com.google.protobuf.GeneratedMessageV3.Builder.mergeUnknownLengthDelimitedField(int,com.google.protobuf.ByteString)
  com.google.protobuf.GeneratedMessageV3.Builder.mergeUnknownVarintField(int,int)
  com.google.protobuf.GeneratedMessageV3.Builder.parseUnknownField(com.google.protobuf.CodedInputStream,com.google.protobuf.ExtensionRegistryLite,int)
  com.google.protobuf.GeneratedMessageV3.Builder.setUnknownFieldSetBuilder(com.google.protobuf.UnknownFieldSet.Builder)
   
  Static Methods
  
  
  getDescriptor()
  
    public static final Descriptors.Descriptor getDescriptor()
   
  
  Methods
  
  
  addAllTimepoints(Iterable<? extends Timepoint> values)
  
    public SynthesizeSpeechResponse.Builder addAllTimepoints(Iterable<? extends Timepoint> values)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;
    
      
Parameter

| Name | Description |
| --- | --- |
| values | Iterable<? extends com.google.cloud.texttospeech.v1beta1.Timepoint> |
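
A brief, hypothetical usage sketch for this method: appending an existing collection of Timepoint messages in one call. The class name, mark names, and times are illustrative.

```java
import com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse;
import com.google.cloud.texttospeech.v1beta1.Timepoint;
import java.util.Arrays;
import java.util.List;

public class AddAllTimepointsSketch {
  public static void main(String[] args) {
    // Illustrative timepoints, e.g. one per <mark/> in the SSML input.
    List<Timepoint> marks = Arrays.asList(
        Timepoint.newBuilder().setMarkName("intro").setTimeSeconds(0.0).build(),
        Timepoint.newBuilder().setMarkName("outro").setTimeSeconds(2.5).build());

    SynthesizeSpeechResponse response =
        SynthesizeSpeechResponse.newBuilder()
            .addAllTimepoints(marks) // appends every element to the repeated field
            .build();

    System.out.println(response.getTimepointsCount()); // 2
  }
}
```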
    
  
  
  
  addRepeatedField(Descriptors.FieldDescriptor field, Object value)
  
    public SynthesizeSpeechResponse.Builder addRepeatedField(Descriptors.FieldDescriptor field, Object value)
   
  
  
  Overrides
  
  
  addTimepoints(Timepoint value)
  
    public SynthesizeSpeechResponse.Builder addTimepoints(Timepoint value)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;

addTimepoints(Timepoint.Builder builderForValue)
  
    public SynthesizeSpeechResponse.Builder addTimepoints(Timepoint.Builder builderForValue)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;

addTimepoints(int index, Timepoint value)
  
    public SynthesizeSpeechResponse.Builder addTimepoints(int index, Timepoint value)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;

addTimepoints(int index, Timepoint.Builder builderForValue)
  
    public SynthesizeSpeechResponse.Builder addTimepoints(int index, Timepoint.Builder builderForValue)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;

addTimepointsBuilder()
  
    public Timepoint.Builder addTimepointsBuilder()
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;

addTimepointsBuilder(int index)
  
    public Timepoint.Builder addTimepointsBuilder(int index)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;
    
      
Parameter

| Name | Description |
| --- | --- |
| index | int |
    
  
  
  
  build()
  
    public SynthesizeSpeechResponse build()
   
  
  
  buildPartial()
  
    public SynthesizeSpeechResponse buildPartial()
   
  
  
  clear()
  
    public SynthesizeSpeechResponse.Builder clear()
   
  
  Overrides
  
  
  clearAudioConfig()
  
    public SynthesizeSpeechResponse.Builder clearAudioConfig()
   
   The audio metadata of audio_content.
 .google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;

clearAudioContent()
  
    public SynthesizeSpeechResponse.Builder clearAudioContent()
   
   The audio data bytes encoded as specified in the request, including the
 header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS).
 For LINEAR16 audio, we include the WAV header. Note: as
 with all bytes fields, protobuffers use a pure binary representation,
 whereas JSON representations use base64.
 bytes audio_content = 1;

clearField(Descriptors.FieldDescriptor field)
  
    public SynthesizeSpeechResponse.Builder clearField(Descriptors.FieldDescriptor field)
   
  
  
  Overrides
  
  
  clearOneof(Descriptors.OneofDescriptor oneof)
  
    public SynthesizeSpeechResponse.Builder clearOneof(Descriptors.OneofDescriptor oneof)
   
  
  
  Overrides
  
  
  clearTimepoints()
  
    public SynthesizeSpeechResponse.Builder clearTimepoints()
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;

clone()
  
    public SynthesizeSpeechResponse.Builder clone()
   
  
  Overrides
  
  
  getAudioConfig()
  
    public AudioConfig getAudioConfig()
   
   The audio metadata of audio_content.
 .google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;

getAudioConfigBuilder()
  
    public AudioConfig.Builder getAudioConfigBuilder()
   
   The audio metadata of audio_content.
 .google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;

getAudioConfigOrBuilder()
  
    public AudioConfigOrBuilder getAudioConfigOrBuilder()
   
   The audio metadata of audio_content.
 .google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;

getAudioContent()
  
    public ByteString getAudioContent()
   
   The audio data bytes encoded as specified in the request, including the
 header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS).
 For LINEAR16 audio, we include the WAV header. Note: as
 with all bytes fields, protobuffers use a pure binary representation,
 whereas JSON representations use base64.
 bytes audio_content = 1;
    
      
Returns

| Type | Description |
| --- | --- |
| ByteString | The audioContent. |
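
As a sketch of how the returned bytes are typically consumed, the snippet below writes the audio content to a file. The helper class, method name, and file path are assumptions; the file extension should match the encoding requested in the original request.

```java
import com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse;
import com.google.protobuf.ByteString;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class WriteAudioContentSketch {
  // Writes the encoded audio bytes to disk, e.g. "output.mp3" for MP3
  // or "output.wav" for LINEAR16 (the WAV header is already included).
  static void writeAudio(SynthesizeSpeechResponse response, String path) throws IOException {
    ByteString audio = response.getAudioContent();
    try (OutputStream out = new FileOutputStream(path)) {
      audio.writeTo(out); // ByteString can stream directly to an OutputStream
    }
  }
}
```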
    
  
  
  getDefaultInstanceForType()
  
    public SynthesizeSpeechResponse getDefaultInstanceForType()
   
  
  
  getDescriptorForType()
  
    public Descriptors.Descriptor getDescriptorForType()
   
  
  Overrides
  
  
  getTimepoints(int index)
  
    public Timepoint getTimepoints(int index)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;
    
      
Parameter

| Name | Description |
| --- | --- |
| index | int |
    
  
  
  
  getTimepointsBuilder(int index)
  
    public Timepoint.Builder getTimepointsBuilder(int index)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;
    
      
Parameter

| Name | Description |
| --- | --- |
| index | int |
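
A hypothetical sketch of the in-place editing pattern these nested builders enable: the element builder obtained here is backed by the parent builder, so mutations show up in the parent's build() output without calling a setter again. The class name and mark rename are illustrative.

```java
import com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse;
import com.google.cloud.texttospeech.v1beta1.Timepoint;

public class EditTimepointInPlaceSketch {
  public static void main(String[] args) {
    SynthesizeSpeechResponse.Builder builder =
        SynthesizeSpeechResponse.newBuilder()
            .addTimepoints(Timepoint.newBuilder().setMarkName("old").setTimeSeconds(1.0));

    // Mutate element 0 through its nested builder; no need to set it back.
    builder.getTimepointsBuilder(0).setMarkName("renamed");

    System.out.println(builder.build().getTimepoints(0).getMarkName()); // renamed
  }
}
```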
    
  
  
  
  getTimepointsBuilderList()
  
    public List<Timepoint.Builder> getTimepointsBuilderList()
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;

getTimepointsCount()
  
    public int getTimepointsCount()
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;
    
      
Returns

| Type | Description |
| --- | --- |
| int |  |
    
  
  
  getTimepointsList()
  
    public List<Timepoint> getTimepointsList()
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;
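
As a sketch of the typical consumption pattern for this list, the snippet below maps each SSML <mark/> name to its offset in the output audio. The class and method names are illustrative.

```java
import com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse;
import com.google.cloud.texttospeech.v1beta1.Timepoint;

public class ListTimepointsSketch {
  static void printTimepoints(SynthesizeSpeechResponse response) {
    for (Timepoint tp : response.getTimepointsList()) {
      // Each entry pairs an SSML <mark/> name with a time in the output audio.
      System.out.printf("%s -> %.3f s%n", tp.getMarkName(), tp.getTimeSeconds());
    }
  }
}
```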

getTimepointsOrBuilder(int index)
  
    public TimepointOrBuilder getTimepointsOrBuilder(int index)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;
    
      
        | Parameter | 
      
        | Name | Description | 
      
        | index | int
 | 
    
  
  
  
  getTimepointsOrBuilderList()
  
    public List<? extends TimepointOrBuilder> getTimepointsOrBuilderList()
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;
    
      
Returns

| Type | Description |
| --- | --- |
| List<? extends com.google.cloud.texttospeech.v1beta1.TimepointOrBuilder> |  |
    
  
  
  hasAudioConfig()
  
    public boolean hasAudioConfig()
   
   The audio metadata of audio_content.
 .google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;
    
      
Returns

| Type | Description |
| --- | --- |
| boolean | Whether the audioConfig field is set. |
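
A short sketch of the usual guard pattern: because audio_config is a singular message field, checking hasAudioConfig() distinguishes "unset" from a default instance before reading it. The helper class and log strings are illustrative.

```java
import com.google.cloud.texttospeech.v1beta1.AudioConfig;
import com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse;

public class AudioConfigGuardSketch {
  static void describe(SynthesizeSpeechResponse response) {
    if (response.hasAudioConfig()) {
      AudioConfig config = response.getAudioConfig();
      System.out.println("Encoding: " + config.getAudioEncoding()
          + ", sample rate: " + config.getSampleRateHertz());
    } else {
      // getAudioConfig() would return AudioConfig.getDefaultInstance() here.
      System.out.println("Response carries no audio_config metadata.");
    }
  }
}
```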
    
  
  
  internalGetFieldAccessorTable()
  
    protected GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
   
  
  Overrides
  
  
  isInitialized()
  
    public final boolean isInitialized()
   
  
  Overrides
  
  
  mergeAudioConfig(AudioConfig value)
  
    public SynthesizeSpeechResponse.Builder mergeAudioConfig(AudioConfig value)
   
   The audio metadata of audio_content.
 .google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;

mergeFrom(SynthesizeSpeechResponse other)
  
    public SynthesizeSpeechResponse.Builder mergeFrom(SynthesizeSpeechResponse other)
   
  
  
  
  mergeFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)
  
    public SynthesizeSpeechResponse.Builder mergeFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)
   
  
  
  Overrides
  
  
  
  mergeFrom(Message other)
  
    public SynthesizeSpeechResponse.Builder mergeFrom(Message other)
   
  
    
      
Parameter

| Name | Description |
| --- | --- |
| other | Message |
    
  
  
  Overrides
  
  
  mergeUnknownFields(UnknownFieldSet unknownFields)
  
    public final SynthesizeSpeechResponse.Builder mergeUnknownFields(UnknownFieldSet unknownFields)
   
  
  
  Overrides
  
  
  removeTimepoints(int index)
  
    public SynthesizeSpeechResponse.Builder removeTimepoints(int index)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;
    
      
Parameter

| Name | Description |
| --- | --- |
| index | int |
    
  
  
  
  setAudioConfig(AudioConfig value)
  
    public SynthesizeSpeechResponse.Builder setAudioConfig(AudioConfig value)
   
   The audio metadata of audio_content.
 .google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;

setAudioConfig(AudioConfig.Builder builderForValue)
  
    public SynthesizeSpeechResponse.Builder setAudioConfig(AudioConfig.Builder builderForValue)
   
   The audio metadata of audio_content.
 .google.cloud.texttospeech.v1beta1.AudioConfig audio_config = 4;

setAudioContent(ByteString value)
  
    public SynthesizeSpeechResponse.Builder setAudioContent(ByteString value)
   
   The audio data bytes encoded as specified in the request, including the
 header for encodings that are wrapped in containers (e.g. MP3, OGG_OPUS).
 For LINEAR16 audio, we include the WAV header. Note: as
 with all bytes fields, protobuffers use a pure binary representation,
 whereas JSON representations use base64.
 bytes audio_content = 1;
    
      
Parameter

| Name | Description |
| --- | --- |
| value | ByteString. The audioContent to set. |
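
A minimal sketch, assuming the audio bytes come from a previously saved file (the path and helper names are placeholders), of loading them into the builder, e.g. when reconstructing a response in a test.

```java
import com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse;
import com.google.protobuf.ByteString;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

public class SetAudioContentSketch {
  static SynthesizeSpeechResponse fromFile(String path) throws IOException {
    byte[] bytes = Files.readAllBytes(Paths.get(path)); // placeholder source of audio bytes
    return SynthesizeSpeechResponse.newBuilder()
        .setAudioContent(ByteString.copyFrom(bytes))
        .build();
  }
}
```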
    
  
  
  
  setField(Descriptors.FieldDescriptor field, Object value)
  
    public SynthesizeSpeechResponse.Builder setField(Descriptors.FieldDescriptor field, Object value)
   
  
  
  Overrides
  
  
  setRepeatedField(Descriptors.FieldDescriptor field, int index, Object value)
  
    public SynthesizeSpeechResponse.Builder setRepeatedField(Descriptors.FieldDescriptor field, int index, Object value)
   
  
  
  Overrides
  
  
  setTimepoints(int index, Timepoint value)
  
    public SynthesizeSpeechResponse.Builder setTimepoints(int index, Timepoint value)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;

setTimepoints(int index, Timepoint.Builder builderForValue)
  
    public SynthesizeSpeechResponse.Builder setTimepoints(int index, Timepoint.Builder builderForValue)
   
   A link between a position in the original request input and a corresponding
 time in the output audio. It's only supported via <mark> of SSML input.
 repeated .google.cloud.texttospeech.v1beta1.Timepoint timepoints = 2;

setUnknownFields(UnknownFieldSet unknownFields)
  
    public final SynthesizeSpeechResponse.Builder setUnknownFields(UnknownFieldSet unknownFields)
   
  
  
  Overrides