Constants
BigQueryRead_CreateReadSession_FullMethodName, BigQueryRead_ReadRows_FullMethodName, BigQueryRead_SplitReadStream_FullMethodName
const (
BigQueryRead_CreateReadSession_FullMethodName = "/google.cloud.bigquery.storage.v1beta2.BigQueryRead/CreateReadSession"
BigQueryRead_ReadRows_FullMethodName = "/google.cloud.bigquery.storage.v1beta2.BigQueryRead/ReadRows"
BigQueryRead_SplitReadStream_FullMethodName = "/google.cloud.bigquery.storage.v1beta2.BigQueryRead/SplitReadStream"
)
BigQueryWrite_CreateWriteStream_FullMethodName, BigQueryWrite_AppendRows_FullMethodName, BigQueryWrite_GetWriteStream_FullMethodName, BigQueryWrite_FinalizeWriteStream_FullMethodName, BigQueryWrite_BatchCommitWriteStreams_FullMethodName, BigQueryWrite_FlushRows_FullMethodName
const (
BigQueryWrite_CreateWriteStream_FullMethodName = "/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/CreateWriteStream"
BigQueryWrite_AppendRows_FullMethodName = "/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/AppendRows"
BigQueryWrite_GetWriteStream_FullMethodName = "/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/GetWriteStream"
BigQueryWrite_FinalizeWriteStream_FullMethodName = "/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/FinalizeWriteStream"
BigQueryWrite_BatchCommitWriteStreams_FullMethodName = "/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/BatchCommitWriteStreams"
BigQueryWrite_FlushRows_FullMethodName = "/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/FlushRows"
)
Variables
ArrowSerializationOptions_Format_name, ArrowSerializationOptions_Format_value
var (
ArrowSerializationOptions_Format_name = map[int32]string{
0: "FORMAT_UNSPECIFIED",
1: "ARROW_0_14",
2: "ARROW_0_15",
}
ArrowSerializationOptions_Format_value = map[string]int32{
"FORMAT_UNSPECIFIED": 0,
"ARROW_0_14": 1,
"ARROW_0_15": 2,
}
)
Enum value maps for ArrowSerializationOptions_Format.
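These paired maps give round-trip lookups between enum names and numbers. A minimal sketch (the storagepb import path is the package's current home and may differ in older module versions):

package main

import (
	"fmt"

	"cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb"
)

func main() {
	// Number -> name, and name -> number.
	name := storagepb.ArrowSerializationOptions_Format_name[1]            // "ARROW_0_14"
	num := storagepb.ArrowSerializationOptions_Format_value["ARROW_0_15"] // 2
	// Converting the number back to the typed enum yields its String() form.
	fmt.Println(name, storagepb.ArrowSerializationOptions_Format(num))
}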
StorageError_StorageErrorCode_name, StorageError_StorageErrorCode_value
var (
StorageError_StorageErrorCode_name = map[int32]string{
0: "STORAGE_ERROR_CODE_UNSPECIFIED",
1: "TABLE_NOT_FOUND",
2: "STREAM_ALREADY_COMMITTED",
3: "STREAM_NOT_FOUND",
4: "INVALID_STREAM_TYPE",
5: "INVALID_STREAM_STATE",
6: "STREAM_FINALIZED",
}
StorageError_StorageErrorCode_value = map[string]int32{
"STORAGE_ERROR_CODE_UNSPECIFIED": 0,
"TABLE_NOT_FOUND": 1,
"STREAM_ALREADY_COMMITTED": 2,
"STREAM_NOT_FOUND": 3,
"INVALID_STREAM_TYPE": 4,
"INVALID_STREAM_STATE": 5,
"STREAM_FINALIZED": 6,
}
)
Enum value maps for StorageError_StorageErrorCode.
DataFormat_name, DataFormat_value
var (
DataFormat_name = map[int32]string{
0: "DATA_FORMAT_UNSPECIFIED",
1: "AVRO",
2: "ARROW",
}
DataFormat_value = map[string]int32{
"DATA_FORMAT_UNSPECIFIED": 0,
"AVRO": 1,
"ARROW": 2,
}
)
Enum value maps for DataFormat.
WriteStream_Type_name, WriteStream_Type_value
var (
WriteStream_Type_name = map[int32]string{
0: "TYPE_UNSPECIFIED",
1: "COMMITTED",
2: "PENDING",
3: "BUFFERED",
}
WriteStream_Type_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
"COMMITTED": 1,
"PENDING": 2,
"BUFFERED": 3,
}
)
Enum value maps for WriteStream_Type.
TableFieldSchema_Type_name, TableFieldSchema_Type_value
var (
TableFieldSchema_Type_name = map[int32]string{
0: "TYPE_UNSPECIFIED",
1: "STRING",
2: "INT64",
3: "DOUBLE",
4: "STRUCT",
5: "BYTES",
6: "BOOL",
7: "TIMESTAMP",
8: "DATE",
9: "TIME",
10: "DATETIME",
11: "GEOGRAPHY",
12: "NUMERIC",
13: "BIGNUMERIC",
14: "INTERVAL",
15: "JSON",
}
TableFieldSchema_Type_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
"STRING": 1,
"INT64": 2,
"DOUBLE": 3,
"STRUCT": 4,
"BYTES": 5,
"BOOL": 6,
"TIMESTAMP": 7,
"DATE": 8,
"TIME": 9,
"DATETIME": 10,
"GEOGRAPHY": 11,
"NUMERIC": 12,
"BIGNUMERIC": 13,
"INTERVAL": 14,
"JSON": 15,
}
)
Enum value maps for TableFieldSchema_Type.
TableFieldSchema_Mode_name, TableFieldSchema_Mode_value
var (
TableFieldSchema_Mode_name = map[int32]string{
0: "MODE_UNSPECIFIED",
1: "NULLABLE",
2: "REQUIRED",
3: "REPEATED",
}
TableFieldSchema_Mode_value = map[string]int32{
"MODE_UNSPECIFIED": 0,
"NULLABLE": 1,
"REQUIRED": 2,
"REPEATED": 3,
}
)
Enum value maps for TableFieldSchema_Mode.
BigQueryRead_ServiceDesc
var BigQueryRead_ServiceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.bigquery.storage.v1beta2.BigQueryRead",
HandlerType: (*BigQueryReadServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateReadSession",
Handler: _BigQueryRead_CreateReadSession_Handler,
},
{
MethodName: "SplitReadStream",
Handler: _BigQueryRead_SplitReadStream_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ReadRows",
Handler: _BigQueryRead_ReadRows_Handler,
ServerStreams: true,
},
},
Metadata: "google/cloud/bigquery/storage/v1beta2/storage.proto",
}
BigQueryRead_ServiceDesc is the grpc.ServiceDesc for BigQueryRead service. It's only intended for direct use with grpc.RegisterService, and not to be introspected or modified (even as a copy)
BigQueryWrite_ServiceDesc
var BigQueryWrite_ServiceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.bigquery.storage.v1beta2.BigQueryWrite",
HandlerType: (*BigQueryWriteServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateWriteStream",
Handler: _BigQueryWrite_CreateWriteStream_Handler,
},
{
MethodName: "GetWriteStream",
Handler: _BigQueryWrite_GetWriteStream_Handler,
},
{
MethodName: "FinalizeWriteStream",
Handler: _BigQueryWrite_FinalizeWriteStream_Handler,
},
{
MethodName: "BatchCommitWriteStreams",
Handler: _BigQueryWrite_BatchCommitWriteStreams_Handler,
},
{
MethodName: "FlushRows",
Handler: _BigQueryWrite_FlushRows_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "AppendRows",
Handler: _BigQueryWrite_AppendRows_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "google/cloud/bigquery/storage/v1beta2/storage.proto",
}
BigQueryWrite_ServiceDesc is the grpc.ServiceDesc for BigQueryWrite service. It's only intended for direct use with grpc.RegisterService, and not to be introspected or modified (even as a copy)
File_google_cloud_bigquery_storage_v1beta2_arrow_proto
var File_google_cloud_bigquery_storage_v1beta2_arrow_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta2_avro_proto
var File_google_cloud_bigquery_storage_v1beta2_avro_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta2_protobuf_proto
var File_google_cloud_bigquery_storage_v1beta2_protobuf_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta2_storage_proto
var File_google_cloud_bigquery_storage_v1beta2_storage_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta2_stream_proto
var File_google_cloud_bigquery_storage_v1beta2_stream_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta2_table_proto
var File_google_cloud_bigquery_storage_v1beta2_table_proto protoreflect.FileDescriptor
Functions
func RegisterBigQueryReadServer
func RegisterBigQueryReadServer(s grpc.ServiceRegistrar, srv BigQueryReadServer)
func RegisterBigQueryWriteServer (deprecated)
func RegisterBigQueryWriteServer(s grpc.ServiceRegistrar, srv BigQueryWriteServer)
Deprecated: Do not use.
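For illustration, a minimal sketch of a server registered through RegisterBigQueryReadServer. Embedding UnimplementedBigQueryReadServer (as the BigQueryReadServer doc below requires) supplies default method stubs; the listen address and type name are arbitrary.

package main

import (
	"log"
	"net"

	"cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb"
	"google.golang.org/grpc"
)

// readServer is a hypothetical partial implementation; methods that are
// not overridden fall through to the embedded unimplemented defaults.
type readServer struct {
	storagepb.UnimplementedBigQueryReadServer
}

func main() {
	lis, err := net.Listen("tcp", "localhost:9090")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	storagepb.RegisterBigQueryReadServer(s, &readServer{})
	log.Fatal(s.Serve(lis))
}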
Types
AppendRowsRequest
type AppendRowsRequest struct {
// Required. The stream that is the target of the append operation. This value
// must be specified for the initial request. If subsequent requests specify
// the stream name, it must equal the value provided in the first request.
// To write to the _default stream, populate this field with a string in the
// format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
// If present, the write is only performed if the next append offset is same
// as the provided value. If not present, the write is performed at the
// current end of stream. Specifying a value for this field is not allowed
// when calling AppendRows for the '_default' stream.
Offset *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"`
// Input rows. The `writer_schema` field must be specified in the initial
// request and is currently ignored if specified in subsequent requests.
// Subsequent requests must have data in the same format as the initial
// request.
//
// Types that are assignable to Rows:
//
// *AppendRowsRequest_ProtoRows
Rows isAppendRowsRequest_Rows `protobuf_oneof:"rows"`
// ID set by the client to annotate its identity. Only the setting on the
// initial request is respected.
TraceId string `protobuf:"bytes,6,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
// contains filtered or unexported fields
}
Request message for AppendRows.
func (*AppendRowsRequest) Descriptor
func (*AppendRowsRequest) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsRequest.ProtoReflect.Descriptor instead.
func (*AppendRowsRequest) GetOffset
func (x *AppendRowsRequest) GetOffset() *wrapperspb.Int64Value
func (*AppendRowsRequest) GetProtoRows
func (x *AppendRowsRequest) GetProtoRows() *AppendRowsRequest_ProtoData
func (*AppendRowsRequest) GetRows
func (m *AppendRowsRequest) GetRows() isAppendRowsRequest_Rows
func (*AppendRowsRequest) GetTraceId
func (x *AppendRowsRequest) GetTraceId() string
func (*AppendRowsRequest) GetWriteStream
func (x *AppendRowsRequest) GetWriteStream() string
func (*AppendRowsRequest) ProtoMessage
func (*AppendRowsRequest) ProtoMessage()
func (*AppendRowsRequest) ProtoReflect
func (x *AppendRowsRequest) ProtoReflect() protoreflect.Message
func (*AppendRowsRequest) Reset
func (x *AppendRowsRequest) Reset()
func (*AppendRowsRequest) String
func (x *AppendRowsRequest) String() string
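As a sketch of the rule above (write_stream and writer_schema set on the initial request only), the following builds a first request against the _default stream. The helper name, resource names, and trace ID are placeholders; the descriptor and serialized rows are assumed to be prepared elsewhere.

import (
	"cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb"
	"google.golang.org/protobuf/types/descriptorpb"
)

// firstAppendRequest builds the initial request of an AppendRows stream.
// desc must be a self-contained DescriptorProto for the row message, and
// rows must hold rows already serialized with that schema.
func firstAppendRequest(desc *descriptorpb.DescriptorProto, rows [][]byte) *storagepb.AppendRowsRequest {
	return &storagepb.AppendRowsRequest{
		WriteStream: "projects/my-project/datasets/my_dataset/tables/my_table/_default",
		TraceId:     "example-writer", // only the initial request's value is respected
		Rows: &storagepb.AppendRowsRequest_ProtoRows{
			ProtoRows: &storagepb.AppendRowsRequest_ProtoData{
				WriterSchema: &storagepb.ProtoSchema{ProtoDescriptor: desc},
				Rows:         &storagepb.ProtoRows{SerializedRows: rows},
			},
		},
	}
}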
AppendRowsRequest_ProtoData
type AppendRowsRequest_ProtoData struct {
// Proto schema used to serialize the data.
WriterSchema *ProtoSchema `protobuf:"bytes,1,opt,name=writer_schema,json=writerSchema,proto3" json:"writer_schema,omitempty"`
// Serialized row data in protobuf message format.
Rows *ProtoRows `protobuf:"bytes,2,opt,name=rows,proto3" json:"rows,omitempty"`
// contains filtered or unexported fields
}
Proto schema and data.
func (*AppendRowsRequest_ProtoData) Descriptor
func (*AppendRowsRequest_ProtoData) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsRequest_ProtoData.ProtoReflect.Descriptor instead.
func (*AppendRowsRequest_ProtoData) GetRows
func (x *AppendRowsRequest_ProtoData) GetRows() *ProtoRows
func (*AppendRowsRequest_ProtoData) GetWriterSchema
func (x *AppendRowsRequest_ProtoData) GetWriterSchema() *ProtoSchema
func (*AppendRowsRequest_ProtoData) ProtoMessage
func (*AppendRowsRequest_ProtoData) ProtoMessage()
func (*AppendRowsRequest_ProtoData) ProtoReflect
func (x *AppendRowsRequest_ProtoData) ProtoReflect() protoreflect.Message
func (*AppendRowsRequest_ProtoData) Reset
func (x *AppendRowsRequest_ProtoData) Reset()
func (*AppendRowsRequest_ProtoData) String
func (x *AppendRowsRequest_ProtoData) String() string
AppendRowsRequest_ProtoRows
type AppendRowsRequest_ProtoRows struct {
// Rows in proto format.
ProtoRows *AppendRowsRequest_ProtoData `protobuf:"bytes,4,opt,name=proto_rows,json=protoRows,proto3,oneof"`
}
AppendRowsResponse
type AppendRowsResponse struct {
// Types that are assignable to Response:
//
// *AppendRowsResponse_AppendResult_
// *AppendRowsResponse_Error
Response isAppendRowsResponse_Response `protobuf_oneof:"response"`
// If backend detects a schema update, pass it to user so that user can
// use it to input new type of message. It will be empty when no schema
// updates have occurred.
UpdatedSchema *TableSchema `protobuf:"bytes,3,opt,name=updated_schema,json=updatedSchema,proto3" json:"updated_schema,omitempty"`
// contains filtered or unexported fields
}
Response message for AppendRows.
func (*AppendRowsResponse) Descriptor
func (*AppendRowsResponse) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsResponse.ProtoReflect.Descriptor instead.
func (*AppendRowsResponse) GetAppendResult
func (x *AppendRowsResponse) GetAppendResult() *AppendRowsResponse_AppendResult
func (*AppendRowsResponse) GetError
func (x *AppendRowsResponse) GetError() *status.Status
func (*AppendRowsResponse) GetResponse
func (m *AppendRowsResponse) GetResponse() isAppendRowsResponse_Response
func (*AppendRowsResponse) GetUpdatedSchema
func (x *AppendRowsResponse) GetUpdatedSchema() *TableSchema
func (*AppendRowsResponse) ProtoMessage
func (*AppendRowsResponse) ProtoMessage()
func (*AppendRowsResponse) ProtoReflect
func (x *AppendRowsResponse) ProtoReflect() protoreflect.Message
func (*AppendRowsResponse) Reset
func (x *AppendRowsResponse) Reset()
func (*AppendRowsResponse) String
func (x *AppendRowsResponse) String() string
AppendRowsResponse_AppendResult
type AppendRowsResponse_AppendResult struct {
// The row offset at which the last append occurred. The offset will not be
// set if appending using default streams.
Offset *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=offset,proto3" json:"offset,omitempty"`
// contains filtered or unexported fields
}
AppendResult is returned for successful append requests.
func (*AppendRowsResponse_AppendResult) Descriptor
func (*AppendRowsResponse_AppendResult) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsResponse_AppendResult.ProtoReflect.Descriptor instead.
func (*AppendRowsResponse_AppendResult) GetOffset
func (x *AppendRowsResponse_AppendResult) GetOffset() *wrapperspb.Int64Value
func (*AppendRowsResponse_AppendResult) ProtoMessage
func (*AppendRowsResponse_AppendResult) ProtoMessage()
func (*AppendRowsResponse_AppendResult) ProtoReflect
func (x *AppendRowsResponse_AppendResult) ProtoReflect() protoreflect.Message
func (*AppendRowsResponse_AppendResult) Reset
func (x *AppendRowsResponse_AppendResult) Reset()
func (*AppendRowsResponse_AppendResult) String
func (x *AppendRowsResponse_AppendResult) String() string
AppendRowsResponse_AppendResult_
type AppendRowsResponse_AppendResult_ struct {
// Result if the append is successful.
AppendResult *AppendRowsResponse_AppendResult `protobuf:"bytes,1,opt,name=append_result,json=appendResult,proto3,oneof"`
}
AppendRowsResponse_Error
type AppendRowsResponse_Error struct {
// Error returned when problems were encountered. If present,
// it indicates rows were not accepted into the system.
// Users can retry or continue with other append requests within the
// same connection.
//
// Additional information about error signalling:
//
// ALREADY_EXISTS: Happens when an append specified an offset, and the
// backend already has received data at this offset. Typically encountered
// in retry scenarios, and can be ignored.
//
// OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
// the current end of the stream.
//
// INVALID_ARGUMENT: Indicates a malformed request or data.
//
// ABORTED: Request processing is aborted because of prior failures. The
// request can be retried if previous failure is addressed.
//
// INTERNAL: Indicates server side error(s) that can be retried.
Error *status.Status `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
}
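Tying these response types together, a sketch of inspecting the Response oneof; the status-code handling follows the error-signalling notes in the comment above. The helper name is hypothetical, and the storagepb import path is assumed as elsewhere.

import (
	"log"

	"cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb"
	"google.golang.org/grpc/codes"
)

// handleAppendResponse reports success or classifies the returned error.
func handleAppendResponse(resp *storagepb.AppendRowsResponse) {
	switch r := resp.GetResponse().(type) {
	case *storagepb.AppendRowsResponse_AppendResult_:
		// Offset is unset when writing to the _default stream.
		if off := r.AppendResult.GetOffset(); off != nil {
			log.Printf("appended at offset %d", off.GetValue())
		}
	case *storagepb.AppendRowsResponse_Error:
		// ALREADY_EXISTS: the offset was already written (typical retry case).
		if codes.Code(r.Error.GetCode()) == codes.AlreadyExists {
			log.Print("duplicate append; safe to ignore")
			return
		}
		log.Printf("append failed: %s", r.Error.GetMessage())
	}
	if resp.GetUpdatedSchema() != nil {
		log.Print("backend reported a schema update")
	}
}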
ArrowRecordBatch
type ArrowRecordBatch struct {
// IPC-serialized Arrow RecordBatch.
SerializedRecordBatch []byte `protobuf:"bytes,1,opt,name=serialized_record_batch,json=serializedRecordBatch,proto3" json:"serialized_record_batch,omitempty"`
// contains filtered or unexported fields
}
Arrow RecordBatch.
func (*ArrowRecordBatch) Descriptor
func (*ArrowRecordBatch) Descriptor() ([]byte, []int)
Deprecated: Use ArrowRecordBatch.ProtoReflect.Descriptor instead.
func (*ArrowRecordBatch) GetSerializedRecordBatch
func (x *ArrowRecordBatch) GetSerializedRecordBatch() []byte
func (*ArrowRecordBatch) ProtoMessage
func (*ArrowRecordBatch) ProtoMessage()
func (*ArrowRecordBatch) ProtoReflect
func (x *ArrowRecordBatch) ProtoReflect() protoreflect.Message
func (*ArrowRecordBatch) Reset
func (x *ArrowRecordBatch) Reset()
func (*ArrowRecordBatch) String
func (x *ArrowRecordBatch) String() string
ArrowSchema
type ArrowSchema struct {
// IPC serialized Arrow schema.
SerializedSchema []byte `protobuf:"bytes,1,opt,name=serialized_schema,json=serializedSchema,proto3" json:"serialized_schema,omitempty"`
// contains filtered or unexported fields
}
Arrow schema as specified in https://arrow.apache.org/docs/python/api/datatypes.html and serialized to bytes using IPC: https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc
See code samples on how this message can be deserialized.
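One way to do that in Go is via the Apache Arrow library's IPC reader, which parses the stream's schema on construction. The arrow module path and major version below are assumptions; adjust them to your go.mod.

import (
	"bytes"
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/ipc"

	"cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb"
)

// printArrowSchema decodes the IPC-serialized schema carried by the message.
func printArrowSchema(s *storagepb.ArrowSchema) error {
	rdr, err := ipc.NewReader(bytes.NewReader(s.GetSerializedSchema()))
	if err != nil {
		return err
	}
	defer rdr.Release()
	fmt.Println(rdr.Schema()) // field names and types
	return nil
}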
func (*ArrowSchema) Descriptor
func (*ArrowSchema) Descriptor() ([]byte, []int)
Deprecated: Use ArrowSchema.ProtoReflect.Descriptor instead.
func (*ArrowSchema) GetSerializedSchema
func (x *ArrowSchema) GetSerializedSchema() []byte
func (*ArrowSchema) ProtoMessage
func (*ArrowSchema) ProtoMessage()
func (*ArrowSchema) ProtoReflect
func (x *ArrowSchema) ProtoReflect() protoreflect.Message
func (*ArrowSchema) Reset
func (x *ArrowSchema) Reset()
func (*ArrowSchema) String
func (x *ArrowSchema) String() string
ArrowSerializationOptions
type ArrowSerializationOptions struct {
// The Arrow IPC format to use.
Format ArrowSerializationOptions_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.cloud.bigquery.storage.v1beta2.ArrowSerializationOptions_Format" json:"format,omitempty"`
// contains filtered or unexported fields
}
Contains options specific to Arrow Serialization.
func (*ArrowSerializationOptions) Descriptor
func (*ArrowSerializationOptions) Descriptor() ([]byte, []int)
Deprecated: Use ArrowSerializationOptions.ProtoReflect.Descriptor instead.
func (*ArrowSerializationOptions) GetFormat
func (x *ArrowSerializationOptions) GetFormat() ArrowSerializationOptions_Format
func (*ArrowSerializationOptions) ProtoMessage
func (*ArrowSerializationOptions) ProtoMessage()
func (*ArrowSerializationOptions) ProtoReflect
func (x *ArrowSerializationOptions) ProtoReflect() protoreflect.Message
func (*ArrowSerializationOptions) Reset
func (x *ArrowSerializationOptions) Reset()
func (*ArrowSerializationOptions) String
func (x *ArrowSerializationOptions) String() string
ArrowSerializationOptions_Format
type ArrowSerializationOptions_Format int32
The IPC format to use when serializing Arrow streams.
ArrowSerializationOptions_FORMAT_UNSPECIFIED, ArrowSerializationOptions_ARROW_0_14, ArrowSerializationOptions_ARROW_0_15
const (
// If unspecified, the IPC format as of the 0.15 release will be used.
ArrowSerializationOptions_FORMAT_UNSPECIFIED ArrowSerializationOptions_Format = 0
// Use the legacy IPC message format as of Apache Arrow Release 0.14.
ArrowSerializationOptions_ARROW_0_14 ArrowSerializationOptions_Format = 1
// Use the message format as of Apache Arrow Release 0.15.
ArrowSerializationOptions_ARROW_0_15 ArrowSerializationOptions_Format = 2
)
func (ArrowSerializationOptions_Format) Descriptor
func (ArrowSerializationOptions_Format) Descriptor() protoreflect.EnumDescriptor
func (ArrowSerializationOptions_Format) Enum
func (x ArrowSerializationOptions_Format) Enum() *ArrowSerializationOptions_Format
func (ArrowSerializationOptions_Format) EnumDescriptor
func (ArrowSerializationOptions_Format) EnumDescriptor() ([]byte, []int)
Deprecated: Use ArrowSerializationOptions_Format.Descriptor instead.
func (ArrowSerializationOptions_Format) Number
func (x ArrowSerializationOptions_Format) Number() protoreflect.EnumNumber
func (ArrowSerializationOptions_Format) String
func (x ArrowSerializationOptions_Format) String() string
func (ArrowSerializationOptions_Format) Type
func (ArrowSerializationOptions_Format) Type() protoreflect.EnumType
AvroRows
type AvroRows struct {
// Binary serialized rows in a block.
SerializedBinaryRows []byte `protobuf:"bytes,1,opt,name=serialized_binary_rows,json=serializedBinaryRows,proto3" json:"serialized_binary_rows,omitempty"`
// contains filtered or unexported fields
}
Avro rows.
func (*AvroRows) Descriptor
func (*AvroRows) Descriptor() ([]byte, []int)
Deprecated: Use AvroRows.ProtoReflect.Descriptor instead.
func (*AvroRows) GetSerializedBinaryRows
func (x *AvroRows) GetSerializedBinaryRows() []byte
func (*AvroRows) ProtoMessage
func (*AvroRows) ProtoMessage()
func (*AvroRows) ProtoReflect
func (x *AvroRows) ProtoReflect() protoreflect.Message
func (*AvroRows) Reset
func (x *AvroRows) Reset()
func (*AvroRows) String
func (x *AvroRows) String() string
AvroSchema
type AvroSchema struct {
// Json serialized schema, as described at
// https://avro.apache.org/docs/1.8.1/spec.html.
Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"`
// contains filtered or unexported fields
}
Avro schema.
func (*AvroSchema) Descriptor
func (*AvroSchema) Descriptor() ([]byte, []int)
Deprecated: Use AvroSchema.ProtoReflect.Descriptor instead.
func (*AvroSchema) GetSchema
func (x *AvroSchema) GetSchema() string
func (*AvroSchema) ProtoMessage
func (*AvroSchema) ProtoMessage()
func (*AvroSchema) ProtoReflect
func (x *AvroSchema) ProtoReflect() protoreflect.Message
func (*AvroSchema) Reset
func (x *AvroSchema) Reset()
func (*AvroSchema) String
func (x *AvroSchema) String() string
BatchCommitWriteStreamsRequest
type BatchCommitWriteStreamsRequest struct {
// Required. Parent table that all the streams should belong to, in the form
// of `projects/{project}/datasets/{dataset}/tables/{table}`.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. The group of streams that will be committed atomically.
WriteStreams []string `protobuf:"bytes,2,rep,name=write_streams,json=writeStreams,proto3" json:"write_streams,omitempty"`
// contains filtered or unexported fields
}
Request message for BatchCommitWriteStreams.
func (*BatchCommitWriteStreamsRequest) Descriptor
func (*BatchCommitWriteStreamsRequest) Descriptor() ([]byte, []int)
Deprecated: Use BatchCommitWriteStreamsRequest.ProtoReflect.Descriptor instead.
func (*BatchCommitWriteStreamsRequest) GetParent
func (x *BatchCommitWriteStreamsRequest) GetParent() string
func (*BatchCommitWriteStreamsRequest) GetWriteStreams
func (x *BatchCommitWriteStreamsRequest) GetWriteStreams() []string
func (*BatchCommitWriteStreamsRequest) ProtoMessage
func (*BatchCommitWriteStreamsRequest) ProtoMessage()
func (*BatchCommitWriteStreamsRequest) ProtoReflect
func (x *BatchCommitWriteStreamsRequest) ProtoReflect() protoreflect.Message
func (*BatchCommitWriteStreamsRequest) Reset
func (x *BatchCommitWriteStreamsRequest) Reset()
func (*BatchCommitWriteStreamsRequest) String
func (x *BatchCommitWriteStreamsRequest) String() string
BatchCommitWriteStreamsResponse
type BatchCommitWriteStreamsResponse struct {
// The time at which streams were committed in microseconds granularity.
// This field will only exist when there are no stream errors.
// **Note** if this field is not set, it means the commit was not successful.
CommitTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=commit_time,json=commitTime,proto3" json:"commit_time,omitempty"`
// Stream level error if commit failed. Only streams with error will be in
// the list.
// If empty, there is no error and all streams are committed successfully.
// If non-empty, certain streams have errors and no stream is committed due
// to the atomicity guarantee.
StreamErrors []*StorageError `protobuf:"bytes,2,rep,name=stream_errors,json=streamErrors,proto3" json:"stream_errors,omitempty"`
// contains filtered or unexported fields
}
Response message for BatchCommitWriteStreams.
func (*BatchCommitWriteStreamsResponse) Descriptor
func (*BatchCommitWriteStreamsResponse) Descriptor() ([]byte, []int)
Deprecated: Use BatchCommitWriteStreamsResponse.ProtoReflect.Descriptor instead.
func (*BatchCommitWriteStreamsResponse) GetCommitTime
func (x *BatchCommitWriteStreamsResponse) GetCommitTime() *timestamppb.Timestamp
func (*BatchCommitWriteStreamsResponse) GetStreamErrors
func (x *BatchCommitWriteStreamsResponse) GetStreamErrors() []*StorageError
func (*BatchCommitWriteStreamsResponse) ProtoMessage
func (*BatchCommitWriteStreamsResponse) ProtoMessage()
func (*BatchCommitWriteStreamsResponse) ProtoReflect
func (x *BatchCommitWriteStreamsResponse) ProtoReflect() protoreflect.Message
func (*BatchCommitWriteStreamsResponse) Reset
func (x *BatchCommitWriteStreamsResponse) Reset()
func (*BatchCommitWriteStreamsResponse) String
func (x *BatchCommitWriteStreamsResponse) String() string
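A sketch of the finalize-then-commit sequence that produces this response; per the field comments above, an unset commit_time together with entries in stream_errors means nothing was committed. Resource names are placeholders, and StorageError's Entity and ErrorMessage fields belong to this package, though its full definition falls outside this excerpt.

import (
	"context"
	"fmt"

	"cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb"
)

// commitPending finalizes one PENDING stream and commits it atomically.
func commitPending(ctx context.Context, client storagepb.BigQueryWriteClient, table, stream string) error {
	if _, err := client.FinalizeWriteStream(ctx, &storagepb.FinalizeWriteStreamRequest{Name: stream}); err != nil {
		return err
	}
	resp, err := client.BatchCommitWriteStreams(ctx, &storagepb.BatchCommitWriteStreamsRequest{
		Parent:       table,
		WriteStreams: []string{stream},
	})
	if err != nil {
		return err
	}
	if errs := resp.GetStreamErrors(); len(errs) > 0 {
		return fmt.Errorf("commit failed for %s: %s", errs[0].GetEntity(), errs[0].GetErrorMessage())
	}
	return nil
}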
BigQueryReadClient
type BigQueryReadClient interface {
// Creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Data is assigned to each stream such that roughly the same number of
// rows can be read from each stream. Because the server-side unit for
// assigning data is collections of rows, the API does not guarantee that
// each stream will return the same number of rows. Additionally, the
// limits are enforced based on the number of pre-filtered rows, so some
// filters can lead to lopsided assignments.
//
// Read sessions automatically expire 6 hours after they are created and do
// not require manual clean-up by the caller.
CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error)
// Reads rows from the stream in the format prescribed by the ReadSession.
// Each response contains one or more table rows, up to a maximum of 100 MiB
// per response; read requests which attempt to read individual rows larger
// than 100 MiB will fail.
//
// Each request also returns a set of stream statistics reflecting the current
// state of the stream.
ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryRead_ReadRowsClient, error)
// Splits a given `ReadStream` into two `ReadStream` objects. These
// `ReadStream` objects are referred to as the primary and the residual
// streams of the split. The original `ReadStream` can still be read from in
// the same manner as before. Both of the returned `ReadStream` objects can
// also be read from, and the rows returned by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back-to-back in the
// original `ReadStream`. Concretely, it is guaranteed that for streams
// original, primary, and residual, that original[0-j] = primary[0-j] and
// original[j-n] = residual[0-m] once the streams have been read to
// completion.
SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error)
}
BigQueryReadClient is the client API for BigQueryRead service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
func NewBigQueryReadClient
func NewBigQueryReadClient(cc grpc.ClientConnInterface) BigQueryReadClient
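A sketch of the full read path through this client: session creation, then the server-streaming Recv loop. It assumes conn is an authenticated grpc.ClientConnInterface for the BigQuery Storage endpoint; the helper name and project/table names are placeholders.

import (
	"context"
	"errors"
	"io"
	"log"

	"cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb"
	"google.golang.org/grpc"
)

func readTable(ctx context.Context, conn grpc.ClientConnInterface) error {
	client := storagepb.NewBigQueryReadClient(conn)
	session, err := client.CreateReadSession(ctx, &storagepb.CreateReadSessionRequest{
		Parent: "projects/my-project",
		ReadSession: &storagepb.ReadSession{
			Table:      "projects/my-project/datasets/my_dataset/tables/my_table",
			DataFormat: storagepb.DataFormat_AVRO,
		},
		MaxStreamCount: 1,
	})
	if err != nil {
		return err
	}
	// At least one stream is created with the session (see ReadSession below).
	if len(session.GetStreams()) == 0 {
		return errors.New("no streams in session")
	}
	stream, err := client.ReadRows(ctx, &storagepb.ReadRowsRequest{
		ReadStream: session.GetStreams()[0].GetName(),
		Offset:     0, // streams must be read starting from offset 0
	})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // stream drained
		}
		if err != nil {
			return err
		}
		log.Printf("block of %d rows", resp.GetRowCount())
	}
}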
BigQueryReadServer
type BigQueryReadServer interface {
// Creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Data is assigned to each stream such that roughly the same number of
// rows can be read from each stream. Because the server-side unit for
// assigning data is collections of rows, the API does not guarantee that
// each stream will return the same number of rows. Additionally, the
// limits are enforced based on the number of pre-filtered rows, so some
// filters can lead to lopsided assignments.
//
// Read sessions automatically expire 6 hours after they are created and do
// not require manual clean-up by the caller.
CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
// Reads rows from the stream in the format prescribed by the ReadSession.
// Each response contains one or more table rows, up to a maximum of 100 MiB
// per response; read requests which attempt to read individual rows larger
// than 100 MiB will fail.
//
// Each request also returns a set of stream statistics reflecting the current
// state of the stream.
ReadRows(*ReadRowsRequest, BigQueryRead_ReadRowsServer) error
// Splits a given `ReadStream` into two `ReadStream` objects. These
// `ReadStream` objects are referred to as the primary and the residual
// streams of the split. The original `ReadStream` can still be read from in
// the same manner as before. Both of the returned `ReadStream` objects can
// also be read from, and the rows returned by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back-to-back in the
// original `ReadStream`. Concretely, it is guaranteed that for streams
// original, primary, and residual, that original[0-j] = primary[0-j] and
// original[j-n] = residual[0-m] once the streams have been read to
// completion.
SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
}
BigQueryReadServer is the server API for BigQueryRead service. All implementations should embed UnimplementedBigQueryReadServer for forward compatibility
BigQueryRead_ReadRowsClient
type BigQueryRead_ReadRowsClient interface {
Recv() (*ReadRowsResponse, error)
grpc.ClientStream
}
BigQueryRead_ReadRowsServer
type BigQueryRead_ReadRowsServer interface {
Send(*ReadRowsResponse) error
grpc.ServerStream
}
BigQueryWriteClient (deprecated)
type BigQueryWriteClient interface {
// Deprecated: Do not use.
// Creates a write stream to the given table.
// Additionally, every table has a special COMMITTED stream named '_default'
// to which data can be written. This stream doesn't need to be created using
// CreateWriteStream. It is a stream that can be used simultaneously by any
// number of clients. Data written to this stream is considered committed as
// soon as an acknowledgement is received.
CreateWriteStream(ctx context.Context, in *CreateWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
// Deprecated: Do not use.
// Appends data to the given stream.
//
// If `offset` is specified, the `offset` is checked against the end of
// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
// attempt is made to append to an offset beyond the current end of the stream
// or `ALREADY_EXISTS` if the user provides an `offset` that has already been
// written to. User can retry with adjusted offset within the same RPC
// stream. If `offset` is not specified, append happens at the end of the
// stream.
//
// The response contains the offset at which the append happened. Responses
// are received in the same order in which requests are sent. There will be
// one response for each successful request. If the `offset` is not set in the
// response, the append did not happen due to an error. If one request
// fails, all subsequent requests will also fail until a successful request
// is made again.
//
// If the stream is of `PENDING` type, data will only be available for read
// operations after the stream is committed.
AppendRows(ctx context.Context, opts ...grpc.CallOption) (BigQueryWrite_AppendRowsClient, error)
// Deprecated: Do not use.
// Gets a write stream.
GetWriteStream(ctx context.Context, in *GetWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
// Deprecated: Do not use.
// Finalize a write stream so that no new data can be appended to the
// stream. Finalize is not supported on the '_default' stream.
FinalizeWriteStream(ctx context.Context, in *FinalizeWriteStreamRequest, opts ...grpc.CallOption) (*FinalizeWriteStreamResponse, error)
// Deprecated: Do not use.
// Atomically commits a group of `PENDING` streams that belong to the same
// `parent` table.
// Streams must be finalized before commit and cannot be committed multiple
// times. Once a stream is committed, data in the stream becomes available
// for read operations.
BatchCommitWriteStreams(ctx context.Context, in *BatchCommitWriteStreamsRequest, opts ...grpc.CallOption) (*BatchCommitWriteStreamsResponse, error)
// Deprecated: Do not use.
// Flushes rows to a BUFFERED stream.
// If users are appending rows to a BUFFERED stream, a flush operation is
// required in order for the rows to become available for reading. A
// flush operation flushes up to any previously flushed offset in a BUFFERED
// stream, to the offset specified in the request.
// Flush is not supported on the _default stream, since it is not BUFFERED.
FlushRows(ctx context.Context, in *FlushRowsRequest, opts ...grpc.CallOption) (*FlushRowsResponse, error)
}
BigQueryWriteClient is the client API for BigQueryWrite service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
Deprecated: Do not use.
func NewBigQueryWriteClient (deprecated)
func NewBigQueryWriteClient(cc grpc.ClientConnInterface) BigQueryWriteClient
Deprecated: Do not use.
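The AppendRows call above is bidirectional; as an illustrative sketch, one request/response round trip (one response arrives per successful request, in order). The helper name is hypothetical and the request is assumed built as in the AppendRowsRequest example earlier.

import (
	"context"

	"cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb"
)

// appendOnce sends a single prepared request and waits for its response.
func appendOnce(ctx context.Context, client storagepb.BigQueryWriteClient, req *storagepb.AppendRowsRequest) (*storagepb.AppendRowsResponse, error) {
	stream, err := client.AppendRows(ctx)
	if err != nil {
		return nil, err
	}
	if err := stream.Send(req); err != nil {
		return nil, err
	}
	resp, err := stream.Recv() // responses arrive in request order
	if cerr := stream.CloseSend(); cerr != nil && err == nil {
		err = cerr
	}
	return resp, err
}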
BigQueryWriteServer (deprecated)
type BigQueryWriteServer interface {
// Deprecated: Do not use.
// Creates a write stream to the given table.
// Additionally, every table has a special COMMITTED stream named '_default'
// to which data can be written. This stream doesn't need to be created using
// CreateWriteStream. It is a stream that can be used simultaneously by any
// number of clients. Data written to this stream is considered committed as
// soon as an acknowledgement is received.
CreateWriteStream(context.Context, *CreateWriteStreamRequest) (*WriteStream, error)
// Deprecated: Do not use.
// Appends data to the given stream.
//
// If `offset` is specified, the `offset` is checked against the end of
// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
// attempt is made to append to an offset beyond the current end of the stream
// or `ALREADY_EXISTS` if the user provides an `offset` that has already been
// written to. User can retry with adjusted offset within the same RPC
// stream. If `offset` is not specified, append happens at the end of the
// stream.
//
// The response contains the offset at which the append happened. Responses
// are received in the same order in which requests are sent. There will be
// one response for each successful request. If the `offset` is not set in the
// response, the append did not happen due to an error. If one request
// fails, all subsequent requests will also fail until a successful request
// is made again.
//
// If the stream is of `PENDING` type, data will only be available for read
// operations after the stream is committed.
AppendRows(BigQueryWrite_AppendRowsServer) error
// Deprecated: Do not use.
// Gets a write stream.
GetWriteStream(context.Context, *GetWriteStreamRequest) (*WriteStream, error)
// Deprecated: Do not use.
// Finalize a write stream so that no new data can be appended to the
// stream. Finalize is not supported on the '_default' stream.
FinalizeWriteStream(context.Context, *FinalizeWriteStreamRequest) (*FinalizeWriteStreamResponse, error)
// Deprecated: Do not use.
// Atomically commits a group of `PENDING` streams that belong to the same
// `parent` table.
// Streams must be finalized before commit and cannot be committed multiple
// times. Once a stream is committed, data in the stream becomes available
// for read operations.
BatchCommitWriteStreams(context.Context, *BatchCommitWriteStreamsRequest) (*BatchCommitWriteStreamsResponse, error)
// Deprecated: Do not use.
// Flushes rows to a BUFFERED stream.
// If users are appending rows to a BUFFERED stream, a flush operation is
// required in order for the rows to become available for reading. A
// flush operation flushes up to any previously flushed offset in a BUFFERED
// stream, to the offset specified in the request.
// Flush is not supported on the _default stream, since it is not BUFFERED.
FlushRows(context.Context, *FlushRowsRequest) (*FlushRowsResponse, error)
}
BigQueryWriteServer is the server API for BigQueryWrite service. All implementations should embed UnimplementedBigQueryWriteServer for forward compatibility
Deprecated: Do not use.
BigQueryWrite_AppendRowsClient
type BigQueryWrite_AppendRowsClient interface {
Send(*AppendRowsRequest) error
Recv() (*AppendRowsResponse, error)
grpc.ClientStream
}
BigQueryWrite_AppendRowsServer
type BigQueryWrite_AppendRowsServer interface {
Send(*AppendRowsResponse) error
Recv() (*AppendRowsRequest, error)
grpc.ServerStream
}
CreateReadSessionRequest
type CreateReadSessionRequest struct {
// Required. The request project that owns the session, in the form of
// `projects/{project_id}`.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. Session to be created.
ReadSession *ReadSession `protobuf:"bytes,2,opt,name=read_session,json=readSession,proto3" json:"read_session,omitempty"`
// Max initial number of streams. If unset or zero, the server will
// choose a number of streams so as to produce reasonable throughput. Must be
// non-negative. The number of streams may be lower than the requested number,
// depending on the amount of parallelism that is reasonable for the table. An
// error is returned if the max count is greater than the current system
// max limit of 1,000.
//
// Streams must be read starting from offset 0.
MaxStreamCount int32 `protobuf:"varint,3,opt,name=max_stream_count,json=maxStreamCount,proto3" json:"max_stream_count,omitempty"`
// contains filtered or unexported fields
}
Request message for CreateReadSession.
func (*CreateReadSessionRequest) Descriptor
func (*CreateReadSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateReadSessionRequest.ProtoReflect.Descriptor instead.
func (*CreateReadSessionRequest) GetMaxStreamCount
func (x *CreateReadSessionRequest) GetMaxStreamCount() int32
func (*CreateReadSessionRequest) GetParent
func (x *CreateReadSessionRequest) GetParent() string
func (*CreateReadSessionRequest) GetReadSession
func (x *CreateReadSessionRequest) GetReadSession() *ReadSession
func (*CreateReadSessionRequest) ProtoMessage
func (*CreateReadSessionRequest) ProtoMessage()
func (*CreateReadSessionRequest) ProtoReflect
func (x *CreateReadSessionRequest) ProtoReflect() protoreflect.Message
func (*CreateReadSessionRequest) Reset
func (x *CreateReadSessionRequest) Reset()
func (*CreateReadSessionRequest) String
func (x *CreateReadSessionRequest) String() string
CreateWriteStreamRequest
type CreateWriteStreamRequest struct {
// Required. Reference to the table to which the stream belongs, in the format
// of `projects/{project}/datasets/{dataset}/tables/{table}`.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. Stream to be created.
WriteStream *WriteStream `protobuf:"bytes,2,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
// contains filtered or unexported fields
}
Request message for CreateWriteStream.
func (*CreateWriteStreamRequest) Descriptor
func (*CreateWriteStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateWriteStreamRequest.ProtoReflect.Descriptor instead.
func (*CreateWriteStreamRequest) GetParent
func (x *CreateWriteStreamRequest) GetParent() string
func (*CreateWriteStreamRequest) GetWriteStream
func (x *CreateWriteStreamRequest) GetWriteStream() *WriteStream
func (*CreateWriteStreamRequest) ProtoMessage
func (*CreateWriteStreamRequest) ProtoMessage()
func (*CreateWriteStreamRequest) ProtoReflect
func (x *CreateWriteStreamRequest) ProtoReflect() protoreflect.Message
func (*CreateWriteStreamRequest) Reset
func (x *CreateWriteStreamRequest) Reset()
func (*CreateWriteStreamRequest) String
func (x *CreateWriteStreamRequest) String() string
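A sketch of creating an explicit PENDING stream, whose rows stay invisible until BatchCommitWriteStreams. WriteStream and its Type enum are defined later in the package (outside this excerpt); resource names are placeholders.

import (
	"context"

	"cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb"
)

// createPendingStream asks the service for a new PENDING write stream.
func createPendingStream(ctx context.Context, client storagepb.BigQueryWriteClient) (*storagepb.WriteStream, error) {
	return client.CreateWriteStream(ctx, &storagepb.CreateWriteStreamRequest{
		Parent:      "projects/my-project/datasets/my_dataset/tables/my_table",
		WriteStream: &storagepb.WriteStream{Type: storagepb.WriteStream_PENDING},
	})
}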
DataFormat
type DataFormat int32
Data format for input or output data.
DataFormat_DATA_FORMAT_UNSPECIFIED, DataFormat_AVRO, DataFormat_ARROW
const (
DataFormat_DATA_FORMAT_UNSPECIFIED DataFormat = 0
// Avro is a standard open source row based file format.
// See https://avro.apache.org/ for more details.
DataFormat_AVRO DataFormat = 1
// Arrow is a standard open source column-based message format.
// See https://arrow.apache.org/ for more details.
DataFormat_ARROW DataFormat = 2
)
func (DataFormat) Descriptor
func (DataFormat) Descriptor() protoreflect.EnumDescriptor
func (DataFormat) Enum
func (x DataFormat) Enum() *DataFormat
func (DataFormat) EnumDescriptor
func (DataFormat) EnumDescriptor() ([]byte, []int)
Deprecated: Use DataFormat.Descriptor instead.
func (DataFormat) Number
func (x DataFormat) Number() protoreflect.EnumNumber
func (DataFormat) String
func (x DataFormat) String() string
func (DataFormat) Type
func (DataFormat) Type() protoreflect.EnumType
FinalizeWriteStreamRequest
type FinalizeWriteStreamRequest struct {
// Required. Name of the stream to finalize, in the form of
// `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
Request message for invoking FinalizeWriteStream.
func (*FinalizeWriteStreamRequest) Descriptor
func (*FinalizeWriteStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use FinalizeWriteStreamRequest.ProtoReflect.Descriptor instead.
func (*FinalizeWriteStreamRequest) GetName
func (x *FinalizeWriteStreamRequest) GetName() string
func (*FinalizeWriteStreamRequest) ProtoMessage
func (*FinalizeWriteStreamRequest) ProtoMessage()
func (*FinalizeWriteStreamRequest) ProtoReflect
func (x *FinalizeWriteStreamRequest) ProtoReflect() protoreflect.Message
func (*FinalizeWriteStreamRequest) Reset
func (x *FinalizeWriteStreamRequest) Reset()
func (*FinalizeWriteStreamRequest) String
func (x *FinalizeWriteStreamRequest) String() string
FinalizeWriteStreamResponse
type FinalizeWriteStreamResponse struct {
// Number of rows in the finalized stream.
RowCount int64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
// contains filtered or unexported fields
}
Response message for FinalizeWriteStream.
func (*FinalizeWriteStreamResponse) Descriptor
func (*FinalizeWriteStreamResponse) Descriptor() ([]byte, []int)
Deprecated: Use FinalizeWriteStreamResponse.ProtoReflect.Descriptor instead.
func (*FinalizeWriteStreamResponse) GetRowCount
func (x *FinalizeWriteStreamResponse) GetRowCount() int64
func (*FinalizeWriteStreamResponse) ProtoMessage
func (*FinalizeWriteStreamResponse) ProtoMessage()
func (*FinalizeWriteStreamResponse) ProtoReflect
func (x *FinalizeWriteStreamResponse) ProtoReflect() protoreflect.Message
func (*FinalizeWriteStreamResponse) Reset
func (x *FinalizeWriteStreamResponse) Reset()
func (*FinalizeWriteStreamResponse) String
func (x *FinalizeWriteStreamResponse) String() string
FlushRowsRequest
type FlushRowsRequest struct {
// Required. The stream that is the target of the flush operation.
WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
// Ending offset of the flush operation. Rows before this offset (including
// this offset) will be flushed.
Offset *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"`
// contains filtered or unexported fields
}
Request message for FlushRows.
func (*FlushRowsRequest) Descriptor
func (*FlushRowsRequest) Descriptor() ([]byte, []int)
Deprecated: Use FlushRowsRequest.ProtoReflect.Descriptor instead.
func (*FlushRowsRequest) GetOffset
func (x *FlushRowsRequest) GetOffset() *wrapperspb.Int64Value
func (*FlushRowsRequest) GetWriteStream
func (x *FlushRowsRequest) GetWriteStream() string
func (*FlushRowsRequest) ProtoMessage
func (*FlushRowsRequest) ProtoMessage()
func (*FlushRowsRequest) ProtoReflect
func (x *FlushRowsRequest) ProtoReflect() protoreflect.Message
func (*FlushRowsRequest) Reset
func (x *FlushRowsRequest) Reset()
func (*FlushRowsRequest) String
func (x *FlushRowsRequest) String() string
FlushRowsResponse
type FlushRowsResponse struct {
// The rows before this offset (including this offset) are flushed.
Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
// contains filtered or unexported fields
}
Response message for FlushRows.
func (*FlushRowsResponse) Descriptor
func (*FlushRowsResponse) Descriptor() ([]byte, []int)
Deprecated: Use FlushRowsResponse.ProtoReflect.Descriptor instead.
func (*FlushRowsResponse) GetOffset
func (x *FlushRowsResponse) GetOffset() int64
func (*FlushRowsResponse) ProtoMessage
func (*FlushRowsResponse) ProtoMessage()
func (*FlushRowsResponse) ProtoReflect
func (x *FlushRowsResponse) ProtoReflect() protoreflect.Message
func (*FlushRowsResponse) Reset
func (x *FlushRowsResponse) Reset()
func (*FlushRowsResponse) String
func (x *FlushRowsResponse) String() string
GetWriteStreamRequest
type GetWriteStreamRequest struct {
// Required. Name of the stream to get, in the form of
// `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
Request message for GetWriteStream.
func (*GetWriteStreamRequest) Descriptor
func (*GetWriteStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetWriteStreamRequest.ProtoReflect.Descriptor instead.
func (*GetWriteStreamRequest) GetName
func (x *GetWriteStreamRequest) GetName() string
func (*GetWriteStreamRequest) ProtoMessage
func (*GetWriteStreamRequest) ProtoMessage()
func (*GetWriteStreamRequest) ProtoReflect
func (x *GetWriteStreamRequest) ProtoReflect() protoreflect.Message
func (*GetWriteStreamRequest) Reset
func (x *GetWriteStreamRequest) Reset()
func (*GetWriteStreamRequest) String
func (x *GetWriteStreamRequest) String() string
ProtoRows
type ProtoRows struct {
// A sequence of rows serialized as a Protocol Buffer.
//
// See https://developers.google.com/protocol-buffers/docs/overview for more
// information on deserializing this field.
SerializedRows [][]byte `protobuf:"bytes,1,rep,name=serialized_rows,json=serializedRows,proto3" json:"serialized_rows,omitempty"`
// contains filtered or unexported fields
}
func (*ProtoRows) Descriptor
func (*ProtoRows) Descriptor() ([]byte, []int)
Deprecated: Use ProtoRows.ProtoReflect.Descriptor instead.
func (*ProtoRows) GetSerializedRows
func (x *ProtoRows) GetSerializedRows() [][]byte
func (*ProtoRows) ProtoMessage
func (*ProtoRows) ProtoMessage()
func (*ProtoRows) ProtoReflect
func (x *ProtoRows) ProtoReflect() protoreflect.Message
func (*ProtoRows) Reset
func (x *ProtoRows) Reset()
func (*ProtoRows) String
func (x *ProtoRows) String() string
ProtoSchema
type ProtoSchema struct {
// Descriptor for input message. The descriptor has to be self contained,
// including all the nested types, except for protocol buffer well-known types
// (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
ProtoDescriptor *descriptorpb.DescriptorProto `protobuf:"bytes,1,opt,name=proto_descriptor,json=protoDescriptor,proto3" json:"proto_descriptor,omitempty"`
// contains filtered or unexported fields
}
ProtoSchema describes the schema of the serialized protocol buffer data rows.
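One way to obtain the self-contained descriptor from a compiled message type is via protodesc from the protobuf module; this suffices when the row message references nothing beyond well-known types, otherwise nested dependencies must be inlined by hand. The helper name is hypothetical.

import (
	"cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
)

// schemaFor derives the ProtoSchema for a generated message type m.
func schemaFor(m proto.Message) *storagepb.ProtoSchema {
	return &storagepb.ProtoSchema{
		ProtoDescriptor: protodesc.ToDescriptorProto(m.ProtoReflect().Descriptor()),
	}
}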
func (*ProtoSchema) Descriptor
func (*ProtoSchema) Descriptor() ([]byte, []int)
Deprecated: Use ProtoSchema.ProtoReflect.Descriptor instead.
func (*ProtoSchema) GetProtoDescriptor
func (x *ProtoSchema) GetProtoDescriptor() *descriptorpb.DescriptorProto
func (*ProtoSchema) ProtoMessage
func (*ProtoSchema) ProtoMessage()
func (*ProtoSchema) ProtoReflect
func (x *ProtoSchema) ProtoReflect() protoreflect.Message
func (*ProtoSchema) Reset
func (x *ProtoSchema) Reset()
func (*ProtoSchema) String
func (x *ProtoSchema) String() string
ReadRowsRequest
type ReadRowsRequest struct {
// Required. Stream to read rows from.
ReadStream string `protobuf:"bytes,1,opt,name=read_stream,json=readStream,proto3" json:"read_stream,omitempty"`
// The offset requested must be less than the last row read from Read.
// Requesting a larger offset is undefined. If not specified, start reading
// from offset zero.
Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
// contains filtered or unexported fields
}
Request message for ReadRows.
func (*ReadRowsRequest) Descriptor
func (*ReadRowsRequest) Descriptor() ([]byte, []int)
Deprecated: Use ReadRowsRequest.ProtoReflect.Descriptor instead.
func (*ReadRowsRequest) GetOffset
func (x *ReadRowsRequest) GetOffset() int64
func (*ReadRowsRequest) GetReadStream
func (x *ReadRowsRequest) GetReadStream() string
func (*ReadRowsRequest) ProtoMessage
func (*ReadRowsRequest) ProtoMessage()
func (*ReadRowsRequest) ProtoReflect
func (x *ReadRowsRequest) ProtoReflect() protoreflect.Message
func (*ReadRowsRequest) Reset
func (x *ReadRowsRequest) Reset()
func (*ReadRowsRequest) String
func (x *ReadRowsRequest) String() string
ReadRowsResponse
type ReadRowsResponse struct {
// Row data is returned in format specified during session creation.
//
// Types that are assignable to Rows:
//
// *ReadRowsResponse_AvroRows
// *ReadRowsResponse_ArrowRecordBatch
Rows isReadRowsResponse_Rows `protobuf_oneof:"rows"`
// Number of serialized rows in the rows block.
RowCount int64 `protobuf:"varint,6,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
// Statistics for the stream.
Stats *StreamStats `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats,omitempty"`
// Throttling state. If unset, the latest response still describes
// the current throttling status.
ThrottleState *ThrottleState `protobuf:"bytes,5,opt,name=throttle_state,json=throttleState,proto3" json:"throttle_state,omitempty"`
// The schema for the read. If read_options.selected_fields is set, the
// schema may be different from the table schema as it will only contain
// the selected fields. This schema is equivalent to the one returned by
// CreateSession. This field is only populated in the first ReadRowsResponse
// RPC.
//
// Types that are assignable to Schema:
//
// *ReadRowsResponse_AvroSchema
// *ReadRowsResponse_ArrowSchema
Schema isReadRowsResponse_Schema `protobuf_oneof:"schema"`
// contains filtered or unexported fields
}
Response from calling ReadRows may include row data, progress and throttling information.
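As an illustrative sketch (not part of the generated file), dispatching on the Rows oneof; which arm is populated follows the session's DataFormat.

import (
	"log"

	"cloud.google.com/go/bigquery/storage/apiv1beta2/storagepb"
)

// handleRows logs the size of whichever serialized block the response carries.
func handleRows(resp *storagepb.ReadRowsResponse) {
	switch rows := resp.GetRows().(type) {
	case *storagepb.ReadRowsResponse_AvroRows:
		log.Printf("avro block: %d bytes", len(rows.AvroRows.GetSerializedBinaryRows()))
	case *storagepb.ReadRowsResponse_ArrowRecordBatch:
		log.Printf("arrow batch: %d bytes", len(rows.ArrowRecordBatch.GetSerializedRecordBatch()))
	}
}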
func (*ReadRowsResponse) Descriptor
func (*ReadRowsResponse) Descriptor() ([]byte, []int)
Deprecated: Use ReadRowsResponse.ProtoReflect.Descriptor instead.
func (*ReadRowsResponse) GetArrowRecordBatch
func (x *ReadRowsResponse) GetArrowRecordBatch() *ArrowRecordBatch
func (*ReadRowsResponse) GetArrowSchema
func (x *ReadRowsResponse) GetArrowSchema() *ArrowSchema
func (*ReadRowsResponse) GetAvroRows
func (x *ReadRowsResponse) GetAvroRows() *AvroRows
func (*ReadRowsResponse) GetAvroSchema
func (x *ReadRowsResponse) GetAvroSchema() *AvroSchema
func (*ReadRowsResponse) GetRowCount
func (x *ReadRowsResponse) GetRowCount() int64
func (*ReadRowsResponse) GetRows
func (m *ReadRowsResponse) GetRows() isReadRowsResponse_Rows
func (*ReadRowsResponse) GetSchema
func (m *ReadRowsResponse) GetSchema() isReadRowsResponse_Schema
func (*ReadRowsResponse) GetStats
func (x *ReadRowsResponse) GetStats() *StreamStats
func (*ReadRowsResponse) GetThrottleState
func (x *ReadRowsResponse) GetThrottleState() *ThrottleState
func (*ReadRowsResponse) ProtoMessage
func (*ReadRowsResponse) ProtoMessage()
func (*ReadRowsResponse) ProtoReflect
func (x *ReadRowsResponse) ProtoReflect() protoreflect.Message
func (*ReadRowsResponse) Reset
func (x *ReadRowsResponse) Reset()
func (*ReadRowsResponse) String
func (x *ReadRowsResponse) String() string
ReadRowsResponse_ArrowRecordBatch
type ReadRowsResponse_ArrowRecordBatch struct {
// Serialized row data in Arrow RecordBatch format.
ArrowRecordBatch *ArrowRecordBatch `protobuf:"bytes,4,opt,name=arrow_record_batch,json=arrowRecordBatch,proto3,oneof"`
}
ReadRowsResponse_ArrowSchema
type ReadRowsResponse_ArrowSchema struct {
// Output only. Arrow schema.
ArrowSchema *ArrowSchema `protobuf:"bytes,8,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"`
}
ReadRowsResponse_AvroRows
type ReadRowsResponse_AvroRows struct {
// Serialized row data in AVRO format.
AvroRows *AvroRows `protobuf:"bytes,3,opt,name=avro_rows,json=avroRows,proto3,oneof"`
}
ReadRowsResponse_AvroSchema
type ReadRowsResponse_AvroSchema struct {
// Output only. Avro schema.
AvroSchema *AvroSchema `protobuf:"bytes,7,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}
ReadSession
type ReadSession struct {
// Output only. Unique identifier for the session, in the form
// `projects/{project_id}/locations/{location}/sessions/{session_id}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Output only. Time at which the session becomes invalid. After this time, subsequent
// requests to read this Session will return errors. The expire_time is
// automatically assigned and currently cannot be specified or updated.
ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
// Immutable. Data format of the output data.
DataFormat DataFormat `protobuf:"varint,3,opt,name=data_format,json=dataFormat,proto3,enum=google.cloud.bigquery.storage.v1beta2.DataFormat" json:"data_format,omitempty"`
// The schema for the read. If read_options.selected_fields is set, the
// schema may be different from the table schema as it will only contain
// the selected fields.
//
// Types that are assignable to Schema:
//
// *ReadSession_AvroSchema
// *ReadSession_ArrowSchema
Schema isReadSession_Schema `protobuf_oneof:"schema"`
// Immutable. Table that this ReadSession is reading from, in the form
// `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
Table string `protobuf:"bytes,6,opt,name=table,proto3" json:"table,omitempty"`
// Optional. Any modifiers which are applied when reading from the specified table.
TableModifiers *ReadSession_TableModifiers `protobuf:"bytes,7,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"`
// Optional. Read options for this session (e.g. column selection, filters).
ReadOptions *ReadSession_TableReadOptions `protobuf:"bytes,8,opt,name=read_options,json=readOptions,proto3" json:"read_options,omitempty"`
// Output only. A list of streams created with the session.
//
// At least one stream is created with the session. In the future, larger
// request_stream_count values *may* result in this list being unpopulated,
// in that case, the user will need to use a List method to get the streams
// instead, which is not yet available.
Streams []*ReadStream `protobuf:"bytes,10,rep,name=streams,proto3" json:"streams,omitempty"`
// contains filtered or unexported fields
}
Information about the ReadSession.
func (*ReadSession) Descriptor
func (*ReadSession) Descriptor() ([]byte, []int)
Deprecated: Use ReadSession.ProtoReflect.Descriptor instead.
func (*ReadSession) GetArrowSchema
func (x *ReadSession) GetArrowSchema() *ArrowSchema
func (*ReadSession) GetAvroSchema
func (x *ReadSession) GetAvroSchema() *AvroSchema
func (*ReadSession) GetDataFormat
func (x *ReadSession) GetDataFormat() DataFormat
func (*ReadSession) GetExpireTime
func (x *ReadSession) GetExpireTime() *timestamppb.Timestamp
func (*ReadSession) GetName
func (x *ReadSession) GetName() string
func (*ReadSession) GetReadOptions
func (x *ReadSession) GetReadOptions() *ReadSession_TableReadOptions
func (*ReadSession) GetSchema
func (m *ReadSession) GetSchema() isReadSession_Schema
func (*ReadSession) GetStreams
func (x *ReadSession) GetStreams() []*ReadStream
func (*ReadSession) GetTable
func (x *ReadSession) GetTable() string
func (*ReadSession) GetTableModifiers
func (x *ReadSession) GetTableModifiers() *ReadSession_TableModifiers
func (*ReadSession) ProtoMessage
func (*ReadSession) ProtoMessage()
func (*ReadSession) ProtoReflect
func (x *ReadSession) ProtoReflect() protoreflect.Message
func (*ReadSession) Reset
func (x *ReadSession) Reset()
func (*ReadSession) String
func (x *ReadSession) String() string
ReadSession_ArrowSchema
type ReadSession_ArrowSchema struct {
// Output only. Arrow schema.
ArrowSchema *ArrowSchema `protobuf:"bytes,5,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"`
}
ReadSession_AvroSchema
type ReadSession_AvroSchema struct {
// Output only. Avro schema.
AvroSchema *AvroSchema `protobuf:"bytes,4,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}
ReadSession_TableModifiers
type ReadSession_TableModifiers struct {
// The snapshot time of the table. If not set, interpreted as now.
SnapshotTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"`
// contains filtered or unexported fields
}
Additional attributes when reading a table.
func (*ReadSession_TableModifiers) Descriptor
func (*ReadSession_TableModifiers) Descriptor() ([]byte, []int)
Deprecated: Use ReadSession_TableModifiers.ProtoReflect.Descriptor instead.
func (*ReadSession_TableModifiers) GetSnapshotTime
func (x *ReadSession_TableModifiers) GetSnapshotTime() *timestamppb.Timestamp
func (*ReadSession_TableModifiers) ProtoMessage
func (*ReadSession_TableModifiers) ProtoMessage()
func (*ReadSession_TableModifiers) ProtoReflect
func (x *ReadSession_TableModifiers) ProtoReflect() protoreflect.Message
func (*ReadSession_TableModifiers) Reset
func (x *ReadSession_TableModifiers) Reset()
func (*ReadSession_TableModifiers) String
func (x *ReadSession_TableModifiers) String() string
ReadSession_TableReadOptions
type ReadSession_TableReadOptions struct {
// Names of the fields in the table that should be read. If empty, all
// fields will be read. If the specified field is a nested field, all
// the sub-fields in the field will be selected. The output field order is
// unrelated to the order of fields in selected_fields.
SelectedFields []string `protobuf:"bytes,1,rep,name=selected_fields,json=selectedFields,proto3" json:"selected_fields,omitempty"`
// SQL text filtering statement, similar to a WHERE clause in a query.
// Aggregates are not supported.
//
// Examples: "int_field > 5"
//
// "date_field = CAST('2014-9-27' as DATE)"
// "nullable_field is not NULL"
// "st_equals(geo_field, st_geofromtext('POINT(2, 2)'))"
// "numeric_field BETWEEN 1.0 AND 5.0"
//
// Restricted to a maximum length of 1 MB.
RowRestriction string `protobuf:"bytes,2,opt,name=row_restriction,json=rowRestriction,proto3" json:"row_restriction,omitempty"`
// Optional. Options specific to the Apache Arrow output format.
ArrowSerializationOptions *ArrowSerializationOptions `protobuf:"bytes,3,opt,name=arrow_serialization_options,json=arrowSerializationOptions,proto3" json:"arrow_serialization_options,omitempty"`
// contains filtered or unexported fields
}
Options dictating how we read a table.
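A small, hedged sketch of constructing these options; the column names in the filter are illustrative only. The resulting message would be set as read_options on the ReadSession passed to CreateReadSession, as in the sketch above.

// Sketch: read two columns and filter rows server-side.
var readOpts = &storagepb.ReadSession_TableReadOptions{
	// Read only these columns; for a STRUCT column, all of its
	// sub-fields are selected automatically.
	SelectedFields: []string{"int_field", "date_field"},
	// Row filter, evaluated before rows are assigned to streams.
	RowRestriction: "int_field > 5",
}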
func (*ReadSession_TableReadOptions) Descriptor
func (*ReadSession_TableReadOptions) Descriptor() ([]byte, []int)
Deprecated: Use ReadSession_TableReadOptions.ProtoReflect.Descriptor instead.
func (*ReadSession_TableReadOptions) GetArrowSerializationOptions
func (x *ReadSession_TableReadOptions) GetArrowSerializationOptions() *ArrowSerializationOptions
func (*ReadSession_TableReadOptions) GetRowRestriction
func (x *ReadSession_TableReadOptions) GetRowRestriction() string
func (*ReadSession_TableReadOptions) GetSelectedFields
func (x *ReadSession_TableReadOptions) GetSelectedFields() []string
func (*ReadSession_TableReadOptions) ProtoMessage
func (*ReadSession_TableReadOptions) ProtoMessage()
func (*ReadSession_TableReadOptions) ProtoReflect
func (x *ReadSession_TableReadOptions) ProtoReflect() protoreflect.Message
func (*ReadSession_TableReadOptions) Reset
func (x *ReadSession_TableReadOptions) Reset()
func (*ReadSession_TableReadOptions) String
func (x *ReadSession_TableReadOptions) String() string
ReadStream
type ReadStream struct {
// Output only. Name of the stream, in the form
// `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
Information about a single stream that gets data out of the storage system.
Most of the information about ReadStream instances is aggregated, making
ReadStream lightweight.
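To show where ReadStream names are used, here is a hedged sketch that drains one stream of a session via the generated stub's ReadRows call. Decoding of the Avro or Arrow payload is left out, and the client and session values are assumed to come from a CreateReadSession call like the sketch earlier in this page.

// Sketch: read all rows from the first stream of a session.
// Assumed import: io.
func drainFirstStream(ctx context.Context, client storagepb.BigQueryReadClient, session *storagepb.ReadSession) error {
	rows, err := client.ReadRows(ctx, &storagepb.ReadRowsRequest{
		ReadStream: session.GetStreams()[0].GetName(),
	})
	if err != nil {
		return err
	}
	for {
		resp, err := rows.Recv()
		if err == io.EOF {
			return nil // all rows for this stream have been delivered
		}
		if err != nil {
			return err
		}
		_ = resp.GetRowCount() // decode resp's Avro/Arrow payload here
	}
}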
func (*ReadStream) Descriptor
func (*ReadStream) Descriptor() ([]byte, []int)
Deprecated: Use ReadStream.ProtoReflect.Descriptor instead.
func (*ReadStream) GetName
func (x *ReadStream) GetName() string
func (*ReadStream) ProtoMessage
func (*ReadStream) ProtoMessage()
func (*ReadStream) ProtoReflect
func (x *ReadStream) ProtoReflect() protoreflect.Message
func (*ReadStream) Reset
func (x *ReadStream) Reset()
func (*ReadStream) String
func (x *ReadStream) String() string
SplitReadStreamRequest
type SplitReadStreamRequest struct {
// Required. Name of the stream to split.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// A value in the range (0.0, 1.0) that specifies the fractional point at
// which the original stream should be split. The actual split point is
// evaluated on pre-filtered rows, so if a filter is provided, then there is
// no guarantee that the division of the rows between the new child streams
// will be proportional to this fractional value. Additionally, because the
// server-side unit for assigning data is collections of rows, this fraction
// will always map to a data storage boundary on the server side.
Fraction float64 `protobuf:"fixed64,2,opt,name=fraction,proto3" json:"fraction,omitempty"`
// contains filtered or unexported fields
}
Request message for SplitReadStream.
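A hedged sketch of issuing the split; the client and stream values are assumed to come from the read-session sketches above, and 0.5 simply asks for a roughly even division subject to the storage-boundary caveat documented on the fraction field.

// Sketch: ask the server to split a stream near the halfway point.
func splitInHalf(ctx context.Context, client storagepb.BigQueryReadClient, stream *storagepb.ReadStream) (*storagepb.SplitReadStreamResponse, error) {
	// An empty primary or remainder stream in the response means the
	// original stream can no longer be split.
	return client.SplitReadStream(ctx, &storagepb.SplitReadStreamRequest{
		Name:     stream.GetName(),
		Fraction: 0.5,
	})
}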
func (*SplitReadStreamRequest) Descriptor
func (*SplitReadStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use SplitReadStreamRequest.ProtoReflect.Descriptor instead.
func (*SplitReadStreamRequest) GetFraction
func (x *SplitReadStreamRequest) GetFraction() float64
func (*SplitReadStreamRequest) GetName
func (x *SplitReadStreamRequest) GetName() string
func (*SplitReadStreamRequest) ProtoMessage
func (*SplitReadStreamRequest) ProtoMessage()
func (*SplitReadStreamRequest) ProtoReflect
func (x *SplitReadStreamRequest) ProtoReflect() protoreflect.Message
func (*SplitReadStreamRequest) Reset
func (x *SplitReadStreamRequest) Reset()
func (*SplitReadStreamRequest) String
func (x *SplitReadStreamRequest) String() string
SplitReadStreamResponse
type SplitReadStreamResponse struct {
// Primary stream, which contains the beginning portion of
// |original_stream|. An empty value indicates that the original stream can no
// longer be split.
PrimaryStream *ReadStream `protobuf:"bytes,1,opt,name=primary_stream,json=primaryStream,proto3" json:"primary_stream,omitempty"`
// Remainder stream, which contains the tail of |original_stream|. An empty
// value indicates that the original stream can no longer be split.
RemainderStream *ReadStream `protobuf:"bytes,2,opt,name=remainder_stream,json=remainderStream,proto3" json:"remainder_stream,omitempty"`
// contains filtered or unexported fields
}
Response message for SplitReadStream.
func (*SplitReadStreamResponse) Descriptor
func (*SplitReadStreamResponse) Descriptor() ([]byte, []int)
Deprecated: Use SplitReadStreamResponse.ProtoReflect.Descriptor instead.
func (*SplitReadStreamResponse) GetPrimaryStream
func (x *SplitReadStreamResponse) GetPrimaryStream() *ReadStream
func (*SplitReadStreamResponse) GetRemainderStream
func (x *SplitReadStreamResponse) GetRemainderStream() *ReadStream
func (*SplitReadStreamResponse) ProtoMessage
func (*SplitReadStreamResponse) ProtoMessage()
func (*SplitReadStreamResponse) ProtoReflect
func (x *SplitReadStreamResponse) ProtoReflect() protoreflect.Message
func (*SplitReadStreamResponse) Reset
func (x *SplitReadStreamResponse) Reset()
func (*SplitReadStreamResponse) String
func (x *SplitReadStreamResponse) String() string
StorageError
type StorageError struct {
// BigQuery Storage specific error code.
Code StorageError_StorageErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=google.cloud.bigquery.storage.v1beta2.StorageError_StorageErrorCode" json:"code,omitempty"`
// Name of the failed entity.
Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"`
// Message that describes the error.
ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
// contains filtered or unexported fields
}
Structured custom BigQuery Storage error message. The error can be attached as error details in the returned rpc Status. In particular, the use of error codes allows more structured error handling, and reduces the need to evaluate unstructured error text strings.
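Because the message arrives as details on the rpc Status, a caller typically recovers it with the grpc status package. A hedged sketch:

// Sketch: extract a StorageError from a gRPC error, if one was attached.
// Assumed import: google.golang.org/grpc/status.
func storageErrorFrom(err error) (*storagepb.StorageError, bool) {
	s, ok := status.FromError(err)
	if !ok {
		return nil, false
	}
	for _, d := range s.Details() {
		// Details are unmarshaled into their proto message types.
		if se, ok := d.(*storagepb.StorageError); ok {
			return se, true
		}
	}
	return nil, false
}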
func (*StorageError) Descriptor
func (*StorageError) Descriptor() ([]byte, []int)
Deprecated: Use StorageError.ProtoReflect.Descriptor instead.
func (*StorageError) GetCode
func (x *StorageError) GetCode() StorageError_StorageErrorCode
func (*StorageError) GetEntity
func (x *StorageError) GetEntity() string
func (*StorageError) GetErrorMessage
func (x *StorageError) GetErrorMessage() string
func (*StorageError) ProtoMessage
func (*StorageError) ProtoMessage()
func (*StorageError) ProtoReflect
func (x *StorageError) ProtoReflect() protoreflect.Message
func (*StorageError) Reset
func (x *StorageError) Reset()
func (*StorageError) String
func (x *StorageError) String() string
StorageError_StorageErrorCode
type StorageError_StorageErrorCode int32
Error code for StorageError.
StorageError_STORAGE_ERROR_CODE_UNSPECIFIED, StorageError_TABLE_NOT_FOUND, StorageError_STREAM_ALREADY_COMMITTED, StorageError_STREAM_NOT_FOUND, StorageError_INVALID_STREAM_TYPE, StorageError_INVALID_STREAM_STATE, StorageError_STREAM_FINALIZED
const (
// Default error.
StorageError_STORAGE_ERROR_CODE_UNSPECIFIED StorageError_StorageErrorCode = 0
// Table is not found in the system.
StorageError_TABLE_NOT_FOUND StorageError_StorageErrorCode = 1
// Stream is already committed.
StorageError_STREAM_ALREADY_COMMITTED StorageError_StorageErrorCode = 2
// Stream is not found.
StorageError_STREAM_NOT_FOUND StorageError_StorageErrorCode = 3
// Invalid Stream type.
// For example, you try to commit a stream that is not pending.
StorageError_INVALID_STREAM_TYPE StorageError_StorageErrorCode = 4
// Invalid Stream state.
// For example, you try to commit a stream that is not finalized or has
// been garbage collected.
StorageError_INVALID_STREAM_STATE StorageError_StorageErrorCode = 5
// Stream is finalized.
StorageError_STREAM_FINALIZED StorageError_StorageErrorCode = 6
)
func (StorageError_StorageErrorCode) Descriptor
func (StorageError_StorageErrorCode) Descriptor() protoreflect.EnumDescriptor
func (StorageError_StorageErrorCode) Enum
func (x StorageError_StorageErrorCode) Enum() *StorageError_StorageErrorCode
func (StorageError_StorageErrorCode) EnumDescriptor
func (StorageError_StorageErrorCode) EnumDescriptor() ([]byte, []int)
Deprecated: Use StorageError_StorageErrorCode.Descriptor instead.
func (StorageError_StorageErrorCode) Number
func (x StorageError_StorageErrorCode) Number() protoreflect.EnumNumber
func (StorageError_StorageErrorCode) String
func (x StorageError_StorageErrorCode) String() string
func (StorageError_StorageErrorCode) Type
func (StorageError_StorageErrorCode) Type() protoreflect.EnumType
StreamStats
type StreamStats struct {
// Represents the progress of the current stream.
Progress *StreamStats_Progress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
// contains filtered or unexported fields
}
Estimated stream statistics for a given Stream.
func (*StreamStats) Descriptor
func (*StreamStats) Descriptor() ([]byte, []int)
Deprecated: Use StreamStats.ProtoReflect.Descriptor instead.
func (*StreamStats) GetProgress
func (x *StreamStats) GetProgress() *StreamStats_Progress
func (*StreamStats) ProtoMessage
func (*StreamStats) ProtoMessage()
func (*StreamStats) ProtoReflect
func (x *StreamStats) ProtoReflect() protoreflect.Message
func (*StreamStats) Reset
func (x *StreamStats) Reset()
func (*StreamStats) String
func (x *StreamStats) String() string
StreamStats_Progress
type StreamStats_Progress struct {
// The fraction of rows assigned to the stream that have been processed by
// the server so far, not including the rows in the current response
// message.
//
// This value, along with `at_response_end`, can be used to interpolate
// the progress made as the rows in the message are being processed using
// the following formula: `at_response_start + (at_response_end -
// at_response_start) * rows_processed_from_response / rows_in_response`.
//
// Note that if a filter is provided, the `at_response_end` value of the
// previous response may not necessarily be equal to the
// `at_response_start` value of the current response.
AtResponseStart float64 `protobuf:"fixed64,1,opt,name=at_response_start,json=atResponseStart,proto3" json:"at_response_start,omitempty"`
// Similar to `at_response_start`, except that this value includes the
// rows in the current response.
AtResponseEnd float64 `protobuf:"fixed64,2,opt,name=at_response_end,json=atResponseEnd,proto3" json:"at_response_end,omitempty"`
// contains filtered or unexported fields
}
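The interpolation formula in the field comments translates directly into a small helper. This is a sketch: rowsProcessed and rowsInResponse are supplied by the caller's own bookkeeping as it consumes a ReadRowsResponse.

// Sketch: estimate overall progress while processing rows of one response.
func interpolateProgress(p *storagepb.StreamStats_Progress, rowsProcessed, rowsInResponse int64) float64 {
	if rowsInResponse == 0 {
		return p.GetAtResponseEnd()
	}
	frac := float64(rowsProcessed) / float64(rowsInResponse)
	// at_response_start + (at_response_end - at_response_start) *
	// rows_processed_from_response / rows_in_response
	return p.GetAtResponseStart() +
		(p.GetAtResponseEnd()-p.GetAtResponseStart())*frac
}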
func (*StreamStats_Progress) Descriptor
func (*StreamStats_Progress) Descriptor() ([]byte, []int)
Deprecated: Use StreamStats_Progress.ProtoReflect.Descriptor instead.
func (*StreamStats_Progress) GetAtResponseEnd
func (x *StreamStats_Progress) GetAtResponseEnd() float64
func (*StreamStats_Progress) GetAtResponseStart
func (x *StreamStats_Progress) GetAtResponseStart() float64
func (*StreamStats_Progress) ProtoMessage
func (*StreamStats_Progress) ProtoMessage()
func (*StreamStats_Progress) ProtoReflect
func (x *StreamStats_Progress) ProtoReflect() protoreflect.Message
func (*StreamStats_Progress) Reset
func (x *StreamStats_Progress) Reset()
func (*StreamStats_Progress) String
func (x *StreamStats_Progress) String() string
TableFieldSchema
type TableFieldSchema struct {
// Required. The field name. The name must contain only letters (a-z, A-Z),
// numbers (0-9), or underscores (_), and must start with a letter or
// underscore. The maximum length is 128 characters.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Required. The field data type.
Type TableFieldSchema_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.cloud.bigquery.storage.v1beta2.TableFieldSchema_Type" json:"type,omitempty"`
// Optional. The field mode. The default value is NULLABLE.
Mode TableFieldSchema_Mode `protobuf:"varint,3,opt,name=mode,proto3,enum=google.cloud.bigquery.storage.v1beta2.TableFieldSchema_Mode" json:"mode,omitempty"`
// Optional. Describes the nested schema fields if the type property is set to STRUCT.
Fields []*TableFieldSchema `protobuf:"bytes,4,rep,name=fields,proto3" json:"fields,omitempty"`
// Optional. The field description. The maximum length is 1,024 characters.
Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
// contains filtered or unexported fields
}
A field in TableSchema.
func (*TableFieldSchema) Descriptor
func (*TableFieldSchema) Descriptor() ([]byte, []int)
Deprecated: Use TableFieldSchema.ProtoReflect.Descriptor instead.
func (*TableFieldSchema) GetDescription
func (x *TableFieldSchema) GetDescription() string
func (*TableFieldSchema) GetFields
func (x *TableFieldSchema) GetFields() []*TableFieldSchema
func (*TableFieldSchema) GetMode
func (x *TableFieldSchema) GetMode() TableFieldSchema_Mode
func (*TableFieldSchema) GetName
func (x *TableFieldSchema) GetName() string
func (*TableFieldSchema) GetType
func (x *TableFieldSchema) GetType() TableFieldSchema_Type
func (*TableFieldSchema) ProtoMessage
func (*TableFieldSchema) ProtoMessage()
func (*TableFieldSchema) ProtoReflect
func (x *TableFieldSchema) ProtoReflect() protoreflect.Message
func (*TableFieldSchema) Reset
func (x *TableFieldSchema) Reset()
func (*TableFieldSchema) String
func (x *TableFieldSchema) String() string
TableFieldSchema_Mode
type TableFieldSchema_Mode int32
TableFieldSchema_MODE_UNSPECIFIED, TableFieldSchema_NULLABLE, TableFieldSchema_REQUIRED, TableFieldSchema_REPEATED
const (
// Illegal value
TableFieldSchema_MODE_UNSPECIFIED TableFieldSchema_Mode = 0
TableFieldSchema_NULLABLE TableFieldSchema_Mode = 1
TableFieldSchema_REQUIRED TableFieldSchema_Mode = 2
TableFieldSchema_REPEATED TableFieldSchema_Mode = 3
)
func (TableFieldSchema_Mode) Descriptor
func (TableFieldSchema_Mode) Descriptor() protoreflect.EnumDescriptor
func (TableFieldSchema_Mode) Enum
func (x TableFieldSchema_Mode) Enum() *TableFieldSchema_Mode
func (TableFieldSchema_Mode) EnumDescriptor
func (TableFieldSchema_Mode) EnumDescriptor() ([]byte, []int)
Deprecated: Use TableFieldSchema_Mode.Descriptor instead.
func (TableFieldSchema_Mode) Number
func (x TableFieldSchema_Mode) Number() protoreflect.EnumNumber
func (TableFieldSchema_Mode) String
func (x TableFieldSchema_Mode) String() string
func (TableFieldSchema_Mode) Type
func (TableFieldSchema_Mode) Type() protoreflect.EnumType
TableFieldSchema_Type
type TableFieldSchema_Type int32
TableFieldSchema_TYPE_UNSPECIFIED, TableFieldSchema_STRING, TableFieldSchema_INT64, TableFieldSchema_DOUBLE, TableFieldSchema_STRUCT, TableFieldSchema_BYTES, TableFieldSchema_BOOL, TableFieldSchema_TIMESTAMP, TableFieldSchema_DATE, TableFieldSchema_TIME, TableFieldSchema_DATETIME, TableFieldSchema_GEOGRAPHY, TableFieldSchema_NUMERIC, TableFieldSchema_BIGNUMERIC, TableFieldSchema_INTERVAL, TableFieldSchema_JSON
const (
// Illegal value
TableFieldSchema_TYPE_UNSPECIFIED TableFieldSchema_Type = 0
// 64K, UTF8
TableFieldSchema_STRING TableFieldSchema_Type = 1
// 64-bit signed
TableFieldSchema_INT64 TableFieldSchema_Type = 2
// 64-bit IEEE floating point
TableFieldSchema_DOUBLE TableFieldSchema_Type = 3
// Aggregate type
TableFieldSchema_STRUCT TableFieldSchema_Type = 4
// 64K, Binary
TableFieldSchema_BYTES TableFieldSchema_Type = 5
// 2-valued
TableFieldSchema_BOOL TableFieldSchema_Type = 6
// 64-bit signed usec since UTC epoch
TableFieldSchema_TIMESTAMP TableFieldSchema_Type = 7
// Civil date - Year, Month, Day
TableFieldSchema_DATE TableFieldSchema_Type = 8
// Civil time - Hour, Minute, Second, Microseconds
TableFieldSchema_TIME TableFieldSchema_Type = 9
// Combination of civil date and civil time
TableFieldSchema_DATETIME TableFieldSchema_Type = 10
// Geography object
TableFieldSchema_GEOGRAPHY TableFieldSchema_Type = 11
// Numeric value
TableFieldSchema_NUMERIC TableFieldSchema_Type = 12
// BigNumeric value
TableFieldSchema_BIGNUMERIC TableFieldSchema_Type = 13
// Interval
TableFieldSchema_INTERVAL TableFieldSchema_Type = 14
// JSON, String
TableFieldSchema_JSON TableFieldSchema_Type = 15
)
func (TableFieldSchema_Type) Descriptor
func (TableFieldSchema_Type) Descriptor() protoreflect.EnumDescriptor
func (TableFieldSchema_Type) Enum
func (x TableFieldSchema_Type) Enum() *TableFieldSchema_Type
func (TableFieldSchema_Type) EnumDescriptor
func (TableFieldSchema_Type) EnumDescriptor() ([]byte, []int)
Deprecated: Use TableFieldSchema_Type.Descriptor instead.
func (TableFieldSchema_Type) Number
func (x TableFieldSchema_Type) Number() protoreflect.EnumNumber
func (TableFieldSchema_Type) String
func (x TableFieldSchema_Type) String() string
func (TableFieldSchema_Type) Type
func (TableFieldSchema_Type) Type() protoreflect.EnumType
TableSchema
type TableSchema struct {
// Describes the fields in a table.
Fields []*TableFieldSchema `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"`
// contains filtered or unexported fields
}
Schema of a table.
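A hedged sketch of building a schema by hand, with a nested STRUCT column; all field names here are illustrative placeholders.

// Sketch: a REQUIRED string column plus a STRUCT column with sub-fields.
var schema = &storagepb.TableSchema{
	Fields: []*storagepb.TableFieldSchema{
		{Name: "id", Type: storagepb.TableFieldSchema_STRING, Mode: storagepb.TableFieldSchema_REQUIRED},
		{
			Name: "address",
			Type: storagepb.TableFieldSchema_STRUCT,
			// Sub-fields describe the nested schema of the STRUCT.
			Fields: []*storagepb.TableFieldSchema{
				{Name: "city", Type: storagepb.TableFieldSchema_STRING},
				{Name: "zip", Type: storagepb.TableFieldSchema_INT64},
			},
		},
	},
}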
func (*TableSchema) Descriptor
func (*TableSchema) Descriptor() ([]byte, []int)
Deprecated: Use TableSchema.ProtoReflect.Descriptor instead.
func (*TableSchema) GetFields
func (x *TableSchema) GetFields() []*TableFieldSchema
func (*TableSchema) ProtoMessage
func (*TableSchema) ProtoMessage()
func (*TableSchema) ProtoReflect
func (x *TableSchema) ProtoReflect() protoreflect.Message
func (*TableSchema) Reset
func (x *TableSchema) Reset()
func (*TableSchema) String
func (x *TableSchema) String() string
ThrottleState
type ThrottleState struct {
// How much this connection is being throttled. Zero means no throttling,
// 100 means fully throttled.
ThrottlePercent int32 `protobuf:"varint,1,opt,name=throttle_percent,json=throttlePercent,proto3" json:"throttle_percent,omitempty"`
// contains filtered or unexported fields
}
Information on whether the current connection is being throttled.
func (*ThrottleState) Descriptor
func (*ThrottleState) Descriptor() ([]byte, []int)
Deprecated: Use ThrottleState.ProtoReflect.Descriptor instead.
func (*ThrottleState) GetThrottlePercent
func (x *ThrottleState) GetThrottlePercent() int32
func (*ThrottleState) ProtoMessage
func (*ThrottleState) ProtoMessage()
func (*ThrottleState) ProtoReflect
func (x *ThrottleState) ProtoReflect() protoreflect.Message
func (*ThrottleState) Reset
func (x *ThrottleState) Reset()
func (*ThrottleState) String
func (x *ThrottleState) String() string
UnimplementedBigQueryReadServer
type UnimplementedBigQueryReadServer struct {
}
UnimplementedBigQueryReadServer should be embedded to have forward compatible implementations.
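For server implementers, a hedged sketch of the intended embedding. Only one RPC is overridden; the others fall through to the embedded type, which (in the usual generated-code pattern) responds with codes.Unimplemented.

// Sketch: a partial BigQueryRead server that stays compilable when new
// RPCs are added to the service definition.
type myReadServer struct {
	storagepb.UnimplementedBigQueryReadServer
}

func (s *myReadServer) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) {
	// Custom handling goes here; ReadRows and SplitReadStream fall back
	// to the embedded implementation.
	return &storagepb.ReadSession{}, nil
}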
func (UnimplementedBigQueryReadServer) CreateReadSession
func (UnimplementedBigQueryReadServer) CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
func (UnimplementedBigQueryReadServer) ReadRows
func (UnimplementedBigQueryReadServer) ReadRows(*ReadRowsRequest, BigQueryRead_ReadRowsServer) error
func (UnimplementedBigQueryReadServer) SplitReadStream
func (UnimplementedBigQueryReadServer) SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
UnimplementedBigQueryWriteServer
type UnimplementedBigQueryWriteServer struct {
}
UnimplementedBigQueryWriteServer should be embedded to have forward compatible implementations.
func (UnimplementedBigQueryWriteServer) AppendRows
func (UnimplementedBigQueryWriteServer) AppendRows(BigQueryWrite_AppendRowsServer) error
func (UnimplementedBigQueryWriteServer) BatchCommitWriteStreams
func (UnimplementedBigQueryWriteServer) BatchCommitWriteStreams(context.Context, *BatchCommitWriteStreamsRequest) (*BatchCommitWriteStreamsResponse, error)
func (UnimplementedBigQueryWriteServer) CreateWriteStream
func (UnimplementedBigQueryWriteServer) CreateWriteStream(context.Context, *CreateWriteStreamRequest) (*WriteStream, error)
func (UnimplementedBigQueryWriteServer) FinalizeWriteStream
func (UnimplementedBigQueryWriteServer) FinalizeWriteStream(context.Context, *FinalizeWriteStreamRequest) (*FinalizeWriteStreamResponse, error)
func (UnimplementedBigQueryWriteServer) FlushRows
func (UnimplementedBigQueryWriteServer) FlushRows(context.Context, *FlushRowsRequest) (*FlushRowsResponse, error)
func (UnimplementedBigQueryWriteServer) GetWriteStream
func (UnimplementedBigQueryWriteServer) GetWriteStream(context.Context, *GetWriteStreamRequest) (*WriteStream, error)
UnsafeBigQueryReadServer
type UnsafeBigQueryReadServer interface {
// contains filtered or unexported methods
}
UnsafeBigQueryReadServer may be embedded to opt out of forward compatibility for this service. Use of this interface is not recommended, as added methods to BigQueryReadServer will result in compilation errors.
UnsafeBigQueryWriteServer
type UnsafeBigQueryWriteServer interface {
// contains filtered or unexported methods
}
UnsafeBigQueryWriteServer may be embedded to opt out of forward compatibility for this service. Use of this interface is not recommended, as added methods to BigQueryWriteServer will result in compilation errors.
WriteStream
type WriteStream struct {
// Output only. Name of the stream, in the form
// `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Immutable. Type of the stream.
Type WriteStream_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.cloud.bigquery.storage.v1beta2.WriteStream_Type" json:"type,omitempty"`
// Output only. Create time of the stream. For the _default stream, this is the
// creation_time of the table.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// Output only. Commit time of the stream.
// If a stream is of `COMMITTED` type, its commit_time is the same as
// `create_time`. If the stream is of `PENDING` type, an empty commit_time
// means it is not committed.
CommitTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=commit_time,json=commitTime,proto3" json:"commit_time,omitempty"`
// Output only. The schema of the destination table. It is only returned in
// the `CreateWriteStream` response. Callers should generate data
// compatible with this schema to send in the initial `AppendRowsRequest`.
// The table schema may go out of date during the lifetime of the stream.
TableSchema *TableSchema `protobuf:"bytes,5,opt,name=table_schema,json=tableSchema,proto3" json:"table_schema,omitempty"`
// contains filtered or unexported fields
}
Information about a single stream that gets data into the storage system.
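A hedged sketch of creating a PENDING stream with the generated stub; the parent path is a placeholder and conn is an existing gRPC connection, as in the read-side sketches above.

// Sketch: create a PENDING write stream; rows appended to it stay
// invisible until the stream is finalized and batch-committed.
func createPendingStream(ctx context.Context, conn *grpc.ClientConn) (*storagepb.WriteStream, error) {
	writeClient := storagepb.NewBigQueryWriteClient(conn)
	return writeClient.CreateWriteStream(ctx, &storagepb.CreateWriteStreamRequest{
		Parent: "projects/my-project/datasets/my_dataset/tables/my_table", // placeholder
		WriteStream: &storagepb.WriteStream{
			Type: storagepb.WriteStream_PENDING,
		},
	})
}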
func (*WriteStream) Descriptor
func (*WriteStream) Descriptor() ([]byte, []int)
Deprecated: Use WriteStream.ProtoReflect.Descriptor instead.
func (*WriteStream) GetCommitTime
func (x *WriteStream) GetCommitTime() *timestamppb.Timestamp
func (*WriteStream) GetCreateTime
func (x *WriteStream) GetCreateTime() *timestamppb.Timestamp
func (*WriteStream) GetName
func (x *WriteStream) GetName() string
func (*WriteStream) GetTableSchema
func (x *WriteStream) GetTableSchema() *TableSchema
func (*WriteStream) GetType
func (x *WriteStream) GetType() WriteStream_Type
func (*WriteStream) ProtoMessage
func (*WriteStream) ProtoMessage()
func (*WriteStream) ProtoReflect
func (x *WriteStream) ProtoReflect() protoreflect.Message
func (*WriteStream) Reset
func (x *WriteStream) Reset()
func (*WriteStream) String
func (x *WriteStream) String() string
WriteStream_Type
type WriteStream_Type int32
Type enum of the stream.
WriteStream_TYPE_UNSPECIFIED, WriteStream_COMMITTED, WriteStream_PENDING, WriteStream_BUFFERED
const (
// Unknown type.
WriteStream_TYPE_UNSPECIFIED WriteStream_Type = 0
// Data will commit automatically and appear as soon as the write is
// acknowledged.
WriteStream_COMMITTED WriteStream_Type = 1
// Data is invisible until the stream is committed.
WriteStream_PENDING WriteStream_Type = 2
// Data is only visible up to the offset to which it was flushed.
WriteStream_BUFFERED WriteStream_Type = 3
)
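To round out the PENDING case, a hedged sketch of the finalize-then-commit sequence that makes a pending stream's rows visible atomically; writeClient and ws carry over from the CreateWriteStream sketch above.

// Sketch: finalize a PENDING stream, then batch-commit it to the table.
func finalizeAndCommit(ctx context.Context, writeClient storagepb.BigQueryWriteClient, ws *storagepb.WriteStream) error {
	if _, err := writeClient.FinalizeWriteStream(ctx, &storagepb.FinalizeWriteStreamRequest{
		Name: ws.GetName(),
	}); err != nil {
		return err
	}
	_, err := writeClient.BatchCommitWriteStreams(ctx, &storagepb.BatchCommitWriteStreamsRequest{
		Parent:       "projects/my-project/datasets/my_dataset/tables/my_table", // placeholder
		WriteStreams: []string{ws.GetName()},
	})
	return err
}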
func (WriteStream_Type) Descriptor
func (WriteStream_Type) Descriptor() protoreflect.EnumDescriptor
func (WriteStream_Type) Enum
func (x WriteStream_Type) Enum() *WriteStream_Type
func (WriteStream_Type) EnumDescriptor
func (WriteStream_Type) EnumDescriptor() ([]byte, []int)
Deprecated: Use WriteStream_Type.Descriptor instead.
func (WriteStream_Type) Number
func (x WriteStream_Type) Number() protoreflect.EnumNumber
func (WriteStream_Type) String
func (x WriteStream_Type) String() string
func (WriteStream_Type) Type
func (WriteStream_Type) Type() protoreflect.EnumType