Constants
BigQueryStorage_CreateReadSession_FullMethodName, BigQueryStorage_ReadRows_FullMethodName, BigQueryStorage_BatchCreateReadSessionStreams_FullMethodName, BigQueryStorage_FinalizeStream_FullMethodName, BigQueryStorage_SplitReadStream_FullMethodName
const (
	BigQueryStorage_CreateReadSession_FullMethodName             = "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession"
	BigQueryStorage_ReadRows_FullMethodName                      = "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/ReadRows"
	BigQueryStorage_BatchCreateReadSessionStreams_FullMethodName = "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/BatchCreateReadSessionStreams"
	BigQueryStorage_FinalizeStream_FullMethodName                = "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/FinalizeStream"
	BigQueryStorage_SplitReadStream_FullMethodName               = "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/SplitReadStream"
)
Variables
DataFormat_name, DataFormat_value
var (
	DataFormat_name = map[int32]string{
		0: "DATA_FORMAT_UNSPECIFIED",
		1: "AVRO",
		3: "ARROW",
	}
	DataFormat_value = map[string]int32{
		"DATA_FORMAT_UNSPECIFIED": 0,
		"AVRO":                    1,
		"ARROW":                   3,
	}
)
Enum value maps for DataFormat.
ShardingStrategy_name, ShardingStrategy_value
var (
	ShardingStrategy_name = map[int32]string{
		0: "SHARDING_STRATEGY_UNSPECIFIED",
		1: "LIQUID",
		2: "BALANCED",
	}
	ShardingStrategy_value = map[string]int32{
		"SHARDING_STRATEGY_UNSPECIFIED": 0,
		"LIQUID":                        1,
		"BALANCED":                      2,
	}
)
Enum value maps for ShardingStrategy.
BigQueryStorage_ServiceDesc
var BigQueryStorage_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "google.cloud.bigquery.storage.v1beta1.BigQueryStorage",
	HandlerType: (*BigQueryStorageServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "CreateReadSession",
			Handler:    _BigQueryStorage_CreateReadSession_Handler,
		},
		{
			MethodName: "BatchCreateReadSessionStreams",
			Handler:    _BigQueryStorage_BatchCreateReadSessionStreams_Handler,
		},
		{
			MethodName: "FinalizeStream",
			Handler:    _BigQueryStorage_FinalizeStream_Handler,
		},
		{
			MethodName: "SplitReadStream",
			Handler:    _BigQueryStorage_SplitReadStream_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "ReadRows",
			Handler:       _BigQueryStorage_ReadRows_Handler,
			ServerStreams: true,
		},
	},
	Metadata: "google/cloud/bigquery/storage/v1beta1/storage.proto",
}
BigQueryStorage_ServiceDesc is the grpc.ServiceDesc for BigQueryStorage service. It's only intended for direct use with grpc.RegisterService, and not to be introspected or modified (even as a copy).
File_google_cloud_bigquery_storage_v1beta1_arrow_proto
var File_google_cloud_bigquery_storage_v1beta1_arrow_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta1_avro_proto
var File_google_cloud_bigquery_storage_v1beta1_avro_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta1_read_options_proto
var File_google_cloud_bigquery_storage_v1beta1_read_options_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta1_storage_proto
var File_google_cloud_bigquery_storage_v1beta1_storage_proto protoreflect.FileDescriptor
File_google_cloud_bigquery_storage_v1beta1_table_reference_proto
var File_google_cloud_bigquery_storage_v1beta1_table_reference_proto protoreflect.FileDescriptor
Functions
func RegisterBigQueryStorageServer
func RegisterBigQueryStorageServer(s grpc.ServiceRegistrar, srv BigQueryStorageServer)
ArrowRecordBatch
type ArrowRecordBatch struct {
	// IPC serialized Arrow RecordBatch.
	SerializedRecordBatch []byte `protobuf:"bytes,1,opt,name=serialized_record_batch,json=serializedRecordBatch,proto3" json:"serialized_record_batch,omitempty"`
	// The count of rows in the returning block.
	RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// contains filtered or unexported fields
}
Arrow RecordBatch.
func (*ArrowRecordBatch) Descriptor
func (*ArrowRecordBatch) Descriptor() ([]byte, []int)
Deprecated: Use ArrowRecordBatch.ProtoReflect.Descriptor instead.
func (*ArrowRecordBatch) GetRowCount
func (x *ArrowRecordBatch) GetRowCount() int64
func (*ArrowRecordBatch) GetSerializedRecordBatch
func (x *ArrowRecordBatch) GetSerializedRecordBatch() []byte
func (*ArrowRecordBatch) ProtoMessage
func (*ArrowRecordBatch) ProtoMessage()
func (*ArrowRecordBatch) ProtoReflect
func (x *ArrowRecordBatch) ProtoReflect() protoreflect.Message
func (*ArrowRecordBatch) Reset
func (x *ArrowRecordBatch) Reset()
func (*ArrowRecordBatch) String
func (x *ArrowRecordBatch) String() string
ArrowSchema
type ArrowSchema struct {
	// IPC serialized Arrow schema.
	SerializedSchema []byte `protobuf:"bytes,1,opt,name=serialized_schema,json=serializedSchema,proto3" json:"serialized_schema,omitempty"`
	// contains filtered or unexported fields
}
Arrow schema.
func (*ArrowSchema) Descriptor
func (*ArrowSchema) Descriptor() ([]byte, []int)
Deprecated: Use ArrowSchema.ProtoReflect.Descriptor instead.
func (*ArrowSchema) GetSerializedSchema
func (x *ArrowSchema) GetSerializedSchema() []byte
func (*ArrowSchema) ProtoMessage
func (*ArrowSchema) ProtoMessage()
func (*ArrowSchema) ProtoReflect
func (x *ArrowSchema) ProtoReflect() protoreflect.Message
func (*ArrowSchema) Reset
func (x *ArrowSchema) Reset()
func (*ArrowSchema) String
func (x *ArrowSchema) String() string
AvroRows
type AvroRows struct {
	// Binary serialized rows in a block.
	SerializedBinaryRows []byte `protobuf:"bytes,1,opt,name=serialized_binary_rows,json=serializedBinaryRows,proto3" json:"serialized_binary_rows,omitempty"`
	// The count of rows in the returning block.
	RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// contains filtered or unexported fields
}
Avro rows.
func (*AvroRows) Descriptor
func (*AvroRows) Descriptor() ([]byte, []int)
Deprecated: Use AvroRows.ProtoReflect.Descriptor instead.
func (*AvroRows) GetRowCount
func (x *AvroRows) GetRowCount() int64
func (*AvroRows) GetSerializedBinaryRows
func (x *AvroRows) GetSerializedBinaryRows() []byte
func (*AvroRows) ProtoMessage
func (*AvroRows) ProtoMessage()
func (*AvroRows) ProtoReflect
func (x *AvroRows) ProtoReflect() protoreflect.Message
func (*AvroRows) Reset
func (x *AvroRows) Reset()
func (*AvroRows) String
func (x *AvroRows) String() string
AvroSchema
type AvroSchema struct {
	// Json serialized schema, as described at
	// https://avro.apache.org/docs/1.8.1/spec.html
	Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"`
	// contains filtered or unexported fields
}
Avro schema.
func (*AvroSchema) Descriptor
func (*AvroSchema) Descriptor() ([]byte, []int)
Deprecated: Use AvroSchema.ProtoReflect.Descriptor instead.
func (*AvroSchema) GetSchema
func (x *AvroSchema) GetSchema() string
func (*AvroSchema) ProtoMessage
func (*AvroSchema) ProtoMessage()
func (*AvroSchema) ProtoReflect
func (x *AvroSchema) ProtoReflect() protoreflect.Message
func (*AvroSchema) Reset
func (x *AvroSchema) Reset()
func (*AvroSchema) String
func (x *AvroSchema) String() string
BatchCreateReadSessionStreamsRequest
type BatchCreateReadSessionStreamsRequest struct {
	// Required. Must be a non-expired session obtained from a call to
	// CreateReadSession. Only the name field needs to be set.
	Session *ReadSession `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
	// Required. Number of new streams requested. Must be positive.
	// Number of added streams may be less than this; see CreateReadSessionRequest
	// for more information.
	RequestedStreams int32 `protobuf:"varint,2,opt,name=requested_streams,json=requestedStreams,proto3" json:"requested_streams,omitempty"`
	// contains filtered or unexported fields
}
Information needed to request additional streams for an established read session.
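For illustration (assuming this package is imported as storagepb), a caller might scale an existing session up like so; addStreams is a hypothetical helper, and only the session's name field needs to be set:

func addStreams(ctx context.Context, client storagepb.BigQueryStorageClient, session *storagepb.ReadSession) ([]*storagepb.Stream, error) {
	resp, err := client.BatchCreateReadSessionStreams(ctx, &storagepb.BatchCreateReadSessionStreamsRequest{
		// Only the name field of the session needs to be set.
		Session:          &storagepb.ReadSession{Name: session.GetName()},
		RequestedStreams: 2, // the server may add fewer streams than requested
	})
	if err != nil {
		return nil, err
	}
	return resp.GetStreams(), nil
}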
func (*BatchCreateReadSessionStreamsRequest) Descriptor
func (*BatchCreateReadSessionStreamsRequest) Descriptor() ([]byte, []int)
Deprecated: Use BatchCreateReadSessionStreamsRequest.ProtoReflect.Descriptor instead.
func (*BatchCreateReadSessionStreamsRequest) GetRequestedStreams
func (x *BatchCreateReadSessionStreamsRequest) GetRequestedStreams() int32
func (*BatchCreateReadSessionStreamsRequest) GetSession
func (x *BatchCreateReadSessionStreamsRequest) GetSession() *ReadSession
func (*BatchCreateReadSessionStreamsRequest) ProtoMessage
func (*BatchCreateReadSessionStreamsRequest) ProtoMessage()
func (*BatchCreateReadSessionStreamsRequest) ProtoReflect
func (x *BatchCreateReadSessionStreamsRequest) ProtoReflect() protoreflect.Message
func (*BatchCreateReadSessionStreamsRequest) Reset
func (x *BatchCreateReadSessionStreamsRequest) Reset()
func (*BatchCreateReadSessionStreamsRequest) String
func (x *BatchCreateReadSessionStreamsRequest) String() string
BatchCreateReadSessionStreamsResponse
type BatchCreateReadSessionStreamsResponse struct {
	// Newly added streams.
	Streams []*Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"`
	// contains filtered or unexported fields
}
The response from BatchCreateReadSessionStreams returns the stream identifiers for the newly created streams.
func (*BatchCreateReadSessionStreamsResponse) Descriptor
func (*BatchCreateReadSessionStreamsResponse) Descriptor() ([]byte, []int)
Deprecated: Use BatchCreateReadSessionStreamsResponse.ProtoReflect.Descriptor instead.
func (*BatchCreateReadSessionStreamsResponse) GetStreams
func (x *BatchCreateReadSessionStreamsResponse) GetStreams() []*Stream
func (*BatchCreateReadSessionStreamsResponse) ProtoMessage
func (*BatchCreateReadSessionStreamsResponse) ProtoMessage()
func (*BatchCreateReadSessionStreamsResponse) ProtoReflect
func (x *BatchCreateReadSessionStreamsResponse) ProtoReflect() protoreflect.Message
func (*BatchCreateReadSessionStreamsResponse) Reset
func (x *BatchCreateReadSessionStreamsResponse) Reset()
func (*BatchCreateReadSessionStreamsResponse) String
func (x *BatchCreateReadSessionStreamsResponse) String() string
BigQueryStorageClient
type BigQueryStorageClient interface {
	// Creates a new read session. A read session divides the contents of a
	// BigQuery table into one or more streams, which can then be used to read
	// data from the table. The read session also specifies properties of the
	// data to be read, such as a list of columns or a push-down filter describing
	// the rows to be returned.
	//
	// A particular row can be read by at most one stream. When the caller has
	// reached the end of each stream in the session, then all the data in the
	// table has been read.
	//
	// Read sessions automatically expire 6 hours after they are created and do
	// not require manual clean-up by the caller.
	CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error)
	// Reads rows from the table in the format prescribed by the read session.
	// Each response contains one or more table rows, up to a maximum of 10 MiB
	// per response; read requests which attempt to read individual rows larger
	// than this will fail.
	//
	// Each request also returns a set of stream statistics reflecting the
	// estimated total number of rows in the read stream. This number is computed
	// based on the total table size and the number of active streams in the read
	// session, and may change as other streams continue to read data.
	ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryStorage_ReadRowsClient, error)
	// Creates additional streams for a ReadSession. This API can be used to
	// dynamically adjust the parallelism of a batch processing task upwards by
	// adding additional workers.
	BatchCreateReadSessionStreams(ctx context.Context, in *BatchCreateReadSessionStreamsRequest, opts ...grpc.CallOption) (*BatchCreateReadSessionStreamsResponse, error)
	// Causes a single stream in a ReadSession to gracefully stop. This
	// API can be used to dynamically adjust the parallelism of a batch processing
	// task downwards without losing data.
	//
	// This API does not delete the stream -- it remains visible in the
	// ReadSession, and any data processed by the stream is not released to other
	// streams. However, no additional data will be assigned to the stream once
	// this call completes. Callers must continue reading data on the stream until
	// the end of the stream is reached so that data which has already been
	// assigned to the stream will be processed.
	//
	// This method will return an error if there are no other live streams
	// in the Session, or if SplitReadStream() has been called on the given
	// Stream.
	FinalizeStream(ctx context.Context, in *FinalizeStreamRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
	// Splits a given read stream into two Streams. These streams are referred to
	// as the primary and the residual of the split. The original stream can still
	// be read from in the same manner as before. Both of the returned streams can
	// also be read from, and the total rows returned by both child streams will be
	// the same as the rows read from the original stream.
	//
	// Moreover, the two child streams will be allocated back to back in the
	// original Stream. Concretely, it is guaranteed that for streams Original,
	// Primary, and Residual, that Original[0-j] = Primary[0-j] and
	// Original[j-n] = Residual[0-m] once the streams have been read to
	// completion.
	//
	// This method is guaranteed to be idempotent.
	SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error)
}
BigQueryStorageClient is the client API for BigQueryStorage service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
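As a usage sketch, not a definitive recipe: the function below creates a read session and drains each of its streams. It assumes this package is imported as storagepb, that conn is an authenticated grpc.ClientConnInterface (for example a *grpc.ClientConn dialed with appropriate credentials), and that context, fmt, io, and google.golang.org/grpc are imported; the project and table names are hypothetical.

func readTable(ctx context.Context, conn grpc.ClientConnInterface) error {
	client := storagepb.NewBigQueryStorageClient(conn)

	// Avro is requested explicitly, since DATA_FORMAT_UNSPECIFIED is not
	// supported by CreateReadSession.
	session, err := client.CreateReadSession(ctx, &storagepb.CreateReadSessionRequest{
		Parent: "projects/my-project",
		TableReference: &storagepb.TableReference{
			ProjectId: "my-project",
			DatasetId: "my_dataset",
			TableId:   "my_table",
		},
		RequestedStreams: 1,
		Format:           storagepb.DataFormat_AVRO,
	})
	if err != nil {
		return err
	}

	// A row is served by at most one stream, so reading every stream to
	// the end reads the whole table. Streams must start at offset 0.
	for _, s := range session.GetStreams() {
		rows, err := client.ReadRows(ctx, &storagepb.ReadRowsRequest{
			ReadPosition: &storagepb.StreamPosition{Stream: s, Offset: 0},
		})
		if err != nil {
			return err
		}
		for {
			resp, err := rows.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				return err
			}
			fmt.Printf("received a block of %d rows\n", resp.GetRowCount())
		}
	}
	return nil
}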
func NewBigQueryStorageClient
func NewBigQueryStorageClient(cc grpc.ClientConnInterface) BigQueryStorageClient
BigQueryStorageServer
type BigQueryStorageServer interface {
	// Creates a new read session. A read session divides the contents of a
	// BigQuery table into one or more streams, which can then be used to read
	// data from the table. The read session also specifies properties of the
	// data to be read, such as a list of columns or a push-down filter describing
	// the rows to be returned.
	//
	// A particular row can be read by at most one stream. When the caller has
	// reached the end of each stream in the session, then all the data in the
	// table has been read.
	//
	// Read sessions automatically expire 6 hours after they are created and do
	// not require manual clean-up by the caller.
	CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
	// Reads rows from the table in the format prescribed by the read session.
	// Each response contains one or more table rows, up to a maximum of 10 MiB
	// per response; read requests which attempt to read individual rows larger
	// than this will fail.
	//
	// Each request also returns a set of stream statistics reflecting the
	// estimated total number of rows in the read stream. This number is computed
	// based on the total table size and the number of active streams in the read
	// session, and may change as other streams continue to read data.
	ReadRows(*ReadRowsRequest, BigQueryStorage_ReadRowsServer) error
	// Creates additional streams for a ReadSession. This API can be used to
	// dynamically adjust the parallelism of a batch processing task upwards by
	// adding additional workers.
	BatchCreateReadSessionStreams(context.Context, *BatchCreateReadSessionStreamsRequest) (*BatchCreateReadSessionStreamsResponse, error)
	// Causes a single stream in a ReadSession to gracefully stop. This
	// API can be used to dynamically adjust the parallelism of a batch processing
	// task downwards without losing data.
	//
	// This API does not delete the stream -- it remains visible in the
	// ReadSession, and any data processed by the stream is not released to other
	// streams. However, no additional data will be assigned to the stream once
	// this call completes. Callers must continue reading data on the stream until
	// the end of the stream is reached so that data which has already been
	// assigned to the stream will be processed.
	//
	// This method will return an error if there are no other live streams
	// in the Session, or if SplitReadStream() has been called on the given
	// Stream.
	FinalizeStream(context.Context, *FinalizeStreamRequest) (*emptypb.Empty, error)
	// Splits a given read stream into two Streams. These streams are referred to
	// as the primary and the residual of the split. The original stream can still
	// be read from in the same manner as before. Both of the returned streams can
	// also be read from, and the total rows returned by both child streams will be
	// the same as the rows read from the original stream.
	//
	// Moreover, the two child streams will be allocated back to back in the
	// original Stream. Concretely, it is guaranteed that for streams Original,
	// Primary, and Residual, that Original[0-j] = Primary[0-j] and
	// Original[j-n] = Residual[0-m] once the streams have been read to
	// completion.
	//
	// This method is guaranteed to be idempotent.
	SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
}
BigQueryStorageServer is the server API for BigQueryStorage service. All implementations should embed UnimplementedBigQueryStorageServer for forward compatibility.
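On the server side, a minimal sketch under the same storagepb import assumption (plus net, google.golang.org/grpc, google.golang.org/grpc/codes, and google.golang.org/grpc/status): a hypothetical stub embeds UnimplementedBigQueryStorageServer and overrides only CreateReadSession, so every other RPC automatically reports codes.Unimplemented.

type stubStorageServer struct {
	// Embedding provides forward-compatible defaults for unimplemented RPCs.
	storagepb.UnimplementedBigQueryStorageServer
}

func (stubStorageServer) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) {
	if req.GetTableReference() == nil {
		return nil, status.Error(codes.InvalidArgument, "table_reference is required")
	}
	// A canned session; a real implementation would allocate streams here.
	return &storagepb.ReadSession{
		Name: "projects/my-project/locations/us/sessions/session-1",
	}, nil
}

func serve() error {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	storagepb.RegisterBigQueryStorageServer(s, stubStorageServer{})
	return s.Serve(lis)
}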
BigQueryStorage_ReadRowsClient
type BigQueryStorage_ReadRowsClient interface {
	Recv() (*ReadRowsResponse, error)
	grpc.ClientStream
}
BigQueryStorage_ReadRowsServer
type BigQueryStorage_ReadRowsServer interface {
	Send(*ReadRowsResponse) error
	grpc.ServerStream
}
CreateReadSessionRequest
type CreateReadSessionRequest struct {
	// Required. Reference to the table to read.
	TableReference *TableReference `protobuf:"bytes,1,opt,name=table_reference,json=tableReference,proto3" json:"table_reference,omitempty"`
	// Required. String of the form `projects/{project_id}` indicating the
	// project this ReadSession is associated with. This is the project that will
	// be billed for usage.
	Parent string `protobuf:"bytes,6,opt,name=parent,proto3" json:"parent,omitempty"`
	// Any modifiers to the Table (e.g. snapshot timestamp).
	TableModifiers *TableModifiers `protobuf:"bytes,2,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"`
	// Initial number of streams. If unset or 0, we will
	// provide a value of streams so as to produce reasonable throughput. Must be
	// non-negative. The number of streams may be lower than the requested number,
	// depending on the amount of parallelism that is reasonable for the table and
	// the maximum amount of parallelism allowed by the system.
	//
	// Streams must be read starting from offset 0.
	RequestedStreams int32 `protobuf:"varint,3,opt,name=requested_streams,json=requestedStreams,proto3" json:"requested_streams,omitempty"`
	// Read options for this session (e.g. column selection, filters).
	ReadOptions *TableReadOptions `protobuf:"bytes,4,opt,name=read_options,json=readOptions,proto3" json:"read_options,omitempty"`
	// Data output format. Currently defaults to Avro.
	// DATA_FORMAT_UNSPECIFIED not supported.
	Format DataFormat `protobuf:"varint,5,opt,name=format,proto3,enum=google.cloud.bigquery.storage.v1beta1.DataFormat" json:"format,omitempty"`
	// The strategy to use for distributing data among multiple streams. Currently
	// defaults to liquid sharding.
	ShardingStrategy ShardingStrategy `protobuf:"varint,7,opt,name=sharding_strategy,json=shardingStrategy,proto3,enum=google.cloud.bigquery.storage.v1beta1.ShardingStrategy" json:"sharding_strategy,omitempty"`
	// contains filtered or unexported fields
}
Creates a new read session, which may include additional options such as requested parallelism, projection filters and constraints.
func (*CreateReadSessionRequest) Descriptor
func (*CreateReadSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateReadSessionRequest.ProtoReflect.Descriptor instead.
func (*CreateReadSessionRequest) GetFormat
func (x *CreateReadSessionRequest) GetFormat() DataFormat
func (*CreateReadSessionRequest) GetParent
func (x *CreateReadSessionRequest) GetParent() string
func (*CreateReadSessionRequest) GetReadOptions
func (x *CreateReadSessionRequest) GetReadOptions() *TableReadOptions
func (*CreateReadSessionRequest) GetRequestedStreams
func (x *CreateReadSessionRequest) GetRequestedStreams() int32
func (*CreateReadSessionRequest) GetShardingStrategy
func (x *CreateReadSessionRequest) GetShardingStrategy() ShardingStrategy
func (*CreateReadSessionRequest) GetTableModifiers
func (x *CreateReadSessionRequest) GetTableModifiers() *TableModifiers
func (*CreateReadSessionRequest) GetTableReference
func (x *CreateReadSessionRequest) GetTableReference() *TableReference
func (*CreateReadSessionRequest) ProtoMessage
func (*CreateReadSessionRequest) ProtoMessage()
func (*CreateReadSessionRequest) ProtoReflect
func (x *CreateReadSessionRequest) ProtoReflect() protoreflect.Message
func (*CreateReadSessionRequest) Reset
func (x *CreateReadSessionRequest) Reset()
func (*CreateReadSessionRequest) String
func (x *CreateReadSessionRequest) String() string
DataFormat
type DataFormat int32
Data format for input or output data.
DataFormat_DATA_FORMAT_UNSPECIFIED, DataFormat_AVRO, DataFormat_ARROW
const (
	// Data format is unspecified.
	DataFormat_DATA_FORMAT_UNSPECIFIED DataFormat = 0
	// Avro is a standard open source row based file format.
	// See https://avro.apache.org/ for more details.
	DataFormat_AVRO DataFormat = 1
	// Arrow is a standard open source column-based message format.
	// See https://arrow.apache.org/ for more details.
	DataFormat_ARROW DataFormat = 3
)
func (DataFormat) Descriptor
func (DataFormat) Descriptor() protoreflect.EnumDescriptor
func (DataFormat) Enum
func (x DataFormat) Enum() *DataFormat
func (DataFormat) EnumDescriptor
func (DataFormat) EnumDescriptor() ([]byte, []int)
Deprecated: Use DataFormat.Descriptor instead.
func (DataFormat) Number
func (x DataFormat) Number() protoreflect.EnumNumber
func (DataFormat) String
func (x DataFormat) String() string
func (DataFormat) Type
func (DataFormat) Type() protoreflect.EnumType
FinalizeStreamRequest
type FinalizeStreamRequest struct {
	// Required. Stream to finalize.
	Stream *Stream `protobuf:"bytes,2,opt,name=stream,proto3" json:"stream,omitempty"`
	// contains filtered or unexported fields
}
Request information for invoking FinalizeStream.
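A sketch of the scale-down path (storagepb import assumed; retireStream is a hypothetical helper): finalize the stream, then keep reading it so rows already assigned to it are still processed.

func retireStream(ctx context.Context, client storagepb.BigQueryStorageClient, s *storagepb.Stream) error {
	if _, err := client.FinalizeStream(ctx, &storagepb.FinalizeStreamRequest{Stream: s}); err != nil {
		return err
	}
	// The stream still holds its assigned rows: the caller must continue
	// calling ReadRows on it until the end of the stream is reached.
	return nil
}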
func (*FinalizeStreamRequest) Descriptor
func (*FinalizeStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use FinalizeStreamRequest.ProtoReflect.Descriptor instead.
func (*FinalizeStreamRequest) GetStream
func (x *FinalizeStreamRequest) GetStream() *Stream
func (*FinalizeStreamRequest) ProtoMessage
func (*FinalizeStreamRequest) ProtoMessage()
func (*FinalizeStreamRequest) ProtoReflect
func (x *FinalizeStreamRequest) ProtoReflect() protoreflect.Message
func (*FinalizeStreamRequest) Reset
func (x *FinalizeStreamRequest) Reset()
func (*FinalizeStreamRequest) String
func (x *FinalizeStreamRequest) String() string
Progress
type Progress struct {
	// The fraction of rows assigned to the stream that have been processed by the
	// server so far, not including the rows in the current response message.
	//
	// This value, along with `at_response_end`, can be used to interpolate the
	// progress made as the rows in the message are being processed using the
	// following formula: `at_response_start + (at_response_end -
	// at_response_start) * rows_processed_from_response / rows_in_response`.
	//
	// Note that if a filter is provided, the `at_response_end` value of the
	// previous response may not necessarily be equal to the `at_response_start`
	// value of the current response.
	AtResponseStart float32 `protobuf:"fixed32,1,opt,name=at_response_start,json=atResponseStart,proto3" json:"at_response_start,omitempty"`
	// Similar to `at_response_start`, except that this value includes the rows in
	// the current response.
	AtResponseEnd float32 `protobuf:"fixed32,2,opt,name=at_response_end,json=atResponseEnd,proto3" json:"at_response_end,omitempty"`
	// contains filtered or unexported fields
}
func (*Progress) Descriptor
func (*Progress) Descriptor() ([]byte, []int)
Deprecated: Use Progress.ProtoReflect.Descriptor instead.
func (*Progress) GetAtResponseEnd
func (x *Progress) GetAtResponseEnd() float32
func (*Progress) GetAtResponseStart
func (x *Progress) GetAtResponseStart() float32
func (*Progress) ProtoMessage
func (*Progress) ProtoMessage()
func (*Progress) ProtoReflect
func (x *Progress) ProtoReflect() protoreflect.Message
func (*Progress) Reset
func (x *Progress) Reset()
func (*Progress) String
func (x *Progress) String() string
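The interpolation formula in the Progress comment translates directly into code; in this sketch (storagepb import assumed), rowsProcessed and rowsInResponse are hypothetical counters maintained by the caller while it consumes the current response.

func interpolatedProgress(p *storagepb.Progress, rowsProcessed, rowsInResponse int64) float32 {
	if rowsInResponse == 0 {
		return p.GetAtResponseStart()
	}
	span := p.GetAtResponseEnd() - p.GetAtResponseStart()
	return p.GetAtResponseStart() + span*float32(rowsProcessed)/float32(rowsInResponse)
}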
ReadRowsRequest
type ReadRowsRequest struct {
	// Required. Identifier of the position in the stream to start reading from.
	// The offset requested must be less than the last row read from ReadRows.
	// Requesting a larger offset is undefined.
	ReadPosition *StreamPosition `protobuf:"bytes,1,opt,name=read_position,json=readPosition,proto3" json:"read_position,omitempty"`
	// contains filtered or unexported fields
}
Requesting row data via ReadRows must provide Stream position information.
func (*ReadRowsRequest) Descriptor
func (*ReadRowsRequest) Descriptor() ([]byte, []int)
Deprecated: Use ReadRowsRequest.ProtoReflect.Descriptor instead.
func (*ReadRowsRequest) GetReadPosition
func (x *ReadRowsRequest) GetReadPosition() *StreamPosition
func (*ReadRowsRequest) ProtoMessage
func (*ReadRowsRequest) ProtoMessage()
func (*ReadRowsRequest) ProtoReflect
func (x *ReadRowsRequest) ProtoReflect() protoreflect.Message
func (*ReadRowsRequest) Reset
func (x *ReadRowsRequest) Reset()
func (*ReadRowsRequest) String
func (x *ReadRowsRequest) String() string
ReadRowsResponse
type ReadRowsResponse struct {
	// Row data is returned in format specified during session creation.
	//
	// Types that are assignable to Rows:
	//
	//	*ReadRowsResponse_AvroRows
	//	*ReadRowsResponse_ArrowRecordBatch
	Rows isReadRowsResponse_Rows `protobuf_oneof:"rows"`
	// Number of serialized rows in the rows block. This value is recorded here,
	// in addition to the row_count values in the output-specific messages in
	// `rows`, so that code which needs to record progress through the stream can
	// do so in an output format-independent way.
	RowCount int64 `protobuf:"varint,6,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// Estimated stream statistics.
	Status *StreamStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
	// Throttling status. If unset, the latest response still describes
	// the current throttling status.
	ThrottleStatus *ThrottleStatus `protobuf:"bytes,5,opt,name=throttle_status,json=throttleStatus,proto3" json:"throttle_status,omitempty"`
	// The schema for the read. If read_options.selected_fields is set, the
	// schema may be different from the table schema as it will only contain
	// the selected fields. This schema is equivalent to the one returned by
	// CreateReadSession. This field is only populated in the first ReadRowsResponse
	// RPC.
	//
	// Types that are assignable to Schema:
	//
	//	*ReadRowsResponse_AvroSchema
	//	*ReadRowsResponse_ArrowSchema
	Schema isReadRowsResponse_Schema `protobuf_oneof:"schema"`
	// contains filtered or unexported fields
}
Response from calling ReadRows may include row data, progress and throttling information.
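Since both the row data and the schema are oneofs, a consumer can type-switch on the wrapper types to stay format-independent; a sketch (storagepb and fmt imports assumed, actual decoding elided):

func handleResponse(resp *storagepb.ReadRowsResponse) {
	switch rows := resp.GetRows().(type) {
	case *storagepb.ReadRowsResponse_AvroRows:
		_ = rows.AvroRows.GetSerializedBinaryRows() // hand to an Avro decoder
	case *storagepb.ReadRowsResponse_ArrowRecordBatch:
		_ = rows.ArrowRecordBatch.GetSerializedRecordBatch() // hand to an Arrow IPC reader
	}
	// RowCount mirrors the per-format counts, so progress tracking does not
	// depend on the data format.
	fmt.Printf("block contained %d rows\n", resp.GetRowCount())
}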
func (*ReadRowsResponse) Descriptor
func (*ReadRowsResponse) Descriptor() ([]byte, []int)
Deprecated: Use ReadRowsResponse.ProtoReflect.Descriptor instead.
func (*ReadRowsResponse) GetArrowRecordBatch
func (x *ReadRowsResponse) GetArrowRecordBatch() *ArrowRecordBatch
func (*ReadRowsResponse) GetArrowSchema
func (x *ReadRowsResponse) GetArrowSchema() *ArrowSchema
func (*ReadRowsResponse) GetAvroRows
func (x *ReadRowsResponse) GetAvroRows() *AvroRows
func (*ReadRowsResponse) GetAvroSchema
func (x *ReadRowsResponse) GetAvroSchema() *AvroSchema
func (*ReadRowsResponse) GetRowCount
func (x *ReadRowsResponse) GetRowCount() int64
func (*ReadRowsResponse) GetRows
func (m *ReadRowsResponse) GetRows() isReadRowsResponse_Rows
func (*ReadRowsResponse) GetSchema
func (m *ReadRowsResponse) GetSchema() isReadRowsResponse_Schema
func (*ReadRowsResponse) GetStatus
func (x *ReadRowsResponse) GetStatus() *StreamStatus
func (*ReadRowsResponse) GetThrottleStatus
func (x *ReadRowsResponse) GetThrottleStatus() *ThrottleStatus
func (*ReadRowsResponse) ProtoMessage
func (*ReadRowsResponse) ProtoMessage()
func (*ReadRowsResponse) ProtoReflect
func (x *ReadRowsResponse) ProtoReflect() protoreflect.Message
func (*ReadRowsResponse) Reset
func (x *ReadRowsResponse) Reset()
func (*ReadRowsResponse) String
func (x *ReadRowsResponse) String() string
ReadRowsResponse_ArrowRecordBatch
type ReadRowsResponse_ArrowRecordBatch struct {
	// Serialized row data in Arrow RecordBatch format.
	ArrowRecordBatch *ArrowRecordBatch `protobuf:"bytes,4,opt,name=arrow_record_batch,json=arrowRecordBatch,proto3,oneof"`
}
ReadRowsResponse_ArrowSchema
type ReadRowsResponse_ArrowSchema struct {
	// Output only. Arrow schema.
	ArrowSchema *ArrowSchema `protobuf:"bytes,8,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"`
}
ReadRowsResponse_AvroRows
type ReadRowsResponse_AvroRows struct {
	// Serialized row data in AVRO format.
	AvroRows *AvroRows `protobuf:"bytes,3,opt,name=avro_rows,json=avroRows,proto3,oneof"`
}
ReadRowsResponse_AvroSchema
type ReadRowsResponse_AvroSchema struct {
	// Output only. Avro schema.
	AvroSchema *AvroSchema `protobuf:"bytes,7,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}
ReadSession
type ReadSession struct {
	// Unique identifier for the session, in the form
	// `projects/{project_id}/locations/{location}/sessions/{session_id}`.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Time at which the session becomes invalid. After this time, subsequent
	// requests to read this Session will return errors.
	ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
	// The schema for the read. If read_options.selected_fields is set, the
	// schema may be different from the table schema as it will only contain
	// the selected fields.
	//
	// Types that are assignable to Schema:
	//
	//	*ReadSession_AvroSchema
	//	*ReadSession_ArrowSchema
	Schema isReadSession_Schema `protobuf_oneof:"schema"`
	// Streams associated with this session.
	Streams []*Stream `protobuf:"bytes,4,rep,name=streams,proto3" json:"streams,omitempty"`
	// Table that this ReadSession is reading from.
	TableReference *TableReference `protobuf:"bytes,7,opt,name=table_reference,json=tableReference,proto3" json:"table_reference,omitempty"`
	// Any modifiers which are applied when reading from the specified table.
	TableModifiers *TableModifiers `protobuf:"bytes,8,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"`
	// The strategy to use for distributing data among the streams.
	ShardingStrategy ShardingStrategy `protobuf:"varint,9,opt,name=sharding_strategy,json=shardingStrategy,proto3,enum=google.cloud.bigquery.storage.v1beta1.ShardingStrategy" json:"sharding_strategy,omitempty"`
	// contains filtered or unexported fields
}
Information returned from a CreateReadSession request.
func (*ReadSession) Descriptor
func (*ReadSession) Descriptor() ([]byte, []int)
Deprecated: Use ReadSession.ProtoReflect.Descriptor instead.
func (*ReadSession) GetArrowSchema
func (x *ReadSession) GetArrowSchema() *ArrowSchema
func (*ReadSession) GetAvroSchema
func (x *ReadSession) GetAvroSchema() *AvroSchema
func (*ReadSession) GetExpireTime
func (x *ReadSession) GetExpireTime() *timestamppb.Timestamp
func (*ReadSession) GetName
func (x *ReadSession) GetName() string
func (*ReadSession) GetSchema
func (m *ReadSession) GetSchema() isReadSession_Schema
func (*ReadSession) GetShardingStrategy
func (x *ReadSession) GetShardingStrategy() ShardingStrategy
func (*ReadSession) GetStreams
func (x *ReadSession) GetStreams() []*Stream
func (*ReadSession) GetTableModifiers
func (x *ReadSession) GetTableModifiers() *TableModifiers
func (*ReadSession) GetTableReference
func (x *ReadSession) GetTableReference() *TableReference
func (*ReadSession) ProtoMessage
func (*ReadSession) ProtoMessage()
func (*ReadSession) ProtoReflect
func (x *ReadSession) ProtoReflect() protoreflect.Message
func (*ReadSession) Reset
func (x *ReadSession) Reset()
func (*ReadSession) String
func (x *ReadSession) String() string
ReadSession_ArrowSchema
type ReadSession_ArrowSchema struct {
	// Arrow schema.
	ArrowSchema *ArrowSchema `protobuf:"bytes,6,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"`
}
ReadSession_AvroSchema
type ReadSession_AvroSchema struct {
	// Avro schema.
	AvroSchema *AvroSchema `protobuf:"bytes,5,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}
ShardingStrategy
type ShardingStrategy int32
Strategy for distributing data among multiple streams in a read session.
ShardingStrategy_SHARDING_STRATEGY_UNSPECIFIED, ShardingStrategy_LIQUID, ShardingStrategy_BALANCED
const (
	// Same as LIQUID.
	ShardingStrategy_SHARDING_STRATEGY_UNSPECIFIED ShardingStrategy = 0
	// Assigns data to each stream based on the client's read rate. The faster the
	// client reads from a stream, the more data is assigned to the stream. In
	// this strategy, it's possible to read all data from a single stream even if
	// there are other streams present.
	ShardingStrategy_LIQUID ShardingStrategy = 1
	// Assigns data to each stream such that roughly the same number of rows can
	// be read from each stream. Because the server-side unit for assigning data
	// is collections of rows, the API does not guarantee that each stream will
	// return the same number of rows. Additionally, the limits are enforced based
	// on the number of pre-filtering rows, so some filters can lead to lopsided
	// assignments.
	ShardingStrategy_BALANCED ShardingStrategy = 2
)
func (ShardingStrategy) Descriptor
func (ShardingStrategy) Descriptor() protoreflect.EnumDescriptor
func (ShardingStrategy) Enum
func (x ShardingStrategy) Enum() *ShardingStrategy
func (ShardingStrategy) EnumDescriptor
func (ShardingStrategy) EnumDescriptor() ([]byte, []int)
Deprecated: Use ShardingStrategy.Descriptor instead.
func (ShardingStrategy) Number
func (x ShardingStrategy) Number() protoreflect.EnumNumber
func (ShardingStrategy) String
func (x ShardingStrategy) String() string
func (ShardingStrategy) Type
func (ShardingStrategy) Type() protoreflect.EnumType
SplitReadStreamRequest
type SplitReadStreamRequest struct {
	// Required. Stream to split.
	OriginalStream *Stream `protobuf:"bytes,1,opt,name=original_stream,json=originalStream,proto3" json:"original_stream,omitempty"`
	// A value in the range (0.0, 1.0) that specifies the fractional point at
	// which the original stream should be split. The actual split point is
	// evaluated on pre-filtered rows, so if a filter is provided, then there is
	// no guarantee that the division of the rows between the new child streams
	// will be proportional to this fractional value. Additionally, because the
	// server-side unit for assigning data is collections of rows, this fraction
	// will always map to a data storage boundary on the server side.
	Fraction float32 `protobuf:"fixed32,2,opt,name=fraction,proto3" json:"fraction,omitempty"`
	// contains filtered or unexported fields
}
Request information for SplitReadStream.
func (*SplitReadStreamRequest) Descriptor
func (*SplitReadStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use SplitReadStreamRequest.ProtoReflect.Descriptor instead.
func (*SplitReadStreamRequest) GetFraction
func (x *SplitReadStreamRequest) GetFraction() float32
func (*SplitReadStreamRequest) GetOriginalStream
func (x *SplitReadStreamRequest) GetOriginalStream() *Stream
func (*SplitReadStreamRequest) ProtoMessage
func (*SplitReadStreamRequest) ProtoMessage()
func (*SplitReadStreamRequest) ProtoReflect
func (x *SplitReadStreamRequest) ProtoReflect() protoreflect.Message
func (*SplitReadStreamRequest) Reset
func (x *SplitReadStreamRequest) Reset()
func (*SplitReadStreamRequest) String
func (x *SplitReadStreamRequest) String() string
SplitReadStreamResponse
type SplitReadStreamResponse struct {
	// Primary stream, which contains the beginning portion of
	// |original_stream|. An empty value indicates that the original stream can no
	// longer be split.
	PrimaryStream *Stream `protobuf:"bytes,1,opt,name=primary_stream,json=primaryStream,proto3" json:"primary_stream,omitempty"`
	// Remainder stream, which contains the tail of |original_stream|. An empty
	// value indicates that the original stream can no longer be split.
	RemainderStream *Stream `protobuf:"bytes,2,opt,name=remainder_stream,json=remainderStream,proto3" json:"remainder_stream,omitempty"`
	// contains filtered or unexported fields
}
Response from SplitReadStream.
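A sketch of performing one split (storagepb import assumed; splitOnce is a hypothetical helper). A nil primary or remainder stream in the response indicates the original stream can no longer be split:

func splitOnce(ctx context.Context, client storagepb.BigQueryStorageClient, s *storagepb.Stream) (*storagepb.Stream, *storagepb.Stream, error) {
	resp, err := client.SplitReadStream(ctx, &storagepb.SplitReadStreamRequest{
		OriginalStream: s,
		Fraction:       0.5, // evaluated against pre-filtered rows
	})
	if err != nil {
		return nil, nil, err
	}
	return resp.GetPrimaryStream(), resp.GetRemainderStream(), nil
}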
func (*SplitReadStreamResponse) Descriptor
func (*SplitReadStreamResponse) Descriptor() ([]byte, []int)
Deprecated: Use SplitReadStreamResponse.ProtoReflect.Descriptor instead.
func (*SplitReadStreamResponse) GetPrimaryStream
func (x *SplitReadStreamResponse) GetPrimaryStream() *Stream
func (*SplitReadStreamResponse) GetRemainderStream
func (x *SplitReadStreamResponse) GetRemainderStream() *Stream
func (*SplitReadStreamResponse) ProtoMessage
func (*SplitReadStreamResponse) ProtoMessage()
func (*SplitReadStreamResponse) ProtoReflect
func (x *SplitReadStreamResponse) ProtoReflect() protoreflect.Message
func (*SplitReadStreamResponse) Reset
func (x *SplitReadStreamResponse) Reset()
func (*SplitReadStreamResponse) String
func (x *SplitReadStreamResponse) String() string
Stream
type Stream struct {
	// Name of the stream, in the form
	// `projects/{project_id}/locations/{location}/streams/{stream_id}`.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// contains filtered or unexported fields
}
Information about a single data stream within a read session.
func (*Stream) Descriptor
func (*Stream) Descriptor() ([]byte, []int)
Deprecated: Use Stream.ProtoReflect.Descriptor instead.
func (*Stream) GetName
func (x *Stream) GetName() string
func (*Stream) ProtoMessage
func (*Stream) ProtoMessage()
func (*Stream) ProtoReflect
func (x *Stream) ProtoReflect() protoreflect.Message
func (*Stream) Reset
func (x *Stream) Reset()
func (*Stream) String
func (x *Stream) String() string
StreamPosition
type StreamPosition struct {
	// Identifier for a given Stream.
	Stream *Stream `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"`
	// Position in the stream.
	Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
	// contains filtered or unexported fields
}
Expresses a point within a given stream using an offset position.
func (*StreamPosition) Descriptor
func (*StreamPosition) Descriptor() ([]byte, []int)
Deprecated: Use StreamPosition.ProtoReflect.Descriptor instead.
func (*StreamPosition) GetOffset
func (x *StreamPosition) GetOffset() int64
func (*StreamPosition) GetStream
func (x *StreamPosition) GetStream() *Stream
func (*StreamPosition) ProtoMessage
func (*StreamPosition) ProtoMessage()
func (*StreamPosition) ProtoReflect
func (x *StreamPosition) ProtoReflect() protoreflect.Message
func (*StreamPosition) Reset
func (x *StreamPosition) Reset()
func (*StreamPosition) String
func (x *StreamPosition) String() string
StreamStatus
type StreamStatus struct {
	// Number of estimated rows in the current stream. May change over time as
	// different readers in the stream progress at rates which are relatively fast
	// or slow.
	EstimatedRowCount int64 `protobuf:"varint,1,opt,name=estimated_row_count,json=estimatedRowCount,proto3" json:"estimated_row_count,omitempty"`
	// A value in the range [0.0, 1.0] that represents the fraction of rows
	// assigned to this stream that have been processed by the server. In the
	// presence of read filters, the server may process more rows than it returns,
	// so this value reflects progress through the pre-filtering rows.
	//
	// This value is only populated for sessions created through the BALANCED
	// sharding strategy.
	FractionConsumed float32 `protobuf:"fixed32,2,opt,name=fraction_consumed,json=fractionConsumed,proto3" json:"fraction_consumed,omitempty"`
	// Represents the progress of the current stream.
	Progress *Progress `protobuf:"bytes,4,opt,name=progress,proto3" json:"progress,omitempty"`
	// Whether this stream can be split. For sessions that use the LIQUID sharding
	// strategy, this value is always false. For BALANCED sessions, this value is
	// false when enough data have been read such that no more splits are possible
	// at that point or beyond. For small tables or streams that are the result of
	// a chain of splits, this value may never be true.
	IsSplittable bool `protobuf:"varint,3,opt,name=is_splittable,json=isSplittable,proto3" json:"is_splittable,omitempty"`
	// contains filtered or unexported fields
}
Progress information for a given Stream.
func (*StreamStatus) Descriptor
func (*StreamStatus) Descriptor() ([]byte, []int)
Deprecated: Use StreamStatus.ProtoReflect.Descriptor instead.
func (*StreamStatus) GetEstimatedRowCount
func (x *StreamStatus) GetEstimatedRowCount() int64
func (*StreamStatus) GetFractionConsumed
func (x *StreamStatus) GetFractionConsumed() float32
func (*StreamStatus) GetIsSplittable
func (x *StreamStatus) GetIsSplittable() bool
func (*StreamStatus) GetProgress
func (x *StreamStatus) GetProgress() *Progress
func (*StreamStatus) ProtoMessage
func (*StreamStatus) ProtoMessage()
func (*StreamStatus) ProtoReflect
func (x *StreamStatus) ProtoReflect() protoreflect.Message
func (*StreamStatus) Reset
func (x *StreamStatus) Reset()
func (*StreamStatus) String
func (x *StreamStatus) String() string
TableModifiers
type TableModifiers struct {
	// The snapshot time of the table. If not set, interpreted as now.
	SnapshotTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"`
	// contains filtered or unexported fields
}
All fields in this message are optional.
func (*TableModifiers) Descriptor
func (*TableModifiers) Descriptor() ([]byte, []int)
Deprecated: Use TableModifiers.ProtoReflect.Descriptor instead.
func (*TableModifiers) GetSnapshotTime
func (x *TableModifiers) GetSnapshotTime() *timestamppb.Timestamp
func (*TableModifiers) ProtoMessage
func (*TableModifiers) ProtoMessage()
func (*TableModifiers) ProtoReflect
func (x *TableModifiers) ProtoReflect() protoreflect.Message
func (*TableModifiers) Reset
func (x *TableModifiers) Reset()
func (*TableModifiers) String
func (x *TableModifiers) String() string
TableReadOptions
type TableReadOptions struct {
	// Optional. The names of the fields in the table to be returned. If no
	// field names are specified, then all fields in the table are returned.
	//
	// Nested fields -- the child elements of a STRUCT field -- can be selected
	// individually using their fully-qualified names, and will be returned as
	// record fields containing only the selected nested fields. If a STRUCT
	// field is specified in the selected fields list, all of the child elements
	// will be returned.
	//
	// As an example, consider a table with the following schema:
	//
	//	{
	//	    "name": "struct_field",
	//	    "type": "RECORD",
	//	    "mode": "NULLABLE",
	//	    "fields": [
	//	        {
	//	            "name": "string_field1",
	//	            "type": "STRING",
	//	            "mode": "NULLABLE"
	//	        },
	//	        {
	//	            "name": "string_field2",
	//	            "type": "STRING",
	//	            "mode": "NULLABLE"
	//	        }
	//	    ]
	//	}
	//
	// Specifying "struct_field" in the selected fields list will result in a
	// read session schema with the following logical structure:
	//
	//	struct_field {
	//	    string_field1
	//	    string_field2
	//	}
	//
	// Specifying "struct_field.string_field1" in the selected fields list will
	// result in a read session schema with the following logical structure:
	//
	//	struct_field {
	//	    string_field1
	//	}
	//
	// The order of the fields in the read session schema is derived from the
	// table schema and does not correspond to the order in which the fields are
	// specified in this list.
	SelectedFields []string `protobuf:"bytes,1,rep,name=selected_fields,json=selectedFields,proto3" json:"selected_fields,omitempty"`
	// Optional. SQL text filtering statement, similar to a WHERE clause in
	// a SQL query. Aggregates are not supported.
	//
	// Examples: "int_field > 5"
	//
	//	"date_field = CAST('2014-9-27' as DATE)"
	//	"nullable_field is not NULL"
	//	"st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
	//	"numeric_field BETWEEN 1.0 AND 5.0"
	//
	// Restricted to a maximum length of 1 MB.
	RowRestriction string `protobuf:"bytes,2,opt,name=row_restriction,json=rowRestriction,proto3" json:"row_restriction,omitempty"`
	// contains filtered or unexported fields
}
Options dictating how we read a table.
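As a sketch (storagepb import assumed), read options combining both fields might look like this; the field names are the hypothetical ones used in the comments above:

var opts = &storagepb.TableReadOptions{
	// Yields a schema containing struct_field with only string_field1, as in
	// the second selected-fields example above.
	SelectedFields: []string{"struct_field.string_field1"},
	// Pushed down to the storage layer; aggregates are not supported.
	RowRestriction: "int_field > 5 AND nullable_field IS NOT NULL",
}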
func (*TableReadOptions) Descriptor
func (*TableReadOptions) Descriptor() ([]byte, []int)
Deprecated: Use TableReadOptions.ProtoReflect.Descriptor instead.
func (*TableReadOptions) GetRowRestriction
func (x *TableReadOptions) GetRowRestriction() string
func (*TableReadOptions) GetSelectedFields
func (x *TableReadOptions) GetSelectedFields() []string
func (*TableReadOptions) ProtoMessage
func (*TableReadOptions) ProtoMessage()
func (*TableReadOptions) ProtoReflect
func (x *TableReadOptions) ProtoReflect() protoreflect.Message
func (*TableReadOptions) Reset
func (x *TableReadOptions) Reset()
func (*TableReadOptions) String
func (x *TableReadOptions) String() string
TableReference
type TableReference struct {
	// The assigned project ID of the project.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// The ID of the dataset in the above project.
	DatasetId string `protobuf:"bytes,2,opt,name=dataset_id,json=datasetId,proto3" json:"dataset_id,omitempty"`
	// The ID of the table in the above dataset.
	TableId string `protobuf:"bytes,3,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
	// contains filtered or unexported fields
}
Table reference that includes just the 3 strings needed to identify a table.
func (*TableReference) Descriptor
func (*TableReference) Descriptor() ([]byte, []int)
Deprecated: Use TableReference.ProtoReflect.Descriptor instead.
func (*TableReference) GetDatasetId
func (x *TableReference) GetDatasetId() string
func (*TableReference) GetProjectId
func (x *TableReference) GetProjectId() string
func (*TableReference) GetTableId
func (x *TableReference) GetTableId() string
func (*TableReference) ProtoMessage
func (*TableReference) ProtoMessage()
func (*TableReference) ProtoReflect
func (x *TableReference) ProtoReflect() protoreflect.Message
func (*TableReference) Reset
func (x *TableReference) Reset()
func (*TableReference) String
func (x *TableReference) String() string
ThrottleStatus
type ThrottleStatus struct {
	// How much this connection is being throttled.
	// 0 is no throttling, 100 is completely throttled.
	ThrottlePercent int32 `protobuf:"varint,1,opt,name=throttle_percent,json=throttlePercent,proto3" json:"throttle_percent,omitempty"`
	// contains filtered or unexported fields
}
Information on whether the current connection is being throttled.
func (*ThrottleStatus) Descriptor
func (*ThrottleStatus) Descriptor() ([]byte, []int)
Deprecated: Use ThrottleStatus.ProtoReflect.Descriptor instead.
func (*ThrottleStatus) GetThrottlePercent
func (x *ThrottleStatus) GetThrottlePercent() int32
func (*ThrottleStatus) ProtoMessage
func (*ThrottleStatus) ProtoMessage()
func (*ThrottleStatus) ProtoReflect
func (x *ThrottleStatus) ProtoReflect() protoreflect.Message
func (*ThrottleStatus) Reset
func (x *ThrottleStatus) Reset()
func (*ThrottleStatus) String
func (x *ThrottleStatus) String() string
UnimplementedBigQueryStorageServer
type UnimplementedBigQueryStorageServer struct {
}
UnimplementedBigQueryStorageServer should be embedded to have forward compatible implementations.
func (UnimplementedBigQueryStorageServer) BatchCreateReadSessionStreams
func (UnimplementedBigQueryStorageServer) BatchCreateReadSessionStreams(context.Context, *BatchCreateReadSessionStreamsRequest) (*BatchCreateReadSessionStreamsResponse, error)
func (UnimplementedBigQueryStorageServer) CreateReadSession
func (UnimplementedBigQueryStorageServer) CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
func (UnimplementedBigQueryStorageServer) FinalizeStream
func (UnimplementedBigQueryStorageServer) FinalizeStream(context.Context, *FinalizeStreamRequest) (*emptypb.Empty, error)
func (UnimplementedBigQueryStorageServer) ReadRows
func (UnimplementedBigQueryStorageServer) ReadRows(*ReadRowsRequest, BigQueryStorage_ReadRowsServer) error
func (UnimplementedBigQueryStorageServer) SplitReadStream
func (UnimplementedBigQueryStorageServer) SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
UnsafeBigQueryStorageServer
type UnsafeBigQueryStorageServer interface {
	// contains filtered or unexported methods
}
UnsafeBigQueryStorageServer may be embedded to opt out of forward compatibility for this service. Use of this interface is not recommended, as added methods to BigQueryStorageServer will result in compilation errors.