Package cloud.google.com/go/bigquery/storage/apiv1/storagepb (v1.70.0)

Variables

ArrowSerializationOptions_CompressionCodec_name, ArrowSerializationOptions_CompressionCodec_value

var (
	ArrowSerializationOptions_CompressionCodec_name = map[int32]string{
		0: "COMPRESSION_UNSPECIFIED",
		1: "LZ4_FRAME",
		2: "ZSTD",
	}
	ArrowSerializationOptions_CompressionCodec_value = map[string]int32{
		"COMPRESSION_UNSPECIFIED": 0,
		"LZ4_FRAME":               1,
		"ZSTD":                    2,
	}
)

Enum value maps for ArrowSerializationOptions_CompressionCodec.
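
These maps can be used to convert between the numeric and string forms of the enum, for example when logging or when parsing a codec name from configuration. A minimal sketch (the lookups below are illustrative):

// Convert between the numeric and string forms of the compression codec enum.
codec := storagepb.ArrowSerializationOptions_LZ4_FRAME
name := storagepb.ArrowSerializationOptions_CompressionCodec_name[int32(codec)] // "LZ4_FRAME"

// Parse a string back into the enum type.
if value, ok := storagepb.ArrowSerializationOptions_CompressionCodec_value["ZSTD"]; ok {
	codec = storagepb.ArrowSerializationOptions_CompressionCodec(value)
}
_ = name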

AppendRowsRequest_MissingValueInterpretation_name, AppendRowsRequest_MissingValueInterpretation_value

var (
	AppendRowsRequest_MissingValueInterpretation_name = map[int32]string{
		0: "MISSING_VALUE_INTERPRETATION_UNSPECIFIED",
		1: "NULL_VALUE",
		2: "DEFAULT_VALUE",
	}
	AppendRowsRequest_MissingValueInterpretation_value = map[string]int32{
		"MISSING_VALUE_INTERPRETATION_UNSPECIFIED": 0,
		"NULL_VALUE":    1,
		"DEFAULT_VALUE": 2,
	}
)

Enum value maps for AppendRowsRequest_MissingValueInterpretation.

StorageError_StorageErrorCode_name, StorageError_StorageErrorCode_value

var (
	StorageError_StorageErrorCode_name = map[int32]string{
		0:  "STORAGE_ERROR_CODE_UNSPECIFIED",
		1:  "TABLE_NOT_FOUND",
		2:  "STREAM_ALREADY_COMMITTED",
		3:  "STREAM_NOT_FOUND",
		4:  "INVALID_STREAM_TYPE",
		5:  "INVALID_STREAM_STATE",
		6:  "STREAM_FINALIZED",
		7:  "SCHEMA_MISMATCH_EXTRA_FIELDS",
		8:  "OFFSET_ALREADY_EXISTS",
		9:  "OFFSET_OUT_OF_RANGE",
		10: "CMEK_NOT_PROVIDED",
		11: "INVALID_CMEK_PROVIDED",
		12: "CMEK_ENCRYPTION_ERROR",
		13: "KMS_SERVICE_ERROR",
		14: "KMS_PERMISSION_DENIED",
	}
	StorageError_StorageErrorCode_value = map[string]int32{
		"STORAGE_ERROR_CODE_UNSPECIFIED": 0,
		"TABLE_NOT_FOUND":                1,
		"STREAM_ALREADY_COMMITTED":       2,
		"STREAM_NOT_FOUND":               3,
		"INVALID_STREAM_TYPE":            4,
		"INVALID_STREAM_STATE":           5,
		"STREAM_FINALIZED":               6,
		"SCHEMA_MISMATCH_EXTRA_FIELDS":   7,
		"OFFSET_ALREADY_EXISTS":          8,
		"OFFSET_OUT_OF_RANGE":            9,
		"CMEK_NOT_PROVIDED":              10,
		"INVALID_CMEK_PROVIDED":          11,
		"CMEK_ENCRYPTION_ERROR":          12,
		"KMS_SERVICE_ERROR":              13,
		"KMS_PERMISSION_DENIED":          14,
	}
)

Enum value maps for StorageError_StorageErrorCode.

RowError_RowErrorCode_name, RowError_RowErrorCode_value

var (
	RowError_RowErrorCode_name = map[int32]string{
		0: "ROW_ERROR_CODE_UNSPECIFIED",
		1: "FIELDS_ERROR",
	}
	RowError_RowErrorCode_value = map[string]int32{
		"ROW_ERROR_CODE_UNSPECIFIED": 0,
		"FIELDS_ERROR":               1,
	}
)

Enum value maps for RowError_RowErrorCode.

DataFormat_name, DataFormat_value

var (
	DataFormat_name = map[int32]string{
		0: "DATA_FORMAT_UNSPECIFIED",
		1: "AVRO",
		2: "ARROW",
	}
	DataFormat_value = map[string]int32{
		"DATA_FORMAT_UNSPECIFIED": 0,
		"AVRO":                    1,
		"ARROW":                   2,
	}
)

Enum value maps for DataFormat.

WriteStreamView_name, WriteStreamView_value

var (
	WriteStreamView_name = map[int32]string{
		0: "WRITE_STREAM_VIEW_UNSPECIFIED",
		1: "BASIC",
		2: "FULL",
	}
	WriteStreamView_value = map[string]int32{
		"WRITE_STREAM_VIEW_UNSPECIFIED": 0,
		"BASIC":                         1,
		"FULL":                          2,
	}
)

Enum value maps for WriteStreamView.

ReadSession_TableReadOptions_ResponseCompressionCodec_name, ReadSession_TableReadOptions_ResponseCompressionCodec_value

var (
	ReadSession_TableReadOptions_ResponseCompressionCodec_name = map[int32]string{
		0: "RESPONSE_COMPRESSION_CODEC_UNSPECIFIED",
		2: "RESPONSE_COMPRESSION_CODEC_LZ4",
	}
	ReadSession_TableReadOptions_ResponseCompressionCodec_value = map[string]int32{
		"RESPONSE_COMPRESSION_CODEC_UNSPECIFIED": 0,
		"RESPONSE_COMPRESSION_CODEC_LZ4":         2,
	}
)

Enum value maps for ReadSession_TableReadOptions_ResponseCompressionCodec.

WriteStream_Type_name, WriteStream_Type_value

var (
	WriteStream_Type_name = map[int32]string{
		0: "TYPE_UNSPECIFIED",
		1: "COMMITTED",
		2: "PENDING",
		3: "BUFFERED",
	}
	WriteStream_Type_value = map[string]int32{
		"TYPE_UNSPECIFIED": 0,
		"COMMITTED":        1,
		"PENDING":          2,
		"BUFFERED":         3,
	}
)

Enum value maps for WriteStream_Type.

WriteStream_WriteMode_name, WriteStream_WriteMode_value

var (
	WriteStream_WriteMode_name = map[int32]string{
		0: "WRITE_MODE_UNSPECIFIED",
		1: "INSERT",
	}
	WriteStream_WriteMode_value = map[string]int32{
		"WRITE_MODE_UNSPECIFIED": 0,
		"INSERT":                 1,
	}
)

Enum value maps for WriteStream_WriteMode.

TableFieldSchema_Type_name, TableFieldSchema_Type_value

var (
	TableFieldSchema_Type_name = map[int32]string{
		0:  "TYPE_UNSPECIFIED",
		1:  "STRING",
		2:  "INT64",
		3:  "DOUBLE",
		4:  "STRUCT",
		5:  "BYTES",
		6:  "BOOL",
		7:  "TIMESTAMP",
		8:  "DATE",
		9:  "TIME",
		10: "DATETIME",
		11: "GEOGRAPHY",
		12: "NUMERIC",
		13: "BIGNUMERIC",
		14: "INTERVAL",
		15: "JSON",
		16: "RANGE",
	}
	TableFieldSchema_Type_value = map[string]int32{
		"TYPE_UNSPECIFIED": 0,
		"STRING":           1,
		"INT64":            2,
		"DOUBLE":           3,
		"STRUCT":           4,
		"BYTES":            5,
		"BOOL":             6,
		"TIMESTAMP":        7,
		"DATE":             8,
		"TIME":             9,
		"DATETIME":         10,
		"GEOGRAPHY":        11,
		"NUMERIC":          12,
		"BIGNUMERIC":       13,
		"INTERVAL":         14,
		"JSON":             15,
		"RANGE":            16,
	}
)

Enum value maps for TableFieldSchema_Type.

TableFieldSchema_Mode_name, TableFieldSchema_Mode_value

var (
	TableFieldSchema_Mode_name = map[int32]string{
		0: "MODE_UNSPECIFIED",
		1: "NULLABLE",
		2: "REQUIRED",
		3: "REPEATED",
	}
	TableFieldSchema_Mode_value = map[string]int32{
		"MODE_UNSPECIFIED": 0,
		"NULLABLE":         1,
		"REQUIRED":         2,
		"REPEATED":         3,
	}
)

Enum value maps for TableFieldSchema_Mode.

E_ColumnName

var (
	// Setting the column_name extension allows users to reference
	// a BigQuery column independently of the field name in the protocol buffer
	// message.
	//
	// The intended use of this annotation is to reference a destination column
	// named using characters unavailable for protobuf field names (e.g. unicode
	// characters).
	//
	// More details about BigQuery naming limitations can be found here:
	// https://cloud.google.com/bigquery/docs/schemas#column_names
	//
	// This extension is currently experimental.
	//
	// optional string column_name = 454943157;
	E_ColumnName = &file_google_cloud_bigquery_storage_v1_annotations_proto_extTypes[0]
)

Extension fields to descriptorpb.FieldOptions.
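
A minimal sketch of reading this extension from a message's field options via the protobuf reflection API. MyRow is a hypothetical generated message whose .proto definition sets (column_name); the imports noted in the comment are assumptions.

// Assumes google.golang.org/protobuf/proto,
// google.golang.org/protobuf/types/descriptorpb, and fmt are imported, and
// MyRow is a hypothetical generated message type.
fields := (&MyRow{}).ProtoReflect().Descriptor().Fields()
for i := 0; i < fields.Len(); i++ {
	opts := fields.Get(i).Options().(*descriptorpb.FieldOptions)
	if proto.HasExtension(opts, storagepb.E_ColumnName) {
		columnName := proto.GetExtension(opts, storagepb.E_ColumnName).(string)
		fmt.Printf("field %s maps to BigQuery column %q\n", fields.Get(i).Name(), columnName)
	}
}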

File_google_cloud_bigquery_storage_v1_annotations_proto

var File_google_cloud_bigquery_storage_v1_annotations_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_arrow_proto

var File_google_cloud_bigquery_storage_v1_arrow_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_avro_proto

var File_google_cloud_bigquery_storage_v1_avro_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_protobuf_proto

var File_google_cloud_bigquery_storage_v1_protobuf_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_storage_proto

var File_google_cloud_bigquery_storage_v1_storage_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_stream_proto

var File_google_cloud_bigquery_storage_v1_stream_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_table_proto

var File_google_cloud_bigquery_storage_v1_table_proto protoreflect.FileDescriptor

Functions

func RegisterBigQueryReadServer

func RegisterBigQueryReadServer(s *grpc.Server, srv BigQueryReadServer)

func RegisterBigQueryWriteServer

func RegisterBigQueryWriteServer(s *grpc.Server, srv BigQueryWriteServer)
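
A minimal sketch of wiring these registration functions into a gRPC server. bigQueryReadImpl and bigQueryWriteImpl are hypothetical types implementing the BigQueryReadServer and BigQueryWriteServer interfaces.

// Assumes google.golang.org/grpc, net, and log are imported.
lis, err := net.Listen("tcp", ":50051")
if err != nil {
	log.Fatal(err)
}
s := grpc.NewServer()
storagepb.RegisterBigQueryReadServer(s, &bigQueryReadImpl{})
storagepb.RegisterBigQueryWriteServer(s, &bigQueryWriteImpl{})
if err := s.Serve(lis); err != nil {
	log.Fatal(err)
}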

AppendRowsRequest

type AppendRowsRequest struct {

	// Required. The write_stream identifies the append operation. It must be
	// provided in the following scenarios:
	//
	// * In the first request to an AppendRows connection.
	//
	// * In all subsequent requests to an AppendRows connection, if you use the
	// same connection to write to multiple tables or change the input schema for
	// default streams.
	//
	// For explicitly created write streams, the format is:
	//
	// * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
	//
	// For the special default stream, the format is:
	//
	// * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
	//
	// An example of a possible sequence of requests with write_stream fields
	// within a single connection:
	//
	// * r1: {write_stream: stream_name_1}
	//
	// * r2: {write_stream: /*omit*/}
	//
	// * r3: {write_stream: /*omit*/}
	//
	// * r4: {write_stream: stream_name_2}
	//
	// * r5: {write_stream: stream_name_2}
	//
	// The destination changed in request_4, so the write_stream field must be
	// populated in all subsequent requests in this stream.
	WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
	// If present, the write is only performed if the next append offset is the
	// same as the provided value. If not present, the write is performed at the
	// current end of stream. Specifying a value for this field is not allowed
	// when calling AppendRows for the '_default' stream.
	Offset *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"`
	// Input rows. The `writer_schema` field must be specified in the initial
	// request; currently, it is ignored if specified in subsequent requests.
	// Subsequent requests must have data in the same format as the initial
	// request.
	//
	// Types that are assignable to Rows:
	//
	//	*AppendRowsRequest_ProtoRows
	//	*AppendRowsRequest_ArrowRows
	Rows isAppendRowsRequest_Rows `protobuf_oneof:"rows"`
	// ID set by the client to annotate its identity. Only the setting in the
	// initial request is respected.
	TraceId string `protobuf:"bytes,6,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
	// A map to indicate how to interpret missing value for some fields. Missing
	// values are fields present in user schema but missing in rows. The key is
	// the field name. The value is the interpretation of missing values for the
	// field.
	//
	// For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all
	// missing values in field foo are interpreted as NULL, all missing values in
	// field bar are interpreted as the default value of field bar in table
	// schema.
	//
	// If a field is not in this map and has missing values, the missing values
	// in this field are interpreted as NULL.
	//
	// This field only applies to the current request, it won't affect other
	// requests on the connection.
	//
	// Currently, field name can only be top-level column name, can't be a struct
	// field path like 'foo.bar'.
	MissingValueInterpretations map[string]AppendRowsRequest_MissingValueInterpretation `protobuf:"bytes,7,rep,name=missing_value_interpretations,json=missingValueInterpretations,proto3" json:"missing_value_interpretations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=google.cloud.bigquery.storage.v1.AppendRowsRequest_MissingValueInterpretation"`
	// Optional. Default missing value interpretation for all columns in the
	// table. When a value is specified on an `AppendRowsRequest`, it is applied
	// to all requests on the connection from that point forward, until a
	// subsequent `AppendRowsRequest` sets it to a different value.
	// `missing_value_interpretation` can override
	// `default_missing_value_interpretation`. For example, if you want to write
	// `NULL` instead of using default values for some columns, you can set
	// `default_missing_value_interpretation` to `DEFAULT_VALUE` and at the same
	// time, set `missing_value_interpretations` to `NULL_VALUE` on those columns.
	DefaultMissingValueInterpretation AppendRowsRequest_MissingValueInterpretation `protobuf:"varint,8,opt,name=default_missing_value_interpretation,json=defaultMissingValueInterpretation,proto3,enum=google.cloud.bigquery.storage.v1.AppendRowsRequest_MissingValueInterpretation" json:"default_missing_value_interpretation,omitempty"`
	// contains filtered or unexported fields
}

Request message for AppendRows.

Because AppendRows is a bidirectional streaming RPC, certain parts of the AppendRowsRequest need only be specified for the first request before switching table destinations. You can also switch table destinations within the same connection for the default stream.

A single AppendRowsRequest must be less than 10 MB in size. Requests larger than this return an error, typically INVALID_ARGUMENT.
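
A minimal sketch of the first request on an AppendRows connection, writing protocol buffer rows to the default stream. The project, dataset, and table names are placeholders; serializedRows and descriptorProto are assumed to be prepared by the caller, and ProtoSchema/ProtoRows are defined elsewhere in this package.

// Assumes serializedRows ([][]byte) holds proto2-serialized row messages and
// descriptorProto (*descriptorpb.DescriptorProto) describes their type.
req := &storagepb.AppendRowsRequest{
	// Required on the first request of the connection.
	WriteStream: "projects/my-project/datasets/my_dataset/tables/my_table/streams/_default",
	Rows: &storagepb.AppendRowsRequest_ProtoRows{
		ProtoRows: &storagepb.AppendRowsRequest_ProtoData{
			WriterSchema: &storagepb.ProtoSchema{ProtoDescriptor: descriptorProto},
			Rows:         &storagepb.ProtoRows{SerializedRows: serializedRows},
		},
	},
	TraceId: "my-writer", // optional client identity annotation
}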

func (*AppendRowsRequest) Descriptor

func (*AppendRowsRequest) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsRequest.ProtoReflect.Descriptor instead.

func (*AppendRowsRequest) GetArrowRows

func (*AppendRowsRequest) GetDefaultMissingValueInterpretation

func (x *AppendRowsRequest) GetDefaultMissingValueInterpretation() AppendRowsRequest_MissingValueInterpretation

func (*AppendRowsRequest) GetMissingValueInterpretations

func (x *AppendRowsRequest) GetMissingValueInterpretations() map[string]AppendRowsRequest_MissingValueInterpretation

func (*AppendRowsRequest) GetOffset

func (x *AppendRowsRequest) GetOffset() *wrapperspb.Int64Value

func (*AppendRowsRequest) GetProtoRows

func (*AppendRowsRequest) GetRows

func (m *AppendRowsRequest) GetRows() isAppendRowsRequest_Rows

func (*AppendRowsRequest) GetTraceId

func (x *AppendRowsRequest) GetTraceId() string

func (*AppendRowsRequest) GetWriteStream

func (x *AppendRowsRequest) GetWriteStream() string

func (*AppendRowsRequest) ProtoMessage

func (*AppendRowsRequest) ProtoMessage()

func (*AppendRowsRequest) ProtoReflect

func (x *AppendRowsRequest) ProtoReflect() protoreflect.Message

func (*AppendRowsRequest) Reset

func (x *AppendRowsRequest) Reset()

func (*AppendRowsRequest) String

func (x *AppendRowsRequest) String() string

AppendRowsRequest_ArrowData

type AppendRowsRequest_ArrowData struct {

	// Optional. Arrow Schema used to serialize the data.
	WriterSchema *ArrowSchema `protobuf:"bytes,1,opt,name=writer_schema,json=writerSchema,proto3" json:"writer_schema,omitempty"`
	// Required. Serialized row data in Arrow format.
	Rows *ArrowRecordBatch `protobuf:"bytes,2,opt,name=rows,proto3" json:"rows,omitempty"`
	// contains filtered or unexported fields
}

Arrow schema and data. Arrow format is an experimental feature only selected for allowlisted customers.

func (*AppendRowsRequest_ArrowData) Descriptor

func (*AppendRowsRequest_ArrowData) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsRequest_ArrowData.ProtoReflect.Descriptor instead.

func (*AppendRowsRequest_ArrowData) GetRows

func (*AppendRowsRequest_ArrowData) GetWriterSchema

func (x *AppendRowsRequest_ArrowData) GetWriterSchema() *ArrowSchema

func (*AppendRowsRequest_ArrowData) ProtoMessage

func (*AppendRowsRequest_ArrowData) ProtoMessage()

func (*AppendRowsRequest_ArrowData) ProtoReflect

func (*AppendRowsRequest_ArrowData) Reset

func (x *AppendRowsRequest_ArrowData) Reset()

func (*AppendRowsRequest_ArrowData) String

func (x *AppendRowsRequest_ArrowData) String() string

AppendRowsRequest_ArrowRows

type AppendRowsRequest_ArrowRows struct {
	// Rows in arrow format. This is an experimental feature only selected for
	// allowlisted customers.
	ArrowRows *AppendRowsRequest_ArrowData `protobuf:"bytes,5,opt,name=arrow_rows,json=arrowRows,proto3,oneof"`
}

AppendRowsRequest_MissingValueInterpretation

type AppendRowsRequest_MissingValueInterpretation int32

An enum to indicate how to interpret missing values of fields that are present in user schema but missing in rows. A missing value can represent a NULL or a column default value defined in BigQuery table schema.

AppendRowsRequest_MISSING_VALUE_INTERPRETATION_UNSPECIFIED, AppendRowsRequest_NULL_VALUE, AppendRowsRequest_DEFAULT_VALUE

const (
	// Invalid missing value interpretation. Requests with this value will be
	// rejected.
	AppendRowsRequest_MISSING_VALUE_INTERPRETATION_UNSPECIFIED AppendRowsRequest_MissingValueInterpretation = 0
	// Missing value is interpreted as NULL.
	AppendRowsRequest_NULL_VALUE AppendRowsRequest_MissingValueInterpretation = 1
	// Missing value is interpreted as column default value if declared in the
	// table schema, NULL otherwise.
	AppendRowsRequest_DEFAULT_VALUE AppendRowsRequest_MissingValueInterpretation = 2
)
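
A minimal sketch of using these values on an AppendRowsRequest so that most columns write NULL for missing values while one column falls back to its default; the column name "update_time" is a hypothetical example.

req := &storagepb.AppendRowsRequest{
	// ... write_stream and rows as in the earlier AppendRowsRequest example ...
	// Columns not listed below interpret missing values as NULL.
	DefaultMissingValueInterpretation: storagepb.AppendRowsRequest_NULL_VALUE,
	// Missing values in "update_time" use the column default declared in the
	// table schema, NULL otherwise.
	MissingValueInterpretations: map[string]storagepb.AppendRowsRequest_MissingValueInterpretation{
		"update_time": storagepb.AppendRowsRequest_DEFAULT_VALUE,
	},
}
_ = req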

func (AppendRowsRequest_MissingValueInterpretation) Descriptor

func (AppendRowsRequest_MissingValueInterpretation) Enum

func (AppendRowsRequest_MissingValueInterpretation) EnumDescriptor

func (AppendRowsRequest_MissingValueInterpretation) EnumDescriptor() ([]byte, []int)

Deprecated: Use AppendRowsRequest_MissingValueInterpretation.Descriptor instead.

func (AppendRowsRequest_MissingValueInterpretation) Number

func (AppendRowsRequest_MissingValueInterpretation) String

func (AppendRowsRequest_MissingValueInterpretation) Type

AppendRowsRequest_ProtoData

type AppendRowsRequest_ProtoData struct {

	// The protocol buffer schema used to serialize the data. Provide this value
	// whenever:
	//
	// * You send the first request of an RPC connection.
	//
	// * You change the input schema.
	//
	// * You specify a new destination table.
	WriterSchema *ProtoSchema `protobuf:"bytes,1,opt,name=writer_schema,json=writerSchema,proto3" json:"writer_schema,omitempty"`
	// Serialized row data in protobuf message format.
	// Currently, the backend expects the serialized rows to adhere to
	// proto2 semantics when appending rows, particularly with respect to
	// how default values are encoded.
	Rows *ProtoRows `protobuf:"bytes,2,opt,name=rows,proto3" json:"rows,omitempty"`
	// contains filtered or unexported fields
}

ProtoData contains the data rows and schema when constructing append requests.
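
One way to populate the writer schema is to derive a DescriptorProto from a generated message type with protodesc, as in the sketch below. MyRow is a hypothetical generated type with a flat, self-contained schema; messages that use nested or well-known types may need additional normalization (the managedwriter/adapt package provides helpers for that case).

// Assumes google.golang.org/protobuf/reflect/protodesc is imported and that
// serializedRows ([][]byte) holds serialized MyRow messages.
descriptorProto := protodesc.ToDescriptorProto((&MyRow{}).ProtoReflect().Descriptor())

data := &storagepb.AppendRowsRequest_ProtoData{
	WriterSchema: &storagepb.ProtoSchema{ProtoDescriptor: descriptorProto},
	Rows:         &storagepb.ProtoRows{SerializedRows: serializedRows},
}
_ = data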

func (*AppendRowsRequest_ProtoData) Descriptor

func (*AppendRowsRequest_ProtoData) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsRequest_ProtoData.ProtoReflect.Descriptor instead.

func (*AppendRowsRequest_ProtoData) GetRows

func (x *AppendRowsRequest_ProtoData) GetRows() *ProtoRows

func (*AppendRowsRequest_ProtoData) GetWriterSchema

func (x *AppendRowsRequest_ProtoData) GetWriterSchema() *ProtoSchema

func (*AppendRowsRequest_ProtoData) ProtoMessage

func (*AppendRowsRequest_ProtoData) ProtoMessage()

func (*AppendRowsRequest_ProtoData) ProtoReflect

func (*AppendRowsRequest_ProtoData) Reset

func (x *AppendRowsRequest_ProtoData) Reset()

func (*AppendRowsRequest_ProtoData) String

func (x *AppendRowsRequest_ProtoData) String() string

AppendRowsRequest_ProtoRows

type AppendRowsRequest_ProtoRows struct {
	// Rows in proto format.
	ProtoRows *AppendRowsRequest_ProtoData `protobuf:"bytes,4,opt,name=proto_rows,json=protoRows,proto3,oneof"`
}

AppendRowsResponse

type AppendRowsResponse struct {

	// Types that are assignable to Response:
	//
	//	*AppendRowsResponse_AppendResult_
	//	*AppendRowsResponse_Error
	Response isAppendRowsResponse_Response `protobuf_oneof:"response"`
	// If the backend detects a schema update, it is passed to the user so that
	// the user can begin sending messages of the new type. It will be empty when
	// no schema updates have occurred.
	UpdatedSchema *TableSchema `protobuf:"bytes,3,opt,name=updated_schema,json=updatedSchema,proto3" json:"updated_schema,omitempty"`
	// If a request failed due to corrupted rows, no rows in the batch will be
	// appended. The API will return row level error info, so that the caller can
	// remove the bad rows and retry the request.
	RowErrors []*RowError `protobuf:"bytes,4,rep,name=row_errors,json=rowErrors,proto3" json:"row_errors,omitempty"`
	// The target of the append operation. Matches the write_stream in the
	// corresponding request.
	WriteStream string `protobuf:"bytes,5,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
	// contains filtered or unexported fields
}

Response message for AppendRows.
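
A minimal sketch of handling a response received from an AppendRows stream, assuming resp was returned by a Recv call; the RowError and TableSchema accessors used below are defined elsewhere in this package.

// Inspect the oneof result, any per-row errors, and any schema update.
if errStatus := resp.GetError(); errStatus != nil {
	log.Printf("append failed: code=%d msg=%s", errStatus.GetCode(), errStatus.GetMessage())
} else if result := resp.GetAppendResult(); result != nil {
	if off := result.GetOffset(); off != nil {
		log.Printf("appended at offset %d", off.GetValue())
	}
}
for _, rowErr := range resp.GetRowErrors() {
	log.Printf("row %d: %s", rowErr.GetIndex(), rowErr.GetMessage())
}
if schema := resp.GetUpdatedSchema(); schema != nil {
	log.Printf("table schema was updated; now %d fields", len(schema.GetFields()))
}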

func (*AppendRowsResponse) Descriptor

func (*AppendRowsResponse) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsResponse.ProtoReflect.Descriptor instead.

func (*AppendRowsResponse) GetAppendResult

func (*AppendRowsResponse) GetError

func (x *AppendRowsResponse) GetError() *status.Status

func (*AppendRowsResponse) GetResponse

func (m *AppendRowsResponse) GetResponse() isAppendRowsResponse_Response

func (*AppendRowsResponse) GetRowErrors

func (x *AppendRowsResponse) GetRowErrors() []*RowError

func (*AppendRowsResponse) GetUpdatedSchema

func (x *AppendRowsResponse) GetUpdatedSchema() *TableSchema

func (*AppendRowsResponse) GetWriteStream

func (x *AppendRowsResponse) GetWriteStream() string

func (*AppendRowsResponse) ProtoMessage

func (*AppendRowsResponse) ProtoMessage()

func (*AppendRowsResponse) ProtoReflect

func (x *AppendRowsResponse) ProtoReflect() protoreflect.Message

func (*AppendRowsResponse) Reset

func (x *AppendRowsResponse) Reset()

func (*AppendRowsResponse) String

func (x *AppendRowsResponse) String() string

AppendRowsResponse_AppendResult

type AppendRowsResponse_AppendResult struct {

	// The row offset at which the last append occurred. The offset will not be
	// set if appending using default streams.
	Offset *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=offset,proto3" json:"offset,omitempty"`
	// contains filtered or unexported fields
}

AppendResult is returned for successful append requests.

func (*AppendRowsResponse_AppendResult) Descriptor

func (*AppendRowsResponse_AppendResult) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsResponse_AppendResult.ProtoReflect.Descriptor instead.

func (*AppendRowsResponse_AppendResult) GetOffset

func (*AppendRowsResponse_AppendResult) ProtoMessage

func (*AppendRowsResponse_AppendResult) ProtoMessage()

func (*AppendRowsResponse_AppendResult) ProtoReflect

func (*AppendRowsResponse_AppendResult) Reset

func (*AppendRowsResponse_AppendResult) String

AppendRowsResponse_AppendResult_

type AppendRowsResponse_AppendResult_ struct {
	// Result if the append is successful.
	AppendResult *AppendRowsResponse_AppendResult `protobuf:"bytes,1,opt,name=append_result,json=appendResult,proto3,oneof"`
}

AppendRowsResponse_Error

type AppendRowsResponse_Error struct {
	// Error returned when problems were encountered.  If present,
	// it indicates rows were not accepted into the system.
	// Users can retry or continue with other append requests within the
	// same connection.
	//
	// Additional information about error signalling:
	//
	// ALREADY_EXISTS: Happens when an append specified an offset, and the
	// backend already has received data at this offset.  Typically encountered
	// in retry scenarios, and can be ignored.
	//
	// OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
	// the current end of the stream.
	//
	// INVALID_ARGUMENT: Indicates a malformed request or data.
	//
	// ABORTED: Request processing is aborted because of prior failures.  The
	// request can be retried if previous failure is addressed.
	//
	// INTERNAL: Indicates server side error(s) that can be retried.
	Error *status.Status `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
}

ArrowRecordBatch

type ArrowRecordBatch struct {

	// IPC-serialized Arrow RecordBatch.
	SerializedRecordBatch []byte `protobuf:"bytes,1,opt,name=serialized_record_batch,json=serializedRecordBatch,proto3" json:"serialized_record_batch,omitempty"`
	// [Deprecated] The count of rows in `serialized_record_batch`.
	// Please use the format-independent ReadRowsResponse.row_count instead.
	//
	// Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/arrow.proto.
	RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// contains filtered or unexported fields
}

Arrow RecordBatch.

func (*ArrowRecordBatch) Descriptor

func (*ArrowRecordBatch) Descriptor() ([]byte, []int)

Deprecated: Use ArrowRecordBatch.ProtoReflect.Descriptor instead.

func (*ArrowRecordBatch) GetRowCount

func (x *ArrowRecordBatch) GetRowCount() int64

Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/arrow.proto.

func (*ArrowRecordBatch) GetSerializedRecordBatch

func (x *ArrowRecordBatch) GetSerializedRecordBatch() []byte

func (*ArrowRecordBatch) ProtoMessage

func (*ArrowRecordBatch) ProtoMessage()

func (*ArrowRecordBatch) ProtoReflect

func (x *ArrowRecordBatch) ProtoReflect() protoreflect.Message

func (*ArrowRecordBatch) Reset

func (x *ArrowRecordBatch) Reset()

func (*ArrowRecordBatch) String

func (x *ArrowRecordBatch) String() string

ArrowSchema

type ArrowSchema struct {

	// IPC serialized Arrow schema.
	SerializedSchema []byte `protobuf:"bytes,1,opt,name=serialized_schema,json=serializedSchema,proto3" json:"serialized_schema,omitempty"`
	// contains filtered or unexported fields
}

Arrow schema as specified in https://arrow.apache.org/docs/python/api/datatypes.html and serialized to bytes using IPC: https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc

See code samples on how this message can be deserialized.
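
A hedged sketch of one common way to decode read results on the client: concatenate the session's serialized schema with each serialized record batch and feed the result to the Apache Arrow Go IPC reader. The import path, version, and calls below follow github.com/apache/arrow/go and should be treated as assumptions.

// Assumes bytes, log, and github.com/apache/arrow/go/v15/arrow/ipc are imported.
// schemaBytes comes from ArrowSchema.GetSerializedSchema() and batchBytes from
// ArrowRecordBatch.GetSerializedRecordBatch().
buf := bytes.NewBuffer(schemaBytes)
buf.Write(batchBytes)
reader, err := ipc.NewReader(buf)
if err != nil {
	log.Fatal(err)
}
defer reader.Release()
for reader.Next() {
	rec := reader.Record()
	log.Printf("record batch with %d rows and %d columns", rec.NumRows(), rec.NumCols())
}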

func (*ArrowSchema) Descriptor

func (*ArrowSchema) Descriptor() ([]byte, []int)

Deprecated: Use ArrowSchema.ProtoReflect.Descriptor instead.

func (*ArrowSchema) GetSerializedSchema

func (x *ArrowSchema) GetSerializedSchema() []byte

func (*ArrowSchema) ProtoMessage

func (*ArrowSchema) ProtoMessage()

func (*ArrowSchema) ProtoReflect

func (x *ArrowSchema) ProtoReflect() protoreflect.Message

func (*ArrowSchema) Reset

func (x *ArrowSchema) Reset()

func (*ArrowSchema) String

func (x *ArrowSchema) String() string

ArrowSerializationOptions

type ArrowSerializationOptions struct {

	// The compression codec to use for Arrow buffers in serialized record
	// batches.
	BufferCompression ArrowSerializationOptions_CompressionCodec `protobuf:"varint,2,opt,name=buffer_compression,json=bufferCompression,proto3,enum=google.cloud.bigquery.storage.v1.ArrowSerializationOptions_CompressionCodec" json:"buffer_compression,omitempty"`
	// contains filtered or unexported fields
}

Contains options specific to Arrow Serialization.

func (*ArrowSerializationOptions) Descriptor

func (*ArrowSerializationOptions) Descriptor() ([]byte, []int)

Deprecated: Use ArrowSerializationOptions.ProtoReflect.Descriptor instead.

func (*ArrowSerializationOptions) GetBufferCompression

func (*ArrowSerializationOptions) ProtoMessage

func (*ArrowSerializationOptions) ProtoMessage()

func (*ArrowSerializationOptions) ProtoReflect

func (*ArrowSerializationOptions) Reset

func (x *ArrowSerializationOptions) Reset()

func (*ArrowSerializationOptions) String

func (x *ArrowSerializationOptions) String() string

ArrowSerializationOptions_CompressionCodec

type ArrowSerializationOptions_CompressionCodec int32

Compression codecs supported by Arrow.

ArrowSerializationOptions_COMPRESSION_UNSPECIFIED, ArrowSerializationOptions_LZ4_FRAME, ArrowSerializationOptions_ZSTD

const (
	// If unspecified no compression will be used.
	ArrowSerializationOptions_COMPRESSION_UNSPECIFIED ArrowSerializationOptions_CompressionCodec = 0
	// LZ4 Frame (https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md)
	ArrowSerializationOptions_LZ4_FRAME ArrowSerializationOptions_CompressionCodec = 1
	// Zstandard compression.
	ArrowSerializationOptions_ZSTD ArrowSerializationOptions_CompressionCodec = 2
)

func (ArrowSerializationOptions_CompressionCodec) Descriptor

func (ArrowSerializationOptions_CompressionCodec) Enum

func (ArrowSerializationOptions_CompressionCodec) EnumDescriptor

func (ArrowSerializationOptions_CompressionCodec) EnumDescriptor() ([]byte, []int)

Deprecated: Use ArrowSerializationOptions_CompressionCodec.Descriptor instead.

func (ArrowSerializationOptions_CompressionCodec) Number

func (ArrowSerializationOptions_CompressionCodec) String

func (ArrowSerializationOptions_CompressionCodec) Type

AvroRows

type AvroRows struct {

	// Binary serialized rows in a block.
	SerializedBinaryRows []byte `protobuf:"bytes,1,opt,name=serialized_binary_rows,json=serializedBinaryRows,proto3" json:"serialized_binary_rows,omitempty"`
	// [Deprecated] The count of rows in the returning block.
	// Please use the format-independent ReadRowsResponse.row_count instead.
	//
	// Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/avro.proto.
	RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// contains filtered or unexported fields
}

Avro rows.

func (*AvroRows) Descriptor

func (*AvroRows) Descriptor() ([]byte, []int)

Deprecated: Use AvroRows.ProtoReflect.Descriptor instead.

func (*AvroRows) GetRowCount

func (x *AvroRows) GetRowCount() int64

Deprecated: Marked as deprecated in google/cloud/bigquery/storage/v1/avro.proto.

func (*AvroRows) GetSerializedBinaryRows

func (x *AvroRows) GetSerializedBinaryRows() []byte

func (*AvroRows) ProtoMessage

func (*AvroRows) ProtoMessage()

func (*AvroRows) ProtoReflect

func (x *AvroRows) ProtoReflect() protoreflect.Message

func (*AvroRows) Reset

func (x *AvroRows) Reset()

func (*AvroRows) String

func (x *AvroRows) String() string

AvroSchema

type AvroSchema struct {

	// Json serialized schema, as described at
	// https://avro.apache.org/docs/1.8.1/spec.html.
	Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"`
	// contains filtered or unexported fields
}

Avro schema.

func (*AvroSchema) Descriptor

func (*AvroSchema) Descriptor() ([]byte, []int)

Deprecated: Use AvroSchema.ProtoReflect.Descriptor instead.

func (*AvroSchema) GetSchema

func (x *AvroSchema) GetSchema() string

func (*AvroSchema) ProtoMessage

func (*AvroSchema) ProtoMessage()

func (*AvroSchema) ProtoReflect

func (x *AvroSchema) ProtoReflect() protoreflect.Message

func (*AvroSchema) Reset

func (x *AvroSchema) Reset()

func (*AvroSchema) String

func (x *AvroSchema) String() string

AvroSerializationOptions

type AvroSerializationOptions struct {

	// Enable displayName attribute in Avro schema.
	//
	// The Avro specification requires field names to be alphanumeric.  By
	// default, in cases when column names do not conform to these requirements
	// (e.g. non-ascii unicode codepoints) and Avro is requested as an output
	// format, the CreateReadSession call will fail.
	//
	// Setting this field to true populates Avro field names with a placeholder
	// value and populates a "displayName" attribute for every Avro field with the
	// original column name.
	EnableDisplayNameAttribute bool `protobuf:"varint,1,opt,name=enable_display_name_attribute,json=enableDisplayNameAttribute,proto3" json:"enable_display_name_attribute,omitempty"`
	// contains filtered or unexported fields
}

Contains options specific to Avro Serialization.

func (*AvroSerializationOptions) Descriptor

func (*AvroSerializationOptions) Descriptor() ([]byte, []int)

Deprecated: Use AvroSerializationOptions.ProtoReflect.Descriptor instead.

func (*AvroSerializationOptions) GetEnableDisplayNameAttribute

func (x *AvroSerializationOptions) GetEnableDisplayNameAttribute() bool

func (*AvroSerializationOptions) ProtoMessage

func (*AvroSerializationOptions) ProtoMessage()

func (*AvroSerializationOptions) ProtoReflect

func (x *AvroSerializationOptions) ProtoReflect() protoreflect.Message

func (*AvroSerializationOptions) Reset

func (x *AvroSerializationOptions) Reset()

func (*AvroSerializationOptions) String

func (x *AvroSerializationOptions) String() string

BatchCommitWriteStreamsRequest

type BatchCommitWriteStreamsRequest struct {

	// Required. Parent table that all the streams should belong to, in the form
	// of `projects/{project}/datasets/{dataset}/tables/{table}`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The group of streams that will be committed atomically.
	WriteStreams []string `protobuf:"bytes,2,rep,name=write_streams,json=writeStreams,proto3" json:"write_streams,omitempty"`
	// contains filtered or unexported fields
}

Request message for BatchCommitWriteStreams.

func (*BatchCommitWriteStreamsRequest) Descriptor

func (*BatchCommitWriteStreamsRequest) Descriptor() ([]byte, []int)

Deprecated: Use BatchCommitWriteStreamsRequest.ProtoReflect.Descriptor instead.

func (*BatchCommitWriteStreamsRequest) GetParent

func (x *BatchCommitWriteStreamsRequest) GetParent() string

func (*BatchCommitWriteStreamsRequest) GetWriteStreams

func (x *BatchCommitWriteStreamsRequest) GetWriteStreams() []string

func (*BatchCommitWriteStreamsRequest) ProtoMessage

func (*BatchCommitWriteStreamsRequest) ProtoMessage()

func (*BatchCommitWriteStreamsRequest) ProtoReflect

func (*BatchCommitWriteStreamsRequest) Reset

func (x *BatchCommitWriteStreamsRequest) Reset()

func (*BatchCommitWriteStreamsRequest) String

BatchCommitWriteStreamsResponse

type BatchCommitWriteStreamsResponse struct {

	// The time at which streams were committed, at microsecond granularity.
	// This field will only exist when there are no stream errors.
	// **Note** if this field is not set, it means the commit was not successful.
	CommitTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=commit_time,json=commitTime,proto3" json:"commit_time,omitempty"`
	// Stream level error if commit failed. Only streams with error will be in
	// the list.
	// If empty, there is no error and all streams are committed successfully.
	// If non-empty, certain streams have errors and ZERO streams are committed
	// due to the atomicity guarantee.
	StreamErrors []*StorageError `protobuf:"bytes,2,rep,name=stream_errors,json=streamErrors,proto3" json:"stream_errors,omitempty"`
	// contains filtered or unexported fields
}

Response message for BatchCommitWriteStreams.
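
A minimal sketch of checking the outcome of a batch commit, assuming resp came from a BatchCommitWriteStreams call; the StorageError accessors used below are defined elsewhere in this package.

if resp.GetCommitTime() == nil {
	// Commit was not successful; inspect the per-stream errors.
	for _, se := range resp.GetStreamErrors() {
		log.Printf("stream %s failed: %s (%s)", se.GetEntity(), se.GetErrorMessage(), se.GetCode())
	}
} else {
	log.Printf("streams committed at %s", resp.GetCommitTime().AsTime())
}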

func (*BatchCommitWriteStreamsResponse) Descriptor

func (*BatchCommitWriteStreamsResponse) Descriptor() ([]byte, []int)

Deprecated: Use BatchCommitWriteStreamsResponse.ProtoReflect.Descriptor instead.

func (*BatchCommitWriteStreamsResponse) GetCommitTime

func (*BatchCommitWriteStreamsResponse) GetStreamErrors

func (x *BatchCommitWriteStreamsResponse) GetStreamErrors() []*StorageError

func (*BatchCommitWriteStreamsResponse) ProtoMessage

func (*BatchCommitWriteStreamsResponse) ProtoMessage()

func (*BatchCommitWriteStreamsResponse) ProtoReflect

func (*BatchCommitWriteStreamsResponse) Reset

func (*BatchCommitWriteStreamsResponse) String

BigQueryReadClient

type BigQueryReadClient interface {
	// Creates a new read session. A read session divides the contents of a
	// BigQuery table into one or more streams, which can then be used to read
	// data from the table. The read session also specifies properties of the
	// data to be read, such as a list of columns or a push-down filter describing
	// the rows to be returned.
	//
	// A particular row can be read by at most one stream. When the caller has
	// reached the end of each stream in the session, then all the data in the
	// table has been read.
	//
	// Data is assigned to each stream such that roughly the same number of
	// rows can be read from each stream. Because the server-side unit for
	// assigning data is collections of rows, the API does not guarantee that
	// each stream will return the same number of rows. Additionally, the
	// limits are enforced based on the number of pre-filtered rows, so some
	// filters can lead to lopsided assignments.
	//
	// Read sessions automatically expire 6 hours after they are created and do
	// not require manual clean-up by the caller.
	CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error)
	// Reads rows from the stream in the format prescribed by the ReadSession.
	// Each response contains one or more table rows, up to a maximum of 100 MiB
	// per response; read requests which attempt to read individual rows larger
	// than 100 MiB will fail.
	//
	// Each request also returns a set of stream statistics reflecting the current
	// state of the stream.
	ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryRead_ReadRowsClient, error)
	// Splits a given `ReadStream` into two `ReadStream` objects. These
	// `ReadStream` objects are referred to as the primary and the residual
	// streams of the split. The original `ReadStream` can still be read from in
	// the same manner as before. Both of the returned `ReadStream` objects can
	// also be read from, and the rows returned by both child streams will be
	// the same as the rows read from the original stream.
	//
	// Moreover, the two child streams will be allocated back-to-back in the
	// original `ReadStream`. Concretely, it is guaranteed that for streams
	// original, primary, and residual, that original[0-j] = primary[0-j] and
	// original[j-n] = residual[0-m] once the streams have been read to
	// completion.
	SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error)
}

BigQueryReadClient is the client API for BigQueryRead service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewBigQueryReadClient

func NewBigQueryReadClient(cc grpc.ClientConnInterface) BigQueryReadClient
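
A minimal sketch of creating a read session and reading rows over the raw gRPC client. conn is assumed to be an authenticated *grpc.ClientConn to bigquerystorage.googleapis.com:443, the project, dataset, and table names are placeholders, and the ReadSession, ReadRowsRequest, and ReadRowsResponse accessors are defined elsewhere in this package. Production code would typically use the higher-level apiv1 client instead.

// Assumes context, io, and log are imported and ctx is a context.Context.
client := storagepb.NewBigQueryReadClient(conn)

session, err := client.CreateReadSession(ctx, &storagepb.CreateReadSessionRequest{
	Parent: "projects/my-project",
	ReadSession: &storagepb.ReadSession{
		Table:      "projects/my-project/datasets/my_dataset/tables/my_table",
		DataFormat: storagepb.DataFormat_AVRO,
	},
	MaxStreamCount: 1,
})
if err != nil {
	log.Fatal(err)
}

stream, err := client.ReadRows(ctx, &storagepb.ReadRowsRequest{
	ReadStream: session.GetStreams()[0].GetName(),
})
if err != nil {
	log.Fatal(err)
}
for {
	resp, err := stream.Recv()
	if err == io.EOF {
		break
	}
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("received %d rows", resp.GetRowCount())
}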

BigQueryReadServer

type BigQueryReadServer interface {
	// Creates a new read session. A read session divides the contents of a
	// BigQuery table into one or more streams, which can then be used to read
	// data from the table. The read session also specifies properties of the
	// data to be read, such as a list of columns or a push-down filter describing
	// the rows to be returned.
	//
	// A particular row can be read by at most one stream. When the caller has
	// reached the end of each stream in the session, then all the data in the
	// table has been read.
	//
	// Data is assigned to each stream such that roughly the same number of
	// rows can be read from each stream. Because the server-side unit for
	// assigning data is collections of rows, the API does not guarantee that
	// each stream will return the same number of rows. Additionally, the
	// limits are enforced based on the number of pre-filtered rows, so some
	// filters can lead to lopsided assignments.
	//
	// Read sessions automatically expire 6 hours after they are created and do
	// not require manual clean-up by the caller.
	CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
	// Reads rows from the stream in the format prescribed by the ReadSession.
	// Each response contains one or more table rows, up to a maximum of 100 MiB
	// per response; read requests which attempt to read individual rows larger
	// than 100 MiB will fail.
	//
	// Each request also returns a set of stream statistics reflecting the current
	// state of the stream.
	ReadRows(*ReadRowsRequest, BigQueryRead_ReadRowsServer) error
	// Splits a given `ReadStream` into two `ReadStream` objects. These
	// `ReadStream` objects are referred to as the primary and the residual
	// streams of the split. The original `ReadStream` can still be read from in
	// the same manner as before. Both of the returned `ReadStream` objects can
	// also be read from, and the rows returned by both child streams will be
	// the same as the rows read from the original stream.
	//
	// Moreover, the two child streams will be allocated back-to-back in the
	// original `ReadStream`. Concretely, it is guaranteed that for streams
	// original, primary, and residual, that original[0-j] = primary[0-j] and
	// original[j-n] = residual[0-m] once the streams have been read to
	// completion.
	SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
}

BigQueryReadServer is the server API for BigQueryRead service.

BigQueryRead_ReadRowsClient

type BigQueryRead_ReadRowsClient interface {
	Recv() (*ReadRowsResponse, error)
	grpc.ClientStream
}

BigQueryRead_ReadRowsServer

type BigQueryRead_ReadRowsServer interface {
	Send(*ReadRowsResponse) error
	grpc.ServerStream
}

BigQueryWriteClient

type BigQueryWriteClient interface {
	// Creates a write stream to the given table.
	// Additionally, every table has a special stream named '_default'
	// to which data can be written. This stream doesn't need to be created using
	// CreateWriteStream. It is a stream that can be used simultaneously by any
	// number of clients. Data written to this stream is considered committed as
	// soon as an acknowledgement is received.
	CreateWriteStream(ctx context.Context, in *CreateWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
	// Appends data to the given stream.
	//
	// If `offset` is specified, the `offset` is checked against the end of
	// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
	// attempt is made to append to an offset beyond the current end of the stream
	// or `ALREADY_EXISTS` if user provides an `offset` that has already been
	// written to. User can retry with adjusted offset within the same RPC
	// connection. If `offset` is not specified, append happens at the end of the
	// stream.
	//
	// The response contains an optional offset at which the append
	// happened.  No offset information will be returned for appends to a
	// default stream.
	//
	// Responses are received in the same order in which requests are sent.
	// There will be one response for each successfully inserted request.  Responses
	// may optionally embed error information if the originating AppendRequest was
	// not successfully processed.
	//
	// The specifics of when successfully appended data is made visible to the
	// table are governed by the type of stream:
	//
	// * For COMMITTED streams (which includes the default stream), data is
	// visible immediately upon successful append.
	//
	// * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
	// rpc which advances a cursor to a newer offset in the stream.
	//
	// * For PENDING streams, data is not made visible until the stream itself is
	// finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
	// committed via the `BatchCommitWriteStreams` rpc.
	AppendRows(ctx context.Context, opts ...grpc.CallOption) (BigQueryWrite_AppendRowsClient, error)
	// Gets information about a write stream.
	GetWriteStream(ctx context.Context, in *GetWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
	// Finalize a write stream so that no new data can be appended to the
	// stream. Finalize is not supported on the '_default' stream.
	FinalizeWriteStream(ctx context.Context, in *FinalizeWriteStreamRequest, opts ...grpc.CallOption) (*FinalizeWriteStreamResponse, error)
	// Atomically commits a group of `PENDING` streams that belong to the same
	// `parent` table.
	//
	// Streams must be finalized before commit and cannot be committed multiple
	// times. Once a stream is committed, data in the stream becomes available
	// for read operations.
	BatchCommitWriteStreams(ctx context.Context, in *BatchCommitWriteStreamsRequest, opts ...grpc.CallOption) (*BatchCommitWriteStreamsResponse, error)
	// Flushes rows to a BUFFERED stream.
	//
	// If users are appending rows to a BUFFERED stream, a flush operation is
	// required in order for the rows to become available for reading. A
	// flush operation advances the flushed cursor of a BUFFERED stream from any
	// previously flushed offset to the offset specified in the request.
	//
	// Flush is not supported on the _default stream, since it is not BUFFERED.
	FlushRows(ctx context.Context, in *FlushRowsRequest, opts ...grpc.CallOption) (*FlushRowsResponse, error)
}

BigQueryWriteClient is the client API for BigQueryWrite service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewBigQueryWriteClient

func NewBigQueryWriteClient(cc grpc.ClientConnInterface) BigQueryWriteClient
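
A minimal sketch of the bidirectional AppendRows flow over the raw gRPC client. conn, ctx, and req (an AppendRowsRequest like the one shown earlier) are assumed to exist.

client := storagepb.NewBigQueryWriteClient(conn)

stream, err := client.AppendRows(ctx)
if err != nil {
	log.Fatal(err)
}
if err := stream.Send(req); err != nil {
	log.Fatal(err)
}
resp, err := stream.Recv()
if err != nil {
	log.Fatal(err)
}
if result := resp.GetAppendResult(); result != nil {
	log.Printf("append succeeded, offset: %v", result.GetOffset())
}
if err := stream.CloseSend(); err != nil {
	log.Fatal(err)
}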

BigQueryWriteServer

type BigQueryWriteServer interface {
	// Creates a write stream to the given table.
	// Additionally, every table has a special stream named '_default'
	// to which data can be written. This stream doesn't need to be created using
	// CreateWriteStream. It is a stream that can be used simultaneously by any
	// number of clients. Data written to this stream is considered committed as
	// soon as an acknowledgement is received.
	CreateWriteStream(context.Context, *CreateWriteStreamRequest) (*WriteStream, error)
	// Appends data to the given stream.
	//
	// If `offset` is specified, the `offset` is checked against the end of
	// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
	// attempt is made to append to an offset beyond the current end of the stream
	// or `ALREADY_EXISTS` if user provides an `offset` that has already been
	// written to. User can retry with adjusted offset within the same RPC
	// connection. If `offset` is not specified, append happens at the end of the
	// stream.
	//
	// The response contains an optional offset at which the append
	// happened.  No offset information will be returned for appends to a
	// default stream.
	//
	// Responses are received in the same order in which requests are sent.
	// There will be one response for each successfully inserted request.  Responses
	// may optionally embed error information if the originating AppendRequest was
	// not successfully processed.
	//
	// The specifics of when successfully appended data is made visible to the
	// table are governed by the type of stream:
	//
	// * For COMMITTED streams (which includes the default stream), data is
	// visible immediately upon successful append.
	//
	// * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
	// rpc which advances a cursor to a newer offset in the stream.
	//
	// * For PENDING streams, data is not made visible until the stream itself is
	// finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
	// committed via the `BatchCommitWriteStreams` rpc.
	AppendRows(BigQueryWrite_AppendRowsServer) error
	// Gets information about a write stream.
	GetWriteStream(context.Context, *GetWriteStreamRequest) (*WriteStream, error)
	// Finalize a write stream so that no new data can be appended to the
	// stream. Finalize is not supported on the '_default' stream.
	FinalizeWriteStream(context.Context, *FinalizeWriteStreamRequest) (*FinalizeWriteStreamResponse, error)
	// Atomically commits a group of `PENDING` streams that belong to the same
	// `parent` table.
	//
	// Streams must be finalized before commit and cannot be committed multiple
	// times. Once a stream is committed, data in the stream becomes available
	// for read operations.
	BatchCommitWriteStreams(context.Context, *BatchCommitWriteStreamsRequest) (*BatchCommitWriteStreamsResponse, error)
	// Flushes rows to a BUFFERED stream.
	//
	// If users are appending rows to a BUFFERED stream, a flush operation is
	// required in order for the rows to become available for reading. A
	// flush operation advances the flushed cursor of a BUFFERED stream from any
	// previously flushed offset to the offset specified in the request.
	//
	// Flush is not supported on the _default stream, since it is not BUFFERED.
	FlushRows(context.Context, *FlushRowsRequest) (*FlushRowsResponse, error)
}

BigQueryWriteServer is the server API for BigQueryWrite service.

BigQueryWrite_AppendRowsClient

type BigQueryWrite_AppendRowsClient interface {
	Send(*AppendRowsRequest) error
	Recv() (*AppendRowsResponse, error)
	grpc.ClientStream
}

BigQueryWrite_AppendRowsServer

type BigQueryWrite_AppendRowsServer interface {
	Send(*AppendRowsResponse) error
	Recv() (*AppendRowsRequest, error)
	grpc.ServerStream
}

CreateReadSessionRequest

type CreateReadSessionRequest struct {

	// Required. The request project that owns the session, in the form of
	// `projects/{project_id}`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. Session to be created.
	ReadSession *ReadSession `protobuf:"bytes,2,opt,name=read_session,json=readSession,proto3" json:"read_session,omitempty"`
	// Max initial number of streams. If unset or zero, the server will
	// choose the number of streams so as to produce reasonable throughput. Must be
	// non-negative. The number of streams may be lower than the requested number,
	// depending on the amount of parallelism that is reasonable for the table.
	// There is a default system max limit of 1,000.
	//
	// This must be greater than or equal to preferred_min_stream_count.
	// Typically, clients should either leave this unset to let the system
	// determine an upper bound, OR set this to the maximum number of "units of
	// work" the client can gracefully handle.
	MaxStreamCount int32 `protobuf:"varint,3,opt,name=max_stream_count,json=maxStreamCount,proto3" json:"max_stream_count,omitempty"`
	// The minimum preferred stream count. This parameter can be used to inform
	// the service that there is a desired lower bound on the number of streams.
	// This is typically a target parallelism of the client (e.g. a Spark
	// cluster with N-workers would set this to a low multiple of N to ensure
	// good cluster utilization).
	//
	// The system will make a best effort to provide at least this number of
	// streams, but in some cases might provide less.
	PreferredMinStreamCount int32 `protobuf:"varint,4,opt,name=preferred_min_stream_count,json=preferredMinStreamCount,proto3" json:"preferred_min_stream_count,omitempty"`
	// contains filtered or unexported fields
}

Request message for CreateReadSession.

func (*CreateReadSessionRequest) Descriptor

func (*CreateReadSessionRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateReadSessionRequest.ProtoReflect.Descriptor instead.

func (*CreateReadSessionRequest) GetMaxStreamCount

func (x *CreateReadSessionRequest) GetMaxStreamCount() int32

func (*CreateReadSessionRequest) GetParent

func (x *CreateReadSessionRequest) GetParent() string

func (*CreateReadSessionRequest) GetPreferredMinStreamCount

func (x *CreateReadSessionRequest) GetPreferredMinStreamCount() int32

func (*CreateReadSessionRequest) GetReadSession

func (x *CreateReadSessionRequest) GetReadSession() *ReadSession

func (*CreateReadSessionRequest) ProtoMessage

func (*CreateReadSessionRequest) ProtoMessage()

func (*CreateReadSessionRequest) ProtoReflect

func (x *CreateReadSessionRequest) ProtoReflect() protoreflect.Message

func (*CreateReadSessionRequest) Reset

func (x *CreateReadSessionRequest) Reset()

func (*CreateReadSessionRequest) String

func (x *CreateReadSessionRequest) String() string

CreateWriteStreamRequest

type CreateWriteStreamRequest struct {

	// Required. Reference to the table to which the stream belongs, in the format
	// of `projects/{project}/datasets/{dataset}/tables/{table}`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. Stream to be created.
	WriteStream *WriteStream `protobuf:"bytes,2,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
	// contains filtered or unexported fields
}

Request message for CreateWriteStream.
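
A minimal sketch of creating an explicit PENDING write stream and, once appends are finished, finalizing and committing it. client is a BigQueryWriteClient as created earlier, the table path is a placeholder, and the WriteStream type and its Type field are defined elsewhere in this package.

ws, err := client.CreateWriteStream(ctx, &storagepb.CreateWriteStreamRequest{
	Parent: "projects/my-project/datasets/my_dataset/tables/my_table",
	WriteStream: &storagepb.WriteStream{
		Type: storagepb.WriteStream_PENDING,
	},
})
if err != nil {
	log.Fatal(err)
}

// ... append rows to ws.GetName() via AppendRows ...

if _, err := client.FinalizeWriteStream(ctx, &storagepb.FinalizeWriteStreamRequest{Name: ws.GetName()}); err != nil {
	log.Fatal(err)
}
if _, err := client.BatchCommitWriteStreams(ctx, &storagepb.BatchCommitWriteStreamsRequest{
	Parent:       "projects/my-project/datasets/my_dataset/tables/my_table",
	WriteStreams: []string{ws.GetName()},
}); err != nil {
	log.Fatal(err)
}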

func (*CreateWriteStreamRequest) Descriptor

func (*CreateWriteStreamRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateWriteStreamRequest.ProtoReflect.Descriptor instead.

func (*CreateWriteStreamRequest) GetParent

func (x *CreateWriteStreamRequest) GetParent() string

func (*CreateWriteStreamRequest) GetWriteStream

func (x *CreateWriteStreamRequest) GetWriteStream() *WriteStream

func (*CreateWriteStreamRequest) ProtoMessage

func (*CreateWriteStreamRequest) ProtoMessage()

func (*CreateWriteStreamRequest) ProtoReflect

func (x *CreateWriteStreamRequest) ProtoReflect() protoreflect.Message

func (*CreateWriteStreamRequest) Reset

func (x *CreateWriteStreamRequest) Reset()

func (*CreateWriteStreamRequest) String

func (x *CreateWriteStreamRequest) String() string

DataFormat

type DataFormat int32

Data format for input or output data.

DataFormat_DATA_FORMAT_UNSPECIFIED, DataFormat_AVRO, DataFormat_ARROW

const (
	// Data format is unspecified.
	DataFormat_DATA_FORMAT_UNSPECIFIED DataFormat = 0
	// Avro is a standard open source row based file format.
	// See https://avro.apache.org/ for more details.
	DataFormat_AVRO DataFormat = 1
	// Arrow is a standard open source column-based message format.
	// See https://arrow.apache.org/ for more details.
	DataFormat_ARROW DataFormat = 2
)

func (DataFormat) Descriptor

func (DataFormat) Descriptor() protoreflect.EnumDescriptor

func (DataFormat) Enum

func (x DataFormat) Enum() *DataFormat

func (DataFormat) EnumDescriptor

func (DataFormat) EnumDescriptor() ([]byte, []int)

Deprecated: Use DataFormat.Descriptor instead.

func (DataFormat) Number

func (x DataFormat) Number() protoreflect.EnumNumber

func (DataFormat) String

func (x DataFormat) String() string

func (DataFormat) Type

FinalizeWriteStreamRequest

type FinalizeWriteStreamRequest struct {

	// Required. Name of the stream to finalize, in the form of
	// `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// contains filtered or unexported fields
}

Request message for invoking FinalizeWriteStream.

func (*FinalizeWriteStreamRequest) Descriptor

func (*FinalizeWriteStreamRequest) Descriptor() ([]byte, []int)

Deprecated: Use FinalizeWriteStreamRequest.ProtoReflect.Descriptor instead.

func (*FinalizeWriteStreamRequest) GetName

func (x *FinalizeWriteStreamRequest) GetName() string

func (*FinalizeWriteStreamRequest) ProtoMessage

func (*FinalizeWriteStreamRequest) ProtoMessage()

func (*FinalizeWriteStreamRequest) ProtoReflect

func (*FinalizeWriteStreamRequest) Reset

func (x *FinalizeWriteStreamRequest) Reset()

func (*FinalizeWriteStreamRequest) String

func (x *FinalizeWriteStreamRequest) String() string

FinalizeWriteStreamResponse

type FinalizeWriteStreamResponse struct {

	// Number of rows in the finalized stream.
	RowCount int64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// contains filtered or unexported fields
}

Response message for FinalizeWriteStream.

func (*FinalizeWriteStreamResponse) Descriptor

func (*FinalizeWriteStreamResponse) Descriptor() ([]byte, []int)

Deprecated: Use FinalizeWriteStreamResponse.ProtoReflect.Descriptor instead.

func (*FinalizeWriteStreamResponse) GetRowCount

func (x *FinalizeWriteStreamResponse) GetRowCount() int64

func (*FinalizeWriteStreamResponse) ProtoMessage

func (*FinalizeWriteStreamResponse) ProtoMessage()

func (*FinalizeWriteStreamResponse) ProtoReflect

func (*FinalizeWriteStreamResponse) Reset

func (x *FinalizeWriteStreamResponse) Reset()

func (*FinalizeWriteStreamResponse) String

func (x *FinalizeWriteStreamResponse) String() string

FlushRowsRequest

type FlushRowsRequest struct {

	// Required. The stream that is the target of the flush operation.
	WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
	// Ending offset of the flush operation. Rows before this offset (including
	// this offset) will be flushed.
	Offset *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"`
	// contains filtered or unexported fields
}

Request message for FlushRows.
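
A minimal sketch of flushing a BUFFERED stream up to a given offset; client is a BigQueryWriteClient as created earlier, and streamName and lastOffset are assumptions.

// Assumes google.golang.org/protobuf/types/known/wrapperspb is imported.
resp, err := client.FlushRows(ctx, &storagepb.FlushRowsRequest{
	WriteStream: streamName,
	Offset:      wrapperspb.Int64(lastOffset),
})
if err != nil {
	log.Fatal(err)
}
log.Printf("flushed through offset %d", resp.GetOffset())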

func (*FlushRowsRequest) Descriptor

func (*FlushRowsRequest) Descriptor() ([]byte, []int)

Deprecated: Use FlushRowsRequest.ProtoReflect.Descriptor instead.

func (*FlushRowsRequest) GetOffset

func (x *FlushRowsRequest) GetOffset() *wrapperspb.Int64Value

func (*FlushRowsRequest) GetWriteStream

func (x *FlushRowsRequest) GetWriteStream() string