Package cloud.google.com/go/bigquery/storage/apiv1/storagepb (v1.46.0)

Variables

ArrowSerializationOptions_CompressionCodec_name, ArrowSerializationOptions_CompressionCodec_value

var (
	ArrowSerializationOptions_CompressionCodec_name = map[int32]string{
		0: "COMPRESSION_UNSPECIFIED",
		1: "LZ4_FRAME",
		2: "ZSTD",
	}
	ArrowSerializationOptions_CompressionCodec_value = map[string]int32{
		"COMPRESSION_UNSPECIFIED": 0,
		"LZ4_FRAME":               1,
		"ZSTD":                    2,
	}
)

Enum value maps for ArrowSerializationOptions_CompressionCodec.
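The same pattern applies to every enum value map in this package. Below is a minimal sketch of translating between wire values and enum names with these maps; the codec values are purely illustrative.

package main

import (
	"fmt"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

func main() {
	// Numeric wire value -> canonical name.
	fmt.Println(storagepb.ArrowSerializationOptions_CompressionCodec_name[1]) // LZ4_FRAME

	// Canonical name -> numeric value, converted to the typed enum.
	v := storagepb.ArrowSerializationOptions_CompressionCodec_value["ZSTD"]
	codec := storagepb.ArrowSerializationOptions_CompressionCodec(v)
	fmt.Println(codec) // ZSTD
}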

AppendRowsRequest_MissingValueInterpretation_name, AppendRowsRequest_MissingValueInterpretation_value

var (
	AppendRowsRequest_MissingValueInterpretation_name = map[int32]string{
		0: "MISSING_VALUE_INTERPRETATION_UNSPECIFIED",
		1: "NULL_VALUE",
		2: "DEFAULT_VALUE",
	}
	AppendRowsRequest_MissingValueInterpretation_value = map[string]int32{
		"MISSING_VALUE_INTERPRETATION_UNSPECIFIED": 0,
		"NULL_VALUE":    1,
		"DEFAULT_VALUE": 2,
	}
)

Enum value maps for AppendRowsRequest_MissingValueInterpretation.

StorageError_StorageErrorCode_name, StorageError_StorageErrorCode_value

var (
	StorageError_StorageErrorCode_name = map[int32]string{
		0: "STORAGE_ERROR_CODE_UNSPECIFIED",
		1: "TABLE_NOT_FOUND",
		2: "STREAM_ALREADY_COMMITTED",
		3: "STREAM_NOT_FOUND",
		4: "INVALID_STREAM_TYPE",
		5: "INVALID_STREAM_STATE",
		6: "STREAM_FINALIZED",
		7: "SCHEMA_MISMATCH_EXTRA_FIELDS",
		8: "OFFSET_ALREADY_EXISTS",
		9: "OFFSET_OUT_OF_RANGE",
	}
	StorageError_StorageErrorCode_value = map[string]int32{
		"STORAGE_ERROR_CODE_UNSPECIFIED": 0,
		"TABLE_NOT_FOUND":                1,
		"STREAM_ALREADY_COMMITTED":       2,
		"STREAM_NOT_FOUND":               3,
		"INVALID_STREAM_TYPE":            4,
		"INVALID_STREAM_STATE":           5,
		"STREAM_FINALIZED":               6,
		"SCHEMA_MISMATCH_EXTRA_FIELDS":   7,
		"OFFSET_ALREADY_EXISTS":          8,
		"OFFSET_OUT_OF_RANGE":            9,
	}
)

Enum value maps for StorageError_StorageErrorCode.

RowError_RowErrorCode_name, RowError_RowErrorCode_value

var (
	RowError_RowErrorCode_name = map[int32]string{
		0: "ROW_ERROR_CODE_UNSPECIFIED",
		1: "FIELDS_ERROR",
	}
	RowError_RowErrorCode_value = map[string]int32{
		"ROW_ERROR_CODE_UNSPECIFIED": 0,
		"FIELDS_ERROR":               1,
	}
)

Enum value maps for RowError_RowErrorCode.

DataFormat_name, DataFormat_value

var (
	DataFormat_name = map[int32]string{
		0: "DATA_FORMAT_UNSPECIFIED",
		1: "AVRO",
		2: "ARROW",
	}
	DataFormat_value = map[string]int32{
		"DATA_FORMAT_UNSPECIFIED": 0,
		"AVRO":                    1,
		"ARROW":                   2,
	}
)

Enum value maps for DataFormat.

WriteStreamView_name, WriteStreamView_value

var (
	WriteStreamView_name = map[int32]string{
		0: "WRITE_STREAM_VIEW_UNSPECIFIED",
		1: "BASIC",
		2: "FULL",
	}
	WriteStreamView_value = map[string]int32{
		"WRITE_STREAM_VIEW_UNSPECIFIED": 0,
		"BASIC":                         1,
		"FULL":                          2,
	}
)

Enum value maps for WriteStreamView.

WriteStream_Type_name, WriteStream_Type_value

var (
	WriteStream_Type_name = map[int32]string{
		0: "TYPE_UNSPECIFIED",
		1: "COMMITTED",
		2: "PENDING",
		3: "BUFFERED",
	}
	WriteStream_Type_value = map[string]int32{
		"TYPE_UNSPECIFIED": 0,
		"COMMITTED":        1,
		"PENDING":          2,
		"BUFFERED":         3,
	}
)

Enum value maps for WriteStream_Type.

WriteStream_WriteMode_name, WriteStream_WriteMode_value

var (
	WriteStream_WriteMode_name = map[int32]string{
		0: "WRITE_MODE_UNSPECIFIED",
		1: "INSERT",
	}
	WriteStream_WriteMode_value = map[string]int32{
		"WRITE_MODE_UNSPECIFIED": 0,
		"INSERT":                 1,
	}
)

Enum value maps for WriteStream_WriteMode.

TableFieldSchema_Type_name, TableFieldSchema_Type_value

var (
	TableFieldSchema_Type_name = map[int32]string{
		0:  "TYPE_UNSPECIFIED",
		1:  "STRING",
		2:  "INT64",
		3:  "DOUBLE",
		4:  "STRUCT",
		5:  "BYTES",
		6:  "BOOL",
		7:  "TIMESTAMP",
		8:  "DATE",
		9:  "TIME",
		10: "DATETIME",
		11: "GEOGRAPHY",
		12: "NUMERIC",
		13: "BIGNUMERIC",
		14: "INTERVAL",
		15: "JSON",
	}
	TableFieldSchema_Type_value = map[string]int32{
		"TYPE_UNSPECIFIED": 0,
		"STRING":           1,
		"INT64":            2,
		"DOUBLE":           3,
		"STRUCT":           4,
		"BYTES":            5,
		"BOOL":             6,
		"TIMESTAMP":        7,
		"DATE":             8,
		"TIME":             9,
		"DATETIME":         10,
		"GEOGRAPHY":        11,
		"NUMERIC":          12,
		"BIGNUMERIC":       13,
		"INTERVAL":         14,
		"JSON":             15,
	}
)

Enum value maps for TableFieldSchema_Type.

TableFieldSchema_Mode_name, TableFieldSchema_Mode_value

var (
	TableFieldSchema_Mode_name = map[int32]string{
		0: "MODE_UNSPECIFIED",
		1: "NULLABLE",
		2: "REQUIRED",
		3: "REPEATED",
	}
	TableFieldSchema_Mode_value = map[string]int32{
		"MODE_UNSPECIFIED": 0,
		"NULLABLE":         1,
		"REQUIRED":         2,
		"REPEATED":         3,
	}
)

Enum value maps for TableFieldSchema_Mode.

E_ColumnName

var (
	// Setting the column_name extension allows users to reference a BigQuery
	// column independently of the field name in the protocol buffer
	// message.
	//
	// The intended use of this annotation is to reference a destination column
	// named using characters unavailable for protobuf field names (e.g. unicode
	// characters).
	//
	// More details about BigQuery naming limitations can be found here:
	// https://cloud.google.com/bigquery/docs/schemas#column_names
	//
	// This extension is currently experimental.
	//
	// optional string column_name = 454943157;
	E_ColumnName = &file_google_cloud_bigquery_storage_v1_annotations_proto_extTypes[0]
)

Extension fields to descriptorpb.FieldOptions.
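As a hedged illustration, the annotation can be read back at runtime from a generated message's field descriptors with the standard protobuf extension API; the helper below and its descriptor argument are hypothetical, not part of this package.

package example

import (
	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// columnNames maps protobuf field names to the BigQuery column names declared
// via the column_name extension, for whichever fields set it.
func columnNames(md protoreflect.MessageDescriptor) map[string]string {
	out := make(map[string]string)
	fields := md.Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		opts, ok := fd.Options().(*descriptorpb.FieldOptions)
		if !ok || !proto.HasExtension(opts, storagepb.E_ColumnName) {
			continue
		}
		out[string(fd.Name())] = proto.GetExtension(opts, storagepb.E_ColumnName).(string)
	}
	return out
}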

File_google_cloud_bigquery_storage_v1_annotations_proto

var File_google_cloud_bigquery_storage_v1_annotations_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_arrow_proto

var File_google_cloud_bigquery_storage_v1_arrow_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_avro_proto

var File_google_cloud_bigquery_storage_v1_avro_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_protobuf_proto

var File_google_cloud_bigquery_storage_v1_protobuf_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_storage_proto

var File_google_cloud_bigquery_storage_v1_storage_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_stream_proto

var File_google_cloud_bigquery_storage_v1_stream_proto protoreflect.FileDescriptor

File_google_cloud_bigquery_storage_v1_table_proto

var File_google_cloud_bigquery_storage_v1_table_proto protoreflect.FileDescriptor

Functions

func RegisterBigQueryReadServer

func RegisterBigQueryReadServer(s *grpc.Server, srv BigQueryReadServer)

func RegisterBigQueryWriteServer

func RegisterBigQueryWriteServer(s *grpc.Server, srv BigQueryWriteServer)
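A minimal sketch of serving these interfaces: register a stub implementation on a grpc.Server. The stub type and listen address below are illustrative only; a real server would implement the full read semantics.

package main

import (
	"context"
	"log"
	"net"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// readServer is a placeholder BigQueryReadServer that rejects every call.
type readServer struct{}

func (readServer) CreateReadSession(context.Context, *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) {
	return nil, status.Error(codes.Unimplemented, "not implemented")
}

func (readServer) ReadRows(*storagepb.ReadRowsRequest, storagepb.BigQueryRead_ReadRowsServer) error {
	return status.Error(codes.Unimplemented, "not implemented")
}

func (readServer) SplitReadStream(context.Context, *storagepb.SplitReadStreamRequest) (*storagepb.SplitReadStreamResponse, error) {
	return nil, status.Error(codes.Unimplemented, "not implemented")
}

func main() {
	s := grpc.NewServer()
	storagepb.RegisterBigQueryReadServer(s, readServer{})

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(s.Serve(lis))
}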

AppendRowsRequest

type AppendRowsRequest struct {
	WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`

	Offset *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"`

	Rows isAppendRowsRequest_Rows `protobuf_oneof:"rows"`

	TraceId string `protobuf:"bytes,6,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`

	MissingValueInterpretations map[string]AppendRowsRequest_MissingValueInterpretation "" /* 316 byte string literal not displayed */

}

Request message for AppendRows.

Due to the nature of AppendRows being a bidirectional streaming RPC, certain parts of the AppendRowsRequest need only be specified for the first request sent each time the gRPC network connection is opened/reopened.

The size of a single AppendRowsRequest must be less than 10 MB. Requests larger than this return an error, typically INVALID_ARGUMENT.
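A minimal sketch of building that first request, assuming a flat generated row message: the writer schema is derived from the message descriptor and sent once, and each row is the proto-serialized message. The ProtoSchema and ProtoRows fields referenced here are defined elsewhere in this package; nested row messages generally need a normalized, self-contained descriptor instead of the plain conversion shown.

package example

import (
	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
)

// firstAppendRequest builds the initial AppendRowsRequest for a connection,
// carrying both the writer schema and one serialized row.
func firstAppendRequest(writeStream string, row proto.Message) (*storagepb.AppendRowsRequest, error) {
	serialized, err := proto.Marshal(row)
	if err != nil {
		return nil, err
	}
	descProto := protodesc.ToDescriptorProto(row.ProtoReflect().Descriptor())
	return &storagepb.AppendRowsRequest{
		WriteStream: writeStream,
		Rows: &storagepb.AppendRowsRequest_ProtoRows{
			ProtoRows: &storagepb.AppendRowsRequest_ProtoData{
				WriterSchema: &storagepb.ProtoSchema{ProtoDescriptor: descProto},
				Rows:         &storagepb.ProtoRows{SerializedRows: [][]byte{serialized}},
			},
		},
	}, nil
}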

func (*AppendRowsRequest) Descriptor

func (*AppendRowsRequest) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsRequest.ProtoReflect.Descriptor instead.

func (*AppendRowsRequest) GetMissingValueInterpretations

func (x *AppendRowsRequest) GetMissingValueInterpretations() map[string]AppendRowsRequest_MissingValueInterpretation

func (*AppendRowsRequest) GetOffset

func (x *AppendRowsRequest) GetOffset() *wrapperspb.Int64Value

func (*AppendRowsRequest) GetProtoRows

func (*AppendRowsRequest) GetRows

func (m *AppendRowsRequest) GetRows() isAppendRowsRequest_Rows

func (*AppendRowsRequest) GetTraceId

func (x *AppendRowsRequest) GetTraceId() string

func (*AppendRowsRequest) GetWriteStream

func (x *AppendRowsRequest) GetWriteStream() string

func (*AppendRowsRequest) ProtoMessage

func (*AppendRowsRequest) ProtoMessage()

func (*AppendRowsRequest) ProtoReflect

func (x *AppendRowsRequest) ProtoReflect() protoreflect.Message

func (*AppendRowsRequest) Reset

func (x *AppendRowsRequest) Reset()

func (*AppendRowsRequest) String

func (x *AppendRowsRequest) String() string

AppendRowsRequest_MissingValueInterpretation

type AppendRowsRequest_MissingValueInterpretation int32

An enum to indicate how to interpret missing values. Missing values are fields that are present in the user schema but missing in rows. A missing value can represent a NULL or a column default value defined in the BigQuery table schema.

AppendRowsRequest_MISSING_VALUE_INTERPRETATION_UNSPECIFIED, AppendRowsRequest_NULL_VALUE, AppendRowsRequest_DEFAULT_VALUE

const (
	// Invalid missing value interpretation. Requests with this value will be
	// rejected.
	AppendRowsRequest_MISSING_VALUE_INTERPRETATION_UNSPECIFIED AppendRowsRequest_MissingValueInterpretation = 0
	// Missing value is interpreted as NULL.
	AppendRowsRequest_NULL_VALUE AppendRowsRequest_MissingValueInterpretation = 1
	// Missing value is interpreted as column default value if declared in the
	// table schema, NULL otherwise.
	AppendRowsRequest_DEFAULT_VALUE AppendRowsRequest_MissingValueInterpretation = 2
)
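For example, a request can ask the backend to fill a specific missing column from the table default rather than NULL; this is a short sketch with a placeholder column name.

package main

import "cloud.google.com/go/bigquery/storage/apiv1/storagepb"

func main() {
	req := &storagepb.AppendRowsRequest{
		WriteStream: "projects/p/datasets/d/tables/t/streams/_default",
		MissingValueInterpretations: map[string]storagepb.AppendRowsRequest_MissingValueInterpretation{
			"created_at": storagepb.AppendRowsRequest_DEFAULT_VALUE,
		},
	}
	_ = req
}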

func (AppendRowsRequest_MissingValueInterpretation) Descriptor

func (AppendRowsRequest_MissingValueInterpretation) Enum

func (AppendRowsRequest_MissingValueInterpretation) EnumDescriptor

func (AppendRowsRequest_MissingValueInterpretation) EnumDescriptor() ([]byte, []int)

Deprecated: Use AppendRowsRequest_MissingValueInterpretation.Descriptor instead.

func (AppendRowsRequest_MissingValueInterpretation) Number

func (AppendRowsRequest_MissingValueInterpretation) String

func (AppendRowsRequest_MissingValueInterpretation) Type

AppendRowsRequest_ProtoData

type AppendRowsRequest_ProtoData struct {

	// Proto schema used to serialize the data.  This value only needs to be
	// provided as part of the first request on a gRPC network connection,
	// and will be ignored for subsequent requests on the connection.
	WriterSchema *ProtoSchema `protobuf:"bytes,1,opt,name=writer_schema,json=writerSchema,proto3" json:"writer_schema,omitempty"`
	// Serialized row data in protobuf message format.
	// Currently, the backend expects the serialized rows to adhere to
	// proto2 semantics when appending rows, particularly with respect to
	// how default values are encoded.
	Rows *ProtoRows `protobuf:"bytes,2,opt,name=rows,proto3" json:"rows,omitempty"`
	// contains filtered or unexported fields
}

ProtoData contains the data rows and schema when constructing append requests.

func (*AppendRowsRequest_ProtoData) Descriptor

func (*AppendRowsRequest_ProtoData) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsRequest_ProtoData.ProtoReflect.Descriptor instead.

func (*AppendRowsRequest_ProtoData) GetRows

func (x *AppendRowsRequest_ProtoData) GetRows() *ProtoRows

func (*AppendRowsRequest_ProtoData) GetWriterSchema

func (x *AppendRowsRequest_ProtoData) GetWriterSchema() *ProtoSchema

func (*AppendRowsRequest_ProtoData) ProtoMessage

func (*AppendRowsRequest_ProtoData) ProtoMessage()

func (*AppendRowsRequest_ProtoData) ProtoReflect

func (*AppendRowsRequest_ProtoData) Reset

func (x *AppendRowsRequest_ProtoData) Reset()

func (*AppendRowsRequest_ProtoData) String

func (x *AppendRowsRequest_ProtoData) String() string

AppendRowsRequest_ProtoRows

type AppendRowsRequest_ProtoRows struct {
	// Rows in proto format.
	ProtoRows *AppendRowsRequest_ProtoData `protobuf:"bytes,4,opt,name=proto_rows,json=protoRows,proto3,oneof"`
}

AppendRowsResponse

type AppendRowsResponse struct {

	// Types that are assignable to Response:
	//
	//	*AppendRowsResponse_AppendResult_
	//	*AppendRowsResponse_Error
	Response isAppendRowsResponse_Response `protobuf_oneof:"response"`
	// If the backend detects a schema update, it is passed back to the user so
	// that the user can start sending messages with the new schema. It will be
	// empty when no schema updates have occurred.
	UpdatedSchema *TableSchema `protobuf:"bytes,3,opt,name=updated_schema,json=updatedSchema,proto3" json:"updated_schema,omitempty"`
	// If a request failed due to corrupted rows, no rows in the batch will be
	// appended. The API will return row level error info, so that the caller can
	// remove the bad rows and retry the request.
	RowErrors []*RowError `protobuf:"bytes,4,rep,name=row_errors,json=rowErrors,proto3" json:"row_errors,omitempty"`
	// The target of the append operation. Matches the write_stream in the
	// corresponding request.
	WriteStream string `protobuf:"bytes,5,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
	// contains filtered or unexported fields
}

Response message for AppendRows.
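A minimal sketch of inspecting one such response after a Recv on the append stream; the RowError accessors used here are defined elsewhere in this package, and the status conversion comes from the gRPC module.

package example

import (
	"fmt"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/grpc/status"
)

// handleAppendResponse surfaces append errors, row-level errors, and schema
// updates from a single AppendRowsResponse.
func handleAppendResponse(resp *storagepb.AppendRowsResponse) error {
	if st := resp.GetError(); st != nil {
		// Row-level problems are reported separately so the caller can drop the
		// bad rows and retry the batch.
		for _, re := range resp.GetRowErrors() {
			fmt.Printf("row %d: %s\n", re.GetIndex(), re.GetMessage())
		}
		return status.FromProto(st).Err()
	}
	if resp.GetUpdatedSchema() != nil {
		fmt.Println("schema updated; later requests may use the new fields")
	}
	if r := resp.GetAppendResult(); r != nil && r.GetOffset() != nil {
		fmt.Println("appended at offset", r.GetOffset().GetValue())
	}
	return nil
}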

func (*AppendRowsResponse) Descriptor

func (*AppendRowsResponse) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsResponse.ProtoReflect.Descriptor instead.

func (*AppendRowsResponse) GetAppendResult

func (*AppendRowsResponse) GetError

func (x *AppendRowsResponse) GetError() *status.Status

func (*AppendRowsResponse) GetResponse

func (m *AppendRowsResponse) GetResponse() isAppendRowsResponse_Response

func (*AppendRowsResponse) GetRowErrors

func (x *AppendRowsResponse) GetRowErrors() []*RowError

func (*AppendRowsResponse) GetUpdatedSchema

func (x *AppendRowsResponse) GetUpdatedSchema() *TableSchema

func (*AppendRowsResponse) GetWriteStream

func (x *AppendRowsResponse) GetWriteStream() string

func (*AppendRowsResponse) ProtoMessage

func (*AppendRowsResponse) ProtoMessage()

func (*AppendRowsResponse) ProtoReflect

func (x *AppendRowsResponse) ProtoReflect() protoreflect.Message

func (*AppendRowsResponse) Reset

func (x *AppendRowsResponse) Reset()

func (*AppendRowsResponse) String

func (x *AppendRowsResponse) String() string

AppendRowsResponse_AppendResult

type AppendRowsResponse_AppendResult struct {

	// The row offset at which the last append occurred. The offset will not be
	// set if appending using default streams.
	Offset *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=offset,proto3" json:"offset,omitempty"`
	// contains filtered or unexported fields
}

AppendResult is returned for successful append requests.

func (*AppendRowsResponse_AppendResult) Descriptor

func (*AppendRowsResponse_AppendResult) Descriptor() ([]byte, []int)

Deprecated: Use AppendRowsResponse_AppendResult.ProtoReflect.Descriptor instead.

func (*AppendRowsResponse_AppendResult) GetOffset

func (*AppendRowsResponse_AppendResult) ProtoMessage

func (*AppendRowsResponse_AppendResult) ProtoMessage()

func (*AppendRowsResponse_AppendResult) ProtoReflect

func (*AppendRowsResponse_AppendResult) Reset

func (*AppendRowsResponse_AppendResult) String

AppendRowsResponse_AppendResult_

type AppendRowsResponse_AppendResult_ struct {
	// Result if the append is successful.
	AppendResult *AppendRowsResponse_AppendResult `protobuf:"bytes,1,opt,name=append_result,json=appendResult,proto3,oneof"`
}

AppendRowsResponse_Error

type AppendRowsResponse_Error struct {
	// Error returned when problems were encountered.  If present,
	// it indicates rows were not accepted into the system.
	// Users can retry or continue with other append requests within the
	// same connection.
	//
	// Additional information about error signalling:
	//
	// ALREADY_EXISTS: Happens when an append specified an offset, and the
	// backend already has received data at this offset.  Typically encountered
	// in retry scenarios, and can be ignored.
	//
	// OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
	// the current end of the stream.
	//
	// INVALID_ARGUMENT: Indicates a malformed request or data.
	//
	// ABORTED: Request processing is aborted because of prior failures.  The
	// request can be retried if previous failure is addressed.
	//
	// INTERNAL: Indicates server side error(s) that can be retried.
	Error *status.Status `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
}

ArrowRecordBatch

type ArrowRecordBatch struct {

	// IPC-serialized Arrow RecordBatch.
	SerializedRecordBatch []byte `protobuf:"bytes,1,opt,name=serialized_record_batch,json=serializedRecordBatch,proto3" json:"serialized_record_batch,omitempty"`
	// [Deprecated] The count of rows in `serialized_record_batch`.
	// Please use the format-independent ReadRowsResponse.row_count instead.
	//
	// Deprecated: Do not use.
	RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// contains filtered or unexported fields
}

Arrow RecordBatch.

func (*ArrowRecordBatch) Descriptor

func (*ArrowRecordBatch) Descriptor() ([]byte, []int)

Deprecated: Use ArrowRecordBatch.ProtoReflect.Descriptor instead.

func (*ArrowRecordBatch) GetRowCount

func (x *ArrowRecordBatch) GetRowCount() int64

Deprecated: Do not use.

func (*ArrowRecordBatch) GetSerializedRecordBatch

func (x *ArrowRecordBatch) GetSerializedRecordBatch() []byte

func (*ArrowRecordBatch) ProtoMessage

func (*ArrowRecordBatch) ProtoMessage()

func (*ArrowRecordBatch) ProtoReflect

func (x *ArrowRecordBatch) ProtoReflect() protoreflect.Message

func (*ArrowRecordBatch) Reset

func (x *ArrowRecordBatch) Reset()

func (*ArrowRecordBatch) String

func (x *ArrowRecordBatch) String() string

ArrowSchema

type ArrowSchema struct {

	// IPC serialized Arrow schema.
	SerializedSchema []byte `protobuf:"bytes,1,opt,name=serialized_schema,json=serializedSchema,proto3" json:"serialized_schema,omitempty"`
	// contains filtered or unexported fields
}

Arrow schema as specified in https://arrow.apache.org/docs/python/api/datatypes.html and serialized to bytes using IPC: https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc

See the code samples for how this message can be deserialized.
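A hedged sketch of one way to decode it with the Apache Arrow Go module; the module path and major version below are assumptions about your toolchain, not part of this package. In the read samples, the session's serialized schema bytes are typically concatenated with each ArrowRecordBatch's serialized bytes before constructing the IPC reader.

package example

import (
	"bytes"
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/ipc"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// printArrowSchema decodes the IPC stream containing the schema message and
// prints the resulting Arrow schema.
func printArrowSchema(s *storagepb.ArrowSchema) error {
	r, err := ipc.NewReader(bytes.NewReader(s.GetSerializedSchema()))
	if err != nil {
		return err
	}
	defer r.Release()
	fmt.Println(r.Schema())
	return nil
}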

func (*ArrowSchema) Descriptor

func (*ArrowSchema) Descriptor() ([]byte, []int)

Deprecated: Use ArrowSchema.ProtoReflect.Descriptor instead.

func (*ArrowSchema) GetSerializedSchema

func (x *ArrowSchema) GetSerializedSchema() []byte

func (*ArrowSchema) ProtoMessage

func (*ArrowSchema) ProtoMessage()

func (*ArrowSchema) ProtoReflect

func (x *ArrowSchema) ProtoReflect() protoreflect.Message

func (*ArrowSchema) Reset

func (x *ArrowSchema) Reset()

func (*ArrowSchema) String

func (x *ArrowSchema) String() string

ArrowSerializationOptions

type ArrowSerializationOptions struct {
	BufferCompression ArrowSerializationOptions_CompressionCodec "" /* 194 byte string literal not displayed */

}

Contains options specific to Arrow Serialization.

func (*ArrowSerializationOptions) Descriptor

func (*ArrowSerializationOptions) Descriptor() ([]byte, []int)

Deprecated: Use ArrowSerializationOptions.ProtoReflect.Descriptor instead.

func (*ArrowSerializationOptions) GetBufferCompression

func (*ArrowSerializationOptions) ProtoMessage

func (*ArrowSerializationOptions) ProtoMessage()

func (*ArrowSerializationOptions) ProtoReflect

func (*ArrowSerializationOptions) Reset

func (x *ArrowSerializationOptions) Reset()

func (*ArrowSerializationOptions) String

func (x *ArrowSerializationOptions) String() string

ArrowSerializationOptions_CompressionCodec

type ArrowSerializationOptions_CompressionCodec int32

Compression codecs supported by Arrow.

ArrowSerializationOptions_COMPRESSION_UNSPECIFIED, ArrowSerializationOptions_LZ4_FRAME, ArrowSerializationOptions_ZSTD

const (
	// If unspecified no compression will be used.
	ArrowSerializationOptions_COMPRESSION_UNSPECIFIED ArrowSerializationOptions_CompressionCodec = 0
	// LZ4 Frame (https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md)
	ArrowSerializationOptions_LZ4_FRAME ArrowSerializationOptions_CompressionCodec = 1
	// Zstandard compression.
	ArrowSerializationOptions_ZSTD ArrowSerializationOptions_CompressionCodec = 2
)

func (ArrowSerializationOptions_CompressionCodec) Descriptor

func (ArrowSerializationOptions_CompressionCodec) Enum

func (ArrowSerializationOptions_CompressionCodec) EnumDescriptor

func (ArrowSerializationOptions_CompressionCodec) EnumDescriptor() ([]byte, []int)

Deprecated: Use ArrowSerializationOptions_CompressionCodec.Descriptor instead.

func (ArrowSerializationOptions_CompressionCodec) Number

func (ArrowSerializationOptions_CompressionCodec) String

func (ArrowSerializationOptions_CompressionCodec) Type

AvroRows

type AvroRows struct {

	// Binary serialized rows in a block.
	SerializedBinaryRows []byte `protobuf:"bytes,1,opt,name=serialized_binary_rows,json=serializedBinaryRows,proto3" json:"serialized_binary_rows,omitempty"`
	// [Deprecated] The count of rows in the returning block.
	// Please use the format-independent ReadRowsResponse.row_count instead.
	//
	// Deprecated: Do not use.
	RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// contains filtered or unexported fields
}

Avro rows.

func (*AvroRows) Descriptor

func (*AvroRows) Descriptor() ([]byte, []int)

Deprecated: Use AvroRows.ProtoReflect.Descriptor instead.

func (*AvroRows) GetRowCount

func (x *AvroRows) GetRowCount() int64

Deprecated: Do not use.

func (*AvroRows) GetSerializedBinaryRows

func (x *AvroRows) GetSerializedBinaryRows() []byte

func (*AvroRows) ProtoMessage

func (*AvroRows) ProtoMessage()

func (*AvroRows) ProtoReflect

func (x *AvroRows) ProtoReflect() protoreflect.Message

func (*AvroRows) Reset

func (x *AvroRows) Reset()

func (*AvroRows) String

func (x *AvroRows) String() string

AvroSchema

type AvroSchema struct {

	// JSON serialized schema, as described at
	// https://avro.apache.org/docs/1.8.1/spec.html.
	Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"`
	// contains filtered or unexported fields
}

Avro schema.
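A hedged sketch of decoding AvroRows against this schema with the third-party linkedin/goavro module; that dependency and its import path are assumptions, not part of this package. Each serialized block holds the binary rows back to back, so decoding consumes the buffer until it is empty.

package example

import (
	"fmt"

	"github.com/linkedin/goavro/v2"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// decodeAvroRows decodes every row in a block using the session's Avro schema.
func decodeAvroRows(schema *storagepb.AvroSchema, rows *storagepb.AvroRows) error {
	codec, err := goavro.NewCodec(schema.GetSchema())
	if err != nil {
		return err
	}
	buf := rows.GetSerializedBinaryRows()
	for len(buf) > 0 {
		datum, remaining, err := codec.NativeFromBinary(buf)
		if err != nil {
			return err
		}
		fmt.Println(datum)
		buf = remaining
	}
	return nil
}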

func (*AvroSchema) Descriptor

func (*AvroSchema) Descriptor() ([]byte, []int)

Deprecated: Use AvroSchema.ProtoReflect.Descriptor instead.

func (*AvroSchema) GetSchema

func (x *AvroSchema) GetSchema() string

func (*AvroSchema) ProtoMessage

func (*AvroSchema) ProtoMessage()

func (*AvroSchema) ProtoReflect

func (x *AvroSchema) ProtoReflect() protoreflect.Message

func (*AvroSchema) Reset

func (x *AvroSchema) Reset()

func (*AvroSchema) String

func (x *AvroSchema) String() string

AvroSerializationOptions

type AvroSerializationOptions struct {
	EnableDisplayNameAttribute bool "" /* 144 byte string literal not displayed */

}

Contains options specific to Avro Serialization.

func (*AvroSerializationOptions) Descriptor

func (*AvroSerializationOptions) Descriptor() ([]byte, []int)

Deprecated: Use AvroSerializationOptions.ProtoReflect.Descriptor instead.

func (*AvroSerializationOptions) GetEnableDisplayNameAttribute

func (x *AvroSerializationOptions) GetEnableDisplayNameAttribute() bool

func (*AvroSerializationOptions) ProtoMessage

func (*AvroSerializationOptions) ProtoMessage()

func (*AvroSerializationOptions) ProtoReflect

func (x *AvroSerializationOptions) ProtoReflect() protoreflect.Message

func (*AvroSerializationOptions) Reset

func (x *AvroSerializationOptions) Reset()

func (*AvroSerializationOptions) String

func (x *AvroSerializationOptions) String() string

BatchCommitWriteStreamsRequest

type BatchCommitWriteStreamsRequest struct {

	// Required. Parent table that all the streams should belong to, in the form
	// of `projects/{project}/datasets/{dataset}/tables/{table}`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The group of streams that will be committed atomically.
	WriteStreams []string `protobuf:"bytes,2,rep,name=write_streams,json=writeStreams,proto3" json:"write_streams,omitempty"`
	// contains filtered or unexported fields
}

Request message for BatchCommitWriteStreams.

func (*BatchCommitWriteStreamsRequest) Descriptor

func (*BatchCommitWriteStreamsRequest) Descriptor() ([]byte, []int)

Deprecated: Use BatchCommitWriteStreamsRequest.ProtoReflect.Descriptor instead.

func (*BatchCommitWriteStreamsRequest) GetParent

func (x *BatchCommitWriteStreamsRequest) GetParent() string

func (*BatchCommitWriteStreamsRequest) GetWriteStreams

func (x *BatchCommitWriteStreamsRequest) GetWriteStreams() []string

func (*BatchCommitWriteStreamsRequest) ProtoMessage

func (*BatchCommitWriteStreamsRequest) ProtoMessage()

func (*BatchCommitWriteStreamsRequest) ProtoReflect

func (*BatchCommitWriteStreamsRequest) Reset

func (x *BatchCommitWriteStreamsRequest) Reset()

func (*BatchCommitWriteStreamsRequest) String

BatchCommitWriteStreamsResponse

type BatchCommitWriteStreamsResponse struct {

	// The time at which streams were committed, with microsecond granularity.
	// This field will only exist when there are no stream errors.
	// **Note:** if this field is not set, the commit was not successful.
	CommitTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=commit_time,json=commitTime,proto3" json:"commit_time,omitempty"`
	// Stream level error if commit failed. Only streams with error will be in
	// the list.
	// If empty, there is no error and all streams are committed successfully.
	// If non-empty, certain streams have errors and zero streams are committed
	// due to the atomicity guarantee.
	StreamErrors []*StorageError `protobuf:"bytes,2,rep,name=stream_errors,json=streamErrors,proto3" json:"stream_errors,omitempty"`
	// contains filtered or unexported fields
}

Response message for BatchCommitWriteStreams.
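A minimal sketch of committing finalized PENDING streams and checking the atomic result; the client, parent, and stream names are placeholders, and the StorageError accessor used here is defined elsewhere in this package.

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
)

// commitStreams commits a group of finalized streams and reports the outcome.
func commitStreams(ctx context.Context, client storagepb.BigQueryWriteClient, parent string, streams []string) error {
	resp, err := client.BatchCommitWriteStreams(ctx, &storagepb.BatchCommitWriteStreamsRequest{
		Parent:       parent,
		WriteStreams: streams,
	})
	if err != nil {
		return err
	}
	// The commit is atomic: either commit_time is set and every stream is
	// committed, or stream_errors lists the failures and nothing is committed.
	if errs := resp.GetStreamErrors(); len(errs) > 0 {
		for _, se := range errs {
			fmt.Println("stream error:", se.GetErrorMessage())
		}
		return fmt.Errorf("commit failed for %d stream(s)", len(errs))
	}
	fmt.Println("committed at", resp.GetCommitTime().AsTime())
	return nil
}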

func (*BatchCommitWriteStreamsResponse) Descriptor

func (*BatchCommitWriteStreamsResponse) Descriptor() ([]byte, []int)

Deprecated: Use BatchCommitWriteStreamsResponse.ProtoReflect.Descriptor instead.

func (*BatchCommitWriteStreamsResponse) GetCommitTime

func (*BatchCommitWriteStreamsResponse) GetStreamErrors

func (x *BatchCommitWriteStreamsResponse) GetStreamErrors() []*StorageError

func (*BatchCommitWriteStreamsResponse) ProtoMessage

func (*BatchCommitWriteStreamsResponse) ProtoMessage()

func (*BatchCommitWriteStreamsResponse) ProtoReflect

func (*BatchCommitWriteStreamsResponse) Reset

func (*BatchCommitWriteStreamsResponse) String

BigQueryReadClient

type BigQueryReadClient interface {
	// Creates a new read session. A read session divides the contents of a
	// BigQuery table into one or more streams, which can then be used to read
	// data from the table. The read session also specifies properties of the
	// data to be read, such as a list of columns or a push-down filter describing
	// the rows to be returned.
	//
	// A particular row can be read by at most one stream. When the caller has
	// reached the end of each stream in the session, then all the data in the
	// table has been read.
	//
	// Data is assigned to each stream such that roughly the same number of
	// rows can be read from each stream. Because the server-side unit for
	// assigning data is collections of rows, the API does not guarantee that
	// each stream will return the same number of rows. Additionally, the
	// limits are enforced based on the number of pre-filtered rows, so some
	// filters can lead to lopsided assignments.
	//
	// Read sessions automatically expire 6 hours after they are created and do
	// not require manual clean-up by the caller.
	CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error)
	// Reads rows from the stream in the format prescribed by the ReadSession.
	// Each response contains one or more table rows, up to a maximum of 100 MiB
	// per response; read requests which attempt to read individual rows larger
	// than 100 MiB will fail.
	//
	// Each request also returns a set of stream statistics reflecting the current
	// state of the stream.
	ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryRead_ReadRowsClient, error)
	// Splits a given `ReadStream` into two `ReadStream` objects. These
	// `ReadStream` objects are referred to as the primary and the residual
	// streams of the split. The original `ReadStream` can still be read from in
	// the same manner as before. Both of the returned `ReadStream` objects can
	// also be read from, and the rows returned by both child streams will be
	// the same as the rows read from the original stream.
	//
	// Moreover, the two child streams will be allocated back-to-back in the
	// original `ReadStream`. Concretely, it is guaranteed that for streams
	// original, primary, and residual, that original[0-j] = primary[0-j] and
	// original[j-n] = residual[0-m] once the streams have been read to
	// completion.
	SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error)
}

BigQueryReadClient is the client API for BigQueryRead service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewBigQueryReadClient

func NewBigQueryReadClient(cc grpc.ClientConnInterface) BigQueryReadClient
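A minimal sketch of the read flow over these raw stubs, assuming an already-dialed *grpc.ClientConn and placeholder project and table names; the request and session field names used here belong to messages defined elsewhere in this package, and production code typically goes through the higher-level apiv1 client instead.

package example

import (
	"context"
	"fmt"
	"io"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/grpc"
)

// readTable creates a single-stream read session and drains it.
func readTable(ctx context.Context, conn *grpc.ClientConn) error {
	client := storagepb.NewBigQueryReadClient(conn)

	session, err := client.CreateReadSession(ctx, &storagepb.CreateReadSessionRequest{
		Parent: "projects/my-project",
		ReadSession: &storagepb.ReadSession{
			Table:      "projects/my-project/datasets/d/tables/t",
			DataFormat: storagepb.DataFormat_AVRO,
		},
		MaxStreamCount: 1,
	})
	if err != nil {
		return err
	}
	if len(session.GetStreams()) == 0 {
		return fmt.Errorf("no streams in session (empty table)")
	}

	stream, err := client.ReadRows(ctx, &storagepb.ReadRowsRequest{
		ReadStream: session.GetStreams()[0].GetName(),
	})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println("rows in response:", resp.GetRowCount())
	}
}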

BigQueryReadServer

type BigQueryReadServer interface {
	// Creates a new read session. A read session divides the contents of a
	// BigQuery table into one or more streams, which can then be used to read
	// data from the table. The read session also specifies properties of the
	// data to be read, such as a list of columns or a push-down filter describing
	// the rows to be returned.
	//
	// A particular row can be read by at most one stream. When the caller has
	// reached the end of each stream in the session, then all the data in the
	// table has been read.
	//
	// Data is assigned to each stream such that roughly the same number of
	// rows can be read from each stream. Because the server-side unit for
	// assigning data is collections of rows, the API does not guarantee that
	// each stream will return the same number of rows. Additionally, the
	// limits are enforced based on the number of pre-filtered rows, so some
	// filters can lead to lopsided assignments.
	//
	// Read sessions automatically expire 6 hours after they are created and do
	// not require manual clean-up by the caller.
	CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
	// Reads rows from the stream in the format prescribed by the ReadSession.
	// Each response contains one or more table rows, up to a maximum of 100 MiB
	// per response; read requests which attempt to read individual rows larger
	// than 100 MiB will fail.
	//
	// Each request also returns a set of stream statistics reflecting the current
	// state of the stream.
	ReadRows(*ReadRowsRequest, BigQueryRead_ReadRowsServer) error
	// Splits a given `ReadStream` into two `ReadStream` objects. These
	// `ReadStream` objects are referred to as the primary and the residual
	// streams of the split. The original `ReadStream` can still be read from in
	// the same manner as before. Both of the returned `ReadStream` objects can
	// also be read from, and the rows returned by both child streams will be
	// the same as the rows read from the original stream.
	//
	// Moreover, the two child streams will be allocated back-to-back in the
	// original `ReadStream`. Concretely, it is guaranteed that for streams
	// original, primary, and residual, that original[0-j] = primary[0-j] and
	// original[j-n] = residual[0-m] once the streams have been read to
	// completion.
	SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
}

BigQueryReadServer is the server API for BigQueryRead service.

BigQueryRead_ReadRowsClient

type BigQueryRead_ReadRowsClient interface {
	Recv() (*ReadRowsResponse, error)
	grpc.ClientStream
}

BigQueryRead_ReadRowsServer

type BigQueryRead_ReadRowsServer interface {
	Send(*ReadRowsResponse) error
	grpc.ServerStream
}

BigQueryWriteClient

type BigQueryWriteClient interface {
	// Creates a write stream to the given table.
	// Additionally, every table has a special stream named '_default'
	// to which data can be written. This stream doesn't need to be created using
	// CreateWriteStream. It is a stream that can be used simultaneously by any
	// number of clients. Data written to this stream is considered committed as
	// soon as an acknowledgement is received.
	CreateWriteStream(ctx context.Context, in *CreateWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
	// Appends data to the given stream.
	//
	// If `offset` is specified, the `offset` is checked against the end of
	// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
	// attempt is made to append to an offset beyond the current end of the stream
	// or `ALREADY_EXISTS` if the user provides an `offset` that has already been
	// written to. Users can retry with an adjusted offset within the same RPC
	// connection. If `offset` is not specified, append happens at the end of the
	// stream.
	//
	// The response contains an optional offset at which the append
	// happened.  No offset information will be returned for appends to a
	// default stream.
	//
	// Responses are received in the same order in which requests are sent.
	// There will be one response for each successfully inserted request.  Responses
	// may optionally embed error information if the originating AppendRequest was
	// not successfully processed.
	//
	// The specifics of when successfully appended data is made visible to the
	// table are governed by the type of stream:
	//
	// * For COMMITTED streams (which includes the default stream), data is
	// visible immediately upon successful append.
	//
	// * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
	// rpc which advances a cursor to a newer offset in the stream.
	//
	// * For PENDING streams, data is not made visible until the stream itself is
	// finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
	// committed via the `BatchCommitWriteStreams` rpc.
	AppendRows(ctx context.Context, opts ...grpc.CallOption) (BigQueryWrite_AppendRowsClient, error)
	// Gets information about a write stream.
	GetWriteStream(ctx context.Context, in *GetWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
	// Finalize a write stream so that no new data can be appended to the
	// stream. Finalize is not supported on the '_default' stream.
	FinalizeWriteStream(ctx context.Context, in *FinalizeWriteStreamRequest, opts ...grpc.CallOption) (*FinalizeWriteStreamResponse, error)
	// Atomically commits a group of `PENDING` streams that belong to the same
	// `parent` table.
	//
	// Streams must be finalized before commit and cannot be committed multiple
	// times. Once a stream is committed, data in the stream becomes available
	// for read operations.
	BatchCommitWriteStreams(ctx context.Context, in *BatchCommitWriteStreamsRequest, opts ...grpc.CallOption) (*BatchCommitWriteStreamsResponse, error)
	// Flushes rows to a BUFFERED stream.
	//
	// If users are appending rows to a BUFFERED stream, a flush operation is
	// required in order for the rows to become available for reading. A
	// flush operation commits rows in a BUFFERED stream, from any previously
	// flushed offset up to the offset specified in the request.
	//
	// Flush is not supported on the _default stream, since it is not BUFFERED.
	FlushRows(ctx context.Context, in *FlushRowsRequest, opts ...grpc.CallOption) (*FlushRowsResponse, error)
}

BigQueryWriteClient is the client API for BigQueryWrite service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewBigQueryWriteClient

func NewBigQueryWriteClient(cc grpc.ClientConnInterface) BigQueryWriteClient
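A minimal sketch of one round trip on the bidirectional append stream, assuming an already-dialed *grpc.ClientConn and a prebuilt AppendRowsRequest (see the sketch under AppendRowsRequest above); production code typically uses the managedwriter package rather than these stubs directly.

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/grpc"
)

// appendOnce sends a single AppendRowsRequest and waits for its response.
func appendOnce(ctx context.Context, conn *grpc.ClientConn, req *storagepb.AppendRowsRequest) error {
	client := storagepb.NewBigQueryWriteClient(conn)

	stream, err := client.AppendRows(ctx)
	if err != nil {
		return err
	}
	if err := stream.Send(req); err != nil {
		return err
	}
	// Responses come back in the order the requests were sent.
	resp, err := stream.Recv()
	if err != nil {
		return err
	}
	if st := resp.GetError(); st != nil {
		return fmt.Errorf("append failed: %s", st.GetMessage())
	}
	if r := resp.GetAppendResult(); r != nil && r.GetOffset() != nil {
		fmt.Println("appended at offset", r.GetOffset().GetValue())
	}
	return stream.CloseSend()
}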

BigQueryWriteServer

type BigQueryWriteServer interface {
	// Creates a write stream to the given table.
	// Additionally, every table has a special stream named '_default'
	// to which data can be written. This stream doesn't need to be created using
	// CreateWriteStream. It is a stream that can be used simultaneously by any
	// number of clients. Data written to this stream is considered committed as
	// soon as an acknowledgement is received.
	CreateWriteStream(context.Context, *CreateWriteStreamRequest) (*WriteStream, error)
	// Appends data to the given stream.
	//
	// If `offset` is specified, the `offset` is checked against the end of
	// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
	// attempt is made to append to an offset beyond the current end of the stream
	// or `ALREADY_EXISTS` if the user provides an `offset` that has already been
	// written to. Users can retry with an adjusted offset within the same RPC
	// connection. If `offset` is not specified, append happens at the end of the
	// stream.
	//
	// The response contains an optional offset at which the append
	// happened.  No offset information will be returned for appends to a
	// default stream.
	//
	// Responses are received in the same order in which requests are sent.
	// There will be one response for each successfully inserted request.  Responses
	// may optionally embed error information if the originating AppendRequest was
	// not successfully processed.
	//
	// The specifics of when successfully appended data is made visible to the
	// table are governed by the type of stream:
	//
	// * For COMMITTED streams (which includes the default stream), data is
	// visible immediately upon successful append.
	//
	// * For BUFFERED streams, data is made visible via a subsequent `FlushRows`
	// rpc which advances a cursor to a newer offset in the stream.
	//
	// * For PENDING streams, data is not made visible until the stream itself is
	// finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
	// committed via the `BatchCommitWriteStreams` rpc.
	AppendRows(BigQueryWrite_AppendRowsServer) error
	// Gets information about a write stream.
	GetWriteStream(context.Context, *GetWriteStreamRequest) (*WriteStream, error)