Documentation ¶
Index ¶
- Constants
- Variables
- func Ptr[T any](t T) *T
- func SetDefaultBaseURLs(baseURLParams BaseURLParameters)
- type APIAuth
- type APIAuthAPIKeyConfig
- type APIError
- type APIKeyConfig
- type APISpec
- type ActivityEnd
- type ActivityHandling
- type ActivityStart
- type AdapterSize
- type AudioTranscriptionConfig
- type AuthConfig
- type AuthConfigGoogleServiceAccountConfig
- type AuthConfigHTTPBasicAuthConfig
- type AuthConfigOauthConfig
- type AuthConfigOidcConfig
- type AuthType
- type AutomaticActivityDetection
- type Backend
- type BaseURLParameters
- type BatchJob
- type BatchJobDestination
- type BatchJobSource
- type Batches
- func (b Batches) All(ctx context.Context) iter.Seq2[*BatchJob, error]
- func (m Batches) Cancel(ctx context.Context, name string, config *CancelBatchJobConfig) error
- func (b Batches) Create(ctx context.Context, model string, src *BatchJobSource, ...) (*BatchJob, error)
- func (m Batches) Delete(ctx context.Context, name string, config *DeleteBatchJobConfig) (*DeleteResourceJob, error)
- func (m Batches) Get(ctx context.Context, name string, config *GetBatchJobConfig) (*BatchJob, error)
- func (b Batches) List(ctx context.Context, config *ListBatchJobsConfig) (Page[BatchJob], error)
- type Behavior
- type Blob
- type BlockedReason
- type CachedContent
- type CachedContentUsageMetadata
- type Caches
- func (m Caches) All(ctx context.Context) iter.Seq2[*CachedContent, error]
- func (m Caches) Create(ctx context.Context, model string, config *CreateCachedContentConfig) (*CachedContent, error)
- func (m Caches) Delete(ctx context.Context, name string, config *DeleteCachedContentConfig) (*DeleteCachedContentResponse, error)
- func (m Caches) Get(ctx context.Context, name string, config *GetCachedContentConfig) (*CachedContent, error)
- func (m Caches) List(ctx context.Context, config *ListCachedContentsConfig) (Page[CachedContent], error)
- func (m Caches) Update(ctx context.Context, name string, config *UpdateCachedContentConfig) (*CachedContent, error)
- type CancelBatchJobConfig
- type CancelTuningJobConfig
- type Candidate
- type Chat
- func (c *Chat) History(curated bool) []*Content
- func (c *Chat) Send(ctx context.Context, parts ...*Part) (*GenerateContentResponse, error)
- func (c *Chat) SendMessage(ctx context.Context, parts ...Part) (*GenerateContentResponse, error)
- func (c *Chat) SendMessageStream(ctx context.Context, parts ...Part) iter.Seq2[*GenerateContentResponse, error]
- func (c *Chat) SendStream(ctx context.Context, parts ...*Part) iter.Seq2[*GenerateContentResponse, error]
- type Chats
- type Checkpoint
- type Citation
- type CitationMetadata
- type Client
- type ClientConfig
- type CodeExecutionResult
- type ComputeTokensConfig
- type ComputeTokensResponse
- type Content
- func NewContentFromBytes(data []byte, mimeType string, role Role) *Content
- func NewContentFromCodeExecutionResult(outcome Outcome, output string, role Role) *Content
- func NewContentFromExecutableCode(code string, language Language, role Role) *Content
- func NewContentFromFunctionCall(name string, args map[string]any, role Role) *Content
- func NewContentFromFunctionResponse(name string, response map[string]any, role Role) *Content
- func NewContentFromParts(parts []*Part, role Role) *Content
- func NewContentFromText(text string, role Role) *Content
- func NewContentFromURI(fileURI, mimeType string, role Role) *Content
- func Text(text string) []*Content
- type ContentEmbedding
- type ContentEmbeddingStatistics
- type ContextWindowCompressionConfig
- type ControlReferenceConfig
- type ControlReferenceImage
- type ControlReferenceType
- type CountTokensConfig
- type CountTokensResponse
- type CreateBatchJobConfig
- type CreateCachedContentConfig
- type CreateFileConfig
- type CreateFileResponse
- type CreateTuningJobConfig
- type DatasetDistribution
- type DatasetDistributionDistributionBucket
- type DatasetStats
- type DeleteBatchJobConfig
- type DeleteCachedContentConfig
- type DeleteCachedContentResponse
- type DeleteFileConfig
- type DeleteFileResponse
- type DeleteModelConfig
- type DeleteModelResponse
- type DeleteResourceJob
- type DistillationDataStats
- type DownloadFileConfig
- type DownloadURI
- type DynamicRetrievalConfig
- type DynamicRetrievalConfigMode
- type EditImageConfig
- type EditImageResponse
- type EditMode
- type EmbedContentConfig
- type EmbedContentMetadata
- type EmbedContentResponse
- type EncryptionSpec
- type EndSensitivity
- type Endpoint
- type EnterpriseWebSearch
- type EntityLabel
- type Environment
- type ExecutableCode
- type ExternalAPI
- type ExternalAPIElasticSearchParams
- type ExternalAPISimpleSearchParams
- type ExtrasRequestProvider
- type FeatureSelectionPreference
- type FetchPredictOperationConfig
- type File
- type FileData
- type FileSource
- type FileState
- type FileStatus
- type Files
- func (m Files) All(ctx context.Context) iter.Seq2[*File, error]
- func (m Files) Delete(ctx context.Context, name string, config *DeleteFileConfig) (*DeleteFileResponse, error)
- func (m Files) Download(ctx context.Context, uri DownloadURI, config *DownloadFileConfig) ([]byte, error)
- func (m Files) Get(ctx context.Context, name string, config *GetFileConfig) (*File, error)
- func (m Files) List(ctx context.Context, config *ListFilesConfig) (Page[File], error)
- func (m Files) Upload(ctx context.Context, r io.Reader, config *UploadFileConfig) (*File, error)
- func (m Files) UploadFromPath(ctx context.Context, path string, config *UploadFileConfig) (*File, error)
- type FinishReason
- type FunctionCall
- type FunctionCallingConfig
- type FunctionCallingConfigMode
- type FunctionDeclaration
- type FunctionResponse
- type FunctionResponseScheduling
- type GeminiPreferenceExample
- type GeminiPreferenceExampleCompletion
- type GenerateContentConfig
- type GenerateContentResponse
- func (r *GenerateContentResponse) CodeExecutionResult() string
- func (r *GenerateContentResponse) ExecutableCode() string
- func (r *GenerateContentResponse) FunctionCalls() []*FunctionCall
- func (g *GenerateContentResponse) MarshalJSON() ([]byte, error)
- func (r *GenerateContentResponse) Text() string
- func (g *GenerateContentResponse) UnmarshalJSON(data []byte) error
- type GenerateContentResponsePromptFeedback
- type GenerateContentResponseUsageMetadata
- type GenerateImagesConfig
- type GenerateImagesResponse
- type GenerateVideosConfig
- type GenerateVideosOperation
- type GenerateVideosResponse
- type GenerateVideosSource
- type GeneratedImage
- type GeneratedImageMask
- type GeneratedVideo
- type GenerationConfig
- type GenerationConfigRoutingConfig
- type GenerationConfigRoutingConfigAutoRoutingMode
- type GenerationConfigRoutingConfigManualRoutingMode
- type GenerationConfigThinkingConfig
- type GetBatchJobConfig
- type GetCachedContentConfig
- type GetFileConfig
- type GetModelConfig
- type GetOperationConfig
- type GetTuningJobConfig
- type GoogleMaps
- type GoogleRpcStatus
- type GoogleSearch
- type GoogleSearchRetrieval
- type GroundingChunk
- type GroundingChunkMaps
- type GroundingChunkMapsPlaceAnswerSources
- type GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution
- type GroundingChunkMapsPlaceAnswerSourcesReviewSnippet
- type GroundingChunkRetrievedContext
- type GroundingChunkWeb
- type GroundingMetadata
- type GroundingSupport
- type HTTPOptions
- type HTTPResponse
- type HarmBlockMethod
- type HarmBlockThreshold
- type HarmCategory
- type HarmProbability
- type HarmSeverity
- type Image
- type ImagePromptLanguage
- type InlinedRequest
- type InlinedResponse
- type Interval
- type JobError
- type JobState
- type Language
- type LatLng
- type ListBatchJobsConfig
- type ListBatchJobsResponse
- type ListCachedContentsConfig
- type ListCachedContentsResponse
- type ListFilesConfig
- type ListFilesResponse
- type ListModelsConfig
- type ListModelsResponse
- type ListTuningJobsConfig
- type ListTuningJobsResponse
- type Live
- type LiveClientContent
- type LiveClientContentInput
- type LiveClientMessage
- type LiveClientRealtimeInput
- type LiveClientSetup
- type LiveClientToolResponse
- type LiveConnectConfig
- type LiveRealtimeInput
- type LiveSendClientContentParameters
- type LiveSendRealtimeInputParameters
- type LiveSendToolResponseParameters
- type LiveServerContent
- type LiveServerGoAway
- type LiveServerMessage
- type LiveServerSessionResumptionUpdate
- type LiveServerSetupComplete
- type LiveServerToolCall
- type LiveServerToolCallCancellation
- type LiveToolResponseInput
- type LogprobsResult
- type LogprobsResultCandidate
- type LogprobsResultTopCandidates
- type MaskReferenceConfig
- type MaskReferenceImage
- type MaskReferenceMode
- type MediaModality
- type MediaResolution
- type Modality
- type ModalityTokenCount
- type Mode
- type Model
- type ModelSelectionConfig
- type Models
- func (m Models) All(ctx context.Context) iter.Seq2[*Model, error]
- func (m Models) ComputeTokens(ctx context.Context, model string, contents []*Content, ...) (*ComputeTokensResponse, error)
- func (m Models) CountTokens(ctx context.Context, model string, contents []*Content, ...) (*CountTokensResponse, error)
- func (m Models) Delete(ctx context.Context, model string, config *DeleteModelConfig) (*DeleteModelResponse, error)
- func (m Models) EditImage(ctx context.Context, model, prompt string, referenceImages []ReferenceImage, ...) (*EditImageResponse, error)
- func (m Models) EmbedContent(ctx context.Context, model string, contents []*Content, ...) (*EmbedContentResponse, error)
- func (m Models) GenerateContent(ctx context.Context, model string, contents []*Content, ...) (*GenerateContentResponse, error)
- func (m Models) GenerateContentStream(ctx context.Context, model string, contents []*Content, ...) iter.Seq2[*GenerateContentResponse, error]
- func (m Models) GenerateImages(ctx context.Context, model string, prompt string, config *GenerateImagesConfig) (*GenerateImagesResponse, error)
- func (m Models) GenerateVideos(ctx context.Context, model string, prompt string, image *Image, ...) (*GenerateVideosOperation, error)
- func (m Models) GenerateVideosFromSource(ctx context.Context, model string, source *GenerateVideosSource, ...) (*GenerateVideosOperation, error)
- func (m Models) Get(ctx context.Context, model string, config *GetModelConfig) (*Model, error)
- func (m Models) List(ctx context.Context, config *ListModelsConfig) (Page[Model], error)
- func (m Models) RecontextImage(ctx context.Context, model string, source *RecontextImageSource, ...) (*RecontextImageResponse, error)
- func (m Models) SegmentImage(ctx context.Context, model string, source *SegmentImageSource, ...) (*SegmentImageResponse, error)
- func (m Models) Update(ctx context.Context, model string, config *UpdateModelConfig) (*Model, error)
- func (m Models) UpscaleImage(ctx context.Context, model string, image *Image, upscaleFactor string, ...) (*UpscaleImageResponse, error)
- type MultiSpeakerVoiceConfig
- type Operations
- type Outcome
- type Page
- type Part
- func NewPartFromBytes(data []byte, mimeType string) *Part
- func NewPartFromCodeExecutionResult(outcome Outcome, output string) *Part
- func NewPartFromExecutableCode(code string, language Language) *Part
- func NewPartFromFile(file File) *Part
- func NewPartFromFunctionCall(name string, args map[string]any) *Part
- func NewPartFromFunctionResponse(name string, response map[string]any) *Part
- func NewPartFromText(text string) *Part
- func NewPartFromURI(fileURI, mimeType string) *Part
- type PartnerModelTuningSpec
- type PersonGeneration
- type PreTunedModel
- type PrebuiltVoiceConfig
- type PreferenceOptimizationDataStats
- type ProactivityConfig
- type ProductImage
- type RAGChunk
- type RAGChunkPageSpan
- type RAGRetrievalConfig
- type RAGRetrievalConfigFilter
- type RAGRetrievalConfigHybridSearch
- type RAGRetrievalConfigRanking
- type RAGRetrievalConfigRankingLlmRanker
- type RAGRetrievalConfigRankingRankService
- type RawReferenceImage
- type RealtimeInputConfig
- type RecontextImageConfig
- type RecontextImageResponse
- type RecontextImageSource
- type ReferenceImage
- type Retrieval
- type RetrievalConfig
- type RetrievalMetadata
- type Role
- type SafetyAttributes
- type SafetyFilterLevel
- type SafetyRating
- type SafetySetting
- type Schema
- type ScribbleImage
- type SearchEntryPoint
- type Segment
- type SegmentImageConfig
- type SegmentImageResponse
- type SegmentImageSource
- type SegmentMode
- type Session
- type SessionResumptionConfig
- type SlidingWindow
- type SpeakerVoiceConfig
- type SpeechConfig
- type StartSensitivity
- type StyleReferenceConfig
- type StyleReferenceImage
- type SubjectReferenceConfig
- type SubjectReferenceImage
- type SubjectReferenceType
- type SupervisedHyperParameters
- type SupervisedTuningDataStats
- type SupervisedTuningDatasetDistribution
- type SupervisedTuningDatasetDistributionDatasetBucket
- type SupervisedTuningSpec
- type ThinkingConfig
- type TokensInfo
- type Tool
- type ToolCodeExecution
- type ToolComputerUse
- type ToolConfig
- type TrafficType
- type Transcription
- type TunedModel
- type TunedModelCheckpoint
- type TunedModelInfo
- type TuningDataStats
- type TuningDataset
- type TuningExample
- type TuningJob
- type TuningMode
- type TuningOperation
- type TuningValidationDataset
- type Tunings
- func (t Tunings) All(ctx context.Context) iter.Seq2[*TuningJob, error]
- func (m Tunings) Cancel(ctx context.Context, name string, config *CancelTuningJobConfig) error
- func (t Tunings) Get(ctx context.Context, name string, config *GetTuningJobConfig) (*TuningJob, error)
- func (t Tunings) List(ctx context.Context, config *ListTuningJobsConfig) (Page[TuningJob], error)
- func (t Tunings) Tune(ctx context.Context, baseModel string, trainingDataset *TuningDataset, ...) (*TuningJob, error)
- type TurnCoverage
- type Type
- type URLContext
- type URLContextMetadata
- type URLMetadata
- type URLRetrievalStatus
- type UpdateCachedContentConfig
- type UpdateModelConfig
- type UploadFileConfig
- type UpscaleImageConfig
- type UpscaleImageResponse
- type UrlRetrievalStatus
- type UsageMetadata
- type VertexAISearch
- type VertexAISearchDataStoreSpec
- type VertexRAGStore
- type VertexRAGStoreRAGResource
- type Video
- type VideoCompressionQuality
- type VideoGenerationReferenceImage
- type VideoGenerationReferenceType
- type VideoMetadata
- type VoiceConfig
Examples ¶
- Chats (Geminiapi)
- Chats (Stream_geminiapi)
- Chats (Stream_vertexai)
- Chats (Vertexai)
- Models.GenerateContent (CodeExecution_geminiapi)
- Models.GenerateContent (CodeExecution_vertexai)
- Models.GenerateContent (Config_geminiapi)
- Models.GenerateContent (Config_vertexai)
- Models.GenerateContent (GcsURI_vertexai)
- Models.GenerateContent (GoogleSearchRetrieval_geminiapi)
- Models.GenerateContent (GoogleSearchRetrieval_vertexai)
- Models.GenerateContent (HttpURL_vertexai)
- Models.GenerateContent (InlineAudio_geminiapi)
- Models.GenerateContent (InlineAudio_vertexai)
- Models.GenerateContent (InlineImage_geminiapi)
- Models.GenerateContent (InlineImage_vertexai)
- Models.GenerateContent (InlinePDF_geminiapi)
- Models.GenerateContent (InlinePDF_vertexai)
- Models.GenerateContent (InlineVideo_geminiapi)
- Models.GenerateContent (InlineVideo_vertexai)
- Models.GenerateContent (SystemInstruction_geminiapi)
- Models.GenerateContent (SystemInstruction_vertexai)
- Models.GenerateContent (Text_geminiapi)
- Models.GenerateContent (Text_vertexai)
- Models.GenerateContent (Texts_geminiapi)
- Models.GenerateContent (Texts_vertexai)
- Models.GenerateContent (ThirdPartyModel_vertexai)
- Models.GenerateContentStream (Text_geminiapi)
- Models.GenerateContentStream (Text_vertexai)
- NewClient (Geminiapi)
- NewClient (Vertexai)
Constants ¶
const ( RoleUser = "user" RoleModel = "model" )
Variables ¶
var ErrPageDone = errors.New("no more pages")
ErrPageDone is the error returned by an iterator's Next method when no more pages are available.
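For reference, a minimal paging sketch. It assumes Page[T] exposes an Items slice and a Next(ctx) method that returns ErrPageDone when the pages are exhausted (this excerpt only implies that API), and that client and ctx are in scope:

page, err := client.Models.List(ctx, nil)
if err != nil {
	log.Fatal(err)
}
for {
	for _, m := range page.Items {
		fmt.Println(m.Name) // Name field used for illustration
	}
	page, err = page.Next(ctx)
	if errors.Is(err, genai.ErrPageDone) {
		break // no more pages
	}
	if err != nil {
		log.Fatal(err)
	}
}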
Functions ¶
func Ptr ¶
func Ptr[T any](t T) *T
Ptr returns a pointer to its argument. It can be used to initialize pointer fields:
genai.GenerateContentConfig{Temperature: genai.Ptr[float32](0.5)}
func SetDefaultBaseURLs ¶ added in v1.3.0
func SetDefaultBaseURLs(baseURLParams BaseURLParameters)
SetDefaultBaseURLs overrides the base URLs for the Gemini API and Vertex AI API.
[HTTPOptions.BaseURL] takes precedence over URLs set here.
Note: Call this function before initializing the SDK client. Base URLs set after the client has been initialized will not be applied.
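A minimal sketch of overriding the defaults before creating a client; the BaseURLParameters field names GeminiURL and VertexURL are assumed here, since the struct definition is not shown in this excerpt:

// Must run before genai.NewClient; later calls have no effect on existing clients.
genai.SetDefaultBaseURLs(genai.BaseURLParameters{
	GeminiURL: "https://gemini-proxy.example.com", // assumed field name, illustrative URL
	VertexURL: "https://vertex-proxy.example.com", // assumed field name, illustrative URL
})
client, err := genai.NewClient(ctx, nil)
if err != nil {
	log.Fatal(err)
}
_ = client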
Types ¶
type APIAuth ¶ added in v1.14.0
type APIAuth struct { // The API secret. APIKeyConfig *APIAuthAPIKeyConfig `json:"apiKeyConfig,omitempty"` }
The generic reusable API auth config. Deprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto) instead.
type APIAuthAPIKeyConfig ¶ added in v1.14.0
type APIAuthAPIKeyConfig struct { // Required. The SecretManager secret version resource name storing API key. e.g. projects/{project}/secrets/{secret}/versions/{version} APIKeySecretVersion string `json:"apiKeySecretVersion,omitempty"` // The API key string. Either this or `api_key_secret_version` must be set. APIKeyString string `json:"apiKeyString,omitempty"` }
The API secret.
type APIError ¶ added in v0.7.0
type APIError struct { // Code is the HTTP response status code. Code int `json:"code,omitempty"` // Message is the server response message. Message string `json:"message,omitempty"` // Status is the server response status. Status string `json:"status,omitempty"` // Details field provides more context to an error. Details []map[string]any `json:"details,omitempty"` }
APIError contains an error response from the server.
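A hedged sketch of inspecting a failed call; it assumes the error chain returned by the SDK contains an APIError value that errors.As can extract, which this excerpt does not confirm:

resp, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", genai.Text("hello"), nil)
if err != nil {
	var apiErr genai.APIError
	if errors.As(err, &apiErr) { // assumes an APIError value is in the error chain
		log.Fatalf("server error %d (%s): %s", apiErr.Code, apiErr.Status, apiErr.Message)
	}
	log.Fatal(err)
}
_ = resp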
type APIKeyConfig ¶ added in v1.4.0
type APIKeyConfig struct { // Optional. The API key to be used in the request directly. APIKeyString string `json:"apiKeyString,omitempty"` }
Config for authentication with API key.
type ActivityEnd ¶ added in v1.0.0
type ActivityEnd struct { }
Marks the end of user activity. This can only be sent if automatic (i.e. server-side) activity detection is disabled.
type ActivityHandling ¶ added in v1.0.0
type ActivityHandling string
The different ways of handling user activity.
const ( // If unspecified, the default behavior is `START_OF_ACTIVITY_INTERRUPTS`. ActivityHandlingUnspecified ActivityHandling = "ACTIVITY_HANDLING_UNSPECIFIED" // If true, start of activity will interrupt the model's response (also called "barge // in"). The model's current response will be cut-off in the moment of the interruption. // This is the default behavior. ActivityHandlingStartOfActivityInterrupts ActivityHandling = "START_OF_ACTIVITY_INTERRUPTS" // The model's response will not be interrupted. ActivityHandlingNoInterruption ActivityHandling = "NO_INTERRUPTION" )
type ActivityStart ¶ added in v1.0.0
type ActivityStart struct { }
Marks the start of user activity. This can only be sent if automatic (i.e. server-side) activity detection is disabled.
type AdapterSize ¶ added in v1.16.0
type AdapterSize string
Adapter size for tuning.
const ( // Adapter size is unspecified. AdapterSizeUnspecified AdapterSize = "ADAPTER_SIZE_UNSPECIFIED" // Adapter size 1. AdapterSizeOne AdapterSize = "ADAPTER_SIZE_ONE" // Adapter size 2. AdapterSizeTwo AdapterSize = "ADAPTER_SIZE_TWO" // Adapter size 4. AdapterSizeFour AdapterSize = "ADAPTER_SIZE_FOUR" // Adapter size 8. AdapterSizeEight AdapterSize = "ADAPTER_SIZE_EIGHT" // Adapter size 16. AdapterSizeSixteen AdapterSize = "ADAPTER_SIZE_SIXTEEN" // Adapter size 32. AdapterSizeThirtyTwo AdapterSize = "ADAPTER_SIZE_THIRTY_TWO" )
type AudioTranscriptionConfig ¶ added in v1.0.0
type AudioTranscriptionConfig struct { }
The audio transcription configuration in Setup.
type AuthConfig ¶ added in v1.4.0
type AuthConfig struct { // Optional. Config for API key auth. APIKeyConfig *APIKeyConfig `json:"apiKeyConfig,omitempty"` // Type of auth scheme. AuthType AuthType `json:"authType,omitempty"` // Config for Google Service Account auth. GoogleServiceAccountConfig *AuthConfigGoogleServiceAccountConfig `json:"googleServiceAccountConfig,omitempty"` // Config for HTTP Basic auth. HTTPBasicAuthConfig *AuthConfigHTTPBasicAuthConfig `json:"httpBasicAuthConfig,omitempty"` // Config for user oauth. OauthConfig *AuthConfigOauthConfig `json:"oauthConfig,omitempty"` // Config for user OIDC auth. OidcConfig *AuthConfigOidcConfig `json:"oidcConfig,omitempty"` }
Auth configuration to run the extension.
type AuthConfigGoogleServiceAccountConfig ¶ added in v1.4.0
type AuthConfigGoogleServiceAccountConfig struct { // Optional. The service account that the extension execution service runs as. - If // the service account is specified, the `iam.serviceAccounts.getAccessToken` permission // should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) // on the specified service account. - If not specified, the Vertex AI Extension Service // Agent will be used to execute the Extension. ServiceAccount string `json:"serviceAccount,omitempty"` }
Config for Google Service Account Authentication.
type AuthConfigHTTPBasicAuthConfig ¶ added in v1.4.0
type AuthConfigHTTPBasicAuthConfig struct { // Required. The name of the SecretManager secret version resource storing the base64 // encoded credentials. Format: `projects/{project}/secrets/{secret}/versions/{version}` // - If specified, the `secretmanager.versions.access` permission should be granted // to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) // on the specified resource. CredentialSecret string `json:"credentialSecret,omitempty"` }
Config for HTTP Basic Authentication.
type AuthConfigOauthConfig ¶ added in v1.4.0
type AuthConfigOauthConfig struct { // Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] // at request time. AccessToken string `json:"accessToken,omitempty"` // The service account used to generate access tokens for executing the Extension. - // If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission // should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) // on the provided service account. ServiceAccount string `json:"serviceAccount,omitempty"` }
Config for user oauth.
type AuthConfigOidcConfig ¶ added in v1.4.0
type AuthConfigOidcConfig struct { // OpenID Connect formatted ID token for extension endpoint. Only used to propagate // token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time. IDToken string `json:"idToken,omitempty"` // The service account used to generate an OpenID Connect (OIDC)-compatible JWT token // signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). // - The audience for the token will be set to the URL in the server URL defined in // the OpenAPI spec. - If the service account is provided, the service account should // grant `iam.serviceAccounts.getOpenIDToken` permission to Vertex AI Extension Service // Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). ServiceAccount string `json:"serviceAccount,omitempty"` }
Config for user OIDC auth.
type AuthType ¶ added in v1.4.0
type AuthType string
Type of auth scheme.
const ( AuthTypeUnspecified AuthType = "AUTH_TYPE_UNSPECIFIED" // No Auth. AuthTypeNoAuth AuthType = "NO_AUTH" // API Key Auth. AuthTypeAPIKeyAuth AuthType = "API_KEY_AUTH" // HTTP Basic Auth. AuthTypeHTTPBasicAuth AuthType = "HTTP_BASIC_AUTH" // Google Service Account Auth. AuthTypeGoogleServiceAccountAuth AuthType = "GOOGLE_SERVICE_ACCOUNT_AUTH" // OAuth auth. AuthTypeOauth AuthType = "OAUTH" // OpenID Connect (OIDC) Auth. AuthTypeOidcAuth AuthType = "OIDC_AUTH" )
type AutomaticActivityDetection ¶ added in v1.0.0
type AutomaticActivityDetection struct { // Optional. If enabled, detected voice and text input count as activity. If disabled, // the client must send activity signals. Disabled bool `json:"disabled,omitempty"` // Optional. Determines how likely speech is to be detected. StartOfSpeechSensitivity StartSensitivity `json:"startOfSpeechSensitivity,omitempty"` // Optional. Determines how likely it is that detected speech has ended. EndOfSpeechSensitivity EndSensitivity `json:"endOfSpeechSensitivity,omitempty"` // Optional. The required duration of detected speech before start-of-speech is committed. // The lower this value the more sensitive the start-of-speech detection is and the // shorter speech can be recognized. However, this also increases the probability of // false positives. PrefixPaddingMs *int32 `json:"prefixPaddingMs,omitempty"` // Optional. The required duration of detected non-speech (e.g. silence) before end-of-speech // is committed. The larger this value, the longer speech gaps can be without interrupting // the user's activity but this will increase the model's latency. SilenceDurationMs *int32 `json:"silenceDurationMs,omitempty"` }
Configures automatic detection of activity.
type Backend ¶
type Backend int
Backend is the GenAI backend to use for the client.
const ( // BackendUnspecified causes the backend to be determined automatically. If the // GOOGLE_GENAI_USE_VERTEXAI environment variable is set to "1" or "true", then // the backend is BackendVertexAI. Otherwise, if GOOGLE_GENAI_USE_VERTEXAI // is unset or set to any other value, then BackendGeminiAPI is used. Explicitly // setting the backend in ClientConfig overrides the environment variable. BackendUnspecified Backend = iota // BackendGeminiAPI is the Gemini API backend. BackendGeminiAPI // BackendVertexAI is the Vertex AI backend. BackendVertexAI )
type BaseURLParameters ¶ added in v1.3.0
BaseURLParameters are parameters for setting the base URLs for the Gemini API and Vertex AI API.
type BatchJob ¶ added in v1.13.0
type BatchJob struct { // The resource name of the BatchJob. Output only. Name string `json:"name,omitempty"` // The display name of the BatchJob. DisplayName string `json:"displayName,omitempty"` // The state of the BatchJob. State JobState `json:"state,omitempty"` // Output only. Only populated when the job's state is JOB_STATE_FAILED or JOB_STATE_CANCELLED. Error *JobError `json:"error,omitempty"` // The time when the BatchJob was created. CreateTime time.Time `json:"createTime,omitempty"` // Output only. Time when the Job for the first time entered the `JOB_STATE_RUNNING` // state. StartTime time.Time `json:"startTime,omitempty"` // The time when the BatchJob was completed. EndTime time.Time `json:"endTime,omitempty"` // The time when the BatchJob was last updated. UpdateTime time.Time `json:"updateTime,omitempty"` // The name of the model that produces the predictions via the BatchJob. Model string `json:"model,omitempty"` // Configuration for the input data. Src *BatchJobSource `json:"src,omitempty"` // Configuration for the output data. Dest *BatchJobDestination `json:"dest,omitempty"` }
Config for batches.create return value.
func (*BatchJob) MarshalJSON ¶ added in v1.16.0
func (*BatchJob) UnmarshalJSON ¶ added in v1.16.0
type BatchJobDestination ¶ added in v1.13.0
type BatchJobDestination struct { // Storage format of the output files. Must be one of: // 'jsonl', 'bigquery'. Format string `json:"format,omitempty"` // Optional. The Google Cloud Storage URI to the output file. GCSURI string `json:"gcsUri,omitempty"` // Optional. The BigQuery URI to the output table. BigqueryURI string `json:"bigqueryUri,omitempty"` // Optional. The Gemini Developer API's file resource name of the output data // (e.g. "files/12345"). The file will be a JSONL file with a single response // per line. The responses will be GenerateContentResponse messages formatted // as JSON. The responses will be written in the same order as the input // requests. FileName string `json:"fileName,omitempty"` // Optional. The responses to the requests in the batch. Returned when the batch was // built using inlined requests. The responses will be in the same order as // the input requests. InlinedResponses []*InlinedResponse `json:"inlinedResponses,omitempty"` }
Config for `dest` parameter.
type BatchJobSource ¶ added in v1.13.0
type BatchJobSource struct { // Storage format of the input files. Must be one of: // 'jsonl', 'bigquery'. Format string `json:"format,omitempty"` // Optional. The Google Cloud Storage URIs to input files. GCSURI []string `json:"gcsUri,omitempty"` // Optional. The BigQuery URI to input table. BigqueryURI string `json:"bigqueryUri,omitempty"` // Optional. The Gemini Developer API's file resource name of the input data // (e.g. "files/12345"). FileName string `json:"fileName,omitempty"` // Optional. The Gemini Developer API's inlined input data to run batch job. InlinedRequests []*InlinedRequest `json:"inlinedRequests,omitempty"` }
Config for `src` parameter.
type Batches ¶ added in v1.13.0
type Batches struct {
// contains filtered or unexported fields
}
Batches provides methods for managing batch jobs. You don't need to instantiate this struct. Create a client instance via NewClient, and then access Batches through the client.Batches field.
func (Batches) All ¶ added in v1.13.0
All retrieves all batch job resources.
This method handles pagination internally, making multiple API calls as needed to fetch all entries. It returns an iterator that yields each batch job one by one. You do not need to manage pagination tokens or make multiple calls to retrieve all data.
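For example, with client and ctx in scope, the iterator can be consumed with a range-over-function loop (Go 1.23+):

for job, err := range client.Batches.All(ctx) {
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(job.Name, job.State)
}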
func (Batches) Create ¶ added in v1.13.0
func (b Batches) Create(ctx context.Context, model string, src *BatchJobSource, config *CreateBatchJobConfig) (*BatchJob, error)
Create a batch job.
func (Batches) Delete ¶ added in v1.14.0
func (m Batches) Delete(ctx context.Context, name string, config *DeleteBatchJobConfig) (*DeleteResourceJob, error)
Delete deletes a batch job resource.
type Behavior ¶ added in v1.6.0
type Behavior string
Defines the function behavior. Defaults to `BLOCKING`.
const ( // This value is unused. BehaviorUnspecified Behavior = "UNSPECIFIED" // If set, the system will wait to receive the function response before continuing the // conversation. BehaviorBlocking Behavior = "BLOCKING" // If set, the system will not wait to receive the function response. Instead, it will // attempt to handle function responses as they become available while maintaining the // conversation between the user and the model. BehaviorNonBlocking Behavior = "NON_BLOCKING" )
type Blob ¶
type Blob struct { // Optional. Display name of the blob. Used to provide a label or filename to distinguish // blobs. This field is not currently used in the Gemini GenerateContent calls. DisplayName string `json:"displayName,omitempty"` // Required. Raw bytes. Data []byte `json:"data,omitempty"` // Required. The IANA standard MIME type of the source data. MIMEType string `json:"mimeType,omitempty"` }
Content blob.
type BlockedReason ¶
type BlockedReason string
Blocked reason.
const ( // Unspecified blocked reason. BlockedReasonUnspecified BlockedReason = "BLOCKED_REASON_UNSPECIFIED" // Candidates blocked due to safety. BlockedReasonSafety BlockedReason = "SAFETY" // Candidates blocked due to other reason. BlockedReasonOther BlockedReason = "OTHER" // Candidates blocked due to the terms which are included from the terminology blocklist. BlockedReasonBlocklist BlockedReason = "BLOCKLIST" // Candidates blocked due to prohibited content. BlockedReasonProhibitedContent BlockedReason = "PROHIBITED_CONTENT" // Candidates blocked due to unsafe image generation content. BlockedReasonImageSafety BlockedReason = "IMAGE_SAFETY" )
type CachedContent ¶ added in v0.1.0
type CachedContent struct { // Optional. The server-generated resource name of the cached content. Name string `json:"name,omitempty"` // Optional. The user-generated meaningful display name of the cached content. DisplayName string `json:"displayName,omitempty"` // Optional. The name of the publisher model to use for cached content. Model string `json:"model,omitempty"` // Optional. Creation time of the cache entry. CreateTime time.Time `json:"createTime,omitempty"` // Optional. When the cache entry was last updated in UTC time. UpdateTime time.Time `json:"updateTime,omitempty"` // Optional. Expiration time of the cached content. ExpireTime time.Time `json:"expireTime,omitempty"` // Optional. Metadata on the usage of the cached content. UsageMetadata *CachedContentUsageMetadata `json:"usageMetadata,omitempty"` }
A resource used in LLM queries for users to explicitly specify what to cache.
func (*CachedContent) MarshalJSON ¶ added in v0.6.0
func (c *CachedContent) MarshalJSON() ([]byte, error)
func (*CachedContent) UnmarshalJSON ¶ added in v1.16.0
func (c *CachedContent) UnmarshalJSON(data []byte) error
type CachedContentUsageMetadata ¶ added in v0.1.0
type CachedContentUsageMetadata struct { // Duration of audio in seconds. AudioDurationSeconds int32 `json:"audioDurationSeconds,omitempty"` // Number of images. ImageCount int32 `json:"imageCount,omitempty"` // Number of text characters. TextCount int32 `json:"textCount,omitempty"` // Total number of tokens that the cached content consumes. TotalTokenCount int32 `json:"totalTokenCount,omitempty"` // Duration of video in seconds. VideoDurationSeconds int32 `json:"videoDurationSeconds,omitempty"` }
Metadata on the usage of the cached content.
type Caches ¶ added in v0.1.0
type Caches struct {
// contains filtered or unexported fields
}
Caches provides methods for managing context caching. You don't need to instantiate this struct. Create a client instance via NewClient, and then access Caches through the client.Caches field.
func (Caches) All ¶ added in v0.4.0
All retrieves all cached content resources.
This method handles pagination internally, making multiple API calls as needed to fetch all entries. It returns an iterator that yields each cached content entry one by one. You do not need to manage pagination tokens or make multiple calls to retrieve all data.
func (Caches) Create ¶ added in v0.1.0
func (m Caches) Create(ctx context.Context, model string, config *CreateCachedContentConfig) (*CachedContent, error)
Create creates a new cached content resource.
func (Caches) Delete ¶ added in v0.1.0
func (m Caches) Delete(ctx context.Context, name string, config *DeleteCachedContentConfig) (*DeleteCachedContentResponse, error)
Delete deletes a cached content resource.
func (Caches) Get ¶ added in v0.1.0
func (m Caches) Get(ctx context.Context, name string, config *GetCachedContentConfig) (*CachedContent, error)
Get gets a cached content resource.
func (Caches) List ¶ added in v0.4.0
func (m Caches) List(ctx context.Context, config *ListCachedContentsConfig) (Page[CachedContent], error)
List retrieves a paginated list of cached content resources.
func (Caches) Update ¶ added in v0.1.0
func (m Caches) Update(ctx context.Context, name string, config *UpdateCachedContentConfig) (*CachedContent, error)
Update updates a cached content resource.
type CancelBatchJobConfig ¶ added in v1.13.0
type CancelBatchJobConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` }
Optional parameters.
type CancelTuningJobConfig ¶ added in v1.22.0
type CancelTuningJobConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` }
Optional parameters for tunings.cancel method.
type Candidate ¶
type Candidate struct { // Optional. Contains the multi-part content of the response. Content *Content `json:"content,omitempty"` // Optional. Source attribution of the generated content. CitationMetadata *CitationMetadata `json:"citationMetadata,omitempty"` // Optional. Describes the reason the model stopped generating tokens. FinishMessage string `json:"finishMessage,omitempty"` // Optional. Number of tokens for this candidate. // This field is only available in the Gemini API. TokenCount int32 `json:"tokenCount,omitempty"` // Optional. The reason why the model stopped generating tokens. // If empty, the model has not stopped generating the tokens. FinishReason FinishReason `json:"finishReason,omitempty"` // Optional. Metadata related to URL context retrieval tool. URLContextMetadata *URLContextMetadata `json:"urlContextMetadata,omitempty"` // Output only. Average log probability score of the candidate. AvgLogprobs float64 `json:"avgLogprobs,omitempty"` // Output only. Metadata specifies sources used to ground generated content. GroundingMetadata *GroundingMetadata `json:"groundingMetadata,omitempty"` // Output only. Index of the candidate. Index int32 `json:"index,omitempty"` // Output only. Log-likelihood scores for the response tokens and top tokens LogprobsResult *LogprobsResult `json:"logprobsResult,omitempty"` // Output only. List of ratings for the safety of a response candidate. There is at // most one rating per category. SafetyRatings []*SafetyRating `json:"safetyRatings,omitempty"` }
A response candidate generated from the model.
type Chat ¶ added in v0.7.0
type Chat struct { Models // contains filtered or unexported fields }
Chat represents a single chat session (multi-turn conversation) with the model.
client, _ := genai.NewClient(ctx, &genai.ClientConfig{})
chat, _ := client.Chats.Create(ctx, "gemini-2.0-flash", nil, nil)
result, err := chat.SendMessage(ctx, genai.Part{Text: "What is 1 + 2?"})
func (*Chat) History ¶ added in v1.0.0
History returns the chat history. Returns the curated history if curated is true, otherwise returns the comprehensive history.
func (*Chat) Send ¶ added in v1.5.0
Send sends the conversation history along with the additional user message and returns the model's response.
func (*Chat) SendMessage ¶ added in v0.7.0
SendMessage is a wrapper around Send.
func (*Chat) SendMessageStream ¶ added in v1.0.0
func (c *Chat) SendMessageStream(ctx context.Context, parts ...Part) iter.Seq2[*GenerateContentResponse, error]
SendMessageStream is a wrapper around SendStream.
func (*Chat) SendStream ¶ added in v1.5.0
func (c *Chat) SendStream(ctx context.Context, parts ...*Part) iter.Seq2[*GenerateContentResponse, error]
SendStream sends the conversation history along with the additional user message and streams the model's response.
type Chats ¶ added in v0.7.0
type Chats struct {
// contains filtered or unexported fields
}
Chats provides utility functions for creating a new chat session. You don't need to instantiate this struct. Create a client instance via NewClient, and then access Chats through the client.Chats field.
Example (Geminiapi) ¶
package main import ( "context" "encoding/json" "fmt" "log" "google.golang.org/genai" ) // Your Google API key const apiKey = "your-api-key" func main() { ctx := context.Background() client, err := genai.NewClient(ctx, &genai.ClientConfig{ APIKey: apiKey, Backend: genai.BackendGeminiAPI, }) if err != nil { log.Fatal(err) } chat, err := client.Chats.Create(ctx, "gemini-2.0-flash", nil, nil) if err != nil { log.Fatal(err) } result, err := chat.SendMessage(ctx, genai.Part{Text: "What's the weather in New York?"}) if err != nil { log.Fatal(err) } debugPrint(result) result, err = chat.SendMessage(ctx, genai.Part{Text: "How about San Francisco?"}) if err != nil { log.Fatal(err) } debugPrint(result) } func debugPrint[T any](r *T) { response, err := json.MarshalIndent(*r, "", " ") if err != nil { log.Fatal(err) } fmt.Println(string(response)) }
Example (Stream_geminiapi) ¶
package main import ( "context" "encoding/json" "fmt" "log" "google.golang.org/genai" ) // Your Google API key const apiKey = "your-api-key" func main() { ctx := context.Background() client, err := genai.NewClient(ctx, &genai.ClientConfig{ APIKey: apiKey, Backend: genai.BackendGeminiAPI, }) if err != nil { log.Fatal(err) } chat, err := client.Chats.Create(ctx, "gemini-2.0-flash", nil, nil) if err != nil { log.Fatal(err) } for result, err := range chat.SendMessageStream(ctx, genai.Part{Text: "What's the weather in New York?"}) { if err != nil { log.Fatal(err) } debugPrint(result) } for result, err := range chat.SendMessageStream(ctx, genai.Part{Text: "How about San Francisco?"}) { if err != nil { log.Fatal(err) } debugPrint(result) } } func debugPrint[T any](r *T) { response, err := json.MarshalIndent(*r, "", " ") if err != nil { log.Fatal(err) } fmt.Println(string(response)) }
Example (Stream_vertexai) ¶
package main import ( "context" "encoding/json" "fmt" "log" "google.golang.org/genai" ) // Your GCP project const project = "your-project" // A GCP location like "us-central1" const location = "some-gcp-location" func main() { ctx := context.Background() client, err := genai.NewClient(ctx, &genai.ClientConfig{ Project: project, Location: location, Backend: genai.BackendVertexAI, }) if err != nil { log.Fatal(err) } chat, err := client.Chats.Create(ctx, "gemini-2.0-flash", nil, nil) if err != nil { log.Fatal(err) } for result, err := range chat.SendMessageStream(ctx, genai.Part{Text: "What's the weather in New York?"}) { if err != nil { log.Fatal(err) } debugPrint(result) } for result, err := range chat.SendMessageStream(ctx, genai.Part{Text: "How about San Francisco?"}) { if err != nil { log.Fatal(err) } debugPrint(result) } } func debugPrint[T any](r *T) { response, err := json.MarshalIndent(*r, "", " ") if err != nil { log.Fatal(err) } fmt.Println(string(response)) }
Example (Vertexai) ¶
package main import ( "context" "encoding/json" "fmt" "log" "google.golang.org/genai" ) // Your GCP project const project = "your-project" // A GCP location like "us-central1" const location = "some-gcp-location" func main() { ctx := context.Background() client, err := genai.NewClient(ctx, &genai.ClientConfig{ Project: project, Location: location, Backend: genai.BackendVertexAI, }) if err != nil { log.Fatal(err) } chat, err := client.Chats.Create(ctx, "gemini-2.0-flash", nil, nil) if err != nil { log.Fatal(err) } result, err := chat.SendMessage(ctx, genai.Part{Text: "What's the weather in New York?"}) if err != nil { log.Fatal(err) } debugPrint(result) result, err = chat.SendMessage(ctx, genai.Part{Text: "How about San Francisco?"}) if err != nil { log.Fatal(err) } debugPrint(result) } func debugPrint[T any](r *T) { response, err := json.MarshalIndent(*r, "", " ") if err != nil { log.Fatal(err) } fmt.Println(string(response)) }
type Checkpoint ¶ added in v1.5.0
type Checkpoint struct { // Optional. The ID of the checkpoint. CheckpointID string `json:"checkpointId,omitempty"` // Optional. The epoch of the checkpoint. Epoch int64 `json:"epoch,omitempty,string"` // Optional. The step of the checkpoint. Step int64 `json:"step,omitempty,string"` }
Describes the machine learning model version checkpoint.
type Citation ¶
type Citation struct { // Output only. End index into the content. EndIndex int32 `json:"endIndex,omitempty"` // Output only. License of the attribution. License string `json:"license,omitempty"` // Output only. Publication date of the attribution. PublicationDate civil.Date `json:"publicationDate,omitempty"` // Output only. Start index into the content. StartIndex int32 `json:"startIndex,omitempty"` // Output only. Title of the attribution. Title string `json:"title,omitempty"` // Output only. URL reference of the attribution. URI string `json:"uri,omitempty"` }
Source attributions for content.
func (*Citation) MarshalJSON ¶ added in v0.6.0
func (*Citation) UnmarshalJSON ¶ added in v0.4.0
type CitationMetadata ¶
type CitationMetadata struct { // Optional. Contains citation information when the model directly quotes, at // length, from another source. Can include traditional websites and code // repositories. Citations []*Citation `json:"citations,omitempty"` }
Citation information when the model quotes another source.
type Client ¶
type Client struct { // Models provides access to the Models service. Models *Models // Live provides access to the Live service. Live *Live // Caches provides access to the Caches service. Caches *Caches // Chats provides util functions for creating a new chat session. Chats *Chats // Files provides access to the Files service. Files *Files // Operations provides access to long-running operations. Operations *Operations // Batches provides access to the Batch service. Batches *Batches // Tunings provides access to the Tunings service. Tunings *Tunings // contains filtered or unexported fields }
Client is the GenAI client. It provides access to the various GenAI services.
func NewClient ¶
func NewClient(ctx context.Context, cc *ClientConfig) (*Client, error)
NewClient creates a new GenAI client.
You can configure the client by passing in a ClientConfig struct.
If a nil ClientConfig is provided, the client will be configured using default settings and environment variables:
Environment Variables for BackendGeminiAPI:
GEMINI_API_KEY: Specifies the API key for the Gemini API.
GOOGLE_API_KEY: Can also be used to specify the API key for the Gemini API. If both GOOGLE_API_KEY and GEMINI_API_KEY are set, GOOGLE_API_KEY will be used.
Environment Variables for BackendVertexAI:
GOOGLE_GENAI_USE_VERTEXAI: Must be set to "1" or "true" to use the Vertex AI backend.
GOOGLE_CLOUD_PROJECT: Required. Specifies the GCP project ID.
GOOGLE_CLOUD_LOCATION or GOOGLE_CLOUD_REGION: Required. Specifies the GCP location/region.
If using the Vertex AI backend and no credentials are provided in the ClientConfig, the client will attempt to use application default credentials.
Example (Geminiapi) ¶
This example shows how to create a new client for Gemini API.
package main import ( "context" "fmt" "log" "google.golang.org/genai" ) // Your Google API key const apiKey = "your-api-key" func main() { ctx := context.Background() client, err := genai.NewClient(ctx, &genai.ClientConfig{ APIKey: apiKey, Backend: genai.BackendGeminiAPI, }) if err != nil { log.Fatalf("failed to create client: %v", err) } fmt.Println(client.ClientConfig().APIKey) }
Example (Vertexai) ¶
This example shows how to create a new client for Vertex AI.
package main import ( "context" "fmt" "log" "google.golang.org/genai" ) // Your GCP project const project = "your-project" // A GCP location like "us-central1" const location = "some-gcp-location" func main() { ctx := context.Background() client, err := genai.NewClient(ctx, &genai.ClientConfig{ Project: project, Location: location, Backend: genai.BackendVertexAI, }) if err != nil { log.Fatalf("failed to create client: %v", err) } fmt.Println(client.ClientConfig().Backend) }
func (Client) ClientConfig ¶
func (c Client) ClientConfig() ClientConfig
ClientConfig returns the ClientConfig for the client.
The returned ClientConfig is a copy of the ClientConfig used to create the client.
type ClientConfig ¶
type ClientConfig struct { // Optional. API Key for GenAI. Required for BackendGeminiAPI. // Can also be set via the GOOGLE_API_KEY or GEMINI_API_KEY environment variable. // Get a Gemini API key: https://ai.google.dev/gemini-api/docs/api-key APIKey string // Optional. Backend for GenAI. See Backend constants. Defaults to BackendGeminiAPI unless explicitly set to BackendVertexAI, // or the environment variable GOOGLE_GENAI_USE_VERTEXAI is set to "1" or "true". Backend Backend // Optional. GCP Project ID for Vertex AI. Required for BackendVertexAI. // Can also be set via the GOOGLE_CLOUD_PROJECT environment variable. // Find your Project ID: https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects Project string // Optional. GCP Location/Region for Vertex AI. Required for BackendVertexAI. // Can also be set via the GOOGLE_CLOUD_LOCATION or GOOGLE_CLOUD_REGION environment variable. // Generative AI locations: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations. Location string // Optional. Google credentials. If not specified, [Application Default Credentials] will be used. // // [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials Credentials *auth.Credentials // Optional HTTP client to use. If nil, a default client will be created. // For Vertex AI, this client must handle authentication appropriately. HTTPClient *http.Client // Optional HTTP options to override. HTTPOptions HTTPOptions // contains filtered or unexported fields }
ClientConfig is the configuration for the GenAI client.
type CodeExecutionResult ¶
type CodeExecutionResult struct { // Required. Outcome of the code execution. Outcome Outcome `json:"outcome,omitempty"` // Optional. Contains stdout when code execution is successful, stderr or other description // otherwise. Output string `json:"output,omitempty"` }
Result of executing the ExecutableCode. Only generated when using the [CodeExecution] tool, and always follows a `part` containing the ExecutableCode.
type ComputeTokensConfig ¶ added in v0.1.0
type ComputeTokensConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` }
Optional parameters for computing tokens.
type ComputeTokensResponse ¶ added in v0.1.0
type ComputeTokensResponse struct { // Optional. Used to retain the full HTTP response. SDKHTTPResponse *HTTPResponse `json:"sdkHttpResponse,omitempty"` // Lists of tokens info from the input. A ComputeTokensRequest could have multiple instances // with a prompt in each instance. We also need to return lists of tokens info for the // request with multiple instances. TokensInfo []*TokensInfo `json:"tokensInfo,omitempty"` }
Response for computing tokens.
type Content ¶
type Content struct { // Optional. List of parts that constitute a single message. Each part may have // a different IANA MIME type. Parts []*Part `json:"parts,omitempty"` // Optional. The producer of the content. Must be either 'user' or // 'model'. Useful to set for multi-turn conversations, otherwise can be // empty. If role is not specified, SDK will determine the role. Role string `json:"role,omitempty"` }
Contains the multi-part content of a message.
func NewContentFromBytes ¶ added in v0.7.0
NewContentFromBytes builds a Content from a byte slice and mime type. If role is the empty string, it defaults to RoleUser.
func NewContentFromCodeExecutionResult ¶ added in v0.7.0
NewContentFromCodeExecutionResult builds a Content from a given Outcome and std output of the code execution. If role is the empty string, it defaults to RoleUser.
func NewContentFromExecutableCode ¶ added in v0.7.0
NewContentFromExecutableCode builds a Content from a single piece of source code in the given Language. If role is the empty string, it defaults to RoleUser.
func NewContentFromFunctionCall ¶ added in v0.7.0
NewContentFromFunctionCall builds a Content from a single FunctionCall given the function name and args. If role is the empty string, it defaults to RoleUser.
func NewContentFromFunctionResponse ¶ added in v0.7.0
NewContentFromFunctionResponse builds a Content from a single FunctionResponse given the function name and response. If role is the empty string, it defaults to RoleUser.
func NewContentFromParts ¶ added in v0.7.0
NewContentFromParts builds a Content from a list of parts and a Role. If role is the empty string, it defaults to RoleUser.
func NewContentFromText ¶ added in v0.7.0
NewContentFromText builds a Content from a text string. If role is the empty string, it defaults to RoleUser.
func NewContentFromURI ¶ added in v0.7.0
NewContentFromURI builds a Content from a file URI and mime type. If role is the empty string, it defaults to RoleUser.
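Putting the helpers together, a short sketch that builds contents for a GenerateContent call; it assumes client and ctx are in scope, uses an illustrative file URI, and passes nil for the trailing config parameter that the index elides:

contents := []*genai.Content{
	genai.NewContentFromText("Summarize the attached report.", genai.RoleUser),
	genai.NewContentFromURI("gs://my-bucket/report.pdf", "application/pdf", genai.RoleUser), // illustrative URI
}
resp, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil)
if err != nil {
	log.Fatal(err)
}
fmt.Println(resp.Text())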
type ContentEmbedding ¶ added in v0.5.0
type ContentEmbedding struct { // A list of floats representing an embedding. Values []float32 `json:"values,omitempty"` // Vertex API only. Statistics of the input text associated with this // embedding. Statistics *ContentEmbeddingStatistics `json:"statistics,omitempty"` }
The embedding generated from an input content.
type ContentEmbeddingStatistics ¶ added in v0.5.0
type ContentEmbeddingStatistics struct { // Vertex API only. If the input text was truncated due to having // a length longer than the allowed maximum input. Truncated bool `json:"truncated,omitempty"` // Vertex API only. Number of tokens of the input text. TokenCount float32 `json:"tokenCount,omitempty"` }
Statistics of the input text associated with the result of content embedding.
type ContextWindowCompressionConfig ¶ added in v1.0.0
type ContextWindowCompressionConfig struct { // Optional. Number of tokens (before running turn) that triggers context window compression // mechanism. TriggerTokens *int64 `json:"triggerTokens,omitempty,string"` // Optional. Sliding window compression mechanism. SlidingWindow *SlidingWindow `json:"slidingWindow,omitempty"` }
Enables context window compression: a mechanism that manages the model's context window so that it does not exceed a given length.
type ControlReferenceConfig ¶
type ControlReferenceConfig struct { // Optional. The type of control reference image to use. ControlType ControlReferenceType `json:"controlType,omitempty"` // Optional. Defaults to False. When set to True, the control image will be // computed by the model based on the control type. When set to False, // the control image must be provided by the user. EnableControlImageComputation bool `json:"enableControlImageComputation,omitempty"` }
Configuration for a Control reference image.
type ControlReferenceImage ¶
type ControlReferenceImage struct { // Optional. The reference image for the editing operation. ReferenceImage *Image `json:"referenceImage,omitempty"` // Optional. The ID of the reference image. ReferenceID int32 `json:"referenceId,omitempty"` // Optional. Configuration for the control reference image. Config *ControlReferenceConfig `json:"config,omitempty"` // contains filtered or unexported fields }
A control image is an image that represents a sketch of areas for the model to fill in based on the prompt. It is either a control image provided by the user, or a regular image from which the backend will generate a control image. In the latter case, the EnableControlImageComputation field in the config should be set to true.
func NewControlReferenceImage ¶ added in v0.5.0
func NewControlReferenceImage(referenceImage *Image, referenceID int32, config *ControlReferenceConfig) *ControlReferenceImage
NewControlReferenceImage creates a new ControlReferenceImage.
type ControlReferenceType ¶
type ControlReferenceType string
Enum representing the control type of a control reference image.
const ( ControlReferenceTypeDefault ControlReferenceType = "CONTROL_TYPE_DEFAULT" ControlReferenceTypeCanny ControlReferenceType = "CONTROL_TYPE_CANNY" ControlReferenceTypeScribble ControlReferenceType = "CONTROL_TYPE_SCRIBBLE" ControlReferenceTypeFaceMesh ControlReferenceType = "CONTROL_TYPE_FACE_MESH" )
type CountTokensConfig ¶ added in v0.1.0
type CountTokensConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` // Optional. Instructions for the model to steer it toward better performance. SystemInstruction *Content `json:"systemInstruction,omitempty"` // Optional. Code that enables the system to interact with external systems to // perform an action outside of the knowledge and scope of the model. Tools []*Tool `json:"tools,omitempty"` // Optional. Configuration that the model uses to generate the response. Not // supported by the Gemini Developer API. GenerationConfig *GenerationConfig `json:"generationConfig,omitempty"` }
Config for the count_tokens method.
type CountTokensResponse ¶ added in v0.1.0
type CountTokensResponse struct { // Optional. Used to retain the full HTTP response. SDKHTTPResponse *HTTPResponse `json:"sdkHttpResponse,omitempty"` // Total number of tokens. TotalTokens int32 `json:"totalTokens,omitempty"` // Number of tokens in the cached part of the prompt (the cached content). This field // is only available in the Gemini API. CachedContentTokenCount int32 `json:"cachedContentTokenCount,omitempty"` }
Response for counting tokens.
type CreateBatchJobConfig ¶ added in v1.13.0
type CreateBatchJobConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` // Optional. The user-defined name of this BatchJob. DisplayName string `json:"displayName,omitempty"` // GCS or BigQuery URI prefix for the output predictions. Example: // "gs://path/to/output/data" or "bq://projectId.bqDatasetId.bqTableId". Dest *BatchJobDestination `json:"dest,omitempty"` }
Config for optional parameters.
type CreateCachedContentConfig ¶ added in v0.1.0
type CreateCachedContentConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` // Optional. The TTL for this resource. The expiration time is computed: now + TTL. TTL time.Duration `json:"ttl,omitempty"` // Optional. Timestamp of when this resource is considered expired. ExpireTime time.Time `json:"expireTime,omitempty"` // Optional. The user-generated meaningful display name of the cached content. DisplayName string `json:"displayName,omitempty"` // Optional. The content to cache. Contents []*Content `json:"contents,omitempty"` // Optional. Developer set system instruction. SystemInstruction *Content `json:"systemInstruction,omitempty"` // Optional. A list of `Tools` the model may use to generate the next response. Tools []*Tool `json:"tools,omitempty"` // Optional. Configuration for the tools to use. This config is shared for all tools. ToolConfig *ToolConfig `json:"toolConfig,omitempty"` // Optional. The Cloud KMS resource identifier of the customer managed // encryption key used to protect a resource. // The key needs to be in the same region as where the compute resource is // created. See // https://cloud.google.com/vertex-ai/docs/general/cmek for more // details. If this is set, then all created CachedContent objects // will be encrypted with the provided encryption key. // Allowed formats: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key} KmsKeyName string `json:"kmsKeyName,omitempty"` }
Optional configuration for cached content creation.
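As a sketch of how these fields are typically filled in before calling Caches.Create (documented elsewhere in this package), assuming a configured *Client named client; the model name and TTL are example values, and context and time imports are assumed.

// createSharedCache caches a system instruction and reference contents so that
// later requests can point at the cache instead of resending them.
func createSharedCache(ctx context.Context, client *genai.Client) (*genai.CachedContent, error) {
	cfg := &genai.CreateCachedContentConfig{
		DisplayName:       "shared-context",
		TTL:               2 * time.Hour, // expiration is computed as now + TTL
		SystemInstruction: genai.NewContentFromText("You are a careful research assistant.", "user"),
		Contents:          genai.Text("...large reference material to cache..."),
	}
	return client.Caches.Create(ctx, "gemini-2.0-flash-001", cfg)
}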
func (*CreateCachedContentConfig) MarshalJSON ¶ added in v0.6.0
func (c *CreateCachedContentConfig) MarshalJSON() ([]byte, error)
func (*CreateCachedContentConfig) UnmarshalJSON ¶ added in v0.7.0
func (c *CreateCachedContentConfig) UnmarshalJSON(data []byte) error
type CreateFileConfig ¶ added in v1.0.0
type CreateFileConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` // Optional. If true, the raw HTTP response will be returned in the 'sdk_http_response' // field. ShouldReturnHTTPResponse bool `json:"shouldReturnHttpResponse,omitempty"` }
Used to override the default configuration.
type CreateFileResponse ¶ added in v1.0.0
type CreateFileResponse struct { // Optional. Used to retain the full HTTP response. SDKHTTPResponse *HTTPResponse `json:"sdkHttpResponse,omitempty"` }
Response for the create file method.
type CreateTuningJobConfig ¶ added in v1.16.0
type CreateTuningJobConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` // Optional. Cloud Storage path to file containing training dataset for tuning. The // dataset must be formatted as a JSONL file. ValidationDataset *TuningValidationDataset `json:"validationDataset,omitempty"` // Optional. The display name of the tuned Model. The name can be up to 128 characters // long and can consist of any UTF-8 characters. TunedModelDisplayName string `json:"tunedModelDisplayName,omitempty"` // Optional. The description of the TuningJob Description string `json:"description,omitempty"` // Optional. Number of complete passes the model makes over the entire training dataset // during training. EpochCount *int32 `json:"epochCount,omitempty"` // Optional. Multiplier for adjusting the default learning rate. LearningRateMultiplier *float32 `json:"learningRateMultiplier,omitempty"` // Optional. If set to true, disable intermediate checkpoints for SFT and only the last // checkpoint will be exported. Otherwise, enable intermediate checkpoints for SFT. ExportLastCheckpointOnly *bool `json:"exportLastCheckpointOnly,omitempty"` // Optional. The optional checkpoint ID of the pre-tuned model to use for tuning, if // applicable. PreTunedModelCheckpointID string `json:"preTunedModelCheckpointId,omitempty"` // Optional. Adapter size for tuning. AdapterSize AdapterSize `json:"adapterSize,omitempty"` // Optional. The batch size hyperparameter for tuning. If not set, a default of 4 or // 16 will be used based on the number of training examples. BatchSize *int32 `json:"batchSize,omitempty"` // Optional. The learning rate hyperparameter for tuning. If not set, a default of 0.001 // or 0.0002 will be calculated based on the number of training examples. LearningRate *float32 `json:"learningRate,omitempty"` }
Supervised fine-tuning job creation request - optional fields.
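A brief sketch of populating these optional hyperparameters, using the package-level Ptr helper for the pointer-typed fields. The Tunings call that would consume this config is not covered in this section, so only the config construction is shown; all values are illustrative.

// tuningConfig returns example hyperparameter overrides for a supervised
// fine-tuning job.
func tuningConfig() *genai.CreateTuningJobConfig {
	return &genai.CreateTuningJobConfig{
		TunedModelDisplayName:    "support-bot-v1",
		EpochCount:               genai.Ptr(int32(3)),
		LearningRateMultiplier:   genai.Ptr(float32(0.5)),
		ExportLastCheckpointOnly: genai.Ptr(true),
		BatchSize:                genai.Ptr(int32(8)),
	}
}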
type DatasetDistribution ¶ added in v1.16.0
type DatasetDistribution struct { // Output only. Defines the histogram bucket. Buckets []*DatasetDistributionDistributionBucket `json:"buckets,omitempty"` // Output only. The maximum of the population values. Max float64 `json:"max,omitempty"` // Output only. The arithmetic mean of the values in the population. Mean float64 `json:"mean,omitempty"` // Output only. The median of the values in the population. Median float64 `json:"median,omitempty"` // Output only. The minimum of the population values. Min float64 `json:"min,omitempty"` // Output only. The 5th percentile of the values in the population. P5 float64 `json:"p5,omitempty"` // Output only. The 95th percentile of the values in the population. P95 float64 `json:"p95,omitempty"` // Output only. Sum of a given population of values. Sum float64 `json:"sum,omitempty"` }
Distribution computed over a tuning dataset.
type DatasetDistributionDistributionBucket ¶ added in v1.16.0
type DatasetDistributionDistributionBucket struct { // Output only. Number of values in the bucket. Count int64 `json:"count,omitempty,string"` // Output only. Left bound of the bucket. Left float64 `json:"left,omitempty"` // Output only. Right bound of the bucket. Right float64 `json:"right,omitempty"` }
Dataset bucket used to create a histogram for the distribution given a population of values.
type DatasetStats ¶ added in v1.16.0
type DatasetStats struct { // Output only. Number of billable characters in the tuning dataset. TotalBillableCharacterCount int64 `json:"totalBillableCharacterCount,omitempty,string"` // Output only. Number of tuning characters in the tuning dataset. TotalTuningCharacterCount int64 `json:"totalTuningCharacterCount,omitempty,string"` // Output only. Number of examples in the tuning dataset. TuningDatasetExampleCount int64 `json:"tuningDatasetExampleCount,omitempty,string"` // Output only. Number of tuning steps for this Tuning Job. TuningStepCount int64 `json:"tuningStepCount,omitempty,string"` // Output only. Sample user messages in the training dataset uri. UserDatasetExamples []*Content `json:"userDatasetExamples,omitempty"` // Output only. Dataset distributions for the user input tokens. UserInputTokenDistribution *DatasetDistribution `json:"userInputTokenDistribution,omitempty"` // Output only. Dataset distributions for the messages per example. UserMessagePerExampleDistribution *DatasetDistribution `json:"userMessagePerExampleDistribution,omitempty"` // Output only. Dataset distributions for the user output tokens. UserOutputTokenDistribution *DatasetDistribution `json:"userOutputTokenDistribution,omitempty"` }
Statistics computed over a tuning dataset.
type DeleteBatchJobConfig ¶ added in v1.14.0
type DeleteBatchJobConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` }
Optional parameters for the batches.delete method.
type DeleteCachedContentConfig ¶ added in v0.1.0
type DeleteCachedContentConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` }
Optional parameters for caches.delete method.
type DeleteCachedContentResponse ¶ added in v0.1.0
type DeleteCachedContentResponse struct { // Optional. Used to retain the full HTTP response. SDKHTTPResponse *HTTPResponse `json:"sdkHttpResponse,omitempty"` }
Empty response for caches.delete method.
type DeleteFileConfig ¶ added in v1.0.0
type DeleteFileConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` }
Used to override the default configuration.
type DeleteFileResponse ¶ added in v1.0.0
type DeleteFileResponse struct { // Optional. Used to retain the full HTTP response. SDKHTTPResponse *HTTPResponse `json:"sdkHttpResponse,omitempty"` }
Response for the delete file method.
type DeleteModelConfig ¶ added in v0.4.0
type DeleteModelConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` }
Configuration for deleting a tuned model.
type DeleteModelResponse ¶ added in v0.4.0
type DeleteModelResponse struct { // Optional. Used to retain the full HTTP response. SDKHTTPResponse *HTTPResponse `json:"sdkHttpResponse,omitempty"` }
type DeleteResourceJob ¶ added in v1.14.0
type DeleteResourceJob struct { // Optional. Used to retain the full HTTP response. SDKHTTPResponse *HTTPResponse `json:"sdkHttpResponse,omitempty"` // Optional. Name string `json:"name,omitempty"` // Optional. Done bool `json:"done,omitempty"` // Optional. Error *JobError `json:"error,omitempty"` }
The return value of delete operation.
type DistillationDataStats ¶ added in v1.16.0
type DistillationDataStats struct { // Output only. Statistics computed for the training dataset. TrainingDatasetStats *DatasetStats `json:"trainingDatasetStats,omitempty"` }
Statistics computed for datasets used for distillation.
type DownloadFileConfig ¶ added in v0.5.0
type DownloadFileConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` }
Used to override the default configuration.
type DownloadURI ¶ added in v1.0.0
type DownloadURI interface {
// contains filtered or unexported methods
}
DownloadURI represents a resource that can be downloaded.
It is used to abstract the different types of resources that can be downloaded, such as files or videos.
You can create instances that implement this interface using the following constructor functions:
- NewDownloadURIFromFile
- NewDownloadURIFromVideo
- NewDownloadURIFromGeneratedVideo
- ...
func NewDownloadURIFromFile ¶ added in v1.0.0
func NewDownloadURIFromFile(f *File) DownloadURI
NewDownloadURIFromFile creates a DownloadURI from a File.
func NewDownloadURIFromGeneratedVideo ¶ added in v1.0.0
func NewDownloadURIFromGeneratedVideo(v *GeneratedVideo) DownloadURI
NewDownloadURIFromGeneratedVideo creates a DownloadURI from a GeneratedVideo.
func NewDownloadURIFromVideo ¶ added in v1.0.0
func NewDownloadURIFromVideo(v *Video) DownloadURI
NewDownloadURIFromVideo creates a DownloadURI from a Video.
type DynamicRetrievalConfig ¶
type DynamicRetrievalConfig struct { // Optional. The mode of the predictor to be used in dynamic retrieval. Mode DynamicRetrievalConfigMode `json:"mode,omitempty"` // Optional. The threshold to be used in dynamic retrieval. If empty, a system default // value is used. DynamicThreshold *float32 `json:"dynamicThreshold,omitempty"` }
Describes the options to customize dynamic retrieval.
type DynamicRetrievalConfigMode ¶
type DynamicRetrievalConfigMode string
Config for the dynamic retrieval config mode.
const ( // Always trigger retrieval. DynamicRetrievalConfigModeUnspecified DynamicRetrievalConfigMode = "MODE_UNSPECIFIED" // Run retrieval only when system decides it is necessary. DynamicRetrievalConfigModeDynamic DynamicRetrievalConfigMode = "MODE_DYNAMIC" )
type EditImageConfig ¶ added in v0.5.0
type EditImageConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` // Optional. Cloud Storage URI used to store the generated images. OutputGCSURI string `json:"outputGcsUri,omitempty"` // Optional. Description of what to discourage in the generated images. NegativePrompt string `json:"negativePrompt,omitempty"` // Optional. Number of images to generate. If empty, the system will choose a default // value (currently 4). NumberOfImages int32 `json:"numberOfImages,omitempty"` // Optional. Aspect ratio of the generated images. Supported values are // "1:1", "3:4", "4:3", "9:16", and "16:9". AspectRatio string `json:"aspectRatio,omitempty"` // Optional. Controls how much the model adheres to the text prompt. Large // values increase output and prompt alignment, but may compromise image // quality. GuidanceScale *float32 `json:"guidanceScale,omitempty"` // Optional. Random seed for image generation. This is not available when // “add_watermark“ is set to true. Seed *int32 `json:"seed,omitempty"` // Optional. Filter level for safety filtering. SafetyFilterLevel SafetyFilterLevel `json:"safetyFilterLevel,omitempty"` // Optional. Allows generation of people by the model. PersonGeneration PersonGeneration `json:"personGeneration,omitempty"` // Optional. Whether to report the safety scores of each generated image and // the positive prompt in the response. IncludeSafetyAttributes bool `json:"includeSafetyAttributes,omitempty"` // Optional. Whether to include the Responsible AI filter reason if the image // is filtered out of the response. IncludeRAIReason bool `json:"includeRaiReason,omitempty"` // Optional. Language of the text in the prompt. Language ImagePromptLanguage `json:"language,omitempty"` // Optional. MIME type of the generated image. OutputMIMEType string `json:"outputMimeType,omitempty"` // Optional. Compression quality of the generated image (for “image/jpeg“ // only). OutputCompressionQuality *int32 `json:"outputCompressionQuality,omitempty"` // Optional. Whether to add a watermark to the generated images. AddWatermark *bool `json:"addWatermark,omitempty"` // Optional. Describes the editing mode for the request. EditMode EditMode `json:"editMode,omitempty"` // Optional. The number of sampling steps. A higher value has better image // quality, while a lower value has better latency. BaseSteps *int32 `json:"baseSteps,omitempty"` }
Configuration for editing an image.
type EditImageResponse ¶ added in v0.5.0
type EditImageResponse struct { // Optional. Used to retain the full HTTP response. SDKHTTPResponse *HTTPResponse `json:"sdkHttpResponse,omitempty"` // Generated images. GeneratedImages []*GeneratedImage `json:"generatedImages,omitempty"` }
Response for the request to edit an image.
type EditMode ¶ added in v0.5.0
type EditMode string
Enum representing the editing mode.
const ( EditModeDefault EditMode = "EDIT_MODE_DEFAULT" EditModeInpaintRemoval EditMode = "EDIT_MODE_INPAINT_REMOVAL" EditModeInpaintInsertion EditMode = "EDIT_MODE_INPAINT_INSERTION" EditModeOutpaint EditMode = "EDIT_MODE_OUTPAINT" EditModeControlledEditing EditMode = "EDIT_MODE_CONTROLLED_EDITING" EditModeStyle EditMode = "EDIT_MODE_STYLE" EditModeBgswap EditMode = "EDIT_MODE_BGSWAP" EditModeProductImage EditMode = "EDIT_MODE_PRODUCT_IMAGE" )
type EmbedContentConfig ¶ added in v0.5.0
type EmbedContentConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` // Type of task for which the embedding will be used. TaskType string `json:"taskType,omitempty"` // Title for the text. Only applicable when TaskType is // `RETRIEVAL_DOCUMENT`. Title string `json:"title,omitempty"` // Reduced dimension for the output embedding. If set, // excessive values in the output embedding are truncated from the end. // Supported by newer models since 2024 only. You cannot set this value if // using the earlier model (`models/embedding-001`). OutputDimensionality *int32 `json:"outputDimensionality,omitempty"` // Vertex API only. The MIME type of the input. MIMEType string `json:"mimeType,omitempty"` // Vertex API only. Whether to silently truncate inputs longer than // the max sequence length. If this option is set to false, oversized inputs // will lead to an INVALID_ARGUMENT error, similar to other text APIs. AutoTruncate bool `json:"autoTruncate,omitempty"` }
Optional parameters for the EmbedContent method.
type EmbedContentMetadata ¶ added in v0.5.0
type EmbedContentMetadata struct { // Vertex API only. The total number of billable characters included // in the request. BillableCharacterCount int32 `json:"billableCharacterCount,omitempty"` }
Request-level metadata for the Vertex Embed Content API.
type EmbedContentResponse ¶ added in v0.5.0
type EmbedContentResponse struct { // Optional. Used to retain the full HTTP response. SDKHTTPResponse *HTTPResponse `json:"sdkHttpResponse,omitempty"` // The embeddings for each request, in the same order as provided in // the batch request. Embeddings []*ContentEmbedding `json:"embeddings,omitempty"` // Vertex API only. Metadata about the request. Metadata *EmbedContentMetadata `json:"metadata,omitempty"` }
Response for the embed_content method.
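A hedged sketch of embedding text and reading the vectors back. It assumes Models exposes an EmbedContent(ctx, model, contents, config) method and that ContentEmbedding has a Values slice; neither appears in this section, and the model name is an example.

// embedPassage embeds one passage for retrieval and prints the vector length.
func embedPassage(ctx context.Context, client *genai.Client) error {
	cfg := &genai.EmbedContentConfig{
		TaskType:             "RETRIEVAL_DOCUMENT",
		Title:                "Company FAQ", // only meaningful for RETRIEVAL_DOCUMENT
		OutputDimensionality: genai.Ptr(int32(256)),
	}
	resp, err := client.Models.EmbedContent(ctx, "text-embedding-004", genai.Text("How do I reset my password?"), cfg)
	if err != nil {
		return err
	}
	for _, emb := range resp.Embeddings {
		fmt.Println("dimensions:", len(emb.Values)) // Values field is an assumption
	}
	return nil
}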
type EncryptionSpec ¶ added in v1.16.0
type EncryptionSpec struct { // Required. The Cloud KMS resource identifier of the customer managed encryption key // used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. // The key needs to be in the same region as where the compute resource is created. KmsKeyName string `json:"kmsKeyName,omitempty"` }
Represents a customer-managed encryption key spec that can be applied to a top-level resource.
type EndSensitivity ¶ added in v1.0.0
type EndSensitivity string
End of speech sensitivity.
const ( // The default is END_SENSITIVITY_LOW. EndSensitivityUnspecified EndSensitivity = "END_SENSITIVITY_UNSPECIFIED" // Automatic detection ends speech more often. EndSensitivityHigh EndSensitivity = "END_SENSITIVITY_HIGH" // Automatic detection ends speech less often. EndSensitivityLow EndSensitivity = "END_SENSITIVITY_LOW" )
type Endpoint ¶ added in v0.4.0
type Endpoint struct { // Optional. Resource name of the endpoint. Name string `json:"name,omitempty"` // Optional. ID of the model that's deployed to the endpoint. DeployedModelID string `json:"deployedModelId,omitempty"` }
An endpoint where models are deployed.
type EnterpriseWebSearch ¶ added in v1.4.0
type EnterpriseWebSearch struct { // Optional. List of domains to be excluded from the search results. The default limit // is 2000 domains. ExcludeDomains []string `json:"excludeDomains,omitempty"` }
Tool to search public web data, powered by Vertex AI Search and Sec4 compliance.
type EntityLabel ¶ added in v1.21.0
type EntityLabel struct { // Optional. The label of the segmented entity. Label string `json:"label,omitempty"` // Optional. The confidence score of the detected label. Score float32 `json:"score,omitempty,string"` }
An entity representing the segmented area.
type Environment ¶ added in v1.14.0
type Environment string
The environment being operated.
const ( // Defaults to browser. EnvironmentUnspecified Environment = "ENVIRONMENT_UNSPECIFIED" // Operates in a web browser. EnvironmentBrowser Environment = "ENVIRONMENT_BROWSER" )
type ExecutableCode ¶
type ExecutableCode struct { // Required. The code to be executed. Code string `json:"code,omitempty"` // Required. Programming language of the `code`. Language Language `json:"language,omitempty"` }
Code generated by the model that is meant to be executed, and the result returned to the model. Generated when using the [CodeExecution] tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated.
type ExternalAPI ¶ added in v1.14.0
type ExternalAPI struct { // The authentication config to access the API. Deprecated. Please use auth_config instead. APIAuth *APIAuth `json:"apiAuth,omitempty"` // The API spec that the external API implements. APISpec APISpec `json:"apiSpec,omitempty"` // The authentication config to access the API. AuthConfig *AuthConfig `json:"authConfig,omitempty"` // Parameters for the elastic search API. ElasticSearchParams *ExternalAPIElasticSearchParams `json:"elasticSearchParams,omitempty"` // The endpoint of the external API. The system will call the API at this endpoint to // retrieve the data for grounding. Example: https://acme.com:443/search Endpoint string `json:"endpoint,omitempty"` // Parameters for the simple search API. SimpleSearchParams *ExternalAPISimpleSearchParams `json:"simpleSearchParams,omitempty"` }
Retrieve from a data source powered by an external API for grounding. The external API is not owned by Google, but it needs to follow the pre-defined API spec.
type ExternalAPIElasticSearchParams ¶ added in v1.14.0
type ExternalAPIElasticSearchParams struct { // The ElasticSearch index to use. Index string `json:"index,omitempty"` // Optional. Number of hits (chunks) to request. When specified, it is passed to Elasticsearch // as the `num_hits` param. NumHits *int32 `json:"numHits,omitempty"` // The ElasticSearch search template to use. SearchTemplate string `json:"searchTemplate,omitempty"` }
The search parameters to use for the ELASTIC_SEARCH spec.
type ExternalAPISimpleSearchParams ¶ added in v1.14.0
type ExternalAPISimpleSearchParams struct { }
The search parameters to use for SIMPLE_SEARCH spec.
type ExtrasRequestProvider ¶ added in v1.10.0
ExtrasRequestProvider provides a way to dynamically modify the request body before it is sent. It is a function that takes the request body and returns the modified body. This is useful for advanced scenarios where request parameters need to be added based on logic that cannot be handled by a static map.
type FeatureSelectionPreference ¶ added in v1.1.0
type FeatureSelectionPreference string
Options for feature selection preference.
const ( FeatureSelectionPreferenceUnspecified FeatureSelectionPreference = "FEATURE_SELECTION_PREFERENCE_UNSPECIFIED" FeatureSelectionPreferencePrioritizeQuality FeatureSelectionPreference = "PRIORITIZE_QUALITY" FeatureSelectionPreferenceBalanced FeatureSelectionPreference = "BALANCED" FeatureSelectionPreferencePrioritizeCost FeatureSelectionPreference = "PRIORITIZE_COST" )
type FetchPredictOperationConfig ¶ added in v0.7.0
type FetchPredictOperationConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` }
type File ¶ added in v1.0.0
type File struct { // Optional. The `File` resource name. The ID (name excluding the "files/" prefix) can // contain up to 40 characters that are lowercase alphanumeric or dashes (-). The ID // cannot start or end with a dash. If the name is empty on create, a unique name will // be generated. Example: `files/123-456` Name string `json:"name,omitempty"` // Optional. The human-readable display name for the `File`. The display name must be // no more than 512 characters in length, including spaces. Example: 'Welcome Image' DisplayName string `json:"displayName,omitempty"` // Optional. Output only. MIME type of the file. MIMEType string `json:"mimeType,omitempty"` // Optional. Output only. Size of the file in bytes. SizeBytes *int64 `json:"sizeBytes,omitempty,string"` // Optional. Output only. The timestamp of when the `File` was created. CreateTime time.Time `json:"createTime,omitempty"` // Optional. Output only. The timestamp of when the `File` will be deleted. Only set // if the `File` is scheduled to expire. ExpirationTime time.Time `json:"expirationTime,omitempty"` // Optional. Output only. The timestamp of when the `File` was last updated. UpdateTime time.Time `json:"updateTime,omitempty"` // Optional. Output only. SHA-256 hash of the uploaded bytes. The hash value is encoded // in base64 format. Sha256Hash string `json:"sha256Hash,omitempty"` // Optional. Output only. The URI of the `File`. URI string `json:"uri,omitempty"` // Optional. Output only. The URI of the `File`, only set for downloadable (generated) // files. DownloadURI string `json:"downloadUri,omitempty"` // Optional. Output only. Processing state of the File. State FileState `json:"state,omitempty"` // Optional. Output only. The source of the `File`. Source FileSource `json:"source,omitempty"` // Optional. Output only. Metadata for a video. VideoMetadata map[string]any `json:"videoMetadata,omitempty"` // Optional. Output only. Error status if File processing failed. Error *FileStatus `json:"error,omitempty"` }
A file uploaded to the API.
func (*File) MarshalJSON ¶ added in v1.0.0
func (*File) UnmarshalJSON ¶ added in v1.0.0
type FileData ¶
type FileData struct { // Optional. Display name of the file data. Used to provide a label or filename to distinguish // file data entries. It is not currently used in the Gemini GenerateContent calls. DisplayName string `json:"displayName,omitempty"` // Required. URI of the file data. FileURI string `json:"fileUri,omitempty"` // Required. The IANA standard MIME type of the source data. MIMEType string `json:"mimeType,omitempty"` }
URI based data.
type FileSource ¶ added in v1.0.0
type FileSource string
Source of the File.
const ( FileSourceUnspecified FileSource = "SOURCE_UNSPECIFIED" FileSourceUploaded FileSource = "UPLOADED" FileSourceGenerated FileSource = "GENERATED" )
type FileStatus ¶ added in v1.0.0
type FileStatus struct { // Optional. A list of messages that carry the error details. There is a common set // of message types for APIs to use. Details []map[string]any `json:"details,omitempty"` // Optional. A developer-facing, human-readable error message. Message string `json:"message,omitempty"` // Optional. The status code. 0 for OK, 1 for CANCELLED. Code *int32 `json:"code,omitempty"` }
Status of a File that uses a common error model.
type Files ¶ added in v1.0.0
type Files struct {
// contains filtered or unexported fields
}
func (Files) All ¶ added in v1.0.0
All retrieves all file resources.
This method handles pagination internally, making multiple API calls as needed to fetch all entries. It returns an iterator that yields each file entry one by one. You do not need to manage pagination tokens or make multiple calls to retrieve all data.
func (Files) Delete ¶ added in v1.0.0
func (m Files) Delete(ctx context.Context, name string, config *DeleteFileConfig) (*DeleteFileResponse, error)
func (Files) Download ¶ added in v1.0.0
func (m Files) Download(ctx context.Context, uri DownloadURI, config *DownloadFileConfig) ([]byte, error)
Download downloads a file from the specified URI. If the URI refers to a video (Video or GeneratedVideo), the video bytes will be populated into the video's VideoBytes field.
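A minimal sketch combining NewDownloadURIFromFile with Download, assuming a configured *Client named client and a context import:

// downloadFileBytes fetches the raw bytes of a previously uploaded or generated file.
func downloadFileBytes(ctx context.Context, client *genai.Client, f *genai.File) ([]byte, error) {
	uri := genai.NewDownloadURIFromFile(f)
	return client.Files.Download(ctx, uri, nil) // nil uses the default DownloadFileConfig
}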
func (Files) Upload ¶ added in v1.0.0
Upload copies the contents of the given io.Reader to file storage associated with the service, and returns information about the resulting file.
func (Files) UploadFromPath ¶ added in v1.0.0
func (m Files) UploadFromPath(ctx context.Context, path string, config *UploadFileConfig) (*File, error)
UploadFromPath uploads a file from the specified path and returns information about the resulting file.
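A short sketch of UploadFromPath, assuming a configured *Client named client; the path is an example, and a nil config keeps the defaults.

// uploadLocalFile uploads a local PDF and reports its initial processing state.
func uploadLocalFile(ctx context.Context, client *genai.Client) (*genai.File, error) {
	f, err := client.Files.UploadFromPath(ctx, "./report.pdf", nil)
	if err != nil {
		return nil, err
	}
	fmt.Println("uploaded:", f.Name, "state:", f.State)
	return f, nil
}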
type FinishReason ¶
type FinishReason string
The reason why the model stopped generating tokens. If empty, the model has not stopped generating tokens.
const ( // The finish reason is unspecified. FinishReasonUnspecified FinishReason = "FINISH_REASON_UNSPECIFIED" // Token generation reached a natural stopping point or a configured stop sequence. FinishReasonStop FinishReason = "STOP" // Token generation reached the configured maximum output tokens. FinishReasonMaxTokens FinishReason = "MAX_TOKENS" // Token generation stopped because the content potentially contains safety violations. // NOTE: When streaming, [content][] is empty if content filters blocks the output. FinishReasonSafety FinishReason = "SAFETY" // The token generation stopped because of potential recitation. FinishReasonRecitation FinishReason = "RECITATION" // The token generation stopped because of using an unsupported language. FinishReasonLanguage FinishReason = "LANGUAGE" // All other reasons that stopped the token generation. FinishReasonOther FinishReason = "OTHER" // Token generation stopped because the content contains forbidden terms. FinishReasonBlocklist FinishReason = "BLOCKLIST" // Token generation stopped for potentially containing prohibited content. FinishReasonProhibitedContent FinishReason = "PROHIBITED_CONTENT" // Token generation stopped because the content potentially contains Sensitive Personally // Identifiable Information (SPII). FinishReasonSPII FinishReason = "SPII" // The function call generated by the model is invalid. FinishReasonMalformedFunctionCall FinishReason = "MALFORMED_FUNCTION_CALL" // Token generation stopped because generated images have safety violations. FinishReasonImageSafety FinishReason = "IMAGE_SAFETY" // The tool call generated by the model is invalid. FinishReasonUnexpectedToolCall FinishReason = "UNEXPECTED_TOOL_CALL" )
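For illustration, a sketch of mapping a candidate's finish reason to a human-readable explanation. It assumes Candidate exposes a FinishReason field; the Candidate type is not expanded in this section.

// explainStop reports why the first candidate stopped generating.
func explainStop(resp *genai.GenerateContentResponse) string {
	if len(resp.Candidates) == 0 {
		return "no candidates returned"
	}
	switch resp.Candidates[0].FinishReason { // FinishReason field is an assumption
	case genai.FinishReasonStop:
		return "completed normally"
	case genai.FinishReasonMaxTokens:
		return "hit the output token limit"
	case genai.FinishReasonSafety, genai.FinishReasonProhibitedContent:
		return "stopped by a safety filter"
	default:
		return string(resp.Candidates[0].FinishReason)
	}
}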
type FunctionCall ¶
type FunctionCall struct { // Optional. The unique ID of the function call. If populated, the client should execute // the `function_call` and return the response with the matching `id`. ID string `json:"id,omitempty"` // Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] // for parameter details. Args map[string]any `json:"args,omitempty"` // Required. The name of the function to call. Matches [FunctionDeclaration.Name]. Name string `json:"name,omitempty"` }
A function call.
type FunctionCallingConfig ¶
type FunctionCallingConfig struct { // Optional. Function calling mode. Mode FunctionCallingConfigMode `json:"mode,omitempty"` // Optional. Function names to call. Only set when the Mode is ANY. Function names should // match [FunctionDeclaration.Name]. With mode set to ANY, model will predict a function // call from the set of function names provided. AllowedFunctionNames []string `json:"allowedFunctionNames,omitempty"` }
Function calling config.
type FunctionCallingConfigMode ¶
type FunctionCallingConfigMode string
Config for the function calling config mode.
const ( // The function calling config mode is unspecified. Should not be used. FunctionCallingConfigModeUnspecified FunctionCallingConfigMode = "MODE_UNSPECIFIED" // Default model behavior, model decides to predict either function calls or natural // language response. FunctionCallingConfigModeAuto FunctionCallingConfigMode = "AUTO" // Model is constrained to always predicting function calls only. If "allowed_function_names" // are set, the predicted function calls will be limited to any one of "allowed_function_names", // else the predicted function calls will be any one of the provided "function_declarations". FunctionCallingConfigModeAny FunctionCallingConfigMode = "ANY" // Model will not predict any function calls. Model behavior is same as when not passing // any function declarations. FunctionCallingConfigModeNone FunctionCallingConfigMode = "NONE" // Model decides to predict either a function call or a natural language response, but // will validate function calls with constrained decoding. If "allowed_function_names" // are set, the predicted function call will be limited to any one of "allowed_function_names", // else the predicted function call will be any one of the provided "function_declarations". FunctionCallingConfigModeValidated FunctionCallingConfigMode = "VALIDATED" )
type FunctionDeclaration ¶
type FunctionDeclaration struct { // Optional. Defines the function behavior. Behavior Behavior `json:"behavior,omitempty"` // Optional. Description and purpose of the function. Model uses it to decide how and // whether to call the function. Description string `json:"description,omitempty"` // Required. The name of the function to call. Must start with a letter or an underscore. // Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length // of 64. Name string `json:"name,omitempty"` // Optional. Describes the parameters to this function in JSON Schema Object format. // Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. // Parameter names are case sensitive. Schema Value: the Schema defining the type used // for the parameter. For function with no parameters, this can be left unset. Parameter // names must start with a letter or an underscore and must only contain chars a-z, // A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and // 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: // INTEGER required: - param1 Parameters *Schema `json:"parameters,omitempty"` // Optional. Describes the parameters to the function in JSON Schema format. The schema // must describe an object where the properties are the parameters to the function. // For example: “` { "type": "object", "properties": { "name": { "type": "string" }, // "age": { "type": "integer" } }, "additionalProperties": false, "required": ["name", // "age"], "propertyOrdering": ["name", "age"] } “` This field is mutually exclusive // with `parameters`. ParametersJsonSchema any `json:"parametersJsonSchema,omitempty"` // Optional. Describes the output from this function in JSON Schema format. Reflects // the Open API 3.03 Response Object. The Schema defines the type used for the response // value of the function. Response *Schema `json:"response,omitempty"` // Optional. Describes the output from this function in JSON Schema format. The value // specified by the schema is the response value of the function. This field is mutually // exclusive with `response`. ResponseJsonSchema any `json:"responseJsonSchema,omitempty"` }
Defines a function that the model can generate JSON inputs for. The inputs are based on `OpenAPI 3.0 specifications <https://spec.openapis.org/oas/v3.0.3>`_.
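A hedged sketch of declaring a function with ParametersJsonSchema and forcing the model to call it via the ANY mode. The Tool.FunctionDeclarations and ToolConfig.FunctionCallingConfig field names are assumptions; those structs are documented elsewhere in this package.

// weatherToolConfig builds a GenerateContentConfig that restricts the model to
// calling a single declared function.
func weatherToolConfig() *genai.GenerateContentConfig {
	getWeather := &genai.FunctionDeclaration{
		Name:        "get_weather",
		Description: "Returns the current weather for a city.",
		ParametersJsonSchema: map[string]any{
			"type": "object",
			"properties": map[string]any{
				"city": map[string]any{"type": "string"},
			},
			"required": []string{"city"},
		},
	}
	return &genai.GenerateContentConfig{
		Tools: []*genai.Tool{{FunctionDeclarations: []*genai.FunctionDeclaration{getWeather}}}, // field name assumed
		ToolConfig: &genai.ToolConfig{
			FunctionCallingConfig: &genai.FunctionCallingConfig{ // field name assumed
				Mode:                 genai.FunctionCallingConfigModeAny,
				AllowedFunctionNames: []string{"get_weather"},
			},
		},
	}
}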
type FunctionResponse ¶
type FunctionResponse struct { // Optional. Signals that the function call continues, and more responses will be returned, // turning the function call into a generator. Only applicable to NON_BLOCKING function // calls (see FunctionDeclaration.behavior for details), ignored otherwise. If false (the // default), future responses will not be considered. It is allowed to return an empty `response` // with `will_continue=False` to signal that the function call is finished. WillContinue *bool `json:"willContinue,omitempty"` // Optional. Specifies how the response should be scheduled in the conversation. Only // applicable to NON_BLOCKING function calls, is ignored otherwise. Defaults to WHEN_IDLE. Scheduling FunctionResponseScheduling `json:"scheduling,omitempty"` // Optional. The ID of the function call this response is for. Populated by the client // to match the corresponding function call `id`. ID string `json:"id,omitempty"` // Required. The name of the function to call. Matches [FunctionDeclaration.name] and // [FunctionCall.name]. Name string `json:"name,omitempty"` // Required. The function response in JSON object format. Use the "output" key to specify // function output and the "error" key to specify error details (if any). If the "output" and // "error" keys are not specified, then the whole "response" is treated as function output. Response map[string]any `json:"response,omitempty"` }
A function response.
type FunctionResponseScheduling ¶ added in v1.6.0
type FunctionResponseScheduling string
Specifies how the response should be scheduled in the conversation.
const ( // This value is unused. FunctionResponseSchedulingUnspecified FunctionResponseScheduling = "SCHEDULING_UNSPECIFIED" // Only add the result to the conversation context, do not interrupt or trigger generation. FunctionResponseSchedulingSilent FunctionResponseScheduling = "SILENT" // Add the result to the conversation context, and prompt to generate output without // interrupting ongoing generation. FunctionResponseSchedulingWhenIdle FunctionResponseScheduling = "WHEN_IDLE" // Add the result to the conversation context, interrupt ongoing generation and prompt // to generate output. FunctionResponseSchedulingInterrupt FunctionResponseScheduling = "INTERRUPT" )
type GeminiPreferenceExample ¶ added in v1.20.0
type GeminiPreferenceExample struct { // List of completions for a given prompt. Completions []*GeminiPreferenceExampleCompletion `json:"completions,omitempty"` // Multi-turn contents that represents the Prompt. Contents []*Content `json:"contents,omitempty"` }
Input example for preference optimization.
type GeminiPreferenceExampleCompletion ¶ added in v1.20.0
type GeminiPreferenceExampleCompletion struct { // Single turn completion for the given prompt. Completion *Content `json:"completion,omitempty"` // The score for the given completion. Score float32 `json:"score,omitempty"` }
Completion and its preference score.
type GenerateContentConfig ¶
type GenerateContentConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` // Optional. Instructions for the model to steer it toward better performance. // For example, "Answer as concisely as possible" or "Don't use technical // terms in your response". SystemInstruction *Content `json:"systemInstruction,omitempty"` // Optional. Value that controls the degree of randomness in token selection. // Lower temperatures are good for prompts that require a less open-ended or // creative response, while higher temperatures can lead to more diverse or // creative results. Temperature *float32 `json:"temperature,omitempty"` // Optional. Tokens are selected from the most to least probable until the sum // of their probabilities equals this value. Use a lower value for less // random responses and a higher value for more random responses. TopP *float32 `json:"topP,omitempty"` // Optional. For each token selection step, the “top_k“ tokens with the // highest probabilities are sampled. Then tokens are further filtered based // on “top_p“ with the final token selected using temperature sampling. Use // a lower number for less random responses and a higher number for more // random responses. TopK *float32 `json:"topK,omitempty"` // Optional. Number of response variations to return. // If empty, the system will choose a default value (currently 1). CandidateCount int32 `json:"candidateCount,omitempty"` // Optional. Maximum number of tokens that can be generated in the response. // If empty, API will use a default value. The default value varies by model. MaxOutputTokens int32 `json:"maxOutputTokens,omitempty"` // Optional. List of strings that tells the model to stop generating text if one // of the strings is encountered in the response. StopSequences []string `json:"stopSequences,omitempty"` // Optional. Whether to return the log probabilities of the tokens that were // chosen by the model at each step. ResponseLogprobs bool `json:"responseLogprobs,omitempty"` // Optional. Number of top candidate tokens to return the log probabilities for // at each generation step. Logprobs *int32 `json:"logprobs,omitempty"` // Optional. Positive values penalize tokens that already appear in the // generated text, increasing the probability of generating more diverse // content. PresencePenalty *float32 `json:"presencePenalty,omitempty"` // Optional. Positive values penalize tokens that repeatedly appear in the // generated text, increasing the probability of generating more diverse // content. FrequencyPenalty *float32 `json:"frequencyPenalty,omitempty"` // Optional. When “seed“ is fixed to a specific number, the model makes a best // effort to provide the same response for repeated requests. By default, a // random number is used. Seed *int32 `json:"seed,omitempty"` // Optional. Output response mimetype of the generated candidate text. // Supported mimetype: // - `text/plain`: (default) Text output. // - `application/json`: JSON response in the candidates. // The model needs to be prompted to output the appropriate response type, // otherwise the behavior is undefined. // This is a preview feature. ResponseMIMEType string `json:"responseMimeType,omitempty"` // Optional. The `Schema` object allows the definition of input and output data types. // These types can be objects, but also primitives and arrays. // Represents a select subset of an [OpenAPI 3.0 schema // object](https://spec.openapis.org/oas/v3.0.3#schema). 
// If set, a compatible response_mime_type must also be set. // Compatible mimetypes: `application/json`: Schema for JSON response. ResponseSchema *Schema `json:"responseSchema,omitempty"` // Optional. Output schema of the generated response. // This is an alternative to `response_schema` that accepts [JSON // Schema](https://json-schema.org/). If set, `response_schema` must be // omitted, but `response_mime_type` is required. While the full JSON Schema // may be sent, not all features are supported. Specifically, only the // following properties are supported: - `$id` - `$defs` - `$ref` - `$anchor` // - `type` - `format` - `title` - `description` - `enum` (for strings and // numbers) - `items` - `prefixItems` - `minItems` - `maxItems` - `minimum` - // `maximum` - `anyOf` - `oneOf` (interpreted the same as `anyOf`) - // `properties` - `additionalProperties` - `required` The non-standard // `propertyOrdering` property may also be set. Cyclic references are // unrolled to a limited degree and, as such, may only be used within // non-required properties. (Nullable properties are not sufficient.) If // `$ref` is set on a sub-schema, no other properties, except for than those // starting as a `$`, may be set. ResponseJsonSchema any `json:"responseJsonSchema,omitempty"` // Optional. Configuration for model router requests. RoutingConfig *GenerationConfigRoutingConfig `json:"routingConfig,omitempty"` // Optional. Configuration for model selection. ModelSelectionConfig *ModelSelectionConfig `json:"modelSelectionConfig,omitempty"` // Optional. Safety settings in the request to block unsafe content in the // response. SafetySettings []*SafetySetting `json:"safetySettings,omitempty"` // Optional. Code that enables the system to interact with external systems to // perform an action outside of the knowledge and scope of the model. Tools []*Tool `json:"tools,omitempty"` // Optional. Associates model output to a specific function call. ToolConfig *ToolConfig `json:"toolConfig,omitempty"` // Optional. Labels with user-defined metadata to break down billed charges. Labels map[string]string `json:"labels,omitempty"` // Optional. Resource name of a context cache that can be used in subsequent // requests. CachedContent string `json:"cachedContent,omitempty"` // Optional. The requested modalities of the response. Represents the set of // modalities that the model can return. ResponseModalities []string `json:"responseModalities,omitempty"` // Optional. If specified, the media resolution specified will be used. MediaResolution MediaResolution `json:"mediaResolution,omitempty"` // Optional. The speech generation configuration. SpeechConfig *SpeechConfig `json:"speechConfig,omitempty"` // Optional. If enabled, audio timestamp will be included in the request to the // model. AudioTimestamp bool `json:"audioTimestamp,omitempty"` // Optional. The thinking features configuration. ThinkingConfig *ThinkingConfig `json:"thinkingConfig,omitempty"` }
Optional model configuration parameters. For more information, see `Content generation parameters <https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/content-generation-parameters>`_.
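As a sketch of a typical configuration for a structured JSON reply, assuming Models exposes a GenerateContent(ctx, model, contents, config) method (documented elsewhere in this package), that client is a configured *Client, and that context and fmt are imported; the model name is an example.

// generateJSON asks for a JSON object and prints the raw text of the reply.
func generateJSON(ctx context.Context, client *genai.Client) error {
	cfg := &genai.GenerateContentConfig{
		SystemInstruction: genai.NewContentFromText("Answer as concisely as possible.", "user"),
		Temperature:       genai.Ptr(float32(0.2)),
		MaxOutputTokens:   512,
		ResponseMIMEType:  "application/json",
		ResponseJsonSchema: map[string]any{
			"type":       "object",
			"properties": map[string]any{"answer": map[string]any{"type": "string"}},
			"required":   []string{"answer"},
		},
	}
	resp, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", genai.Text("What is the capital of France?"), cfg)
	if err != nil {
		return err
	}
	fmt.Println(resp.Text())
	return nil
}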
type GenerateContentResponse ¶
type GenerateContentResponse struct { // Optional. Used to retain the full HTTP response. SDKHTTPResponse *HTTPResponse `json:"sdkHttpResponse,omitempty"` // Response variations returned by the model. Candidates []*Candidate `json:"candidates,omitempty"` // Timestamp when the request is made to the server. CreateTime time.Time `json:"createTime,omitempty"` // Output only. The model version used to generate the response. ModelVersion string `json:"modelVersion,omitempty"` // Output only. Content filter results for a prompt sent in the request. Note: Sent // only in the first stream chunk. Only happens when no candidates were generated due // to content violations. PromptFeedback *GenerateContentResponsePromptFeedback `json:"promptFeedback,omitempty"` // Output only. response_id is used to identify each response. It is the encoding of // the event_id. ResponseID string `json:"responseId,omitempty"` // Usage metadata about the response(s). UsageMetadata *GenerateContentResponseUsageMetadata `json:"usageMetadata,omitempty"` }
Response message for PredictionService.GenerateContent.
func (*GenerateContentResponse) CodeExecutionResult ¶ added in v0.5.0
func (r *GenerateContentResponse) CodeExecutionResult() string
CodeExecutionResult returns the code execution result in the GenerateContentResponse.
func (*GenerateContentResponse) ExecutableCode ¶ added in v0.5.0
func (r *GenerateContentResponse) ExecutableCode() string
ExecutableCode returns the executable code in the GenerateContentResponse.
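A small sketch that reads the code-execution parts of a response using the two helpers above; producing such a response (for example by enabling the code execution tool) is outside this section.

// printCodeExecution prints any generated code and its execution result.
func printCodeExecution(resp *genai.GenerateContentResponse) {
	if code := resp.ExecutableCode(); code != "" {
		fmt.Println("generated code:\n" + code)
	}
	if result := resp.CodeExecutionResult(); result != "" {
		fmt.Println("execution result:\n" + result)
	}
}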
func (*GenerateContentResponse) FunctionCalls ¶ added in v0.1.0
func (r *GenerateContentResponse) FunctionCalls() []*FunctionCall
FunctionCalls returns the list of function calls in the GenerateContentResponse.
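A sketch of dispatching the model's function calls and packaging results as FunctionResponse values; sending them back to the model (for example via NewContentFromFunctionResponse) is not shown here.

// handleFunctionCalls executes each requested call and collects responses.
func handleFunctionCalls(resp *genai.GenerateContentResponse) []*genai.FunctionResponse {
	var out []*genai.FunctionResponse
	for _, call := range resp.FunctionCalls() {
		// Dispatch on call.Name and call.Args; a fixed result stands in here.
		result := map[string]any{"output": "sunny, 22 degrees"}
		out = append(out, &genai.FunctionResponse{
			ID:       call.ID, // echo the ID so the model can match the call
			Name:     call.Name,
			Response: result,
		})
	}
	return out
}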
func (*GenerateContentResponse) MarshalJSON ¶ added in v0.6.0
func (g *GenerateContentResponse) MarshalJSON() ([]byte, error)
func (*GenerateContentResponse) Text ¶ added in v0.1.0
func (r *GenerateContentResponse) Text() string
Text concatenates all the text parts in the GenerateContentResponse.
func (*GenerateContentResponse) UnmarshalJSON ¶ added in v1.16.0
func (g *GenerateContentResponse) UnmarshalJSON(data []byte) error
type GenerateContentResponsePromptFeedback ¶
type GenerateContentResponsePromptFeedback struct { // Output only. Blocked reason. BlockReason BlockedReason `json:"blockReason,omitempty"` // Output only. A readable block reason message. BlockReasonMessage string `json:"blockReasonMessage,omitempty"` // Output only. Safety ratings. SafetyRatings []*SafetyRating `json:"safetyRatings,omitempty"` }
Content filter results for a prompt sent in the request.
type GenerateContentResponseUsageMetadata ¶
type GenerateContentResponseUsageMetadata struct { // Output only. List of modalities of the cached content in the request input. CacheTokensDetails []*ModalityTokenCount `json:"cacheTokensDetails,omitempty"` // Output only. Number of tokens in the cached part in the input (the cached content). CachedContentTokenCount int32 `json:"cachedContentTokenCount,omitempty"` // Number of tokens in the response(s). This includes all the generated response candidates. CandidatesTokenCount int32 `json:"candidatesTokenCount,omitempty"` // Output only. List of modalities that were returned in the response. CandidatesTokensDetails []*ModalityTokenCount `json:"candidatesTokensDetails,omitempty"` // Number of tokens in the prompt. When cached_content is set, this is still the total // effective prompt size meaning this includes the number of tokens in the cached content. PromptTokenCount int32 `json:"promptTokenCount,omitempty"` // Output only. List of modalities that were processed in the request input. PromptTokensDetails []*ModalityTokenCount `json:"promptTokensDetails,omitempty"` // Output only. Number of tokens present in thoughts output. ThoughtsTokenCount int32 `json:"thoughtsTokenCount,omitempty"` // Output only. Number of tokens present in tool-use prompt(s). ToolUsePromptTokenCount int32 `json:"toolUsePromptTokenCount,omitempty"` // Output only. List of modalities that were processed for tool-use request inputs. ToolUsePromptTokensDetails []*ModalityTokenCount `json:"toolUsePromptTokensDetails,omitempty"` // Total token count for prompt, response candidates, and tool-use prompts (if present). TotalTokenCount int32 `json:"totalTokenCount,omitempty"` // Output only. Traffic type. This shows whether a request consumes Pay-As-You-Go or // Provisioned Throughput quota. TrafficType TrafficType `json:"trafficType,omitempty"` }
Usage metadata about response(s).
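A small sketch of reading the usage metadata off a response for cost tracking; fmt import assumed.

// logUsage prints the main token counters from a response.
func logUsage(resp *genai.GenerateContentResponse) {
	u := resp.UsageMetadata
	if u == nil {
		return
	}
	fmt.Printf("prompt=%d cached=%d candidates=%d thoughts=%d total=%d\n",
		u.PromptTokenCount, u.CachedContentTokenCount, u.CandidatesTokenCount,
		u.ThoughtsTokenCount, u.TotalTokenCount)
}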
type GenerateImagesConfig ¶ added in v0.1.0
type GenerateImagesConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` // Optional. Cloud Storage URI used to store the generated images. OutputGCSURI string `json:"outputGcsUri,omitempty"` // Optional. Description of what to discourage in the generated images. NegativePrompt string `json:"negativePrompt,omitempty"` // Optional. Number of images to generate. If empty, the system will choose a default // value (currently 4). NumberOfImages int32 `json:"numberOfImages,omitempty"` // Optional. Aspect ratio of the generated images. Supported values are // "1:1", "3:4", "4:3", "9:16", and "16:9". AspectRatio string `json:"aspectRatio,omitempty"` // Optional. Controls how much the model adheres to the text prompt. Large // values increase output and prompt alignment, but may compromise image // quality. GuidanceScale *float32 `json:"guidanceScale,omitempty"` // Optional. Random seed for image generation. This is not available when // “add_watermark“ is set to true. Seed *int32 `json:"seed,omitempty"` // Optional. Filter level for safety filtering. SafetyFilterLevel SafetyFilterLevel `json:"safetyFilterLevel,omitempty"` // Optional. Allows generation of people by the model. PersonGeneration PersonGeneration `json:"personGeneration,omitempty"` // Optional. Whether to report the safety scores of each generated image and // the positive prompt in the response. IncludeSafetyAttributes bool `json:"includeSafetyAttributes,omitempty"` // Optional. Whether to include the Responsible AI filter reason if the image // is filtered out of the response. IncludeRAIReason bool `json:"includeRaiReason,omitempty"` // Optional. Language of the text in the prompt. Language ImagePromptLanguage `json:"language,omitempty"` // Optional. MIME type of the generated image. OutputMIMEType string `json:"outputMimeType,omitempty"` // Optional. Compression quality of the generated image (for “image/jpeg“ // only). OutputCompressionQuality *int32 `json:"outputCompressionQuality,omitempty"` // Optional. Whether to add a watermark to the generated images. AddWatermark bool `json:"addWatermark,omitempty"` // Optional. The size of the largest dimension of the generated image. // Supported sizes are 1K and 2K (not supported for Imagen 3 models). ImageSize string `json:"imageSize,omitempty"` // Optional. Whether to use the prompt rewriting logic. EnhancePrompt bool `json:"enhancePrompt,omitempty"` }
The configuration for generating images. You can find API default values and more details at VertexAI: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api. GeminiAPI: https://ai.google.dev/gemini-api/docs/imagen#imagen-model
type GenerateImagesResponse ¶ added in v0.1.0
type GenerateImagesResponse struct { // Optional. Used to retain the full HTTP response. SDKHTTPResponse *HTTPResponse `json:"sdkHttpResponse,omitempty"` // List of generated images. GeneratedImages []*GeneratedImage `json:"generatedImages,omitempty"` // Optional. Safety attributes of the positive prompt. Only populated if // “include_safety_attributes“ is set to True. PositivePromptSafetyAttributes *SafetyAttributes `json:"positivePromptSafetyAttributes,omitempty"` }
The output images response.
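A hedged sketch of requesting images and saving the first result. It assumes Models exposes a GenerateImages(ctx, model, prompt, config) method and that Image carries an ImageBytes field; neither appears in this section, the model name is an example, and os and fmt imports are assumed.

// generateAndSaveImage requests images and writes the first one to disk.
func generateAndSaveImage(ctx context.Context, client *genai.Client) error {
	cfg := &genai.GenerateImagesConfig{
		NumberOfImages: 2,
		AspectRatio:    "16:9",
		OutputMIMEType: "image/jpeg",
	}
	resp, err := client.Models.GenerateImages(ctx, "imagen-3.0-generate-002", "a watercolor lighthouse at dusk", cfg)
	if err != nil {
		return err
	}
	if len(resp.GeneratedImages) == 0 {
		return fmt.Errorf("no images returned")
	}
	img := resp.GeneratedImages[0].Image
	return os.WriteFile("lighthouse.jpg", img.ImageBytes, 0o644) // ImageBytes field is an assumption
}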
type GenerateVideosConfig ¶ added in v0.7.0
type GenerateVideosConfig struct { // Optional. Used to override HTTP request options. HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"` // Optional. Number of output videos. If empty, the system will choose a default value. NumberOfVideos int32 `json:"numberOfVideos,omitempty"` // Optional. The GCS bucket where to save the generated videos. OutputGCSURI string `json:"outputGcsUri,omitempty"` // Optional. Frames per second for video generation. FPS *int32 `json:"fps,omitempty"` // Optional. Duration of the clip for video generation in seconds. DurationSeconds *int32 `json:"durationSeconds,omitempty"` // Optional. The RNG seed. If RNG seed is exactly same for each request with // unchanged inputs, the prediction results will be consistent. Otherwise, // a random RNG seed will be used each time to produce a different // result. Seed *int32 `json:"seed,omitempty"` // Optional. The aspect ratio for the generated video. 16:9 (landscape) and // 9:16 (portrait) are supported. AspectRatio string `json:"aspectRatio,omitempty"` // Optional. The resolution for the generated video. 720p and 1080p are // supported. Resolution string `json:"resolution,omitempty"` // Optional. Whether allow to generate person videos, and restrict to specific // ages. Supported values are: dont_allow, allow_adult. PersonGeneration string `json:"personGeneration,omitempty"` // Optional. The pubsub topic where to publish the video generation // progress. PubsubTopic string `json:"pubsubTopic,omitempty"` // Optional. Explicitly state what should not be included in the generated // videos. NegativePrompt string `json:"negativePrompt,omitempty"` // Optional. Whether to use the prompt rewriting logic. EnhancePrompt bool `json:"enhancePrompt,omitempty"` // Optional. Whether to generate audio along with the video. GenerateAudio *bool `json:"generateAudio,omitempty"` // Optional. Image to use as the last frame of generated videos. // Only supported for image to video use cases. LastFrame *Image `json:"lastFrame,omitempty"` // Optional. The images to use as the references to generate the videos. // If this field is provided, the text prompt field must also be provided. // The image, video, or last_frame field are not supported. Each image must // be associated with a type. Veo 2 supports up to 3 asset images *or* 1 // style image. ReferenceImages []*VideoGenerationReferenceImage `json:"referenceImages,omitempty"` // Optional. Compression quality of the generated videos. CompressionQuality VideoCompressionQuality `json:"compressionQuality,omitempty"` }
You can find API default values and more details at VertexAI: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/veo-video-generation.
type GenerateVideosOperation ¶ added in v0.7.0
type GenerateVideosOperation struct { // The server-assigned name, which is only unique within the same service that originally // returns it. If you use the default HTTP mapping, the `name` should be a resource // name ending with `operations/{unique_id}`. Name string `json:"name,omitempty"` // Optional. Service-specific metadata associated with the operation. It typically contains // progress information and common metadata such as create time. Some services might // not provide such metadata. Any method that returns a long-running operation should // document the metadata type, if any. Metadata map[string]any `json:"metadata,omitempty"` // If the value is `false`, it means the operation is still in progress. If `true`, // the operation is completed, and either `error` or `response` is available. Done bool `json:"done,omitempty"` // Optional. The error result of the operation in case of failure or cancellation. Error map[string]any `json:"error,omitempty"` // Optional. The generated videos. Response *GenerateVideosResponse `json:"response,omitempty"` }
A video generation operation.
type GenerateVideosResponse ¶ added in v0.7.0
type GenerateVideosResponse struct { // List of the generated videos GeneratedVideos []*GeneratedVideo `json:"generatedVideos,omitempty"` // Returns if any videos were filtered due to RAI policies. RAIMediaFilteredCount int32 `json:"raiMediaFilteredCount,omitempty"` // Returns RAI failure reasons if any. RAIMediaFilteredReasons []string `json:"raiMediaFilteredReasons,omitempty"` }
Response with generated videos.
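Video generation is a long-running operation. A heavily hedged sketch of starting a job and polling it: the Models.GenerateVideos and Operations.GetVideosOperation method names and signatures are assumptions not documented in this section, the model name is an example, and context, time, and fmt imports are assumed.

// generateVideo starts a generation job and polls until the operation is done.
func generateVideo(ctx context.Context, client *genai.Client) (*genai.Video, error) {
	// Signature assumed: (ctx, model, prompt, image, config).
	op, err := client.Models.GenerateVideos(ctx, "veo-2.0-generate-001", "a drone shot over a glacier", nil, nil)
	if err != nil {
		return nil, err
	}
	for !op.Done {
		time.Sleep(10 * time.Second)
		// Refresh the operation; method name assumed.
		if op, err = client.Operations.GetVideosOperation(ctx, op, nil); err != nil {
			return nil, err
		}
	}
	if op.Response == nil || len(op.Response.GeneratedVideos) == 0 {
		return nil, fmt.Errorf("no videos generated")
	}
	return op.Response.GeneratedVideos[0].Video, nil
}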
type GenerateVideosSource ¶ added in v1.17.0
type GenerateVideosSource struct { // Optional. The text prompt for generating the videos. // Optional if image or video is provided. Prompt string `json:"prompt,omitempty"` // Optional. The input image for generating the videos. // Optional if prompt is provided. Not allowed if video is provided. Image *Image `json:"image,omitempty"` // Optional. The input video for video extension use cases. // Optional if prompt is provided. Not allowed if image is provided. Video *Video `json:"video,omitempty"` }
A set of source input(s) for video generation.
type GeneratedImage ¶ added in v0.1.0
type GeneratedImage struct { // The output image data. Image *Image `json:"image,omitempty"` // Optional. Responsible AI filter reason if the image is filtered out of the // response. RAIFilteredReason string `json:"raiFilteredReason,omitempty"` // Optional. Safety attributes of the image. Lists of RAI categories and their // scores of each content. SafetyAttributes *SafetyAttributes `json:"safetyAttributes,omitempty"` // Optional. The rewritten prompt used for the image generation if the prompt // enhancer is enabled. EnhancedPrompt string `json:"enhancedPrompt,omitempty"` }
An output image.
type GeneratedImageMask ¶ added in v1.21.0
type GeneratedImageMask struct { // Optional. The generated image mask. Mask *Image `json:"mask,omitempty"` // Optional. The detected entities on the segmented area. Labels []*EntityLabel `json:"labels,omitempty"` }
A generated image mask.
type GeneratedVideo ¶ added in v0.7.0
type GeneratedVideo struct { // Optional. The output video Video *Video `json:"video,omitempty"` }
A generated video.
type GenerationConfig ¶
type GenerationConfig struct {
	// Optional. Config for model selection.
	ModelSelectionConfig *ModelSelectionConfig `json:"modelSelectionConfig,omitempty"`
	// Optional. If enabled, audio timestamps will be included in the request to the model.
	AudioTimestamp bool `json:"audioTimestamp,omitempty"`
	// Optional. Number of candidates to generate. If empty, the system will choose a default
	// value (currently 1).
	CandidateCount int32 `json:"candidateCount,omitempty"`
	// Optional. If enabled, the model will detect emotions and adapt its responses accordingly.
	EnableAffectiveDialog *bool `json:"enableAffectiveDialog,omitempty"`
	// Optional. Frequency penalties.
	FrequencyPenalty *float32 `json:"frequencyPenalty,omitempty"`
	// Optional. Logit probabilities.
	Logprobs *int32 `json:"logprobs,omitempty"`
	// Optional. The maximum number of output tokens to generate per message. If empty,
	// the API will use a default value. The default value varies by model.
	MaxOutputTokens int32 `json:"maxOutputTokens,omitempty"`
	// Optional. If specified, the given media resolution will be used.
	MediaResolution MediaResolution `json:"mediaResolution,omitempty"`
	// Optional. Presence penalties.
	PresencePenalty *float32 `json:"presencePenalty,omitempty"`
	// Optional. Output schema of the generated response. This is an alternative to `response_schema`
	// that accepts [JSON Schema](https://json-schema.org/). If set, `response_schema` must
	// be omitted, but `response_mime_type` is required. While the full JSON Schema may
	// be sent, not all features are supported. Specifically, only the following properties
	// are supported: `$id`, `$defs`, `$ref`, `$anchor`, `type`, `format`, `title`,
	// `description`, `enum` (for strings and numbers), `items`, `prefixItems`, `minItems`,
	// `maxItems`, `minimum`, `maximum`, `anyOf`, `oneOf` (interpreted the same as `anyOf`),
	// `properties`, `additionalProperties`, and `required`. The non-standard `propertyOrdering`
	// property may also be set. Cyclic references are unrolled to a limited degree and,
	// as such, may only be used within non-required properties. (Nullable properties are
	// not sufficient.) If `$ref` is set on a sub-schema, no other properties, except for
	// those starting with `$`, may be set.
	ResponseJsonSchema any `json:"responseJsonSchema,omitempty"`
	// Optional. If true, export the logprobs results in the response.
	ResponseLogprobs bool `json:"responseLogprobs,omitempty"`
	// Optional. Output response MIME type of the generated candidate text. Supported MIME types:
	// `text/plain` (default): text output; `application/json`: JSON response in the candidates.
	// The model needs to be prompted to output the appropriate response type,
	// otherwise the behavior is undefined. This is a preview feature.
	ResponseMIMEType string `json:"responseMimeType,omitempty"`
	// Optional. The modalities of the response.
	ResponseModalities []Modality `json:"responseModalities,omitempty"`
	// Optional. The `Schema` object allows the definition of input and output data types.
	// These types can be objects, but also primitives and arrays. Represents a select subset
	// of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). If
	// set, a compatible response_mime_type must also be set. Compatible MIME types:
	// `application/json`: schema for a JSON response.
	ResponseSchema *Schema `json:"responseSchema,omitempty"`
	// Optional. Routing configuration.
	RoutingConfig *GenerationConfigRoutingConfig `json:"routingConfig,omitempty"`
	// Optional. Seed.
	Seed *int32 `json:"seed,omitempty"`
	// Optional. The speech generation config.
	SpeechConfig *SpeechConfig `json:"speechConfig,omitempty"`
	// Optional. Stop sequences.
	StopSequences []string `json:"stopSequences,omitempty"`
	// Optional. Controls the randomness of predictions.
	Temperature *float32 `json:"temperature,omitempty"`
	// Optional. Config for thinking features. An error will be returned if this field is
	// set for models that don't support thinking.
	ThinkingConfig *GenerationConfigThinkingConfig `json:"thinkingConfig,omitempty"`
	// Optional. If specified, top-k sampling will be used.
	TopK *float32 `json:"topK,omitempty"`
	// Optional. If specified, nucleus sampling will be used.
	TopP *float32 `json:"topP,omitempty"`
}
Generation config. You can find API default values and more details at https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#generationconfig and https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/content-generation-parameters.
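To make the field types concrete, here is a minimal sketch of populating a GenerationConfig value directly (assuming the package is imported as genai); whether you build this struct yourself or set the equivalent fields on a request-level config depends on how you call the API.

// Minimal sketch of a GenerationConfig. Pointer-typed fields use the
// package's Ptr helper so an explicit zero can be told apart from "unset".
cfg := &genai.GenerationConfig{
	Temperature:      genai.Ptr[float32](0.2),  // lower values => less random sampling
	TopP:             genai.Ptr[float32](0.95), // nucleus sampling cutoff
	MaxOutputTokens:  256,                      // cap on generated tokens per message
	CandidateCount:   1,
	Seed:             genai.Ptr[int32](42),     // for more reproducible output
	StopSequences:    []string{"\n\n"},
	ResponseMIMEType: "application/json",       // ask for JSON output (preview feature)
}
_ = cfg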
type GenerationConfigRoutingConfig ¶
type GenerationConfigRoutingConfig struct {
	// Automated routing.
	AutoMode *GenerationConfigRoutingConfigAutoRoutingMode `json:"autoMode,omitempty"`
	// Manual routing.
	ManualMode *GenerationConfigRoutingConfigManualRoutingMode `json:"manualMode,omitempty"`
}
The configuration for routing the request to a specific model.
type GenerationConfigRoutingConfigAutoRoutingMode ¶
type GenerationConfigRoutingConfigAutoRoutingMode struct {
	// The model routing preference.
	ModelRoutingPreference string `json:"modelRoutingPreference,omitempty"`
}
When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference.
type GenerationConfigRoutingConfigManualRoutingMode ¶
type GenerationConfigRoutingConfigManualRoutingMode struct {
	// The model name to use. Only the public LLM models are accepted. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).
	ModelName string `json:"modelName,omitempty"`
}
When manual routing is set, the specified model will be used directly.
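Taken together, a routing configuration sets exactly one of the two modes. The sketch below is illustrative only: the preference string and model name are placeholders, not recommended values.

// Sketch: automated vs. manual routing (set one, not both).
autoRouted := &genai.GenerationConfigRoutingConfig{
	AutoMode: &genai.GenerationConfigRoutingConfigAutoRoutingMode{
		ModelRoutingPreference: "balanced", // assumed example preference value
	},
}
manuallyRouted := &genai.GenerationConfigRoutingConfig{
	ManualMode: &genai.GenerationConfigRoutingConfigManualRoutingMode{
		ModelName: "gemini-2.0-flash", // must be a supported public model
	},
}
_, _ = autoRouted, manuallyRouted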
type GenerationConfigThinkingConfig ¶ added in v1.7.0
type GenerationConfigThinkingConfig struct {
	// Optional. Indicates whether to include thoughts in the response. If true, thoughts
	// are returned only when available.
	IncludeThoughts bool `json:"includeThoughts,omitempty"`
	// Optional. Indicates the thinking budget in tokens.
	ThinkingBudget *int32 `json:"thinkingBudget,omitempty"`
}
Config for thinking features.
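As a rough sketch, enabling thought summaries with a token budget looks like this; it is only valid for models that support thinking, and the budget value is illustrative.

// Sketch: thinking config with an explicit token budget.
thinking := &genai.GenerationConfigThinkingConfig{
	IncludeThoughts: true,                   // return thought summaries when available
	ThinkingBudget:  genai.Ptr[int32](1024), // illustrative budget, in tokens
}
_ = thinking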
type GetBatchJobConfig ¶ added in v1.13.0
type GetBatchJobConfig struct {
	// Optional. Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}
Optional parameters.
type GetCachedContentConfig ¶ added in v0.1.0
type GetCachedContentConfig struct {
	// Optional. Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}
Optional parameters for caches.get method.
type GetFileConfig ¶ added in v1.0.0
type GetFileConfig struct {
	// Optional. Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}
Used to override the default configuration.
type GetModelConfig ¶ added in v0.4.0
type GetModelConfig struct {
	// Optional. Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}
Optional parameters for models.get method.
type GetOperationConfig ¶ added in v0.7.0
type GetOperationConfig struct {
	// Optional. Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}
type GetTuningJobConfig ¶ added in v1.16.0
type GetTuningJobConfig struct {
	// Optional. Used to override HTTP request options.
	HTTPOptions *HTTPOptions `json:"httpOptions,omitempty"`
}
Optional parameters for tunings.get method.
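The Get*Config types above all share the same shape: a single optional HTTPOptions field that overrides request options for one call. As a sketch (assuming ctx, client, and cacheName are defined elsewhere, and using caches.get as the example method), a per-call timeout override might look like this:

// Sketch: per-request HTTP override on a single caches.get call.
timeout := 10 * time.Second // illustrative timeout
cached, err := client.Caches.Get(ctx, cacheName, &genai.GetCachedContentConfig{
	HTTPOptions: &genai.HTTPOptions{Timeout: &timeout},
})
if err != nil {
	log.Fatal(err)
}
_ = cached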
type GoogleMaps ¶ added in v1.4.0
type GoogleMaps struct {
	// Optional. Auth config for the Google Maps tool.
	AuthConfig *AuthConfig `json:"authConfig,omitempty"`
}
Tool to support Google Maps in Model.
type GoogleRpcStatus ¶ added in v1.16.0
type GoogleRpcStatus struct {
	// The status code, which should be an enum value of google.rpc.Code.
	Code int32 `json:"code,omitempty"`
	// A list of messages that carry the error details. There is a common set of message
	// types for APIs to use.
	Details []map[string]any `json:"details,omitempty"`
	// A developer-facing error message, which should be in English. Any user-facing error
	// message should be localized and sent in the google.rpc.Status.details field, or localized
	// by the client.
	Message string `json:"message,omitempty"`
}
The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
type GoogleSearch ¶
type GoogleSearch struct {
	// Optional. Filter search results to a specific time range.
	// If customers set a start time, they must set an end time (and vice versa).
	TimeRangeFilter *Interval `json:"timeRangeFilter,omitempty"`
	// Optional. List of domains to be excluded from the search results.
	// The default limit is 2000 domains.
	ExcludeDomains []string `json:"excludeDomains,omitempty"`
}
Tool to support Google Search in Model. Powered by Google.
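As an illustration, the tool might be configured and attached to a request roughly as follows; the Tool wiring is an assumption of this sketch, and the excluded domain is a placeholder.

// Sketch: Google Search grounding with excluded domains.
search := &genai.GoogleSearch{
	ExcludeDomains: []string{"example.com"}, // drop results from these domains
}
tool := &genai.Tool{GoogleSearch: search} // assumes Tool exposes a GoogleSearch field
_ = tool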
type GoogleSearchRetrieval ¶
type GoogleSearchRetrieval struct {
	// Optional. Specifies the dynamic retrieval configuration for the given source.
	DynamicRetrievalConfig *DynamicRetrievalConfig `json:"dynamicRetrievalConfig,omitempty"`
}
Tool to retrieve public web data for grounding, powered by Google.
type GroundingChunk ¶
type GroundingChunk struct {
	// Grounding chunk from Google Maps.
	Maps *GroundingChunkMaps `json:"maps,omitempty"`
	// Grounding chunk from context retrieved by the retrieval tools.
	RetrievedContext *GroundingChunkRetrievedContext `json:"retrievedContext,omitempty"`
	// Grounding chunk from the web.
	Web *GroundingChunkWeb `json:"web,omitempty"`
}
Grounding chunk.
type GroundingChunkMaps ¶ added in v1.20.0
type GroundingChunkMaps struct {
	// Sources used to generate the place answer. This includes review snippets and photos
	// that were used to generate the answer, as well as URIs to flag content.
	PlaceAnswerSources *GroundingChunkMapsPlaceAnswerSources `json:"placeAnswerSources,omitempty"`
	// This Place's resource name, in `places/{place_id}` format. Can be used to look up
	// the Place.
	PlaceID string `json:"placeId,omitempty"`
	// Text of the chunk.
	Text string `json:"text,omitempty"`
	// Title of the chunk.
	Title string `json:"title,omitempty"`
	// URI reference of the chunk.
	URI string `json:"uri,omitempty"`
}
Chunk from Google Maps.
type GroundingChunkMapsPlaceAnswerSources ¶ added in v1.20.0
type GroundingChunkMapsPlaceAnswerSources struct {
	// A link where users can flag a problem with the generated answer.
	FlagContentURI string `json:"flagContentUri,omitempty"`
	// Snippets of reviews that are used to generate the answer.
	ReviewSnippets []*GroundingChunkMapsPlaceAnswerSourcesReviewSnippet `json:"reviewSnippets,omitempty"`
}
Sources used to generate the place answer.
type GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution ¶ added in v1.20.0
type GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution struct {
	// Name of the author of the Photo or Review.
	DisplayName string `json:"displayName,omitempty"`
	// Profile photo URI of the author of the Photo or Review.
	PhotoURI string `json:"photoUri,omitempty"`
	// URI of the author of the Photo or Review.
	URI string `json:"uri,omitempty"`
}
Author attribution for a photo or review.
type GroundingChunkMapsPlaceAnswerSourcesReviewSnippet ¶ added in v1.20.0
type GroundingChunkMapsPlaceAnswerSourcesReviewSnippet struct {
	// This review's author.
	AuthorAttribution *GroundingChunkMapsPlaceAnswerSourcesAuthorAttribution `json:"authorAttribution,omitempty"`
	// A link where users can flag a problem with the review.
	FlagContentURI string `json:"flagContentUri,omitempty"`
	// A link to show the review on Google Maps.
	GoogleMapsURI string `json:"googleMapsUri,omitempty"`
	// A string of formatted recent time, expressing the review time relative to the current
	// time in a form appropriate for the language and country.
	RelativePublishTimeDescription string `json:"relativePublishTimeDescription,omitempty"`
	// A reference representing this place review which may be used to look up this place
	// review again.
	Review string `json:"review,omitempty"`
}
Encapsulates a review snippet.
type GroundingChunkRetrievedContext ¶
type GroundingChunkRetrievedContext struct {
	// Output only. The full document name for the referenced Vertex AI Search document.
	DocumentName string `json:"documentName,omitempty"`
	// Additional context for the RAG retrieval result. This is only populated when using
	// the RAG retrieval tool.
	RAGChunk *RAGChunk `json:"ragChunk,omitempty"`
	// Text of the attribution.
	Text string `json:"text,omitempty"`
	// Title of the attribution.
	Title string `json:"title,omitempty"`
	// URI reference of the attribution.
	URI string `json:"uri,omitempty"`
}
Chunk from context retrieved by the retrieval tools.
type GroundingChunkWeb ¶
type GroundingChunkWeb struct {
	// Domain of the (original) URI.
	Domain string `json:"domain,omitempty"`
	// Title of the chunk.
	Title string `json:"title,omitempty"`
	// URI reference of the chunk.
	URI string `json:"uri,omitempty"`
}
Chunk from the web.
type GroundingMetadata ¶
type GroundingMetadata struct {
	// Optional. Output only. Resource name of the Google Maps widget context token to be
	// used with the PlacesContextElement widget to render contextual data. This is populated
	// only for Google Maps grounding.
	GoogleMapsWidgetContextToken string `json:"googleMapsWidgetContextToken,omitempty"`
	// List of supporting references retrieved from the specified grounding source.
	GroundingChunks []*GroundingChunk `json:"groundingChunks,omitempty"`
	// Optional. List of grounding supports.
	GroundingSupports []*GroundingSupport `json:"groundingSupports,omitempty"`
	// Optional. Output only. Retrieval metadata.
	RetrievalMetadata *RetrievalMetadata `json:"retrievalMetadata,omitempty"`
	// Optional. Queries executed by the retrieval tools.
	RetrievalQueries []string `json:"retrievalQueries,omitempty"`
	// Optional. Google Search entry point for follow-up web searches.
	SearchEntryPoint *SearchEntryPoint `json:"searchEntryPoint,omitempty"`
	// Optional. Web search queries for follow-up web searches.
	WebSearchQueries []string `json:"webSearchQueries,omitempty"`
}
Metadata returned to client when grounding is enabled.
type GroundingSupport ¶
type GroundingSupport struct {
	// Confidence scores of the support references. Ranges from 0 to 1, where 1 is the most confident.
	// For Gemini 2.0 and before, this list must have the same size as grounding_chunk_indices.
	// For Gemini 2.5 and after, this list will be empty and should be ignored.
	ConfidenceScores []float32 `json:"confidenceScores,omitempty"`
	// A list of indices (into 'grounding_chunk') specifying the citations associated with
	// the claim. For instance, [1,3,4] means that grounding_chunk[1], grounding_chunk[3], and
	// grounding_chunk[4] are the retrieved content attributed to the claim.
	GroundingChunkIndices []int32 `json:"groundingChunkIndices,omitempty"`
	// Segment of the content this support belongs to.
	Segment *Segment `json:"segment,omitempty"`
}
Grounding support.
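Reading these structures back out of a response might look like the sketch below; it assumes the first candidate of a *genai.GenerateContentResponse exposes its GroundingMetadata, and any field not documented above is an assumption.

// Sketch: mapping grounding supports back to their cited web chunks.
md := resp.Candidates[0].GroundingMetadata // assumed accessor path
if md != nil {
	for _, support := range md.GroundingSupports {
		for _, idx := range support.GroundingChunkIndices {
			chunk := md.GroundingChunks[idx]
			if chunk.Web != nil {
				fmt.Printf("claim %q cites %s (%s)\n", support.Segment.Text, chunk.Web.Title, chunk.Web.URI)
			}
		}
	}
}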
type HTTPOptions ¶ added in v0.1.0
type HTTPOptions struct {
	// Optional. BaseURL specifies the base URL for the API endpoint. If empty, defaults
	// to "https://generativelanguage.googleapis.com/" for the Gemini API backend, and to the
	// location-specific Vertex AI endpoint (e.g., "https://us-central1-aiplatform.googleapis.com/")
	// for the Vertex AI backend.
	BaseURL string `json:"baseUrl,omitempty"`
	// Optional. APIVersion specifies the version of the API to use. If empty, defaults
	// to "v1beta" for the Gemini API and "v1beta1" for Vertex AI.
	APIVersion string `json:"apiVersion,omitempty"`
	// Optional. Additional HTTP headers to be sent with the request.
	Headers http.Header `json:"headers,omitempty"`
	// Optional. Timeout for the request.
	Timeout *time.Duration `json:"timeout,omitempty"`
	// Optional. Extra parameters to add to the request body.
	// The structure must match the backend API's request structure.
	// - Vertex AI backend API docs: https://cloud.google.com/vertex-ai/docs/reference/rest
	// - Gemini API backend API docs: https://ai.google.dev/api/rest
	ExtraBody map[string]any `json:"extraBody,omitempty"`
	// Optional. A function that allows for request body customization.
	// It is executed after ExtraBody has been merged, offering more advanced
	// control over the request body than the static ExtraBody.
	ExtrasRequestProvider ExtrasRequestProvider `json:"-"`
}
HTTP options to be used in each of the requests.
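A populated HTTPOptions value might look like the sketch below. It can typically be supplied when constructing the client or per request via the *Config types shown earlier; both wiring points, and the extra-body key, are assumptions of this example.

// Sketch: building an HTTPOptions value.
headers := http.Header{}
headers.Set("X-Request-Source", "docs-example") // illustrative header
timeout := 30 * time.Second
opts := &genai.HTTPOptions{
	APIVersion: "v1beta",
	Headers:    headers,
	Timeout:    &timeout,
	ExtraBody: map[string]any{
		// Must mirror the backend API's request structure; this key is a placeholder.
		"labels": map[string]any{"team": "docs"},
	},
}
_ = opts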
type HTTPResponse ¶ added in v1.16.0
type HTTPResponse struct {
	// Optional. Used to retain the processed HTTP headers in the response.
	Headers http.Header `json:"headers,omitempty"`
	// Optional. The raw HTTP response body, in JSON format.
	Body string `json:"body,omitempty"`
}
A wrapper class for the HTTP response.
type HarmBlockMethod ¶
type HarmBlockMethod string
Specify if the threshold is used for probability or severity score. If not specified, the threshold is used for probability score.
const (
	// The harm block method is unspecified.
	HarmBlockMethodUnspecified HarmBlockMethod = "HARM_BLOCK_METHOD_UNSPECIFIED"
	// The harm block method uses both probability and severity scores.
	HarmBlockMethodSeverity HarmBlockMethod = "SEVERITY"
	// The harm block method uses the probability score.
	HarmBlockMethodProbability HarmBlockMethod = "PROBABILITY"
)
type HarmBlockThreshold ¶
type HarmBlockThreshold string
The harm block threshold.
const (
	// Unspecified harm block threshold.
	HarmBlockThresholdUnspecified HarmBlockThreshold = "HARM_BLOCK_THRESHOLD_UNSPECIFIED"
	// Block low threshold and above (i.e. block more).
	HarmBlockThresholdBlockLowAndAbove HarmBlockThreshold = "BLOCK_LOW_AND_ABOVE"
	// Block medium threshold and above.
	HarmBlockThresholdBlockMediumAndAbove HarmBlockThreshold = "BLOCK_MEDIUM_AND_ABOVE"
	// Block only high threshold (i.e. block less).
	HarmBlockThresholdBlockOnlyHigh HarmBlockThreshold = "BLOCK_ONLY_HIGH"
	// Block none.
	HarmBlockThresholdBlockNone HarmBlockThreshold = "BLOCK_NONE"
	// Turn off the safety filter.
	HarmBlockThresholdOff HarmBlockThreshold = "OFF"
)
type HarmCategory ¶
type HarmCategory string
Harm category.
const (
	// The harm category is unspecified.
	HarmCategoryUnspecified HarmCategory = "HARM_CATEGORY_UNSPECIFIED"
	// The harm category is hate speech.
	HarmCategoryHateSpeech HarmCategory = "HARM_CATEGORY_HATE_SPEECH"
	// The harm category is dangerous content.
	HarmCategoryDangerousContent HarmCategory = "HARM_CATEGORY_DANGEROUS_CONTENT"
	// The harm category is harassment.
	HarmCategoryHarassment HarmCategory = "HARM_CATEGORY_HARASSMENT"
	// The harm category is sexually explicit content.
	HarmCategorySexuallyExplicit HarmCategory = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
	// The harm category is civic integrity.
	// Deprecated: the election filter is no longer supported.
	HarmCategoryCivicIntegrity HarmCategory = "HARM_CATEGORY_CIVIC_INTEGRITY"
	// The harm category is image hate.
	HarmCategoryImageHate HarmCategory = "HARM_CATEGORY_IMAGE_HATE"
	// The harm category is image dangerous content.
	HarmCategoryImageDangerousContent HarmCategory = "HARM_CATEGORY_IMAGE_DANGEROUS_CONTENT"
	// The harm category is image harassment.
	HarmCategoryImageHarassment HarmCategory = "HARM_CATEGORY_IMAGE_HARASSMENT"
	// The harm category is image sexually explicit content.
	HarmCategoryImageSexuallyExplicit HarmCategory = "HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT"
)
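These categories are usually paired with a HarmBlockThreshold when configuring safety behavior. The sketch below assumes a SafetySetting type with Category and Threshold fields; only the constants themselves come from the listings above.

// Sketch: pairing harm categories with block thresholds.
safety := []*genai.SafetySetting{
	{Category: genai.HarmCategoryHateSpeech, Threshold: genai.HarmBlockThresholdBlockMediumAndAbove},
	{Category: genai.HarmCategoryDangerousContent, Threshold: genai.HarmBlockThresholdBlockOnlyHigh},
}
_ = safety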
type HarmProbability ¶
type HarmProbability string
Harm probability levels in the content.
const (
	// Harm probability unspecified.
	HarmProbabilityUnspecified HarmProbability = "HARM_PROBABILITY_UNSPECIFIED"
	// Negligible level of harm.
	HarmProbabilityNegligible HarmProbability = "NEGLIGIBLE"
	// Low level of harm.
	HarmProbabilityLow HarmProbability = "LOW"
	// Medium level of harm.
	HarmProbabilityMedium HarmProbability = "MEDIUM"
	// High level of harm.
	HarmProbabilityHigh HarmProbability = "HIGH"
)
type HarmSeverity ¶
type HarmSeverity string
Harm severity levels in the content.
const (
	// Harm severity unspecified.
	HarmSeverityUnspecified HarmSeverity = "HARM_SEVERITY_UNSPECIFIED"
	// Negligible level of harm severity.
	HarmSeverityNegligible HarmSeverity = "HARM_SEVERITY_NEGLIGIBLE"
	// Low level of harm severity.
	HarmSeverityLow HarmSeverity = "HARM_SEVERITY_LOW"
	// Medium level of harm severity.
	HarmSeverityMedium HarmSeverity = "HARM_SEVERITY_MEDIUM"
	// High level of harm severity.
	HarmSeverityHigh HarmSeverity = "HARM_SEVERITY_HIGH"
)
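Responses report these levels per category. The sketch below assumes the first candidate carries a SafetyRatings slice whose elements expose Category, Probability, and Severity; those field names are assumptions, while the constants are the ones listed above.

// Sketch: flagging candidates with high probability or severity scores.
for _, rating := range resp.Candidates[0].SafetyRatings {
	if rating.Probability == genai.HarmProbabilityHigh || rating.Severity == genai.HarmSeverityHigh {
		fmt.Printf("category %s flagged (probability=%s, severity=%s)\n",
			rating.Category, rating.Probability, rating.Severity)
	}
}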
type Image ¶
type Image struct {
	// Optional. The Cloud Storage URI of the image. `Image` can contain a value
	// for this field or the `image_bytes` field but not both.
	GCSURI string `json:"gcsUri,omitempty"`
	// Optional. The image bytes data. `Image` can contain a value for this field
	// or the `gcs_uri` field but not both.
	ImageBytes []byte `json:"imageBytes,omitempty"`
	// Optional. The MIME type of the image.
	MIMEType string `json:"mimeType,omitempty"`
}
An image.
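An Image references its data either by Cloud Storage URI or by raw bytes, never both. A minimal sketch (paths and bucket names are placeholders):

// Sketch: the two mutually exclusive ways to populate an Image.
fromGCS := &genai.Image{
	GCSURI:   "gs://my-bucket/cat.png", // placeholder URI
	MIMEType: "image/png",
}

data, err := os.ReadFile("cat.png") // placeholder local file
if err != nil {
	log.Fatal(err)
}
fromBytes := &genai.Image{
	ImageBytes: data,
	MIMEType:   "image/png",
}
_, _ = fromGCS, fromBytes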
type ImagePromptLanguage ¶ added in v0.1.0
type ImagePromptLanguage string
Enum that specifies the language of the text in the prompt.
const ( // Auto-detect the language. ImagePromptLanguageAuto ImagePromptLanguage = "auto" // English ImagePromptLanguageEn