<?xml version="1.0"?>
<doc>
    <assembly>
        <name>Mscc.GenerativeAI</name>
    </assembly>
    <members>
        <member name="P:Mscc.GenerativeAI.BaseModel.Model">
            <summary>
            Gets or sets the name of the model to use.
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.BaseModel.Name">
            <summary>
            Returns the name of the model.
            </summary>
            <returns>Name of the model.</returns>
        </member>
        <member name="P:Mscc.GenerativeAI.BaseModel.ApiKey">
            <summary>
            Sets the API key to use for the request.
            </summary>
            <remarks>
            The value can only be set or modified before the first request is made.
            </remarks>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.AddApiKeyHeader(System.Net.Http.HttpRequestMessage)">
            <summary>
            Specify API key in HTTP header
            </summary>
            <seealso href="https://cloud.google.com/docs/authentication/api-keys-use#using-with-rest">Using an API key with REST</seealso>
            <param name="request"><see cref="T:System.Net.Http.HttpRequestMessage"/> to send to the API.</param>
        </member>
        <member name="P:Mscc.GenerativeAI.BaseModel.AccessToken">
            <summary>
            Sets the access token to use for the request.
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.BaseModel.ProjectId">
            <summary>
            Sets the project ID to use for the request.
            </summary>
            <remarks>
            The value can only be set or modified before the first request is made.
            </remarks>
        </member>
        <member name="P:Mscc.GenerativeAI.BaseModel.Region">
            <summary>
            Returns the region to use for the request.
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.BaseModel.Timeout">
            <summary>
            Gets or sets the timespan to wait before the request times out.
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.ThrowIfUnsupportedRequest``1(``0)">
            <summary>
            Throws a <see cref="T:System.NotSupportedException"/>, if the functionality is not supported by combination of settings.
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.BaseModel"/> class.
            </summary>
            <param name="logger">Optional. Logger instance used for logging</param>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.#ctor(System.String,System.String,System.String,Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.BaseModel"/> class with project, region, and model.
            </summary>
            <param name="projectId">Identifier of the Google Cloud project to use for the request.</param>
            <param name="region">Region to use for the request.</param>
            <param name="model">Name of the model to use.</param>
            <param name="logger">Optional. Logger instance used for logging</param>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.ParseUrl(System.String,System.String)">
            <summary>
            Parses the URL template and replaces the placeholder with current values.
            Given two API endpoints for Google AI Gemini and Vertex AI Gemini this
            method uses regular expressions to replace placeholders in a URL template with actual values.
            </summary>
            <param name="url">API endpoint to parse.</param>
            <param name="method">Method part of the URL to inject</param>
            <returns>The parsed URL with placeholders replaced by current values.</returns>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.Serialize``1(``0)">
            <summary>
            Return serialized JSON string of request payload.
            </summary>
            <param name="request">Request payload to serialize.</param>
            <returns>Serialized JSON string of the request payload.</returns>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.Deserialize``1(System.Net.Http.HttpResponseMessage)">
            <summary>
            Return deserialized object from JSON response.
            </summary>
            <typeparam name="T">Type to deserialize response into.</typeparam>
            <param name="response">Response from an API call in JSON format.</param>
            <returns>An instance of type T.</returns>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.DefaultJsonSerializerOptions">
            <summary>
            Get default options for JSON serialization.
            </summary>
            <returns>Default options for JSON serialization.</returns>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.GetCredentialsFromFile(System.String)">
            <summary>
            Get credentials from specified file.
            </summary>
            <remarks>This would usually be the secret.json file from Google Cloud Platform.</remarks>
            <param name="credentialsFile">File with credentials to read.</param>
            <returns>Credentials read from file.</returns>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.GetAccessTokenFromAdc">
            <summary>
            This method uses the gcloud command-line tool to retrieve an access token from the Application Default Credentials (ADC).
            It is specific to Google Cloud Platform and allows easy authentication with the Gemini API on Google Cloud.
            Reference: https://cloud.google.com/docs/authentication
            </summary>
            <returns>The access token.</returns>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.RunExternalExe(System.String,System.String)">
            <summary>
            Run an external application as process in the underlying operating system, if possible.
            </summary>
            <param name="filename">The command or application to run.</param>
            <param name="arguments">Optional arguments given to the application to run.</param>
            <returns>Output from the application.</returns>
            <exception cref="T:System.Exception"></exception>
        </member>
        <member name="M:Mscc.GenerativeAI.BaseModel.Format(System.String,System.String)">
            <summary>
            Formatting string for logging purpose.
            </summary>
            <param name="filename">The command or application to run.</param>
            <param name="arguments">Optional arguments given to the application to run.</param>
            <returns>Formatted string containing parameter values.</returns>
        </member>
        <member name="T:Mscc.GenerativeAI.CachedContentModel">
            <summary>
            Content that has been preprocessed and can be used in subsequent request to GenerativeService.
            Cached content can be only used with model it was created for.
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.CachedContentModel.#ctor">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.CachedContentModel"/> class.
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.CachedContentModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.CachedContentModel"/> class.
            </summary>
            <param name="logger">Optional. Logger instance used for logging</param>
        </member>
        <member name="M:Mscc.GenerativeAI.CachedContentModel.Create(Mscc.GenerativeAI.CachedContent,System.Threading.CancellationToken)">
            <summary>
            Creates CachedContent resource.
            </summary>
            <param name="request">The cached content resource to create.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>The cached content resource created</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.CachedContentModel.Create(System.String,System.String,Mscc.GenerativeAI.Content,System.Collections.Generic.List{Mscc.GenerativeAI.Content},System.Collections.Generic.List{Mscc.GenerativeAI.ContentResponse},System.Nullable{System.TimeSpan},System.Nullable{System.DateTime},System.Threading.CancellationToken)">
            <summary>
            Creates CachedContent resource.
            </summary>
            <remarks>The minimum input token count for context caching is 32,768, and the maximum is the same as the maximum for the given model.</remarks>
            <param name="model">Required. The name of the `Model` to use for cached content Format: `models/{model}`</param>
            <param name="displayName">Optional. The user-generated meaningful display name of the cached content. Maximum 128 Unicode characters.</param>
            <param name="systemInstruction">Optional. Input only. Developer set system instruction. Currently, text only.</param>
            <param name="contents">Optional. Input only. The content to cache.</param>
            <param name="history">Optional. A chat history to initialize the session with.</param>
            <param name="ttl">Optional. Input only. New TTL for this resource, input only. A duration in seconds with up to nine fractional digits, ending with 's'</param>
            <param name="expireTime">Optional. Timestamp in UTC of when this resource is considered expired. This is always provided on output, regardless of what was sent on input.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>The created cached content resource.</returns>
            <exception cref="T:System.ArgumentException">Thrown when the <paramref name="model"/> is <see langword="null"/> or empty.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.CachedContentModel.List(System.Nullable{System.Int32},System.String,System.Threading.CancellationToken)">
            <summary>
            Lists CachedContents resources.
            </summary>
            <param name="pageSize">Optional. The maximum number of cached contents to return. The service may return fewer than this value. If unspecified, some default (under maximum) number of items will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.</param>
            <param name="pageToken">Optional. A page token, received from a previous `ListCachedContents` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListCachedContents` must match the call that provided the page token.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>A list of cached content resources.</returns>
        </member>
        <member name="M:Mscc.GenerativeAI.CachedContentModel.Get(System.String,System.Threading.CancellationToken)">
            <summary>
            Reads CachedContent resource.
            </summary>
            <param name="cachedContentName">Required. The resource name referring to the content cache entry. Format: `cachedContents/{id}`</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>The cached content resource.</returns>
            <exception cref="T:System.ArgumentException">Thrown when the <paramref name="cachedContentName"/> is <see langword="null"/> or empty.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.CachedContentModel.Update(Mscc.GenerativeAI.CachedContent,System.TimeSpan,System.String,System.Threading.CancellationToken)">
            <summary>
            Updates CachedContent resource (only expiration is updatable).
            </summary>
            <param name="request">The cached content resource to update.</param>
            <param name="ttl">Optional. Input only. New TTL for this resource, input only. A duration in seconds with up to nine fractional digits, ending with 's'</param>
            <param name="updateMask">Optional. The list of fields to update.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>The updated cached content resource.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.ArgumentException">Thrown when the Name of the <paramref name="request"/> is <see langword="null"/> or empty.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.CachedContentModel.Delete(System.String,System.Threading.CancellationToken)">
            <summary>
            Deletes CachedContent resource.
            </summary>
            <param name="cachedContentName">Required. The resource name referring to the content cache entry. Format: `cachedContents/{id}`</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>If successful, the response body is empty.</returns>
            <exception cref="T:System.ArgumentException">Thrown when the <paramref name="cachedContentName"/> is <see langword="null"/> or empty.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.ChatModel.AddApiKeyHeader(System.Net.Http.HttpRequestMessage)">
            <inheritdoc cref="T:Mscc.GenerativeAI.BaseModel"/>
        </member>
        <member name="M:Mscc.GenerativeAI.ChatModel.#ctor">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ChatModel"/> class.
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.ChatModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ChatModel"/> class.
            </summary>
            <param name="logger">Optional. Logger instance used for logging</param>
        </member>
        <member name="M:Mscc.GenerativeAI.ChatModel.Completions(Mscc.GenerativeAI.ChatCompletionsRequest,System.Threading.CancellationToken)">
            <summary>
            Generates a set of responses from the model given a chat history input.
            </summary>
            <param name="request">Required. The request to send to the API.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
        </member>
        <member name="T:Mscc.GenerativeAI.ApiVersion">
            <summary>
            Helper class to provide API versions.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.Model">
            <summary>
            Helper class to provide model names.
            Ref: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versioning#latest-version
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.Model.Imagen3Customization">
            <summary>
            Imagen 3 Generation is a Pre-GA. Allowlisting required.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.Model.ImageGeneration3">
            <summary>
            Imagen 3 Generation is a Pre-GA. Allowlisting required.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.Model.ImageGeneration3Fast">
            <summary>
            Imagen 3 Generation is a Pre-GA. Allowlisting required.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.Role">
            <summary>
            Possible roles.
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.CorporaModel.#ctor">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.CorporaModel"/> class.
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.CorporaModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.CorporaModel"/> class.
            </summary>
            <param name="logger">Optional. Logger instance used for logging</param>
        </member>
        <member name="M:Mscc.GenerativeAI.CorporaModel.Create(Mscc.GenerativeAI.Corpus,System.Threading.CancellationToken)">
            <summary>
            Creates an empty `Corpus`.
            </summary>
            <returns>The created `Corpus`.</returns>
        </member>
        <member name="M:Mscc.GenerativeAI.CorporaModel.Get(System.String,System.Threading.CancellationToken)">
            <summary>
            Gets information about a specific `Corpus`.
            </summary>
            <returns>Information about the specified `Corpus`.</returns>
        </member>
        <member name="M:Mscc.GenerativeAI.CorporaModel.List(System.Nullable{System.Int32},System.String,System.Threading.CancellationToken)">
            <summary>
            Lists all `Corpora` owned by the user.
            </summary>
            <returns>A list of `Corpora` owned by the user.</returns>
        </member>
        <member name="M:Mscc.GenerativeAI.CorporaModel.Delete(System.String,System.Boolean,System.Threading.CancellationToken)">
            <summary>
            Deletes a `Corpus`.
            </summary>
            <returns></returns>
        </member>
        <member name="M:Mscc.GenerativeAI.CorporaModel.Update(System.String,Mscc.GenerativeAI.Corpus,System.String,System.Threading.CancellationToken)">
            <summary>
            Updates a `Corpus`.
            </summary>
            <returns>The updated `Corpus`.</returns>
        </member>
        <member name="M:Mscc.GenerativeAI.CorporaModel.Query(Mscc.GenerativeAI.QueryCorpusRequest,System.Threading.CancellationToken)">
            <summary>
            Performs semantic search over a `Corpus`.
            </summary>
            <returns>The result of the semantic search over the `Corpus`.</returns>
        </member>
        <member name="T:Mscc.GenerativeAI.EmbeddingsModel">
            <summary>
            Generates embeddings from the model given an input.
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.EmbeddingsModel.#ctor">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.EmbeddingsModel"/> class.
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.EmbeddingsModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.EmbeddingsModel"/> class.
            </summary>
            <param name="logger">Optional. Logger instance used for logging</param>
        </member>
        <member name="M:Mscc.GenerativeAI.EmbeddingsModel.Embeddings(Mscc.GenerativeAI.GenerateEmbeddingsRequest,System.Threading.CancellationToken)">
            <summary>
            Generates embeddings from the model given an input.
            </summary>
            <param name="request">Required. The request to send to the API.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
        </member>
        <member name="T:Mscc.GenerativeAI.AdapterSize">
            <summary>
            Adapter size for tuning job.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.AdapterSize.AdapterSizeUnspecified">
            <summary>
            Unspecified adapter size.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.AdapterSize.AdapterSizeOne">
            <summary>
            Adapter size 1.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.AdapterSize.AdapterSizeFour">
            <summary>
            Adapter size 4.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.AdapterSize.AdapterSizeEight">
            <summary>
            Adapter size 8.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.AdapterSize.AdapterSizeSixteen">
            <summary>
            Adapter size 16.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.AnswerStyle">
            <summary>
            Style for grounded answers.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.AnswerStyle.AnswerStyleUnspecified">
            <summary>
            Unspecified answer style.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.AnswerStyle.Abstractive">
            <summary>
            Succinct but abstract style.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.AnswerStyle.Extractive">
            <summary>
            Very brief and extractive style.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.AnswerStyle.Verbose">
            <summary>
            Verbose style including extra details. The response may be formatted as a sentence, paragraph, multiple paragraphs, or bullet points, etc.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.BlockedReason">
            <summary>
            A list of reasons why content may have been blocked.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.BlockedReason.BlockedReasonUnspecified">
            <summary>
            BlockedReasonUnspecified means unspecified blocked reason.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.BlockedReason.Safety">
            <summary>
            Safety means candidates blocked due to safety.
            You can inspect <see cref="T:Mscc.GenerativeAI.SafetyRating"/>s to understand which safety category blocked it.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.BlockedReason.Other">
            <summary>
            Prompt was blocked due to unknown reasons.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.BlockedReason.Blocklist">
            <summary>
            Prompt was blocked due to the terms which are included from the terminology blocklist.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.BlockedReason.ProhibitedContent">
            <summary>
            Prompt was blocked due to prohibited content.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.BlockedReason.ImageSafety">
            <summary>
            Candidates blocked due to unsafe image generation content.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.DynamicRetrievalConfigMode">
            <summary>
            The mode of the predictor to be used in dynamic retrieval.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.DynamicRetrievalConfigMode.ModeUnspecified">
            <summary>
            Always trigger retrieval.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.DynamicRetrievalConfigMode.ModeDynamic">
            <summary>
            Run retrieval only when system decides it is necessary.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.FileResourceSource">
            <summary>
            Source of the File.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FileResourceSource.SourceUnspecified">
            <summary>
            Used if source is not specified.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FileResourceSource.Uploaded">
            <summary>
            Indicates the file is uploaded by the user.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FileResourceSource.Generated">
            <summary>
            Indicates the file is generated by Google.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.FinishReason">
            <summary>
            The reason why the model stopped generating tokens.
            If empty, the model has not stopped generating the tokens.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.FinishReasonUnspecified">
            <summary>
            Unspecified means the finish reason is unspecified.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.Stop">
            <summary>
            Stop means natural stop point of the model or provided stop sequence.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.MaxTokens">
            <summary>
            MaxTokens means the maximum number of tokens as specified in the request was reached.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.Safety">
            <summary>
            Safety means the token generation was stopped as the response was flagged for safety
            reasons. NOTE: When streaming the Candidate.Content will be empty if
            content filters blocked the output.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.Recitation">
            <summary>
            Recitation means the token generation was stopped as the response was flagged for
            unauthorized citations.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.Other">
            <summary>
            Other means all other reasons that stopped the token generation
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.Blocklist">
            <summary>
            The token generation was stopped as the response was flagged for the
            terms which are included from the terminology blocklist.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.ProhibitedContent">
            <summary>
            The token generation was stopped as the response was flagged for
            the prohibited contents.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.Spii">
            <summary>
            The token generation was stopped as the response was flagged for
            Sensitive Personally Identifiable Information (SPII) contents.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.MalformedFunctionCall">
            <summary>
            The function call generated by the model is invalid.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.Language">
            <summary>
            The response candidate content was flagged for using an unsupported language.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FinishReason.ImageSafety">
            <summary>
            Token generation stopped because generated images contain safety violations.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.FunctionCallingMode">
            <summary>
            Mode of function calling to define the execution behavior for function calling.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FunctionCallingMode.ModeUnspecified">
            <summary>
            Unspecified function calling mode. This value should not be used.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FunctionCallingMode.Auto">
            <summary>
            Default model behavior, model decides to predict either a function call
            or a natural language response.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FunctionCallingMode.Any">
            <summary>
            Model is constrained to always predicting a function call only.
            If "allowed_function_names" are set, the predicted function call will be
            limited to any one of "allowed_function_names", else the predicted
            function call will be any one of the provided "function_declarations".
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.FunctionCallingMode.None">
            <summary>
            Model will not predict any function call. Model behavior is same as when
            not passing any function declarations.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.HarmBlockMethod">
            <summary>
            Probability vs severity.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmBlockMethod.HarnBlockMethodUnspecified">
            <summary>
            The harm block method is unspecified.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmBlockMethod.Severity">
            <summary>
            The harm block method uses both probability and severity scores.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmBlockMethod.Probability">
            <summary>
            The harm block method uses the probability score.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.HarmBlockThreshold">
            <summary>
            Block at and beyond a specified harm probability.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmBlockThreshold.HarmBlockThresholdUnspecified">
            <summary>
            Threshold is unspecified.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmBlockThreshold.BlockLowAndAbove">
            <summary>
            Content with NEGLIGIBLE will be allowed.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmBlockThreshold.BlockMediumAndAbove">
            <summary>
            Content with NEGLIGIBLE and LOW will be allowed.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmBlockThreshold.BlockOnlyHigh">
            <summary>
            Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmBlockThreshold.BlockNone">
            <summary>
            All content will be allowed.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmBlockThreshold.None">
            <summary>
            Turn off the safety filter.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.HarmCategory">
            <summary>
            The category of a rating.
            Ref: https://ai.google.dev/api/rest/v1beta/HarmCategory
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategoryUnspecified">
            <summary>
            HarmCategoryUnspecified means the harm category is unspecified.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategoryHateSpeech">
            <summary>
            HarmCategoryHateSpeech means the harm category is hate speech.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategoryDangerousContent">
            <summary>
            HarmCategoryDangerousContent means the harm category is dangerous content.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategoryHarassment">
            <summary>
            HarmCategoryHarassment means the harm category is harassment.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategorySexuallyExplicit">
            <summary>
            HarmCategorySexuallyExplicit means the harm category is sexually explicit content.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategoryCivicIntegrity">
            <summary>
            Content that may be used to harm civic integrity.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategoryDerogatory">
            <summary>
            Negative or harmful comments targeting identity and/or protected attribute.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategoryToxicity">
            <summary>
            Content that is rude, disrespectful, or profane.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategoryViolence">
            <summary>
            Describes scenarios depicting violence against an individual or group, or general descriptions of gore.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategorySexual">
            <summary>
            Contains references to sexual acts or other lewd content.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategoryMedical">
            <summary>
            Promotes unchecked medical advice.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmCategory.HarmCategoryDangerous">
            <summary>
            Dangerous content that promotes, facilitates, or encourages harmful acts.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.HarmProbability">
            <summary>
            The probability that a piece of content is harmful.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmProbability.HarmProbabilityUnspecified">
            <summary>
            Unspecified means harm probability unspecified.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmProbability.Negligible">
            <summary>
            Negligible means negligible level of harm.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmProbability.Low">
            <summary>
            Low means low level of harm.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmProbability.Medium">
            <summary>
            Medium means medium level of harm.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmProbability.High">
            <summary>
            High means high level of harm.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.HarmSeverity">
            <summary>
            Harm severity levels.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmSeverity.HarmSeverityUnspecified">
            <summary>
            Unspecified means harm severity unspecified.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmSeverity.HarmSeverityNegligible">
            <summary>
            Negligible means negligible level of harm.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmSeverity.HarmSeverityLow">
            <summary>
            Low means low level of harm.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmSeverity.HarmSeverityMedium">
            <summary>
            Medium means medium level of harm.
            </summary>
        </member>
        <member name="F:Mscc.GenerativeAI.HarmSeverity.HarmSeverityHigh">
            <summary>
            High means high level of harm.
            </summary>
        </member>
<member name="T:Mscc.GenerativeAI.Language">
<summary>
Supported programming languages for the generated code.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Language.LanguageUnspecified">
<summary>
Unspecified language. This value should not be used.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Language.Python">
<summary>
Python >= 3.10, with numpy and simpy available.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.MediaResolution">
<summary>
The media resolution.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.MediaResolution.MediaResolutionUnspecified">
<summary>
Media resolution has not been set.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.MediaResolution.MediaResolutionLow">
<summary>
Media resolution set to low (64 tokens).
</summary>
</member>
<member name="F:Mscc.GenerativeAI.MediaResolution.MediaResolutionMedium">
<summary>
Media resolution set to medium (256 tokens).
</summary>
</member>
<member name="F:Mscc.GenerativeAI.MediaResolution.MediaResolutionHigh">
<summary>
Media resolution set to high (zoomed reframing with 256 tokens).
</summary>
</member>
<member name="T:Mscc.GenerativeAI.Modality">
<summary>
The modality associated with a token count.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Modality.ModalityUnspecified">
<summary>
Unspecified modality.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Modality.Text">
<summary>
Plain text.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Modality.Image">
<summary>
Image.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Modality.Video">
<summary>
Video.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Modality.Audio">
<summary>
Audio.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Modality.Document">
<summary>
Document, e.g. PDF.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.Operator">
<summary>
Defines the valid operators that can be applied to a key-value pair.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Operator.OperatorUnspecified">
<summary>
The default value. This value is unused.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Operator.Less">
<summary>
Supported by numeric.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Operator.LessEqual">
<summary>
Supported by numeric.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Operator.Equal">
<summary>
Supported by numeric and string.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Operator.GreaterEqual">
<summary>
Supported by numeric.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Operator.Greater">
<summary>
Supported by numeric.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Operator.NotEqual">
<summary>
Supported by numeric and string.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Operator.Includes">
<summary>
Supported by string only when <see cref="T:Mscc.GenerativeAI.CustomMetadata" /> value type for the given key has a stringListValue.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Operator.Excludes">
<summary>
Supported by string only when <see cref="T:Mscc.GenerativeAI.CustomMetadata" /> value type for the given key has a stringListValue.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.Outcome">
<summary>
Outcome of the code execution.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Outcome.OutcomeUnspecified">
<summary>
Unspecified status. This value should not be used.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Outcome.OutcomeOk">
<summary>
Code execution completed successfully.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Outcome.OutcomeFailed">
<summary>
Code execution finished but with a failure. `stderr` should contain the reason.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.Outcome.OutcomeDeadlineExceeded">
<summary>
Code execution ran for too long, and was cancelled. There may or may not be a partial output present.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.ParameterType">
<summary>
Type contains the list of OpenAPI data types as defined by https://spec.openapis.org/oas/v3.0.3#data-types
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ParameterType.TypeUnspecified">
<summary>
Unspecified means not specified, should not be used.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ParameterType.String">
<summary>
String means openAPI string type
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ParameterType.Number">
<summary>
Number means openAPI number type
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ParameterType.Integer">
<summary>
Integer means openAPI integer type
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ParameterType.Boolean">
<summary>
Boolean means openAPI boolean type
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ParameterType.Array">
<summary>
Array means openAPI array type
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ParameterType.Object">
<summary>
Object means openAPI object type
</summary>
</member>
<member name="T:Mscc.GenerativeAI.ReferenceType">
<summary>
Describes what the field reference contains.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.Path">
<summary>
Reference contains a GFS path or a local path.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.BlobRef">
<summary>
Reference points to a blobstore object. This could be either a v1 blob_ref or a v2 blobstore2_info. Clients should check blobstore2_info first, since v1 is being deprecated.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.Inline">
<summary>
Data is included into this proto buffer.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.GetMedia">
<summary>
Data should be accessed from the current service using the operation GetMedia.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.CompositeMedia">
<summary>
The content for this media object is stored across multiple partial media objects under the composite_media field.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.BigstoreRef">
<summary>
Reference points to a bigstore object.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.DiffVersionResponse">
<summary>
Indicates the data is stored in diff_version_response.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.DiffChecksumResponse">
<summary>
Indicates the data is stored in diff_checksums_response.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.DiffDownloadResponse">
<summary>
Indicates the data is stored in diff_download_response.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.DiffUploadRequest">
<summary>
Indicates the data is stored in diff_upload_request.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.DiffUploadResponse">
<summary>
Indicates the data is stored in diff_upload_response.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.CosmoBinaryReference">
<summary>
Indicates the data is stored in cosmo_binary_reference.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ReferenceType.ArbitraryBytes">
<summary>
Informs Scotty to generate a response payload with the size specified in the length field.
The contents of the payload are generated by Scotty and are undefined.
This is useful for testing download speeds between the user and Scotty
without involving a real payload source. Note: range is not supported when using arbitrary_bytes.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.ResponseModality">
<summary>
The requested modalities of the response.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ResponseModality.ModalityUnspecified">
<summary>
Default value.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ResponseModality.Text">
<summary>
Indicates the model should return text.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ResponseModality.Image">
<summary>
Indicates the model should return images.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.ResponseModality.Audio">
<summary>
Indicates the model should return audio.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.State">
<summary>
The state of the tuned model.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.State.StateUnspecified">
<summary>
The default value. This value is unused.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.State.Creating">
<summary>
The model is being created.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.State.Active">
<summary>
The model is ready to be used.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.State.Failed">
<summary>
The model failed to be created.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.StateChunk">
<summary>
Output only. Current state of the Chunk.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateChunk.StateUnspecified">
<summary>
The default value. This value is used if the state is omitted.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateChunk.StatePendingProcessing">
<summary>
Chunk is being processed (embedding and vector storage).
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateChunk.StateActive">
<summary>
Chunk is processed and available for querying.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateChunk.StateFailed">
<summary>
Chunk failed processing.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.StateFileResource">
<summary>
States for the lifecycle of a File.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateFileResource.StateUnspecified">
<summary>
The default value. This value is used if the state is omitted.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateFileResource.Processing">
<summary>
File is being processed and cannot be used for inference yet.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateFileResource.Active">
<summary>
File is processed and available for inference.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateFileResource.Failed">
<summary>
File failed processing.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.StateGeneratedFile">
<summary>
States for the lifecycle of a GeneratedFile.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateGeneratedFile.StateUnspecified">
<summary>
The default value. This value is used if the state is omitted.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateGeneratedFile.Generating">
<summary>
Being generated.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateGeneratedFile.Generated">
<summary>
Generated and is ready for download.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateGeneratedFile.Failed">
<summary>
Failed to generate the GeneratedFile.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.StateTuningJob">
<summary>
The state of the tuning job.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateTuningJob.StateUnspecified">
<summary>
The default value. This value is unused.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateTuningJob.JobStateRunning">
<summary>
The tuning job is running.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateTuningJob.JobStatePending">
<summary>
The tuning job is pending.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateTuningJob.JobStateFailed">
<summary>
The tuning job failed.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.StateTuningJob.JobStateCancelled">
<summary>
The tuning job has been cancelled.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.TaskType">
<summary>
Type of task for which the embedding will be used.
Ref: https://ai.google.dev/api/rest/v1beta/TaskType
</summary>
</member>
<member name="F:Mscc.GenerativeAI.TaskType.TaskTypeUnspecified">
<summary>
Unset value, which will default to one of the other enum values.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.TaskType.RetrievalQuery">
<summary>
Specifies the given text is a query in a search/retrieval setting.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.TaskType.RetrievalDocument">
<summary>
Specifies the given text is a document from the corpus being searched.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.TaskType.SemanticSimilarity">
<summary>
Specifies the given text will be used for STS.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.TaskType.Classification">
<summary>
Specifies that the given text will be classified.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.TaskType.Clustering">
<summary>
Specifies that the embeddings will be used for clustering.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.TaskType.QuestionAnswering">
<summary>
Specifies that the given text will be used for question answering.
</summary>
</member>
<member name="F:Mscc.GenerativeAI.TaskType.FactVerification">
<summary>
Specifies that the given text will be used for fact verification.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.BlockedPromptException.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.BlockedPromptException" /> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.BlockedPromptException.#ctor(System.String)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.BlockedPromptException" /> class
with a specific message that describes the current exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.BlockedPromptException.#ctor(System.String,System.Exception)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.BlockedPromptException" /> class
with a specific message that describes the current exception and an inner exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.BlockedPromptException.#ctor(Mscc.GenerativeAI.PromptFeedback)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.BlockedPromptException" /> class
with the block reason message that describes the current exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.MaxUploadFileSizeException.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.MaxUploadFileSizeException" /> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.MaxUploadFileSizeException.#ctor(System.String)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.MaxUploadFileSizeException" /> class
with a specific message that describes the current exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.MaxUploadFileSizeException.#ctor(System.String,System.Exception)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.MaxUploadFileSizeException" /> class
with a specific message that describes the current exception and an inner exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.UploadFileException.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.UploadFileException" /> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.UploadFileException.#ctor(System.String)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.UploadFileException" /> class
with a specific message that describes the current exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.UploadFileException.#ctor(System.String,System.Exception)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.UploadFileException" /> class
with a specific message that describes the current exception and an inner exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.ValueErrorException.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ValueErrorException" /> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.ValueErrorException.#ctor(System.String)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ValueErrorException" /> class
with a specific message that describes the current exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.ValueErrorException.#ctor(System.String,System.Exception)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ValueErrorException" /> class
with a specific message that describes the current exception and an inner exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.StopCandidateException.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.StopCandidateException" /> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.StopCandidateException.#ctor(System.String)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.StopCandidateException" /> class
with a specific message that describes the current exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.StopCandidateException.#ctor(System.String,System.Exception)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.StopCandidateException" /> class
with a specific message that describes the current exception and an inner exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.StopCandidateException.#ctor(Mscc.GenerativeAI.Candidate)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.StopCandidateException" /> class
with the finish message that describes the current exception.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.FilesModel.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.FilesModel"/> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.FilesModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.FilesModel"/> class.
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="M:Mscc.GenerativeAI.FilesModel.ListFiles(System.Nullable{System.Int32},System.String,System.Threading.CancellationToken)">
<summary>
Lists the metadata for Files owned by the requesting project.
</summary>
<param name="pageSize">The maximum number of Models to return (per page).</param>
<param name="pageToken">A page token, received from a previous ListFiles call. Provide the pageToken returned by one request as an argument to the next request to retrieve the next page.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>List of files in File API.</returns>
<exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
</member>
<member name="M:Mscc.GenerativeAI.FilesModel.GetFile(System.String,System.Threading.CancellationToken)">
<summary>
Gets the metadata for the given File.
</summary>
<param name="file">Required. The resource name of the file to get. This name should match a file name returned by the ListFiles method. Format: files/file-id.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>Metadata for the given file.</returns>
<exception cref="T:System.ArgumentException">Thrown when the <paramref name="file"/> is <see langword="null"/> or empty.</exception>
<exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
</member>
<member name="M:Mscc.GenerativeAI.FilesModel.DeleteFile(System.String,System.Threading.CancellationToken)">
<summary>
Deletes a file.
</summary>
<param name="file">Required. The resource name of the file to get. This name should match a file name returned by the ListFiles method. Format: files/file-id.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>If successful, the response body is empty.</returns>
<exception cref="T:System.ArgumentException">Thrown when the <paramref name="file"/> is <see langword="null"/> or empty.</exception>
<exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
</member>
<member name="M:Mscc.GenerativeAI.GeneratedFilesModel.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GeneratedFilesModel"/> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.GeneratedFilesModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GeneratedFilesModel"/> class.
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="M:Mscc.GenerativeAI.GeneratedFilesModel.ListFiles(System.Nullable{System.Int32},System.String,System.Threading.CancellationToken)">
<summary>
Lists the generated files owned by the requesting project.
</summary>
<param name="pageSize">The maximum number of Models to return (per page).</param>
<param name="pageToken">A page token, received from a previous ListFiles call. Provide the pageToken returned by one request as an argument to the next request to retrieve the next page.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>List of files in File API.</returns>
<exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeAIExtensions.GuardApiKey(System.String)">
<summary>
Checks whether the API key has the right conditions.
</summary>
<param name="apiKey">API key for the Gemini API.</param>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="apiKey"/> is null.</exception>
<exception cref="T:System.ArgumentOutOfRangeException">Thrown when the <paramref name="apiKey"/> is empty.</exception>
<exception cref="T:System.ArgumentException">Thrown when the <paramref name="apiKey"/> has extra whitespace at the start or end, doesn't start with 'AIza', or has the wrong length.</exception>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeAIExtensions.GuardSupported(Mscc.GenerativeAI.GenerativeModel,System.String)">
<summary>
Checks if the functionality is supported by the model.
</summary>
<param name="model">Model to use.</param>
<param name="message">Message to use.</param>
<exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeAIExtensions.GuardInlineDataMimeType(System.String)">
<summary>
Checks if the IANA standard MIME type is supported by the model.
</summary>
<remarks>
See <see href="https://ai.google.dev/gemini-api/docs/vision"/> for a list of supported image data and video format MIME types.
See <see href="https://ai.google.dev/gemini-api/docs/audio"/> for a list of supported audio format MIME types.
</remarks>
<param name="mimeType">The IANA standard MIME type to check.</param>
<exception cref="T:System.NotSupportedException">Thrown when the <paramref name="mimeType"/> is not supported by the API.</exception>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeAIExtensions.GuardMimeType(System.String)">
<summary>
Checks if the IANA standard MIME type is supported by the model.
</summary>
<remarks>
See <see href="https://ai.google.dev/gemini-api/docs/vision"/> for a list of supported image data and video format MIME types.
See <see href="https://ai.google.dev/gemini-api/docs/audio"/> for a list of supported audio format MIME types.
See also <seealso href="https://ai.google.dev/gemini-api/docs/document-processing"/> for a list of supported MIME types for document processing.
Ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/MIME_types/Common_types
</remarks>
<param name="mimeType">The IANA standard MIME type to check.</param>
<exception cref="T:System.NotSupportedException">Thrown when the <paramref name="mimeType"/> is not supported by the API.</exception>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeAIExtensions.GuardSupportedLanguage(System.String)">
<summary>
Checks if the language is supported by the model.
</summary>
<param name="language">Language to use.</param>
<exception cref="T:System.NotSupportedException">Thrown when the <paramref name="language"/> is not supported by the API.</exception>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeAIExtensions.EnsureSuccessAsync(System.Net.Http.HttpResponseMessage,System.String,System.Boolean)">
<summary>
Throws an exception if the IsSuccessStatusCode property for the HTTP response is false.
</summary>
<param name="response">The HTTP response message to check.</param>
<param name="errorMessage">Custom error message to prepend the <see cref="T:System.Net.Http.HttpRequestException"/> message.</param>
<param name="includeResponseContent">Include the response content in the error message.</param>
<returns>The HTTP response message if the call is successful.</returns>
<exception cref="T:System.Net.Http.HttpRequestException"></exception>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeAIExtensions.Truncate(System.String,System.Int32,System.String)">
<summary>
Truncates/abbreviates a string and places a user-facing indicator at the end.
</summary>
<param name="value">The string to truncate.</param>
<param name="maxLength">Maximum length of the resulting string.</param>
<param name="suffix">Optional. Indicator to use, by default the ellipsis …</param>
<returns>The truncated string</returns>
<exception cref="T:System.ArgumentException">Thrown when the <paramref name="suffix"/> parameter is null or empty.</exception>
<exception cref="T:System.ArgumentOutOfRangeException">Thrown when the length of the <paramref name="suffix"/> is larger than the <paramref name="maxLength"/>.</exception>
</member>
<member name="P:Mscc.GenerativeAI.GenerativeModel.Version">
<inheritdoc />
</member>
<member name="P:Mscc.GenerativeAI.GenerativeModel.UseServerSentEventsFormat">
<summary>
You can enable Server Sent Events (SSE) for gemini-1.0-pro
</summary>
<remarks>
See <a href="https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events">Server-sent Events</a>
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.GenerativeModel.UseJsonMode">
<summary>
Activate JSON Mode (default = no)
</summary>
</member>
<member name="P:Mscc.GenerativeAI.GenerativeModel.UseGrounding">
<summary>
Activate Grounding with Google Search (default = no)
</summary>
</member>
<member name="P:Mscc.GenerativeAI.GenerativeModel.UseGoogleSearch">
<summary>
Activate Google Search (default = no)
</summary>
</member>
<member name="P:Mscc.GenerativeAI.GenerativeModel.UseRealtime">
<summary>
Enable realtime stream using Multimodal Live API
</summary>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeModel.ThrowIfUnsupportedRequest``1(``0)">
<inheritdoc/>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeModel.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerativeModel"/> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerativeModel"/> class.
The default constructor attempts to read <c>.env</c> file and environment variables.
Sets default values, if available.
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeModel.#ctor(System.String,System.String,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.Content,Mscc.GenerativeAI.ToolConfig,System.Boolean,Microsoft.Extensions.Logging.ILogger)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerativeModel"/> class with access to Google AI Gemini API.
</summary>
<param name="apiKey">API key provided by Google AI Studio</param>
<param name="model">Model to use</param>
<param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
<param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
<param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
<param name="systemInstruction">Optional. </param>
<param name="toolConfig">Optional. Configuration of tools.</param>
<param name="vertexAi">Optional. Flag to indicate use of Vertex AI in express mode.</param>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.#ctor(System.String,System.String,System.String,System.String,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.Content,Mscc.GenerativeAI.ToolConfig,Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerativeModel"/> class with access to Vertex AI Gemini API.
            </summary>
            <param name="projectId">Identifier of the Google Cloud project</param>
            <param name="region">Region to use</param>
            <param name="model">Model to use</param>
            <param name="endpoint">Optional. Endpoint ID of the tuned model to use.</param>
            <param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
            <param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
            <param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
            <param name="systemInstruction">Optional. </param>
            <param name="toolConfig">Optional. Configuration of tools.</param>
            <param name="logger">Optional. Logger instance used for logging</param>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.#ctor(Mscc.GenerativeAI.CachedContent,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerativeModel"/> class given cached content.
            </summary>
            <param name="cachedContent">Content that has been preprocessed.</param>
            <param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
            <param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
            <param name="logger">Optional. Logger instance used for logging</param>
            <exception cref="T:System.ArgumentNullException">Thrown when <paramref name="cachedContent"/> is null.</exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.#ctor(Mscc.GenerativeAI.TuningJob,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerativeModel"/> class given a tuning job.
            </summary>
            <param name="tuningJob">Tuning Job to use with the model.</param>
            <param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
            <param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
            <param name="logger">Optional. Logger instance used for logging</param>
            <exception cref="T:System.ArgumentNullException">Thrown when <paramref name="tuningJob"/> is null.</exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.ListTunedModels(System.Nullable{System.Int32},System.String,System.String,System.Threading.CancellationToken)">
            <summary>
            Get a list of available tuned models and description.
            </summary>
            <returns>List of available tuned models.</returns>
            <param name="pageSize">The maximum number of Models to return (per page).</param>
            <param name="pageToken">A page token, received from a previous ListModels call. Provide the pageToken returned by one request as an argument to the next request to retrieve the next page.</param>
            <param name="filter">Optional. A filter is a full text search over the tuned model's description and display name. By default, results will not include tuned models shared with everyone. Additional operators: - owner:me - writers:me - readers:me - readers:everyone</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <exception cref="T:System.NotSupportedException"></exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.ListModels(System.Boolean,System.Nullable{System.Int32},System.String,System.String,System.Threading.CancellationToken)">
            <summary>
            Lists the [`Model`s](https://ai.google.dev/gemini-api/docs/models/gemini) available through the Gemini API.
            </summary>
            <returns>List of available models.</returns>
            <param name="tuned">Flag, whether models or tuned models shall be returned.</param>
            <param name="pageSize">The maximum number of `Models` to return (per page). If unspecified, 50 models will be returned per page. This method returns at most 1000 models per page, even if you pass a larger page_size.</param>
            <param name="pageToken">A page token, received from a previous ListModels call. Provide the pageToken returned by one request as an argument to the next request to retrieve the next page.</param>
            <param name="filter">Optional. A filter is a full text search over the tuned model's description and display name. By default, results will not include tuned models shared with everyone. Additional operators: - owner:me - writers:me - readers:me - readers:everyone</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GetModel(System.String,System.Threading.CancellationToken)">
            <summary>
            Gets information about a specific `Model` such as its version number, token limits, [parameters](https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters) and other metadata. Refer to the [Gemini models guide](https://ai.google.dev/gemini-api/docs/models/gemini) for detailed model information.
            </summary>
            <param name="model">Required. The resource name of the model. This name should match a model name returned by the ListModels method. Format: models/model-id or tunedModels/my-model-id</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns></returns>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.CopyModel(Mscc.GenerativeAI.CopyModelRequest,System.Threading.CancellationToken)">
            <summary>
            Copies a model in Vertex AI Model Registry.
            </summary>
            <param name="request"></param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns></returns>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.CreateTunedModel(Mscc.GenerativeAI.CreateTunedModelRequest,System.Threading.CancellationToken)">
            <summary>
            Creates a tuned model.
            </summary>
            <param name="request"></param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns></returns>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.DeleteTunedModel(System.String,System.Threading.CancellationToken)">
            <summary>
            Deletes a tuned model.
            </summary>
            <param name="model">Required. The resource name of the model. Format: tunedModels/my-model-id</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>If successful, the response body is empty.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="model"/> is null or empty.</exception>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.UpdateTunedModel(System.String,Mscc.GenerativeAI.ModelResponse,System.String,System.Threading.CancellationToken)">
            <summary>
            Updates a tuned model.
            </summary>
            <param name="model">Required. The resource name of the model. Format: tunedModels/my-model-id</param>
            <param name="tunedModel">The tuned model to update.</param>
            <param name="updateMask">Optional. The list of fields to update. This is a comma-separated list of fully qualified names of fields. Example: "user.displayName,photo".</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns></returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="model"/> is null or empty.</exception>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.TransferOwnership(System.String,System.String,System.Threading.CancellationToken)">
            <summary>
            Transfers ownership of the tuned model. This is the only way to change ownership of the tuned model. The current owner will be downgraded to writer role.
            </summary>
            <param name="model">Required. The resource name of the tuned model to transfer ownership. Format: tunedModels/my-model-id</param>
            <param name="emailAddress">Required. The email address of the user to whom the tuned model is being transferred to.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>If successful, the response body is empty.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="model"/> or <paramref name="emailAddress"/> is null or empty.</exception>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.UploadFile(System.String,System.String,System.Boolean,System.Threading.CancellationToken)">
            <summary>
            Uploads a file to the File API backend.
            </summary>
            <param name="uri">URI or path to the file to upload.</param>
            <param name="displayName">A name displayed for the uploaded file.</param>
            <param name="resumable">Flag indicating whether to use resumable upload.</param>
            <param name="cancellationToken">A cancellation token to cancel the upload.</param>
            <returns>A URI of the uploaded file.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="uri"/> is null or empty.</exception>
            <exception cref="T:System.IO.FileNotFoundException">Thrown when the file <paramref name="uri"/> is not found.</exception>
            <exception cref="T:Mscc.GenerativeAI.MaxUploadFileSizeException">Thrown when the file size exceeds the maximum allowed size.</exception>
            <exception cref="T:Mscc.GenerativeAI.UploadFileException">Thrown when the file upload fails.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.UploadFile(System.IO.Stream,System.String,System.String,System.Boolean,System.Threading.CancellationToken)">
            <summary>
            Uploads a stream to the File API backend.
            </summary>
            <param name="stream">Stream to upload.</param>
            <param name="displayName">A name displayed for the uploaded file.</param>
            <param name="mimeType">The MIME type of the stream content.</param>
            <param name="resumable">Flag indicating whether to use resumable upload.</param>
            <param name="cancellationToken">A cancellation token to cancel the upload.</param>
            <returns>A URI of the uploaded file.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="stream"/> is null or empty.</exception>
            <exception cref="T:Mscc.GenerativeAI.MaxUploadFileSizeException">Thrown when the <paramref name="stream"/> size exceeds the maximum allowed size.</exception>
            <exception cref="T:Mscc.GenerativeAI.UploadFileException">Thrown when the <paramref name="stream"/> upload fails.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.ListFiles(System.Nullable{System.Int32},System.String,System.Threading.CancellationToken)">
            <summary>
            Lists the metadata for Files owned by the requesting project.
            </summary>
            <param name="pageSize">The maximum number of Models to return (per page).</param>
            <param name="pageToken">A page token, received from a previous ListFiles call. Provide the pageToken returned by one request as an argument to the next request to retrieve the next page.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>List of files in File API.</returns>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GetFile(System.String,System.Threading.CancellationToken)">
            <summary>
            Gets the metadata for the given File.
            </summary>
            <param name="file">Required. The resource name of the file to get. This name should match a file name returned by the ListFiles method. Format: files/file-id.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>Metadata for the given file.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="file"/> is null or empty.</exception>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.DeleteFile(System.String,System.Threading.CancellationToken)">
            <summary>
            Deletes a file.
            </summary>
            <param name="file">Required. The resource name of the file to get. This name should match a file name returned by the ListFiles method. Format: files/file-id.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>If successful, the response body is empty.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="file"/> is null or empty.</exception>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateContent(Mscc.GenerativeAI.GenerateContentRequest,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
            <summary>
            Generates a model response given an input <see cref="T:Mscc.GenerativeAI.GenerateContentRequest"/>.
            </summary>
            <remarks>
            Refer to the [text generation guide](https://ai.google.dev/gemini-api/docs/text-generation) for detailed usage information.
            Input capabilities differ between models, including tuned models.
            Refer to the [model guide](https://ai.google.dev/gemini-api/docs/models/gemini) and [tuning guide](https://ai.google.dev/gemini-api/docs/model-tuning) for details.
            </remarks>
            <param name="request">Required. The request to send to the API.</param>
            <param name="requestOptions">Options for the request.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>Response from the model for generated content.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model or combination of features.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateContent(System.String,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.ToolConfig,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
            <summary>
            Generates a response from the model given an input prompt and other parameters.
            </summary>
            <param name="prompt">Required. String to process.</param>
            <param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
            <param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
            <param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
            <param name="toolConfig">Optional. Configuration of tools.</param>
            <param name="requestOptions">Options for the request.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>Response from the model for generated content.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="prompt"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateContent(System.Collections.Generic.List{Mscc.GenerativeAI.IPart},Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.ToolConfig,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
            <remarks/>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateContentStream(Mscc.GenerativeAI.GenerateContentRequest,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
            <summary>
            Generates a streamed response from the model given an input GenerateContentRequest.
            This method uses a MemoryStream and StreamContent to send a streaming request to the API.
            It runs asynchronously sending and receiving chunks to and from the API endpoint, which allows non-blocking code execution.
            </summary>
            <param name="request">The request to send to the API.</param>
            <param name="requestOptions">Options for the request.</param>
            <param name="cancellationToken"></param>
            <returns>Stream of GenerateContentResponse with chunks asynchronously.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model or combination of features.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateContentStream(System.String,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.ToolConfig,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
            <remarks/>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateContentStream(System.Collections.Generic.List{Mscc.GenerativeAI.IPart},Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.ToolConfig,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
            <remarks/>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateContentStreamSSE(Mscc.GenerativeAI.GenerateContentRequest,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
            <summary>
            Generates a response from the model given an input GenerateContentRequest.
            </summary>
            <param name="request">Required. The request to send to the API.</param>
            <param name="requestOptions">Options for the request.</param>
            <param name="cancellationToken"></param>
            <returns>Response from the model for generated content.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.BidiGenerateContent">
            <summary>
            </summary>
            <returns></returns>
            <exception cref="T:System.NotSupportedException"></exception>
            <exception cref="T:System.NotImplementedException"></exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateImages(Mscc.GenerativeAI.GenerateImagesRequest,System.Threading.CancellationToken)">
            <summary>
            </summary>
            <param name="request"></param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns></returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateImages(System.String,System.String,Mscc.GenerativeAI.GenerateImagesConfig,System.Threading.CancellationToken)">
            <summary>
            Generates images from text prompt.
            </summary>
            <param name="model">Required. Model to use.</param>
            <param name="prompt">Required. String to process.</param>
            <param name="config">Configuration of image generation.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>Response from the model for generated content.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="model"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="prompt"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateImages(System.String,System.Int32,System.String,System.String,System.Nullable{System.Int32},System.String,System.String,System.String,System.Nullable{System.Boolean},System.Nullable{System.Boolean},System.Threading.CancellationToken)">
            <summary>
            Generates images from text prompt.
            </summary>
            <param name="prompt">Required. String to process.</param>
            <param name="numberOfImages">Number of images to generate. Range: 1..8.</param>
            <param name="negativePrompt">A description of what you want to omit in the generated images.</param>
            <param name="aspectRatio">Aspect ratio for the image.</param>
            <param name="guidanceScale">Controls the strength of the prompt. Suggested values are - * 0-9 (low strength) * 10-20 (medium strength) * 21+ (high strength)</param>
            <param name="language">Language of the text prompt for the image.</param>
            <param name="safetyFilterLevel">Adds a filter level to Safety filtering.</param>
            <param name="personGeneration">Allow generation of people by the model.</param>
            <param name="enhancePrompt">Option to enhance your provided prompt.</param>
            <param name="addWatermark">Explicitly set the watermark</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>Response from the model for generated content.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="prompt"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateAnswer(Mscc.GenerativeAI.GenerateAnswerRequest,System.Threading.CancellationToken)">
            <summary>
            Generates a grounded answer from the model given an input GenerateAnswerRequest.
            </summary>
            <param name="request"></param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>Response from the model for a grounded answer.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.NotSupportedException"></exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateAnswer(System.String,System.Nullable{Mscc.GenerativeAI.AnswerStyle},System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Threading.CancellationToken)">
            <remarks/>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.EmbedContent(Mscc.GenerativeAI.EmbedContentRequest,System.String,System.Nullable{Mscc.GenerativeAI.TaskType},System.String,System.Threading.CancellationToken)">
            <summary>
            Generates a text embedding vector from the input `Content` using the specified [Gemini Embedding model](https://ai.google.dev/gemini-api/docs/models/gemini#text-embedding).
            </summary>
            <param name="request">Required. EmbedContentRequest to process. The content to embed. Only the parts.text fields will be counted.</param>
            <param name="model">Optional. The model used to generate embeddings. Defaults to models/embedding-001.</param>
            <param name="taskType">Optional. Task type for which the embeddings will be used. Can only be set for models/embedding-001.</param>
            <param name="title">Optional. An optional title for the text. Only applicable when TaskType is RETRIEVAL_DOCUMENT. Note: Specifying a title for RETRIEVAL_DOCUMENT provides better quality embeddings for retrieval.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>List containing the embedding (list of float values) for the input content.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.GenerativeModel.EmbedContent(System.Collections.Generic.List{Mscc.GenerativeAI.EmbedContentRequest},System.String,System.Nullable{Mscc.GenerativeAI.TaskType},System.String,System.Threading.CancellationToken)">
            <summary>
            Generates multiple embedding vectors from the input `Content` which consists of a batch of strings represented as `EmbedContentRequest` objects.
            </summary>
            <param name="requests">Required. Embed requests for the batch. The model in each of these requests must match the model specified BatchEmbedContentsRequest.model.</param>
            <param name="model">Optional. The model used to generate embeddings. Defaults to models/embedding-001.</param>
            <param name="taskType">Optional. Task type for which the embeddings will be used. Can only be set for models/embedding-001.</param>
            <param name="title">Optional. An optional title for the text. Only applicable when TaskType is RETRIEVAL_DOCUMENT. Note: Specifying a title for RETRIEVAL_DOCUMENT provides better quality embeddings for retrieval.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>List containing the embedding (list of float values) for the input content.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="requests"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.NotSupportedException"></exception>
        </member>
|
||
        <member name="M:Mscc.GenerativeAI.GenerativeModel.EmbedContent(System.String,System.String,System.Nullable{Mscc.GenerativeAI.TaskType},System.String,System.Threading.CancellationToken)">
            <summary>
            Generates an embedding from the model given an input Content.
            </summary>
            <param name="content">Required. String to process. The content to embed. Only the parts.text fields will be counted.</param>
            <param name="model">Optional. The model used to generate embeddings. Defaults to models/embedding-001.</param>
            <param name="taskType">Optional. Task type for which the embeddings will be used. Can only be set for models/embedding-001.</param>
            <param name="title">Optional. An optional title for the text. Only applicable when TaskType is RETRIEVAL_DOCUMENT. Note: Specifying a title for RETRIEVAL_DOCUMENT provides better quality embeddings for retrieval.</param>
            <param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
            <returns>List containing the embedding (list of float values) for the input content.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="content"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
        </member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.EmbedContent(System.Collections.Generic.IEnumerable{System.String},System.String,System.Nullable{Mscc.GenerativeAI.TaskType},System.String,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Generates an embedding from the model given an input Content.
|
||
</summary>
|
||
<param name="content">Required. List of strings to process. The content to embed. Only the parts.text fields will be counted.</param>
|
||
<param name="model">Optional. The model used to generate embeddings. Defaults to models/embedding-001.</param>
|
||
<param name="taskType">Optional. Optional task type for which the embeddings will be used. Can only be set for models/embedding-001.</param>
|
||
<param name="title">Optional. An optional title for the text. Only applicable when TaskType is RETRIEVAL_DOCUMENT. Note: Specifying a title for RETRIEVAL_DOCUMENT provides better quality embeddings for retrieval.</param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns>List containing the embedding (list of float values) for the input content.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="content"/> is <see langword="null"/>.</exception>
|
||
<exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.EmbedContent(Mscc.GenerativeAI.ContentResponse,System.String,System.Nullable{Mscc.GenerativeAI.TaskType},System.String,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Generates multiple embeddings from the model given input text in a synchronous call.
|
||
</summary>
|
||
<param name="content">Content to embed.</param>
|
||
<param name="model">Optional. The model used to generate embeddings. Defaults to models/embedding-001.</param>
|
||
<param name="taskType">Optional. Optional task type for which the embeddings will be used. Can only be set for models/embedding-001.</param>
|
||
<param name="title">Optional. An optional title for the text. Only applicable when TaskType is RETRIEVAL_DOCUMENT. Note: Specifying a title for RETRIEVAL_DOCUMENT provides better quality embeddings for retrieval.</param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns>List containing the embedding (list of float values) for the input content.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="content"/> is <see langword="null"/>.</exception>
|
||
<exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.CountTokens(Mscc.GenerativeAI.GenerateContentRequest,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Runs a model's tokenizer on input `Content` and returns the token count.
|
||
</summary>
|
||
<remarks>
|
||
Refer to the [tokens guide](https://ai.google.dev/gemini-api/docs/tokens) to learn more about tokens.
|
||
</remarks>
|
||
<param name="request"></param>
|
||
<param name="requestOptions">Options for the request.</param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns>Number of tokens.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.CountTokens(System.String,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
|
||
<remarks/>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.CountTokens(System.Collections.Generic.List{Mscc.GenerativeAI.IPart},System.Threading.CancellationToken)">
|
||
<remarks/>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.StartChat(System.Collections.Generic.List{Mscc.GenerativeAI.ContentResponse},Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},System.Boolean)">
|
||
<summary>
|
||
Starts a chat session.
|
||
</summary>
|
||
<param name="history">Optional. A collection of <see cref="T:Mscc.GenerativeAI.ContentResponse"/> objects, or equivalents to initialize the session.</param>
|
||
<param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
|
||
<param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
|
||
<param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
|
||
<param name="enableAutomaticFunctionCalling"></param>
|
||
<returns>Returns a <see cref="T:Mscc.GenerativeAI.ChatSession"/> attached to this model.</returns>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.Predict(Mscc.GenerativeAI.PredictRequest,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Performs a prediction request.
|
||
</summary>
|
||
<param name="request">Required. The request to send to the API.</param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns>Prediction response.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
|
||
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.PredictLongRunning(Mscc.GenerativeAI.PredictLongRunningRequest,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Same as Predict but returns an LRO.
|
||
</summary>
|
||
<param name="request">Required. The request to send to the API.</param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns>Prediction response.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
|
||
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateText(Mscc.GenerativeAI.GenerateTextRequest,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Generates a response from the model given an input message.
|
||
</summary>
|
||
<param name="request">The request to send to the API.</param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns></returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
|
||
<exception cref="T:System.NotSupportedException"></exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateText(System.String,System.Threading.CancellationToken)">
|
||
<remarks/>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.CountTokens(Mscc.GenerativeAI.GenerateTextRequest,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Counts the number of tokens in the content.
|
||
</summary>
|
||
<param name="request"></param>
|
||
<param name="requestOptions">Options for the request.</param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns>Number of tokens.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateMessage(Mscc.GenerativeAI.GenerateMessageRequest,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Generates a response from the model given an input prompt.
|
||
</summary>
|
||
<param name="request"></param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns></returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.GenerateMessage(System.String,System.Threading.CancellationToken)">
|
||
<remarks/>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.CountTokens(Mscc.GenerativeAI.GenerateMessageRequest,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Runs a model's tokenizer on a string and returns the token count.
|
||
</summary>
|
||
<param name="request"></param>
|
||
<param name="requestOptions">Options for the request.</param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns>Number of tokens.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.EmbedText(Mscc.GenerativeAI.EmbedTextRequest,System.Threading.CancellationToken)">
|
||
<summary>
|
||
|
||
</summary>
|
||
<param name="request"></param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns></returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
|
||
<exception cref="T:System.NotSupportedException"></exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.EmbedText(System.String,System.Threading.CancellationToken)">
|
||
<remarks/>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.CountTokens(Mscc.GenerativeAI.EmbedTextRequest,Mscc.GenerativeAI.RequestOptions,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Counts the number of tokens in the content.
|
||
</summary>
|
||
<param name="request"></param>
|
||
<param name="requestOptions">Options for the request.</param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns>Number of tokens.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerativeModel.BatchEmbedText(Mscc.GenerativeAI.BatchEmbedTextRequest,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Generates multiple embeddings from the model given input text in a synchronous call.
|
||
</summary>
|
||
<param name="request">Required. Embed requests for the batch. The model in each of these requests must match the model specified BatchEmbedContentsRequest.model.</param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns>List of Embeddings of the content as a list of floating numbers.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
|
||
<exception cref="T:System.NotSupportedException"></exception>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GoogleAI">
|
||
<summary>
|
||
Entry point to access Gemini API running in Google AI.
|
||
</summary>
|
||
<remarks>
|
||
See <a href="https://ai.google.dev/api/rest">Model reference</a>.
|
||
</remarks>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.#ctor(Microsoft.Extensions.Logging.ILogger)">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GoogleAI"/> class with access to Google AI Gemini API.
|
||
The default constructor attempts to read <c>.env</c> file and environment variables.
|
||
Sets default values, if available.
|
||
</summary>
|
||
<remarks>The following environment variables are used:
|
||
<list type="table">
|
||
<item><term>GOOGLE_API_KEY</term>
|
||
<description>API key provided by Google AI Studio.</description></item>
|
||
<item><term>GOOGLE_ACCESS_TOKEN</term>
|
||
<description>Optional. Access token provided by OAuth 2.0 or Application Default Credentials (ADC).</description></item>
|
||
</list>
|
||
</remarks>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.#ctor(System.String,System.String,System.String,Microsoft.Extensions.Logging.ILogger)">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GoogleAI"/> class with access to Google AI Gemini API.
|
||
Either API key or access token is required.
|
||
</summary>
|
||
<param name="apiKey">API key for Google AI Studio.</param>
|
||
<param name="accessToken">Access token for the Google Cloud project.</param>
|
||
<param name="apiVersion">Version of the API.</param>
|
||
<param name="logger">Optional. Logger instance used for logging</param>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.GenerativeModel(System.String,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.Content)">
|
||
<summary>
|
||
Create a generative model on Google AI to use.
|
||
</summary>
|
||
<param name="model">Model to use (default: "gemini-1.5-pro")</param>
|
||
<param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
|
||
<param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
|
||
<param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
|
||
<param name="systemInstruction">Optional. </param>
|
||
<returns>Generative model instance.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when both "apiKey" and "accessToken" are <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.GenerativeModel(Mscc.GenerativeAI.CachedContent,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting})">
|
||
<summary>
|
||
Create a generative model on Google AI to use.
|
||
</summary>
|
||
<param name="cachedContent">Content that has been preprocessed.</param>
|
||
<param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
|
||
<param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
|
||
<returns>Generative model instance.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when <paramref name="cachedContent"/> is null.</exception>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when both "apiKey" and "accessToken" are <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.GetModel(System.String)">
|
||
<inheritdoc cref="T:Mscc.GenerativeAI.IGenerativeAI"/>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.CachedContent">
|
||
<summary>
|
||
Returns an instance of CachedContent to use with a model.
|
||
</summary>
|
||
<returns>Cached content instance.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when both "apiKey" and "accessToken" are <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.ImageGenerationModel(System.String)">
|
||
<summary>
|
||
Returns an instance of <see cref="M:Mscc.GenerativeAI.GoogleAI.ImageGenerationModel(System.String)"/> to use with a model.
|
||
</summary>
|
||
<param name="model">Model to use (default: "imagegeneration")</param>
|
||
<returns>Imagen model</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when both "apiKey" and "accessToken" are <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.UploadFile(System.String,System.String,System.Boolean,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Uploads a file to the File API backend.
|
||
</summary>
|
||
<param name="uri">URI or path to the file to upload.</param>
|
||
<param name="displayName">A name displayed for the uploaded file.</param>
|
||
<param name="resumable">Flag indicating whether to use resumable upload.</param>
|
||
<param name="cancellationToken">A cancellation token to cancel the upload.</param>
|
||
<returns>A URI of the uploaded file.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="uri"/> is null or empty.</exception>
|
||
<exception cref="T:System.IO.FileNotFoundException">Thrown when the file <paramref name="uri"/> is not found.</exception>
|
||
<exception cref="T:Mscc.GenerativeAI.MaxUploadFileSizeException">Thrown when the file size exceeds the maximum allowed size.</exception>
|
||
<exception cref="T:Mscc.GenerativeAI.UploadFileException">Thrown when the file upload fails.</exception>
|
||
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.UploadFile(System.IO.Stream,System.String,System.String,System.Boolean,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Uploads a stream to the File API backend.
|
||
</summary>
|
||
<param name="stream">Stream to upload.</param>
|
||
<param name="displayName">A name displayed for the uploaded file.</param>
|
||
<param name="mimeType">The MIME type of the stream content.</param>
|
||
<param name="resumable">Flag indicating whether to use resumable upload.</param>
|
||
<param name="cancellationToken">A cancellation token to cancel the upload.</param>
|
||
<returns>A URI of the uploaded file.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="stream"/> is null or empty.</exception>
|
||
<exception cref="T:Mscc.GenerativeAI.MaxUploadFileSizeException">Thrown when the <paramref name="stream"/> size exceeds the maximum allowed size.</exception>
|
||
<exception cref="T:Mscc.GenerativeAI.UploadFileException">Thrown when the <paramref name="stream"/> upload fails.</exception>
|
||
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.DownloadFile(System.String)">
|
||
<summary>
|
||
Gets a generated file.
|
||
</summary>
|
||
<remarks>
|
||
When calling this method via REST, only the metadata of the generated file is returned.
|
||
To retrieve the file content via REST, add alt=media as a query parameter.
|
||
</remarks>
|
||
<param name="file">Required. The name of the generated file to retrieve. Example: `generatedFiles/abc-123`</param>
|
||
<returns>Metadata for the given file.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="file"/> is null or empty.</exception>
|
||
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.ListFiles(System.Nullable{System.Int32},System.String)">
|
||
<summary>
|
||
Lists the metadata for Files owned by the requesting project.
|
||
</summary>
|
||
<param name="pageSize">The maximum number of Models to return (per page).</param>
|
||
<param name="pageToken">A page token, received from a previous files.list call. Provide the pageToken returned by one request as an argument to the next request to retrieve the next page.</param>
|
||
<returns>List of files in File API.</returns>
|
||
<exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
|
||
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.GetFile(System.String)">
|
||
<summary>
|
||
Gets the metadata for the given File.
|
||
</summary>
|
||
<param name="file">Required. The resource name of the file to get. This name should match a file name returned by the files.list method. Format: files/file-id.</param>
|
||
<returns>Metadata for the given file.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="file"/> is null or empty.</exception>
|
||
<exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
|
||
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.DeleteFile(System.String)">
|
||
<summary>
|
||
Deletes a file.
|
||
</summary>
|
||
<param name="file">Required. The resource name of the file to get. This name should match a file name returned by the files.list method. Format: files/file-id.</param>
|
||
<returns>If successful, the response body is empty.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="file"/> is null or empty.</exception>
|
||
<exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
|
||
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleAI.ListGeneratedFiles(System.Nullable{System.Int32},System.String)">
|
||
<summary>
|
||
Lists the metadata for Files owned by the requesting project.
|
||
</summary>
|
||
<param name="pageSize">The maximum number of Models to return (per page).</param>
|
||
<param name="pageToken">A page token, received from a previous files.list call. Provide the pageToken returned by one request as an argument to the next request to retrieve the next page.</param>
|
||
<returns>List of files in File API.</returns>
|
||
<exception cref="T:System.NotSupportedException">Thrown when the functionality is not supported by the model.</exception>
|
||
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.IGenerativeAI">
|
||
<summary>
|
||
The interface shall be used to write generic implementations using either
|
||
Google AI Gemini API or Vertex AI Gemini API as backends.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.IGenerativeAI.GenerativeModel(System.String,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.Content)">
|
||
<summary>
|
||
Create an instance of a generative model to use.
|
||
</summary>
|
||
<param name="model">Model to use (default: "gemini-1.5-pro")</param>
|
||
<param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
|
||
<param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
|
||
<param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
|
||
<param name="systemInstruction">Optional. </param>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when required parameters are null.</exception>
|
||
<returns>Generative model instance.</returns>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.IGenerativeAI.GenerativeModel(Mscc.GenerativeAI.CachedContent,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting})">
|
||
<summary>
|
||
Create an instance of a generative model to use.
|
||
</summary>
|
||
<param name="cachedContent">Content that has been preprocessed.</param>
|
||
<param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
|
||
<param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
|
||
<returns>Generative model instance.</returns>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.IGenerativeAI.GetModel(System.String)">
|
||
<summary>
|
||
Gets information about a specific Model.
|
||
</summary>
|
||
<param name="model">Required. The resource name of the model. This name should match a model name returned by the models.list method. Format: models/model-id or tunedModels/my-model-id</param>
|
||
<returns></returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when model parameter is null.</exception>
|
||
<exception cref="T:System.NotSupportedException">Thrown when the backend does not support this method or the model.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.IGenerativeAI.ImageGenerationModel(System.String)">
|
||
<summary>
|
||
Returns an instance of an image generation model.
|
||
</summary>
|
||
<param name="model">Model to use (default: "imagegeneration")</param>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ImageGenerationModel">
|
||
<summary>
|
||
Name of the model that supports image generation.
|
||
The <see cref="T:Mscc.GenerativeAI.ImageGenerationModel"/> can create high quality visual assets in seconds and brings Google's state-of-the-art vision and multimodal generative AI capabilities to application developers.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.ImageGenerationModel.#ctor">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImageGenerationModel"/> class.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.ImageGenerationModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImageGenerationModel"/> class.
|
||
The default constructor attempts to read <c>.env</c> file and environment variables.
|
||
Sets default values, if available.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.ImageGenerationModel.#ctor(System.String,System.String,Microsoft.Extensions.Logging.ILogger)">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImageGenerationModel"/> class with access to Google AI Gemini API.
|
||
</summary>
|
||
<param name="apiKey">API key provided by Google AI Studio</param>
|
||
<param name="model">Model to use</param>
|
||
<param name="logger">Optional. Logger instance used for logging</param>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.ImageGenerationModel.#ctor(System.String,System.String,System.String,Microsoft.Extensions.Logging.ILogger)">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImageGenerationModel"/> class with access to Vertex AI Gemini API.
|
||
</summary>
|
||
<param name="projectId">Identifier of the Google Cloud project</param>
|
||
<param name="region">Region to use</param>
|
||
<param name="model">Model to use</param>
|
||
<param name="logger">Optional. Logger instance used for logging</param>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.ImageGenerationModel.GenerateImages(Mscc.GenerativeAI.ImageGenerationRequest,System.Threading.CancellationToken)">
|
||
<summary>
|
||
Generates images from the specified <see cref="T:Mscc.GenerativeAI.ImageGenerationRequest"/>.
|
||
</summary>
|
||
<param name="request">Required. The request to send to the API.</param>
|
||
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
|
||
<returns>Response from the model for generated images.</returns>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.ImageGenerationModel.GenerateImages(System.String,System.Int32,System.String,System.String,System.Nullable{System.Int32},System.String,System.String,System.String,System.Nullable{System.Boolean},System.Nullable{System.Boolean},System.Threading.CancellationToken)">
<summary>
Generates images from text prompt.
</summary>
<param name="prompt">Required. String to process.</param>
<param name="numberOfImages">Number of images to generate. Range: 1..8.</param>
<param name="negativePrompt">A description of what you want to omit in the generated images.</param>
<param name="aspectRatio">Aspect ratio for the image.</param>
<param name="guidanceScale">Controls the strength of the prompt. Suggested values are - * 0-9 (low strength) * 10-20 (medium strength) * 21+ (high strength)</param>
<param name="language">Language of the text prompt for the image.</param>
<param name="safetyFilterLevel">Adds a filter level to Safety filtering.</param>
<param name="personGeneration">Allow generation of people by the model.</param>
<param name="enhancePrompt">Option to enhance your provided prompt.</param>
<param name="addWatermark">Explicitly set the watermark</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>Response from the model for generated content.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="prompt"/> is <see langword="null"/>.</exception>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
</member>
<member name="M:Mscc.GenerativeAI.ImageGenerationModel.GenerateContent(System.String,System.Threading.CancellationToken)">
<summary>
Generates a response from the model given an input prompt and other parameters.
</summary>
<param name="prompt">Required. String to process.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>Response from the model for generated content.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="prompt"/> is <see langword="null"/>.</exception>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
</member>
<member name="T:Mscc.GenerativeAI.ImagesModel">
<summary>
Generates an image from the model given an input.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.ImagesModel.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImagesModel"/> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.ImagesModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImagesModel"/> class.
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="M:Mscc.GenerativeAI.ImagesModel.Images(Mscc.GenerativeAI.GenerateImagesRequest,System.Threading.CancellationToken)">
<summary>

</summary>
<param name="request"></param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns></returns>
<exception cref="T:System.ArgumentNullException"></exception>
</member>
<member name="T:Mscc.GenerativeAI.ImageTextModel">
<summary>
Name of the model that supports image captioning.
<see cref="T:Mscc.GenerativeAI.ImageTextModel"/> generates a caption from an image you provide based on the language that you specify. The model supports the following languages: English (en), German (de), French (fr), Spanish (es) and Italian (it).
</summary>
</member>
<member name="M:Mscc.GenerativeAI.ImageTextModel.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImageTextModel"/> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.ImageTextModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImageTextModel"/> class.
The default constructor attempts to read <c>.env</c> file and environment variables.
Sets default values, if available.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.ImageTextModel.#ctor(System.String,System.String,System.String,Microsoft.Extensions.Logging.ILogger)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImageTextModel"/> class with access to Vertex AI Gemini API.
</summary>
<param name="projectId">Identifier of the Google Cloud project</param>
<param name="region">Region to use</param>
<param name="model">Model to use</param>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="M:Mscc.GenerativeAI.ImageTextModel.GetCaptions(Mscc.GenerativeAI.ImageTextRequest,System.Threading.CancellationToken)">
<summary>
Generates captions from the specified <see cref="T:Mscc.GenerativeAI.ImageTextRequest"/>.
</summary>
<param name="request">Required. The request to send to the API.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>Response from the model for generated captions.</returns>
</member>
<member name="M:Mscc.GenerativeAI.ImageTextModel.GetCaptions(System.String,System.Nullable{System.Int32},System.String,System.String,System.Threading.CancellationToken)">
<summary>
Generates a response from the model given an input prompt and other parameters.
</summary>
<param name="base64Image">Required. The base64 encoded image to process.</param>
<param name="numberOfResults">Optional. Number of results to return. Default is 1.</param>
<param name="language">Optional. Language to use. Default is en.</param>
<param name="storageUri">Optional. Cloud Storage uri where to store the generated predictions.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>Response from the model for generated content.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="base64Image"/> is <see langword="null"/>.</exception>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
<exception cref="T:System.NotSupportedException">Thrown when the <paramref name="language"/> is not supported by the API.</exception>
</member>
<member name="M:Mscc.GenerativeAI.ImageTextModel.AskQuestion(System.String,System.String,System.Nullable{System.Int32},System.String,System.Threading.CancellationToken)">
<summary>
Generates a response from the model given an input prompt and other parameters.
</summary>
<param name="base64Image">Required. The base64 encoded image to process.</param>
<param name="question">Required. The question to ask about the image.</param>
<param name="numberOfResults">Optional. Number of results to return. Default is 1.</param>
<param name="language">Optional. Language to use. Default is en.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>Response from the model for generated content.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="base64Image"/> is <see langword="null"/>.</exception>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
<exception cref="T:System.NotSupportedException">Thrown when the <paramref name="language"/> is not supported by the API.</exception>
</member>
<member name="T:Mscc.GenerativeAI.GenerativeModelLogMessages">
<summary>
Extensions for logging <see cref="T:Mscc.GenerativeAI.GenerativeModel"/> invocations.
</summary>
<remarks>
This extension uses the <see cref="T:Microsoft.Extensions.Logging.LoggerMessageAttribute"/> to
generate logging code at compile time to achieve optimized code.
</remarks>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeModelLogMessages.LogGenerativeModelInvoking(Microsoft.Extensions.Logging.ILogger)">
<summary>
Logs <see cref="T:Mscc.GenerativeAI.GenerativeModel"/>
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeModelLogMessages.LogGenerativeModelInvoked(Microsoft.Extensions.Logging.ILogger)">
<summary>
Logs <see cref="T:Mscc.GenerativeAI.GenerativeModel"/>
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeModelLogMessages.LogGenerativeModelInvokingRequest(Microsoft.Extensions.Logging.ILogger,System.String,System.String,System.String)">
<summary>
Logs <see cref="T:Mscc.GenerativeAI.GenerativeModel"/> invoking an API request.
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
<param name="methodName">Calling method</param>
<param name="url">URL of Gemini API endpoint</param>
<param name="payload">Data sent to the API endpoint</param>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeModelLogMessages.LogRunExternalExe(Microsoft.Extensions.Logging.ILogger,System.String)">
<summary>
Logs <see cref="T:Mscc.GenerativeAI.BaseModel"/> when exception thrown to run an external application.
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
<param name="message">Message of <see cref="T:System.Exception"/> to log.</param>
</member>
<member name="M:Mscc.GenerativeAI.GenerativeModelLogMessages.LogParsedRequestUrl(Microsoft.Extensions.Logging.ILogger,System.String)">
<summary>
Logs <see cref="T:Mscc.GenerativeAI.BaseModel"/> parsing the URL to call.
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
<param name="url">Parsed URL.</param>
</member>
<member name="M:Mscc.GenerativeAI.MediaModel.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.MediaModel"/> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.MediaModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.MediaModel"/> class.
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="M:Mscc.GenerativeAI.MediaModel.UploadFile(System.String,System.String,System.Boolean,System.Threading.CancellationToken)">
<summary>
Uploads a file to the File API backend.
</summary>
<param name="uri">URI or path to the file to upload.</param>
<param name="displayName">A name displayed for the uploaded file.</param>
<param name="resumable">Flag indicating whether to use resumable upload.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>A URI of the uploaded file.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="uri"/> is null or empty.</exception>
<exception cref="T:System.IO.FileNotFoundException">Thrown when the file <paramref name="uri"/> is not found.</exception>
<exception cref="T:Mscc.GenerativeAI.MaxUploadFileSizeException">Thrown when the file size exceeds the maximum allowed size.</exception>
<exception cref="T:Mscc.GenerativeAI.UploadFileException">Thrown when the file upload fails.</exception>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
<exception cref="T:System.NotSupportedException">Thrown when the MIME type of the URI is not supported by the API.</exception>
</member>
<member name="M:Mscc.GenerativeAI.MediaModel.UploadFile(System.IO.Stream,System.String,System.String,System.Boolean,System.Threading.CancellationToken)">
<summary>
Uploads a stream to the File API backend.
</summary>
<param name="stream">Stream to upload.</param>
<param name="displayName">A name displayed for the uploaded file.</param>
<param name="mimeType">The MIME type of the stream content.</param>
<param name="resumable">Flag indicating whether to use resumable upload.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>A URI of the uploaded file.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="stream"/> is null or empty.</exception>
<exception cref="T:Mscc.GenerativeAI.MaxUploadFileSizeException">Thrown when the <paramref name="stream"/> size exceeds the maximum allowed size.</exception>
<exception cref="T:Mscc.GenerativeAI.UploadFileException">Thrown when the <paramref name="stream"/> upload fails.</exception>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
<exception cref="T:System.NotSupportedException">Thrown when the <paramref name="mimeType"/> is not supported by the API.</exception>
</member>
<member name="M:Mscc.GenerativeAI.MediaModel.DownloadFile(System.String,System.Boolean,System.Threading.CancellationToken)">
<summary>
Gets a generated file.
</summary>
<remarks>
When calling this method via REST, only the metadata of the generated file is returned.
To retrieve the file content via REST, add alt=media as a query parameter.
</remarks>
<param name="file">Required. The name of the generated file to retrieve. Example: `generatedFiles/abc-123`</param>
<param name="media">Optional. Flag indicating whether to retrieve the file content.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>Metadata for the given file.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="file"/> is null or empty.</exception>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
</member>
<member name="M:Mscc.GenerativeAI.OpenAIModel.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.OpenAIModel"/> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.OpenAIModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.OpenAIModel"/> class.
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="M:Mscc.GenerativeAI.OpenAIModel.ListModels(System.Threading.CancellationToken)">
<summary>
Lists the currently available models.
</summary>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>List of available models.</returns>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
</member>
<member name="M:Mscc.GenerativeAI.OpenAIModel.GetModel(System.String,System.String,System.Threading.CancellationToken)">
<summary>
Gets a model instance.
</summary>
<param name="modelsId">Required. The resource name of the model. This name should match a model name returned by the ListModels method.</param>
<param name="model">Required. The name of the model to get.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns></returns>
<exception cref="T:System.Net.Http.HttpRequestException">Thrown when the request fails to execute.</exception>
</member>
<member name="M:Mscc.GenerativeAI.OpenAIModel.Completions(Mscc.GenerativeAI.ChatCompletionsRequest,System.Threading.CancellationToken)">
<summary>
Generates a set of responses from the model given a chat history input.
</summary>
<param name="request">Required. The request to send to the API.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
</member>
<member name="M:Mscc.GenerativeAI.OpenAIModel.Embeddings(Mscc.GenerativeAI.GenerateEmbeddingsRequest,System.Threading.CancellationToken)">
<summary>
Generates embeddings from the model given an input.
</summary>
<param name="request">Required. The request to send to the API.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
</member>
<member name="M:Mscc.GenerativeAI.OpenAIModel.Images(Mscc.GenerativeAI.GenerateImagesRequest,System.Threading.CancellationToken)">
<summary>

</summary>
<param name="request"></param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns></returns>
<exception cref="T:System.ArgumentNullException"></exception>
</member>
<member name="T:Mscc.GenerativeAI.SupervisedTuningJobModel">
<summary>

</summary>
</member>
<member name="M:Mscc.GenerativeAI.SupervisedTuningJobModel.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.SupervisedTuningJobModel"/> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.SupervisedTuningJobModel.#ctor(Microsoft.Extensions.Logging.ILogger)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.SupervisedTuningJobModel"/> class.
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="M:Mscc.GenerativeAI.SupervisedTuningJobModel.#ctor(System.String,System.String,System.String,Microsoft.Extensions.Logging.ILogger)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.SupervisedTuningJobModel"/> class with access to Vertex AI Gemini API.
</summary>
<param name="projectId">Identifier of the Google Cloud project</param>
<param name="region">Region to use</param>
<param name="model">Model to use</param>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="M:Mscc.GenerativeAI.SupervisedTuningJobModel.Create(Mscc.GenerativeAI.CreateTuningJobRequest,System.Threading.CancellationToken)">
<summary>

</summary>
<param name="request"></param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns></returns>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="request"/> is <see langword="null"/>.</exception>
</member>
<member name="M:Mscc.GenerativeAI.SupervisedTuningJobModel.List(System.Threading.CancellationToken)">
<summary>

</summary>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns></returns>
</member>
<member name="M:Mscc.GenerativeAI.SupervisedTuningJobModel.Get(System.String,System.Threading.CancellationToken)">
<summary>
Gets metadata of a tuning job.
</summary>
<param name="tuningJobId">Required. The ID of the tuning job. Format: `tuningJobs/{id}`</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>The details of a tuning job.</returns>
<exception cref="T:System.ArgumentException">Thrown when the <paramref name="tuningJobId"/> is <see langword="null"/> or empty.</exception>
</member>
<member name="M:Mscc.GenerativeAI.SupervisedTuningJobModel.Cancel(System.String,System.Threading.CancellationToken)">
<summary>
Cancels a tuning job.
</summary>
<param name="tuningJobId">Required. The ID of the tuning job. Format: `tuningJobs/{id}`</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>If successful, the response body is empty.</returns>
<exception cref="T:System.ArgumentException">Thrown when the <paramref name="tuningJobId"/> is <see langword="null"/> or empty.</exception>
</member>
<member name="M:Mscc.GenerativeAI.SupervisedTuningJobModel.Delete(System.String,System.Threading.CancellationToken)">
<summary>
Deletes a tuning job.
</summary>
<param name="tuningJobId">Required. The ID of the tuning job. Format: `tuningJobs/{id}`</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>If successful, the response body is empty.</returns>
<exception cref="T:System.ArgumentException">Thrown when the <paramref name="tuningJobId"/> is <see langword="null"/> or empty.</exception>
</member>
<member name="T:Mscc.GenerativeAI.BaseLogger">
<summary>
Abstract base type with logging instance.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.BaseLogger.#ctor(Microsoft.Extensions.Logging.ILogger)">
<summary>
Base constructor to set the <see cref="T:Microsoft.Extensions.Logging.ILogger"/> instance.
</summary>
<param name="logger">Optional. Logger instance used for logging</param>
</member>
<member name="T:Mscc.GenerativeAI.AttributionSourceId">
<summary>
Identifier for the source contributing to this attribution.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.AttributionSourceId.GroundingPassage">
<summary>
Identifier for an inline passage.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.AttributionSourceId.SemanticRetrieverChunk">
<summary>
Identifier for a `Chunk` fetched via Semantic Retriever.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.AudioOptions">
<summary>
Options for audio generation.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.AudioOptions.Format">
<summary>
Optional. The format of the audio response.
</summary>
<remarks>
Can be either:
- "wav": Format the response as a WAV file.
- "mp3": Format the response as an MP3 file.
- "flac": Format the response as a FLAC file.
- "opus": Format the response as an OPUS file.
- "pcm16": Format the response as a PCM16 file.
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.AudioOptions.Voice">
<summary>
Optional. The voice to use for the audio response.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.Blobstore2Info">
<summary>
Information to read/write to blobstore2.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Blobstore2Info.BlobId">
<summary>
The blob id, e.g., /blobstore/prod/playground/scotty
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Blobstore2Info.ReadToken">
<summary>
The blob read token.
Needed to read blobs that have not been replicated. Might not be available until the final call.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Blobstore2Info.BlobGeneration">
<summary>
The blob generation id.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Blobstore2Info.UploadMetadataContainer">
<summary>
Metadata passed from Blobstore -> Scotty for a new GCS upload. This is a signed, serialized blobstore2.BlobMetadataContainer proto which must never be consumed outside of Bigstore, and is not applicable to non-GCS media uploads.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Blobstore2Info.DownloadReadHandle">
<summary>
Read handle passed from Bigstore -> Scotty for a GCS download. This is a signed, serialized blobstore2.ReadHandle proto which must never be set outside of Bigstore, and is not applicable to non-GCS media downloads.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ListCachedContentsResponse.CachedContents">
<summary>
List of cached content resources.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ListCachedContentsResponse.NextPageToken">
<summary>
A token, which can be sent as pageToken to retrieve the next page.
If this field is omitted, there are no more pages.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.Model">
<summary>
Required. Immutable. The name of the `Model` to use for cached content Format: `models/{model}`
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.Name">
<summary>
Optional. Identifier. The resource name referring to the cached content. Format: `cachedContents/{id}`
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.DisplayName">
<summary>
Optional. Immutable. The user-generated meaningful display name of the cached content. Maximum 128 Unicode characters.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.Expiration">
<summary>
Specifies when this resource will expire.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.Ttl">
<summary>
Input only. New TTL for this resource, input only.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.CreateTime">
<summary>
Output only. Creation time of the cache entry.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.UpdateTime">
<summary>
Output only. When the cache entry was last updated in UTC time.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.ExpireTime">
<summary>
Timestamp in UTC of when this resource is considered expired.
This is *always* provided on output, regardless of what was sent on input.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.Contents">
<summary>
Optional. Input only. Immutable. The content to cache.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.SystemInstruction">
<summary>
Optional. Input only. Immutable. Developer set system instruction. Currently text only.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.Tools">
<summary>
Optional. Input only. Immutable. A list of `Tools` the model may use to generate the next response
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.ToolConfig">
<summary>
Optional. Input only. Immutable. Tool config. This config is shared for all tools.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContent.UsageMetadata">
<summary>
Output only. Metadata on the usage of the cached content.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.CachedContentUsageMetadata">
<summary>
Metadata on the usage of the cached content.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CachedContentUsageMetadata.TotalTokenCount">
<summary>
Total number of tokens that the cached content consumes.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.Candidate">
<summary>
A response candidate generated from the model.
Ref: https://ai.google.dev/api/rest/v1beta/Candidate
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.Content">
<summary>
Output only. Content parts of the candidate.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.FinishReason">
<summary>
Output only. The reason why the model stopped generating tokens.
If empty, the model has not stopped generating the tokens.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.FinishMessage">
<summary>
Output only. Describes the reason the model stopped generating tokens
in more detail. This is only filled when `finish_reason` is set.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.Index">
<summary>
Output only. Index of the candidate.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.SafetyRatings">
<summary>
Output only. List of ratings for the safety of a response candidate.
There is at most one rating per category.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.CitationMetadata">
<summary>
Output only. Source attribution of the generated content.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.FunctionCall">
<summary>

</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.GroundingMetadata">
<summary>

</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.TokenCount">
<summary>
Output only. Token count for this candidate.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.GroundingAttributions">
<summary>
Output only. Attribution information for sources that contributed to a grounded answer.
This field is populated for GenerateAnswer calls.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.AvgLogprobs">
<summary>
Output only.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Candidate.LogprobsResult">
<summary>
Output only. Log-likelihood scores for the response tokens and top tokens
</summary>
</member>
<member name="T:Mscc.GenerativeAI.ChatCompletionsRequest">
<summary>
Request for chat completions.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.Model">
<summary>
Required. The name of the `Model` to use for generating the completion.
The model name will be prefixed by \"models/\" if no slash appears in it.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.Messages">
<summary>
Required. The chat history to use for generating the completion.
Supports single and multi-turn queries.
Note: This is a polymorphic field, it is deserialized to an InternalChatMessage.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.MaxCompletionTokens">
<summary>
Optional. The maximum number of tokens to include in a response candidate.
Must be a positive integer.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.MaxTokens">
<summary>
Optional. The maximum number of tokens to include in a response candidate.
Must be a positive integer. This field is deprecated by the SDK.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.N">
<summary>
Optional. Amount of candidate completions to generate.
Must be a positive integer. Defaults to 1 if not set.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.ResponseFormat">
<summary>
Optional. Defines the format of the response. If not set, the response will be formatted as text.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.Stop">
<summary>
Optional. The set of character sequences that will stop output generation.
Note: This is a polymorphic field. It is meant to contain a string or repeated strings.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.Stream">
<summary>
Optional. Whether to stream the response or return a single response.
If true, the \"object\" field in the response will be \"chat.completion.chunk\".
Otherwise it will be \"chat.completion\".
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.StreamOptions">
<summary>
Optional. Options for streaming requests.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.Temperature">
<summary>
Optional. Controls the randomness of the output.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.TopP">
<summary>
Optional. The maximum cumulative probability of tokens to consider when sampling.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.ToolChoice">
<summary>
Optional. Controls whether the model should use a tool or not, and which tool to use.
Can be either:
- The string \"none\", to disable tools.
- The string \"auto\", to let the model decide.
- The string \"required\", to force the model to use a tool.
- A function name descriptor object, specifying the tool to use. The last option follows the following schema: { \"type\": \"function\", \"function\": {\"name\" : \"the_function_name\"} }
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.Tools">
<summary>
Optional. The set of tools the model can generate calls for.
Each tool declares its signature.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.Audio">
<summary>
Optional. Options for audio generation.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.Modalities">
<summary>
Optional. Modalities for the request.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.ParallelToolCalls">
<summary>
Optional. Whether to call tools in parallel.
</summary>
<remarks>
Included here for compatibility with the SDK, but only false is supported.
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.PresencePenalty">
<summary>
Optional. Penalizes new tokens based on previous appearances. Valid ranges are [-2, 2]. Default is 0.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatCompletionsRequest.User">
|
||
<summary>
|
||
Optional. The user name used for tracking the request. Not used, only for compatibility with the SDK.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ChatFunction">
<summary>
A function that the model can generate calls for.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatFunction.Name">
<summary>
Required. The name of the function.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatFunction.Description">
<summary>
Optional. A description of the function.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatFunction.Strict">
<summary>
Optional. Whether the schema validation is strict.
If true, the model will fail if the schema is not valid.
NOTE: This parameter is currently ignored.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatFunction.Parameters">
<summary>
Optional. The parameters of the function.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.ChatSession">
<summary>
Contains an ongoing conversation with the model.
</summary>
<remarks>
This ChatSession object collects the messages sent and received, in its ChatSession.History attribute.
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.ChatSession.History">
<summary>
The chat history.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatSession.Last">
<summary>
Returns the last received ContentResponse
</summary>
</member>
<member name="M:Mscc.GenerativeAI.ChatSession.#ctor(Mscc.GenerativeAI.GenerativeModel,System.Collections.Generic.List{Mscc.GenerativeAI.ContentResponse},Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},System.Boolean)">
<summary>
Constructor to start a chat session with history.
</summary>
<param name="model">The model to use in the chat.</param>
<param name="history">A chat history to initialize the session with.</param>
<param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
<param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
<param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
<param name="enableAutomaticFunctionCalling">Optional. </param>
</member>
<member name="M:Mscc.GenerativeAI.ChatSession.SendMessage(Mscc.GenerativeAI.GenerateContentRequest,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.ToolConfig,System.Threading.CancellationToken)">
<summary>
Sends the conversation history with the added message and returns the model's response.
Appends the request and response to the conversation history.
</summary>
<param name="request">The content request.</param>
<param name="generationConfig">Optional. Overrides for the model's generation config.</param>
<param name="safetySettings">Optional. Overrides for the model's safety settings.</param>
<param name="tools">Optional. Overrides for the list of tools the model may use to generate the next response.</param>
<param name="toolConfig">Optional. Overrides for the configuration of tools.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>The model's response.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when <paramref name="request"/> is <see langword="null"/>.</exception>
<exception cref="T:Mscc.GenerativeAI.BlockedPromptException">Thrown when the model's response is blocked by a reason.</exception>
<exception cref="T:Mscc.GenerativeAI.StopCandidateException">Thrown when the model's response is stopped by the model's safety settings.</exception>
<exception cref="T:Mscc.GenerativeAI.ValueErrorException">Thrown when the candidate count is larger than 1.</exception>
</member>
<member name="M:Mscc.GenerativeAI.ChatSession.SendMessage(System.String,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.ToolConfig,System.Threading.CancellationToken)">
<summary>
Sends the conversation history with the added message and returns the model's response.
Appends the request and response to the conversation history.
</summary>
<param name="prompt">The message or content sent.</param>
<param name="generationConfig">Optional. Overrides for the model's generation config.</param>
<param name="safetySettings">Optional. Overrides for the model's safety settings.</param>
<param name="tools">Optional. Overrides for the list of tools the model may use to generate the next response.</param>
<param name="toolConfig">Optional. Overrides for the configuration of tools.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>The model's response.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when <paramref name="prompt"/> is <see langword="null"/>.</exception>
</member>
<member name="M:Mscc.GenerativeAI.ChatSession.SendMessage(System.Collections.Generic.List{Mscc.GenerativeAI.Part},Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.ToolConfig,System.Threading.CancellationToken)">
<summary>
Sends the conversation history with the added message and returns the model's response.
Appends the request and response to the conversation history.
</summary>
<param name="parts">The list of content parts sent.</param>
<param name="generationConfig">Optional. Overrides for the model's generation config.</param>
<param name="safetySettings">Optional. Overrides for the model's safety settings.</param>
<param name="tools">Optional. Overrides for the list of tools the model may use to generate the next response.</param>
<param name="toolConfig">Optional. Overrides for the configuration of tools.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>The model's response.</returns>
<exception cref="T:Mscc.GenerativeAI.ValueErrorException">Thrown when the candidate count is larger than 1.</exception>
</member>
<member name="M:Mscc.GenerativeAI.ChatSession.SendMessageStream(Mscc.GenerativeAI.GenerateContentRequest,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.ToolConfig,System.Threading.CancellationToken)">
<summary>
Sends the conversation history with the added message and returns the model's response.
</summary>
<remarks>Appends the request and response to the conversation history.</remarks>
<param name="request">The content request.</param>
<param name="generationConfig">Optional. Overrides for the model's generation config.</param>
<param name="safetySettings">Optional. Overrides for the model's safety settings.</param>
<param name="tools">Optional. Overrides for the list of tools the model may use to generate the next response.</param>
<param name="toolConfig">Optional. Overrides for the configuration of tools.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>The model's response.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when <paramref name="request"/> is <see langword="null"/></exception>
<exception cref="T:Mscc.GenerativeAI.BlockedPromptException">Thrown when the <paramref name="request"/> is blocked by a reason.</exception>
<exception cref="T:Mscc.GenerativeAI.ValueErrorException">Thrown when the candidate count is larger than 1.</exception>
</member>
<member name="M:Mscc.GenerativeAI.ChatSession.SendMessageStream(System.String,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.ToolConfig,System.Threading.CancellationToken)">
<summary>
Sends the conversation history with the added message and returns the model's response.
Appends the request and response to the conversation history.
</summary>
<param name="prompt">The message sent.</param>
<param name="generationConfig">Optional. Overrides for the model's generation config.</param>
<param name="safetySettings">Optional. Overrides for the model's safety settings.</param>
<param name="tools">Optional. Overrides for the list of tools the model may use to generate the next response.</param>
<param name="toolConfig">Optional. Overrides for the configuration of tools.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>The model's response.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when <paramref name="prompt"/> is <see langword="null"/>.</exception>
</member>
<member name="M:Mscc.GenerativeAI.ChatSession.SendMessageStream(System.Collections.Generic.List{Mscc.GenerativeAI.Part},Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.ToolConfig,System.Threading.CancellationToken)">
<summary>
Sends the conversation history with the added message and returns the model's response.
Appends the request and response to the conversation history.
</summary>
<param name="parts">The list of content parts sent.</param>
<param name="generationConfig">Optional. Overrides for the model's generation config.</param>
<param name="safetySettings">Optional. Overrides for the model's safety settings.</param>
<param name="tools">Optional. Overrides for the list of tools the model may use to generate the next response.</param>
<param name="toolConfig">Optional. Overrides for the configuration of tools.</param>
<param name="cancellationToken">A cancellation token that can be used by other objects or threads to receive notice of cancellation.</param>
<returns>The model's response.</returns>
<exception cref="T:System.ArgumentNullException">Thrown when <paramref name="parts"/> is <see langword="null"/>.</exception>
</member>
<member name="M:Mscc.GenerativeAI.ChatSession.Rewind">
<summary>
Removes the last request/response pair from the chat history.
</summary>
<returns>Tuple with the last request/response pair.</returns>
</member>
<member name="T:Mscc.GenerativeAI.ChatTool">
<summary>
A tool that the model can generate calls for.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatTool.Function">
<summary>
Required. The name of the tool.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChatTool.Type">
<summary>
Required. Required, must be \"function\".
</summary>
</member>
<member name="T:Mscc.GenerativeAI.Chunk">
<summary>
A `Chunk` is a subpart of a `Document` that is treated as an independent unit for the purposes of vector representation and storage. A `Corpus` can have a maximum of 1 million `Chunk`s.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Chunk.Name">
<summary>
Immutable. Identifier. The `Chunk` resource name. The ID (name excluding the \"corpora/*/documents/*/chunks/\" prefix) can contain up to 40 characters that are lowercase alphanumeric or dashes (-). The ID cannot start or end with a dash. If the name is empty on create, a random 12-character unique ID will be generated. Example: `corpora/{corpus_id}/documents/{document_id}/chunks/123a456b789c`
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Chunk.Data">
<summary>
Required. The content for the `Chunk`, such as the text string. The maximum number of tokens per chunk is 2043.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Chunk.State">
<summary>
Output only. Current state of the `Chunk`.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Chunk.CreateTime">
<summary>
Output only. The Timestamp of when the `Chunk` was created.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Chunk.UpdateTime">
<summary>
Output only. The Timestamp of when the `Chunk` was last updated.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Chunk.CustomMetadata">
<summary>
Optional. User provided custom metadata stored as key-value pairs. The maximum number of `CustomMetadata` per chunk is 20.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.ChunkData">
<summary>
Extracted data that represents the `Chunk` content.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ChunkData.StringValue">
<summary>
The `Chunk` content as a string. The maximum number of tokens per chunk is 2043.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.CitationMetadata">
<summary>
A collection of source attributions for a piece of content.
Ref: https://ai.google.dev/api/rest/v1beta/CitationMetadata
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CitationMetadata.Citations">
<summary>
Output only. List of citations.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.CitationSource">
<summary>
A citation to a source for a portion of a specific response.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CitationSource.StartIndex">
<summary>
Output only. Start index into the content.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CitationSource.EndIndex">
<summary>
Output only. End index into the content.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CitationSource.Uri">
<summary>
Output only. Url reference of the attribution.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CitationSource.Title">
<summary>
Output only. Title of the attribution.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CitationSource.License">
<summary>
Output only. License of the attribution.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CitationSource.PublicationDate">
<summary>
Output only. Publication date of the attribution.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.CodeExecution">
<summary>
Tool that executes code generated by the model, and
automatically returns the result to the model.
</summary>
<remarks>
See also `<see cref="T:Mscc.GenerativeAI.ExecutableCode"/>` and `<see cref="T:Mscc.GenerativeAI.CodeExecutionResult"/>`
which are only generated when using this tool.
</remarks>
</member>
<member name="T:Mscc.GenerativeAI.CodeExecutionResult">
<summary>
Result of executing the `ExecutableCode`. Only generated when using the `CodeExecution`, and always follows a `part` containing the `ExecutableCode`.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CodeExecutionResult.Outcome">
<summary>
Required. Outcome of the code execution.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CodeExecutionResult.Output">
<summary>
Optional. Contains stdout when code execution is successful, stderr or other description otherwise.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.CompositeMedia">
<summary>
A sequence of media data references representing composite data. Introduced to support Bigstore composite objects. For details, visit http://go/bigstore-composites.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.Inline">
<summary>
Media data, set if reference_type is INLINE
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.Path">
<summary>
Path to the data, set if reference_type is PATH
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.ReferenceType">
<summary>
Describes what the field reference contains.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.Md5Hash">
<summary>
Scotty-provided MD5 hash for an upload.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.Sha1Hash">
<summary>
Scotty-provided SHA1 hash for an upload.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.Sha256Hash">
<summary>
Scotty-provided SHA256 hash for an upload.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.Crc32cHash">
<summary>
For Scotty Uploads: Scotty-provided hashes for uploads For Scotty Downloads: (WARNING: DO NOT USE WITHOUT PERMISSION FROM THE SCOTTY TEAM.) A Hash provided by the agent to be used to verify the data being downloaded. Currently only supported for inline payloads. Further, only crc32c_hash is currently supported.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.BlobRef">
<summary>
Blobstore v1 reference, set if reference_type is BLOBSTORE_REF This should be the byte representation of a blobstore.BlobRef. Since Blobstore is deprecating v1, use blobstore2_info instead. For now, any v2 blob will also be represented in this field as v1 BlobRef.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.Length">
<summary>
Size of the data, in bytes
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.ObjectId">
<summary>
Reference to a TI Blob, set if reference_type is BIGSTORE_REF.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.CosmoBinaryReference">
<summary>
A binary data reference for a media download. Serves as a technology-agnostic binary reference in some Google infrastructure. This value is a serialized storage_cosmo.BinaryReference proto. Storing it as bytes is a hack to get around the fact that the cosmo proto (as well as others it includes) doesn't support JavaScript. This prevents us from including the actual type of this field.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CompositeMedia.Blobstore2Info">
<summary>
Blobstore v2 info, set if reference_type is BLOBSTORE_REF and it refers to a v2 blob.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.ComputeTokensRequest">
<summary>
Request message for ComputeTokens RPC call.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ComputeTokensRequest.Model">
<summary>
Optional. The name of the publisher model requested to serve the prediction.
Format: models/{model}.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ComputeTokensRequest.Contents">
<summary>
Required. The content of the current conversation with the model.
For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ComputeTokensRequest.Instances">
<summary>
Optional. The instances that are the input to token computing API call.
</summary>
<remarks>
Schema is identical to the prediction schema of the text model, even for the non-text models, like chat models, or Codey models.
</remarks>
</member>
<member name="T:Mscc.GenerativeAI.ComputeTokensResponse">
<summary>
Response message for ComputeTokens RPC call.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ComputeTokensResponse.TokensInfo">
<summary>
Lists of tokens info from the input.
</summary>
<remarks>
A ComputeTokensRequest could have multiple instances with a prompt in each instance.
We also need to return lists of tokens info for the request with multiple instances.
</remarks>
</member>
<member name="T:Mscc.GenerativeAI.Content">
<summary>
The base structured datatype containing multipart content of a message.
Ref: https://ai.google.dev/api/rest/v1beta/Content
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Content.Parts">
<summary>
Ordered Parts that constitute a single message. Parts may have different MIME types.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Content.Role">
<summary>
Optional. The producer of the content. Must be either 'user' or 'model'.
Useful to set for multi-turn conversations, otherwise can be left blank or unset.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Content.PartTypes">
<summary>
Ordered Parts that constitute a single message. Parts may have different MIME types.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Content.ETag">
<summary>
The ETag of the item.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.Content.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.Content"/> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.Content.#ctor(System.String)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.Content"/> class.
</summary>
<param name="text">String to process.</param>
</member>
<member name="M:Mscc.GenerativeAI.Content.#ctor(Mscc.GenerativeAI.FileData)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.Content"/> class.
</summary>
<param name="file">File to process.</param>
</member>
<member name="T:Mscc.GenerativeAI.ContentResponse">
<summary>

</summary>
</member>
<member name="M:Mscc.GenerativeAI.ContentResponse.#ctor">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ContentResponse"/> class.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.ContentResponse.#ctor(System.String,System.String)">
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ContentResponse"/> class.
</summary>
<param name="text">String to process.</param>
<param name="role">Role of the content. Must be either 'user' or 'model'.</param>
<exception cref="T:System.ArgumentException">Thrown when <paramref name="text"/> or <paramref name="role"/> is empty or null.</exception>
</member>
<member name="T:Mscc.GenerativeAI.ContentEmbedding">
<summary>
A list of floats representing an embedding.
Ref: https://ai.google.dev/api/rest/v1beta/ContentEmbedding
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ContentEmbedding.Values">
<summary>
The embedding values.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.ContentFilter">
<summary>
Content filtering metadata associated with processing a single request.
Ref: https://ai.google.dev/api/rest/v1beta/ContentFilter
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ContentFilter.BlockReason">
<summary>
Output only. The reason content was blocked during request processing.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ContentFilter.Message">
<summary>
A string that describes the filtering behavior in more detail.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.ContentTypeInfo">
<summary>
Detailed Content-Type information from Scotty. The Content-Type of the media will typically be filled in by the header or Scotty's best_guess, but this extended information provides the backend with more information so that it can make a better decision if needed. This is only used on media upload requests from Scotty.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ContentTypeInfo.FromBytes">
<summary>
The content type of the file derived by looking at specific bytes (i.e. \"magic bytes\") of the actual file.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ContentTypeInfo.FromUrlPath">
<summary>
The content type of the file derived from the file extension of the URL path. The URL path is assumed to represent a file name (which is typically only true for agents that are providing a REST API).
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ContentTypeInfo.FromHeader">
<summary>
The content type of the file as specified in the request headers, multipart headers, or RUPIO start request.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ContentTypeInfo.FromFileName">
<summary>
The content type of the file derived from the file extension of the original file name used by the client.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ContentTypeInfo.BestGuess">
<summary>
Scotty's best guess of what the content type of the file is.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.CopyModelRequest">
<summary>
Request to copy a model.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CopyModelRequest.SourceModel">
<summary>
The Google Cloud path of the source model.
</summary>
<remarks>
The path is based on "projects/SOURCE_PROJECT_ID/locations/SOURCE_LOCATION/models/SOURCE_MODEL_ID[@VERSION_ID]"
</remarks>
</member>
<member name="T:Mscc.GenerativeAI.CopyModelResponse">
<summary>
The copied model.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CopyModelResponse.Name">
<summary>

</summary>
</member>
<member name="P:Mscc.GenerativeAI.CopyModelResponse.Metadata">
<summary>

</summary>
</member>
<member name="T:Mscc.GenerativeAI.ListCorporaResponse">
<summary>
Response from `ListCorpora` containing a paginated list of `Corpora`. The results are sorted by ascending `corpus.create_time`.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ListCorporaResponse.Corpora">
<summary>
The returned corpora.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ListCorporaResponse.NextPageToken">
<summary>
A token, which can be sent as `page_token` to retrieve the next page.
If this field is omitted, there are no more pages.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.Corpus">
<summary>
A `Corpus` is a collection of `Document`s. A project can create up to 5 corpora.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Corpus.Name">
<summary>
Immutable. Identifier. The `Corpus` resource name. The ID (name excluding the \"corpora/\" prefix) can contain up to 40 characters that are lowercase alphanumeric or dashes (-). The ID cannot start or end with a dash. If the name is empty on create, a unique name will be derived from `display_name` along with a 12 character random suffix. Example: `corpora/my-awesome-corpora-123a456b789c`
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Corpus.DisplayName">
<summary>
Optional. The human-readable display name for the `Corpus`. The display name must be no more than 512 characters in length, including spaces. Example: \"Docs on Semantic Retriever\"
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Corpus.CreateTime">
<summary>
Output only. The Timestamp of when the `Corpus` was created.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Corpus.UpdateTime">
<summary>
Output only. The Timestamp of when the `Corpus` was last updated.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.CountTokensResponse">
<summary>
A response from `CountTokens`. It returns the model's `token_count` for the `prompt`.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CountTokensResponse.TotalTokens">
<summary>
The total number of tokens counted across all instances from the request.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CountTokensResponse.TokenCount">
<summary>
The total number of tokens counted across all instances from the request.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CountTokensResponse.TotalBillableCharacters">
<summary>
The total number of billable characters counted across all instances from the request.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CountTokensResponse.CachedContentTokenCount">
<summary>
Number of tokens in the cached part of the prompt (the cached content).
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CountTokensResponse.PromptTokensDetails">
<summary>
Output only. List of modalities that were processed in the request input.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.CreateTunedModelRequest">
<summary>
Request to create a tuned model.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CreateTunedModelRequest.DisplayName">
<summary>
The name to display for this model in user interfaces. The display name must be up to 40 characters including spaces.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CreateTunedModelRequest.BaseModel">
<summary>
The name of the Model to tune. Example: models/text-bison-001
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CreateTunedModelRequest.TuningTask">
<summary>
Tuning tasks that create tuned models.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.CreateTunedModelRequest.#ctor">
<summary>
Constructor.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.CreateTunedModelRequest.#ctor(System.String,System.String,System.Collections.Generic.List{Mscc.GenerativeAI.TuningExample},Mscc.GenerativeAI.HyperParameters)">
<summary>
Creates a request for a tuned model.
</summary>
<param name="model">Model to use.</param>
<param name="name">Name of the tuned model.</param>
<param name="dataset">Dataset for training or validation.</param>
<param name="parameters">Immutable. Hyperparameters controlling the tuning process. If not provided, default values will be used.</param>
<exception cref="T:System.ArgumentNullException"></exception>
</member>
<member name="T:Mscc.GenerativeAI.CreateTunedModelResponse">
<summary>
Response of a newly created tuned model.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CreateTunedModelMetadata.TunedModel">
<summary>
A fine-tuned model created using ModelService.CreateTunedModel.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.CreateTuningJobRequest">
<summary>

</summary>
</member>
<member name="P:Mscc.GenerativeAI.CreateTuningJobRequest.BaseModel">
<summary>
Optional. Name of the foundation model to tune.
</summary>
<remarks>
Supported values: gemini-1.5-pro-002, gemini-1.5-flash-002, and gemini-1.0-pro-002.
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.CreateTuningJobRequest.SupervisedTuningSpec">
<summary>

</summary>
</member>
<member name="P:Mscc.GenerativeAI.CreateTuningJobRequest.TunedModelDisplayName">
<summary>
Optional. A display name for the tuned model. If not set, a random name is generated.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.CreateTuningJobRequest.#ctor">
<summary>
Creates an instance of <see cref="P:Mscc.GenerativeAI.CreateTuningJobRequest.SupervisedTuningSpec"/>.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.CreateTuningJobRequest.#ctor(System.String,System.String,System.String,System.String,Mscc.GenerativeAI.HyperParameters)">
<summary>
Creates a request for tuning a model.
</summary>
<param name="model">Model to use.</param>
<param name="datasetUri">URI of dataset for training.</param>
<param name="validationUri">URI of dataset for validation.</param>
<param name="displayName"></param>
<param name="parameters">Immutable. Hyperparameters controlling the tuning process. If not provided, default values will be used.</param>
<exception cref="T:System.ArgumentException">Thrown when <paramref name="model"/> is empty or null.</exception>
<exception cref="T:System.ArgumentException">Thrown when <paramref name="datasetUri"/> is empty or null.</exception>
</member>
<member name="T:Mscc.GenerativeAI.Credentials">
<summary>
Represents the credentials used to authenticate with the API.
It de/serializes the content of the client_secret.json file for OAuth 2.0
using either Desktop or Web approach, and supports Service Accounts on Google Cloud Platform.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Credentials.Web">
<summary>
Client secrets for web applications.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Credentials.Installed">
<summary>
Client secrets for desktop applications.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Credentials.Account">
<summary>
Account used in Google Cloud Platform.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Credentials.RefreshToken">
<summary>
Refresh token for the API to retrieve a new access token.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Credentials.Type">
<summary>
Type of account in Google Cloud Platform.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Credentials.UniverseDomain">
<summary>
URI of the domain.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Credentials.ProjectId">
<summary>
Project ID in Google Cloud Platform.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Credentials.QuotaProjectId">
<summary>
Project ID (quota) in Google Cloud Platform.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.ClientSecrets">
<summary>
Represents the content of a client_secret.json file used in Google Cloud Platform
to authenticate a user or service account.
</summary>
</member>
|
||
<member name="P:Mscc.GenerativeAI.ClientSecrets.ClientId">
<summary>
Client ID
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ClientSecrets.ClientSecret">
<summary>
Client secret
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ClientSecrets.RedirectUris">
<summary>
List of Callback URLs in case of a web application.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ClientSecrets.AuthUri">
<summary>
Authentication endpoint.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ClientSecrets.AuthProviderX509CertUrl">
<summary>
URL to an X509 certificate provider.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ClientSecrets.TokenUri">
<summary>
URI of the token endpoint.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.CustomMetadata">
<summary>
User provided metadata stored as key-value pairs.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CustomMetadata.Key">
<summary>
Required. The key of the metadata to store.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CustomMetadata.NumericValue">
<summary>
The numeric value of the metadata to store.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CustomMetadata.StringValue">
<summary>
The string value of the metadata to store.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.CustomMetadata.StringListValue">
<summary>
The StringList value of the metadata to store.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.DiffChecksumsResponse">
<summary>
Backend response for a Diff get checksums response. For details on the Scotty Diff protocol, visit http://go/scotty-diff-protocol.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffChecksumsResponse.ObjectVersion">
<summary>
The object version of the object the checksums are being returned for.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffChecksumsResponse.ObjectSizeBytes">
<summary>
The total size of the server object.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffChecksumsResponse.ChunkSizeBytes">
<summary>
The chunk size of checksums. Must be a multiple of 256KB.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffChecksumsResponse.ObjectLocation">
<summary>
If set, calculate the checksums based on the contents and return them to the caller.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffChecksumsResponse.ChecksumsLocation">
<summary>
Exactly one of these fields must be populated. If checksums_location is filled, the server will return the corresponding contents to the user. If object_location is filled, the server will calculate the checksums based on the content there and return that to the user. For details on the format of the checksums, see http://go/scotty-diff-protocol.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.DiffDownloadResponse">
<summary>
Backend response for a Diff download response. For details on the Scotty Diff protocol, visit http://go/scotty-diff-protocol.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffDownloadResponse.ObjectLocation">
<summary>
The original object location.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.DiffUploadRequest">
<summary>
A Diff upload request. For details on the Scotty Diff protocol, visit http://go/scotty-diff-protocol.
</summary>
</member>
|
||
<member name="P:Mscc.GenerativeAI.DiffUploadRequest.ObjectVersion">
<summary>
The object version of the object that is the base version the
incoming diff script will be applied to. This field will always be filled in.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffUploadRequest.ObjectInfo">
<summary>
The location of the new object.
Agents must clone the object located here, as the upload server
will delete the contents once a response is received.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffUploadRequest.ChecksumsInfo">
<summary>
The location of the checksums for the new object.
Agents must clone the object located here, as the upload server
will delete the contents once a response is received.
For details on the format of the checksums, see http://go/scotty-diff-protocol.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.DiffUploadResponse">
<summary>
Backend response for a Diff upload request. For details on the Scotty Diff protocol, visit http://go/scotty-diff-protocol.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffUploadResponse.ObjectVersion">
<summary>
The object version of the object at the server.
Must be included in the end notification response.
The version in the end notification response must correspond
to the new version of the object that is now stored at the server, after the upload.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffUploadResponse.OriginalObject">
<summary>
The location of the original file for a diff upload request. Must be filled in if responding to an upload start notification.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.DiffVersionResponse">
<summary>
Backend response for a Diff get version response. For details on the Scotty Diff protocol, visit http://go/scotty-diff-protocol.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffVersionResponse.ObjectVersion">
<summary>
The object version of the object the checksums are being returned for.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DiffVersionResponse.ObjectSizeBytes">
<summary>
The total size of the server object.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.DistributionBucket">
<summary>

</summary>
</member>
<member name="P:Mscc.GenerativeAI.DistributionBucket.Count">
<summary>

</summary>
</member>
<member name="P:Mscc.GenerativeAI.DistributionBucket.Left">
<summary>

</summary>
</member>
<member name="P:Mscc.GenerativeAI.DistributionBucket.Right">
<summary>

</summary>
</member>
<member name="T:Mscc.GenerativeAI.DownloadFileResponse">
<summary>
Response for `DownloadFile`.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.DownloadParameters">
<summary>
Parameters specific to media downloads.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DownloadParameters.AllowGzipCompression">
<summary>
A boolean to be returned in the response to Scotty. Allows/disallows gzip encoding of the payload content when the server thinks it's advantageous (hence, does not guarantee compression) which allows Scotty to GZip the response to the client.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DownloadParameters.IgnoreRange">
<summary>
Determining whether or not Apiary should skip the inclusion of any Content-Range header on its response to Scotty.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.Duration">
<summary>
A Duration represents a signed, fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution.
</summary>
<remarks>
It is independent of any calendar and concepts like "day" or "month".
It is related to Timestamp in that the difference between two Timestamp values is a Duration and
it can be added or subtracted from a Timestamp. Range is approximately +-10,000 years.
<seealso href="https://protobuf.dev/reference/protobuf/google.protobuf/#duration"/>
</remarks>
</member>
|
||
<member name="P:Mscc.GenerativeAI.Duration.Seconds">
<summary>
Seconds of a duration.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Duration.Nanos">
<summary>
Nanoseconds of a duration.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.DynamicRetrievalConfig">
<summary>
Describes the options to customize dynamic retrieval.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DynamicRetrievalConfig.Mode">
<summary>
The mode of the predictor to be used in dynamic retrieval.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.DynamicRetrievalConfig.DynamicThreshold">
<summary>
The threshold to be used in dynamic retrieval. If not set, a system default value is used.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.EmbedContentRequest">
<summary>
Request containing the <see cref="P:Mscc.GenerativeAI.EmbedContentRequest.Content"/> for the model to embed.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.EmbedContentRequest.Model">
<summary>
Required. The model's resource name. This serves as an ID for the Model to use. This name should match a model name returned by the `ListModels` method. Format: `models/{model}`
</summary>
</member>
<member name="P:Mscc.GenerativeAI.EmbedContentRequest.Content">
<summary>
Required. The content to embed. Only the `parts.text` fields will be counted.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.EmbedContentRequest.TaskType">
<summary>
Optional. Optional task type for which the embeddings will be used.
</summary>
<remarks>
Can only be set for `models/embedding-001`.
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.EmbedContentRequest.Title">
<summary>
Optional. An optional title for the text.
</summary>
<remarks>
Only applicable when TaskType is `RETRIEVAL_DOCUMENT`.
Note: Specifying a `title` for `RETRIEVAL_DOCUMENT` provides better quality embeddings for retrieval.
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.EmbedContentRequest.OutputDimensionality">
<summary>
Optional. Optional reduced dimension for the output embedding.
</summary>
<remarks>
If set, excessive values in the output embedding are truncated from the end.
Supported by newer models since 2024, and the earlier model (`models/embedding-001`) cannot specify this value.
</remarks>
</member>
<member name="M:Mscc.GenerativeAI.EmbedContentRequest.#ctor">
<summary>

</summary>
</member>
<member name="M:Mscc.GenerativeAI.EmbedContentRequest.#ctor(System.String)">
<summary>

</summary>
<param name="prompt"></param>
</member>
<member name="M:Mscc.GenerativeAI.EmbedContentRequest.#ctor(System.Collections.Generic.List{System.String})">
<summary>

</summary>
<param name="prompts"></param>
</member>
<member name="T:Mscc.GenerativeAI.EmbedContentResponse">
<summary>
The response to an EmbedContentRequest.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.EmbedContentResponse.Candidates">
<summary>
Output only. Generated candidates.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.EmbedContentResponse.Embedding">
<summary>
Output only. The embedding generated from the input content.
</summary>
</member>
|
||
<member name="P:Mscc.GenerativeAI.EmbedContentResponse.Embeddings">
<summary>
Output only. The embeddings for each request, in the same order as provided in the batch request.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.Embedding">
<summary>
A list of floats representing an embedding.
Ref: https://ai.google.dev/api/rest/v1beta/Embedding
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Embedding.Value">
<summary>
The embedding values.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.EmbedTextRequest">
<summary>

</summary>
</member>
<member name="P:Mscc.GenerativeAI.EmbedTextRequest.Text">
<summary>
Optional. The free-form input text that the model will turn into an embedding.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.EmbedTextRequest.#ctor">
<summary>
Default constructor.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.EmbedTextRequest.#ctor(System.String)">
<summary>

</summary>
<param name="text">Optional. The free-form input text that the model will turn into an embedding.</param>
</member>
<member name="T:Mscc.GenerativeAI.BatchEmbedTextRequest">
<summary>

</summary>
</member>
<member name="P:Mscc.GenerativeAI.BatchEmbedTextRequest.Texts">
<summary>
Optional. The free-form input texts that the model will turn into an embedding. The current limit is 100 texts, over which an error will be thrown.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.BatchEmbedTextRequest.Requests">
<summary>
Optional. Embed requests for the batch. Only one of texts or requests can be set.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.EmbedTextResponse">
<summary>
The response to an EmbedTextRequest.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.EmbedTextResponse.Embedding">
<summary>
Output only. The embedding generated from the input text.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.EmbedTextResponse.Embeddings">
<summary>
Output only. The embeddings generated from the input text.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.Example">
<summary>
An input/output example used to instruct the Model.
It demonstrates how the model should respond or format its response.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Example.Input">
<summary>
Required. An example of an input Message from the user.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.Example.Output">
<summary>
Required. An example of what the model should output given the input.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.ExecutableCode">
<summary>
Code generated by the model that is meant to be executed, and the result returned to the model.
</summary>
<remarks>
Only generated when using the `CodeExecution` tool, in which the code will be automatically executed,
and a corresponding `CodeExecutionResult` will also be generated.
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.ExecutableCode.Language">
<summary>
Required. Programming language of the `code`.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.ExecutableCode.Code">
<summary>
Required. The code to be executed.
</summary>
</member>
|
||
<member name="T:Mscc.GenerativeAI.FileData">
<summary>
URI based data.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileData.FileUri">
<summary>
URI of the file of the image or video to include in the prompt.
</summary>
<remarks>
The bucket that stores the file must be in the same Google Cloud project that's sending the request. You must also specify MIMETYPE.
Size limit: 20MB
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.FileData.MimeType">
<summary>
The IANA standard MIME type of the source data.
</summary>
<remarks>
The media type of the image, PDF, or video specified in the data or fileUri fields.
Acceptable values include the following:
"image/png", "image/jpeg", "image/heic", "image/heif", "image/webp".
application/pdf
video/mov
video/mpeg
video/mp4
video/mpg
video/avi
video/wmv
video/mpegps
video/flv
Maximum video length: 2 minutes. No limit on image resolution.
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.FileRequest.DisplayName">
<summary>
Optional. The human-readable display name for the File. The display name must be no more than 512 characters in length, including spaces. Example: "Welcome Image"
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileRequest.Name">
<summary>
Optional. The resource name of the File to create.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.FileResource">
<summary>
A file resource of the File API.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.Name">
<summary>
Immutable. Identifier. The File resource name.
The ID (name excluding the "files/" prefix) can contain up to 40 characters that are
lowercase alphanumeric or dashes (-). The ID cannot start or end with a dash.
If the name is empty on create, a unique name will be generated. Example: files/123-456
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.DisplayName">
<summary>
Optional. The human-readable display name for the File. The display name must be no more than 512 characters
in length, including spaces. Example: "Welcome Image"
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.MimeType">
<summary>
Output only. MIME type of the file.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.SizeBytes">
<summary>
Output only. Size of the file in bytes.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.CreateTime">
<summary>
Output only. The timestamp of when the File was created.
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.UpdateTime">
<summary>
Output only. The timestamp of when the File was last updated.
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.ExpirationTime">
<summary>
Output only. The timestamp of when the File will be deleted. Only set if the File is scheduled to expire.
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.Sha256Hash">
<summary>
Output only. SHA-256 hash of the uploaded bytes. A base64-encoded string.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.Uri">
<summary>
Output only. The URI of the File.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.State">
<summary>
Output only. Processing state of the File.
</summary>
</member>
|
||
<member name="P:Mscc.GenerativeAI.FileResource.Error">
<summary>
Output only. Error status if File processing failed.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.Metadata">
<summary>
Output only. Metadata for a video.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.DownloadUri">
<summary>
Output only. The download uri of the `File`.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FileResource.Source">
<summary>
Source of the File.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.FunctionCall">
<summary>
A predicted FunctionCall returned from the model that contains a string
representing the FunctionDeclaration.name with the parameters and their values.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FunctionCall.Name">
<summary>
Required. The name of the function to call.
Matches [FunctionDeclaration.name].
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FunctionCall.Args">
<summary>
Optional. The function parameters and values in JSON object format.
See [FunctionDeclaration.parameters] for parameter details.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FunctionCall.Id">
<summary>
Optional. The unique id of the function call.
If populated, the client to execute the `function_call` and return the response with the matching `id`.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.FunctionCallingConfig">
<summary>
Configuration for specifying function calling behavior.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FunctionCallingConfig.Mode">
<summary>
Optional. Specifies the mode in which function calling should execute. If unspecified, the default value will be set to AUTO.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FunctionCallingConfig.AllowedFunctionNames">
<summary>
Optional. A set of function names that, when provided, limits the functions the model will call. This should only be set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.FunctionDeclaration">
<summary>
Structured representation of a function declaration as defined by the OpenAPI 3.03 specification. Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a Tool by the model and executed by the client.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FunctionDeclaration.Name">
<summary>
Required. The name of the function to call.
Must start with a letter or an underscore.
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 63.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FunctionDeclaration.Description">
<summary>
Required. A brief description of the function.
Description and purpose of the function.
Model uses it to decide how and whether to call the function.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FunctionDeclaration.Parameters">
<summary>
Optional. Describes the parameters to this function.
</summary>
<remarks>
Reflects the Open API 3.03 Parameter Object string Key: the name of the parameter.
Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter.
For function with no parameters, this can be left unset. Example with 1 required and 1 optional parameter:
type: OBJECT
properties:
param1:
type: STRING
param2:
type: INTEGER
required: -
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.FunctionDeclaration.Response">
<summary>
Optional. Describes the output from this function in JSON Schema format.
</summary>
<remarks>
Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function.
</remarks>
</member>
|
||
<member name="T:Mscc.GenerativeAI.FunctionResponse">
<summary>
The result output of a FunctionCall that contains a string
representing the FunctionDeclaration.name and a structured
JSON object containing any output from the function call.
It is used as context to the model.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FunctionResponse.Name">
<summary>
Required. The name of the function to call.
Matches [FunctionDeclaration.name] and [FunctionCall.name].
</summary>
</member>
<member name="P:Mscc.GenerativeAI.FunctionResponse.Response">
<summary>
Required. The function response in JSON object format.
</summary>
<remarks>
Use "output" key to specify function output and "error" key to specify error details (if any).
If "output" and "error" keys are not specified, then whole "response" is treated as function output.
</remarks>
</member>
<member name="P:Mscc.GenerativeAI.FunctionResponse.Id">
<summary>
Optional. The id of the function call this response is for.
Populated by the client to match the corresponding function call `id`.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.GenerateAnswerRequest">
<summary>
Request to generate a grounded answer from the model.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.GenerateAnswerRequest.Contents">
<summary>
Required. The content of the current conversation with the model. For single-turn queries, this is a single question to answer. For multi-turn queries, this is a repeated field that contains conversation history and the last Content in the list containing the question.
Note: models.generateAnswer currently only supports queries in English.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.GenerateAnswerRequest.AnswerStyle">
<summary>
Required. Style in which answers should be returned.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.GenerateAnswerRequest.SafetySettings">
<summary>
Optional. A list of unique SafetySetting instances for blocking unsafe content.
This will be enforced on the GenerateAnswerRequest.Contents and GenerateAnswerResponse.candidate. There should not be more than one setting for each SafetyCategory type. The API will block any contents and responses that fail to meet the thresholds set by these settings. This list overrides the default settings for each SafetyCategory specified in the safetySettings. If there is no SafetySetting for a given SafetyCategory provided in the list, the API will use the default safety setting for that category. Harm categories HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT are supported.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.GenerateAnswerRequest.InlinePassages">
<summary>
Passages provided inline with the request.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.GenerateAnswerRequest.SemanticRetriever">
<summary>
Content retrieved from resources created via the Semantic Retriever API.
</summary>
</member>
<member name="P:Mscc.GenerativeAI.GenerateAnswerRequest.Temperature">
<summary>
Optional. Controls the randomness of the output.
Values can range from [0.0,1.0], inclusive. A value closer to 1.0 will produce responses that are more varied and creative, while a value closer to 0.0 will typically result in more straightforward responses from the model. A low temperature (~0.2) is usually recommended for Attributed-Question-Answering use cases.
</summary>
</member>
<member name="M:Mscc.GenerativeAI.GenerateAnswerRequest.#ctor">
<summary>
Default constructor.
</summary>
</member>
<member name="T:Mscc.GenerativeAI.SemanticRetrieverConfig">
<summary>
Configuration for retrieving grounding content from a Corpus or Document created using the Semantic Retriever API.
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SemanticRetrieverConfig.Source">
|
||
<summary>
|
||
Required. Name of the resource for retrieval, e.g. corpora/123 or corpora/123/documents/abc.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SemanticRetrieverConfig.Query">
|
||
<summary>
|
||
Required. Query to use for similarity matching Chunks in the given resource.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SemanticRetrieverConfig.MetadataFilters">
|
||
<summary>
|
||
Optional. Filters for selecting Documents and/or Chunks from the resource.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SemanticRetrieverConfig.MaxChunkCount">
|
||
<summary>
|
||
Optional. Maximum number of relevant Chunks to retrieve.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SemanticRetrieverConfig.MinimumRelevanceScore">
|
||
<summary>
|
||
Optional. Minimum relevance score for retrieved relevant Chunks.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GroundingPassages">
|
||
<summary>
|
||
A repeated list of passages.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingPassages.Passages">
|
||
<summary>
|
||
List of passages.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GroundingPassage">
|
||
<summary>
|
||
Passage included inline with a grounding configuration.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingPassage.Id">
|
||
<summary>
|
||
Identifier for the passage for attributing this passage in grounded answers.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingPassage.Content">
|
||
<summary>
|
||
Content of the passage.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerateAnswerResponse">
|
||
<summary>
|
||
Response from the model for a grounded answer.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateAnswerResponse.Text">
|
||
<summary>
|
||
Responded text information of first candidate.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateAnswerResponse.Answer">
|
||
<summary>
|
||
Candidate answer from the model.
|
||
Note: The model always attempts to provide a grounded answer, even when the answer is unlikely to be answerable from the given passages. In that case, a low-quality or ungrounded answer may be provided, along with a low answerableProbability.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateAnswerResponse.AnswerableProbability">
|
||
<summary>
|
||
Output only. The model's estimate of the probability that its answer is correct and grounded in the input passages.
|
||
A low answerableProbability indicates that the answer might not be grounded in the sources.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateAnswerResponse.InputFeedback">
|
||
<summary>
|
||
Output only. Feedback related to the input data used to answer the question, as opposed to model-generated response to the question.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateAnswerResponse.ToString">
|
||
<summary>
|
||
A convenience overload to easily access the responded text.
|
||
</summary>
|
||
<returns>The responded text information of first candidate.</returns>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerateContentRequest">
|
||
<summary>
|
||
Request to generate a completion from the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentRequest.Model">
|
||
<summary>
|
||
Required. The name of the Model to use for generating the completion.
|
||
Format: models/{model}.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentRequest.Contents">
|
||
<summary>
|
||
Required. The content of the current conversation with the model.
|
||
For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentRequest.GenerationConfig">
|
||
<summary>
|
||
Optional. Configuration options for model generation and outputs.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentRequest.SafetySettings">
|
||
<summary>
|
||
Optional. A list of unique `<see cref="T:Mscc.GenerativeAI.SafetySetting"/>` instances for blocking unsafe content.
|
||
</summary>
|
||
<remarks>
|
||
This will be enforced on the `GenerateContentRequest.contents` and `GenerateContentResponse.candidates`.
|
||
There should not be more than one setting for each `SafetyCategory` type.
|
||
The API will block any contents and responses that fail to meet the thresholds set by these settings.
|
||
This list overrides the default settings for each `SafetyCategory` specified in the safety_settings.
|
||
If there is no `SafetySetting` for a given `SafetyCategory` provided in the list, the API will use the
|
||
default safety setting for that category.
|
||
Harm categories HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, HARM_CATEGORY_CIVIC_INTEGRITY are supported.
|
||
Refer to the [guide](https://ai.google.dev/gemini-api/docs/safety-settings) for detailed information on
|
||
available safety settings. Also refer to the [Safety guidance](https://ai.google.dev/gemini-api/docs/safety-guidance)
|
||
to learn how to incorporate safety considerations in your AI applications.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentRequest.SystemInstruction">
|
||
<summary>
|
||
Optional. Available for gemini-1.5-pro and gemini-1.0-pro-002.
|
||
Instructions for the model to steer it toward better performance. For example, "Answer as concisely as possible" or "Don't use technical terms in your response".
|
||
The text strings count toward the token limit.
|
||
The role field of systemInstruction is ignored and doesn't affect the performance of the model.
|
||
</summary>
|
||
<remarks>
|
||
Note: only text should be used in parts and content in each part will be in a separate paragraph.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentRequest.ToolConfig">
|
||
<summary>
|
||
Optional. Configuration of tools used by the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentRequest.Tools">
|
||
<summary>
|
||
Optional. A list of Tools the model may use to generate the next response.
|
||
A <see cref="T:Mscc.GenerativeAI.Tool"/> is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. The only supported tool is currently Function.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentRequest.CachedContent">
|
||
<summary>
|
||
Optional. The name of the content cached to use as context to serve the prediction.
|
||
Format: cachedContents/{cachedContent}
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentRequest.ETag">
|
||
<summary>
|
||
The ETag of the item.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentRequest.Labels">
|
||
<summary>
|
||
Optional. The labels with user-defined metadata for the request.
|
||
</summary>
|
||
<remarks>
|
||
It is used for billing and reporting only.
|
||
Label keys and values can be no longer than 63 characters (Unicode codepoints) and
|
||
can only contain lowercase letters, numeric characters, underscores, and dashes.
|
||
International characters are allowed. Label values are optional. Label keys must start with a letter.
|
||
</remarks>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateContentRequest.#ctor">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerateContentRequest"/> class.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateContentRequest.#ctor(System.String,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.Content,Mscc.GenerativeAI.ToolConfig)">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerateContentRequest"/> class.
|
||
</summary>
|
||
<param name="prompt">String to process.</param>
|
||
<param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
|
||
<param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
|
||
<param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
|
||
<param name="systemInstruction">Optional. </param>
|
||
<param name="toolConfig">Optional. Configuration of tools.</param>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="prompt"/> is <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateContentRequest.#ctor(System.Collections.Generic.List{Mscc.GenerativeAI.IPart},Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.Content,Mscc.GenerativeAI.ToolConfig)">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerateContentRequest"/> class.
|
||
</summary>
|
||
<param name="parts"></param>
|
||
<param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
|
||
<param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
|
||
<param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
|
||
<param name="systemInstruction">Optional. </param>
|
||
<param name="toolConfig">Optional. Configuration of tools.</param>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="parts"/> is <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateContentRequest.#ctor(Mscc.GenerativeAI.FileResource,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.Content,Mscc.GenerativeAI.ToolConfig)">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerateContentRequest"/> class.
|
||
</summary>
|
||
<param name="file">The media file resource.</param>
|
||
<param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
|
||
<param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
|
||
<param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
|
||
<param name="systemInstruction">Optional. </param>
|
||
<param name="toolConfig">Optional. Configuration of tools.</param>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="file"/> is <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateContentRequest.#ctor(System.Collections.Generic.List{Mscc.GenerativeAI.Part},Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.Content,Mscc.GenerativeAI.ToolConfig)">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerateContentRequest"/> class.
|
||
</summary>
|
||
<param name="parts"></param>
|
||
<param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
|
||
<param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
|
||
<param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
|
||
<param name="systemInstruction">Optional. </param>
|
||
<param name="toolConfig">Optional. Configuration of tools.</param>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="parts"/> is <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateContentRequest.AddContent(Mscc.GenerativeAI.Content)">
|
||
<summary>
|
||
Adds a <see cref="T:Mscc.GenerativeAI.Content"/> object to the request.
|
||
</summary>
|
||
<param name="content"></param>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateContentRequest.AddMedia(System.String,System.String,System.Boolean)">
|
||
<summary>
|
||
Adds a media file or a base64-encoded string to the request.
|
||
</summary>
|
||
<remarks>
|
||
Depending on the <paramref name="useOnline"/> flag, either an <see cref="T:Mscc.GenerativeAI.InlineData"/>
|
||
or <see cref="T:Mscc.GenerativeAI.FileData"/> part will be added to the request.
|
||
Standard URLs are supported and the resource is downloaded if <paramref name="useOnline"/> is <see langword="false"/>.
|
||
</remarks>
|
||
<param name="uri">The URI of the media file.</param>
|
||
<param name="mimeType">The IANA standard MIME type to check.</param>
|
||
<param name="useOnline">Flag indicating whether the file shall be used online or read from the local file system.</param>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="uri"/> is <see langword="null"/>.</exception>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateContentRequest.AddMedia(Mscc.GenerativeAI.FileResource)">
<summary>
Adds a media file resource to the request.
</summary>
<param name="file">The media file resource.</param>
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="file"/> is <see langword="null"/>.</exception>
<exception cref="T:System.NotSupportedException">Thrown when the MIME type of <paramref name="file"/> is not supported by the API.</exception>
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateContentRequest.AddPart(Mscc.GenerativeAI.IPart,System.Int32)">
<summary>
Adds a <see cref="T:Mscc.GenerativeAI.Part"/> object to the Content at the specified <paramref name="index"/>.
</summary>
<param name="part">Part object to add to the <see cref="P:Mscc.GenerativeAI.GenerateContentRequest.Contents"/> collection.</param>
<param name="index">Zero-based index of element in the Contents collection.</param>
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerateContentResponse">
|
||
<summary>
|
||
Response from the model supporting multiple candidates.
|
||
Ref: https://ai.google.dev/api/rest/v1beta/GenerateContentResponse
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentResponse.Text">
|
||
<summary>
|
||
A convenience property to get the responded text information of first candidate.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentResponse.Candidates">
|
||
<summary>
|
||
Output only. Generated Candidate responses from the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentResponse.PromptFeedback">
|
||
<summary>
|
||
Output only. Content filter results for a prompt sent in the request.
|
||
Note: Sent only in the first stream chunk.
|
||
Only happens when no candidates were generated due to content violations.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentResponse.UsageMetadata">
|
||
<summary>
|
||
Usage metadata about the response(s).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateContentResponse.ModelVersion">
|
||
<summary>
|
||
Output only. The model version used to generate the response.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateContentResponse.ToString">
|
||
<summary>
|
||
A convenience overload to easily access the responded text.
|
||
</summary>
|
||
<returns>The responded text information of first candidate.</returns>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GeneratedFile">
|
||
<summary>
|
||
A file generated on behalf of a user.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GeneratedFile.Name">
|
||
<summary>
|
||
Identifier. The name of the generated file. Example: `generatedFiles/abc-123`
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GeneratedFile.MimeType">
|
||
<summary>
|
||
MIME type of the generatedFile.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GeneratedFile.Error">
|
||
<summary>
|
||
Error details if the GeneratedFile ends up in the STATE_FAILED state.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GeneratedFile.State">
|
||
<summary>
|
||
Output only. The state of the GeneratedFile.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GeneratedFile.Blob">
<summary>
The blob reference of the generated file to download.
Only set when the GeneratedFiles.get request url has the "?alt=media" query param.
</summary>
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerateEmbeddingsEmbedding">
|
||
<summary>
|
||
An embedding vector generated by the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateEmbeddingsEmbedding.Embedding">
<summary>
Output only. The embedding vector generated for the input.
Can be either a list of floats or a base64 string encoding a list of floats with C-style layout (Numpy compatible).
</summary>
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateEmbeddingsEmbedding.Index">
|
||
<summary>
|
||
Output only. Index of the embedding in the list of embeddings.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateEmbeddingsEmbedding.Object">
<summary>
Output only. Always "embedding", required by the SDK.
</summary>
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerateEmbeddingsRequest">
|
||
<summary>
|
||
Request for embedding generation.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateEmbeddingsRequest.Model">
|
||
<summary>
|
||
Required. Model to generate the embeddings for.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateEmbeddingsRequest.Input">
|
||
<summary>
|
||
Required. The input to generate embeddings for.
|
||
Can be a string, or a list of strings.
|
||
The SDK supports a list of numbers and list of list of numbers, but this is not yet implemented.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateEmbeddingsRequest.EncodingFormat">
<summary>
Optional. The format of the encoding.
Must be either "float" or "base64".
</summary>
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateEmbeddingsRequest.Dimensions">
|
||
<summary>
|
||
Optional. Dimensional size of the generated embeddings.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerateEmbeddingsResponse">
|
||
<summary>
|
||
Response for embedding generation.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateEmbeddingsResponse.Model">
|
||
<summary>
|
||
Output only. Model used to generate the embeddings.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateEmbeddingsResponse.Object">
<summary>
Output only. Always "embedding", required by the SDK.
</summary>
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateEmbeddingsResponse.Data">
|
||
<summary>
|
||
Output only. A list of the requested embeddings.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerateMessageRequest">
|
||
<summary>
|
||
Generates a response from the model given an input MessagePrompt.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateMessageRequest.Prompt">
|
||
<summary>
|
||
Required. The free-form input text given to the model as a prompt.
|
||
Given a prompt, the model will generate a TextCompletion response it predicts as the completion of the input text.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateMessageRequest.Temperature">
|
||
<summary>
|
||
Optional. Controls the randomness of predictions.
|
||
Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that expect a true or correct response, while higher temperatures can lead to more diverse or unexpected results. With a temperature of 0, the highest probability token is always selected.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateMessageRequest.TopP">
|
||
<summary>
|
||
Optional. If specified, nucleus sampling will be used.
|
||
Top-p changes how the model selects tokens for output. Tokens are selected from most probable to least until the sum of their probabilities equals the top-p value. For example, if tokens A, B and C have a probability of .3, .2 and .1 and the top-p value is .5, then the model will select either A or B as the next token (using temperature).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateMessageRequest.TopK">
|
||
<summary>
|
||
Optional. If specified, top-k sampling will be used.
|
||
Top-k changes how the model selects tokens for output. A top-k of 1 means that the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the three most probable tokens (using temperature).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateMessageRequest.CandidateCount">
|
||
<summary>
|
||
Optional. Number of generated responses to return.
|
||
This value must be between [1, 8], inclusive. If unset, this will default to 1.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateMessageRequest.#ctor">
|
||
<summary>
|
||
Default constructor.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateMessageRequest.#ctor(System.String)">
|
||
<summary>
|
||
|
||
</summary>
|
||
<param name="prompt"></param>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateMessageResponse.Text">
|
||
<summary>
|
||
Responded text information of first candidate.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateMessageResponse.Candidates">
|
||
<summary>
|
||
Candidate response messages from the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateMessageResponse.Messages">
|
||
<summary>
|
||
The conversation history used by the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateMessageResponse.Filters">
|
||
<summary>
|
||
A set of content filtering metadata for the prompt and response text.
|
||
This indicates which SafetyCategory(s) blocked a candidate from this response, the lowest HarmProbability that triggered a block, and the HarmThreshold setting for that category. This indicates the smallest change to the SafetySettings that would be necessary to unblock at least 1 response.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateMessageResponse.ToString">
|
||
<summary>
|
||
A convenience overload to easily access the responded text.
|
||
</summary>
|
||
<returns>The responded text information of first candidate.</returns>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerateTextRequest">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextRequest.Prompt">
|
||
<summary>
|
||
Required. The free-form input text given to the model as a prompt.
|
||
Given a prompt, the model will generate a TextCompletion response it predicts as the completion of the input text.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextRequest.SafetySettings">
|
||
<summary>
|
||
Optional. A list of unique SafetySetting instances for blocking unsafe content.
|
||
This will be enforced on the GenerateContentRequest.contents and GenerateContentResponse.candidates. There should not be more than one setting for each SafetyCategory type. The API will block any contents and responses that fail to meet the thresholds set by these settings. This list overrides the default settings for each SafetyCategory specified in the safetySettings. If there is no SafetySetting for a given SafetyCategory provided in the list, the API will use the default safety setting for that category. Harm categories HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT are supported.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextRequest.Temperature">
|
||
<summary>
|
||
Optional. Controls the randomness of predictions.
|
||
Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that expect a true or correct response, while higher temperatures can lead to more diverse or unexpected results. With a temperature of 0, the highest probability token is always selected.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextRequest.TopP">
|
||
<summary>
|
||
Optional. If specified, nucleus sampling will be used.
|
||
Top-p changes how the model selects tokens for output. Tokens are selected from most probable to least until the sum of their probabilities equals the top-p value. For example, if tokens A, B and C have a probability of .3, .2 and .1 and the top-p value is .5, then the model will select either A or B as the next token (using temperature).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextRequest.TopK">
|
||
<summary>
|
||
Optional. If specified, top-k sampling will be used.
|
||
Top-k changes how the model selects tokens for output. A top-k of 1 means that the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the three most probable tokens (using temperature).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextRequest.CandidateCount">
|
||
<summary>
|
||
Optional. Number of generated responses to return.
|
||
This value must be between [1, 8], inclusive. If unset, this will default to 1.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextRequest.MaxOutputTokens">
|
||
<summary>
|
||
Optional. The maximum number of output tokens to generate per message.
|
||
Token limit determines the maximum amount of text output from one prompt. A token is approximately four characters.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextRequest.StopSequences">
|
||
<summary>
|
||
Optional. Stop sequences.
|
||
A stop sequence is a series of characters (including spaces) that stops response generation if the model encounters it. The sequence is not included as part of the response. You can add up to five stop sequences.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateTextRequest.#ctor">
|
||
<summary>
|
||
Default constructor.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateTextRequest.#ctor(System.String)">
|
||
<summary>
|
||
|
||
</summary>
|
||
<param name="prompt"></param>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerateTextResponse">
|
||
<summary>
|
||
The response from the model, including candidate completions.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextResponse.Text">
|
||
<summary>
|
||
Responded text information of first candidate.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextResponse.Candidates">
|
||
<summary>
|
||
Candidate responses from the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextResponse.Filters">
|
||
<summary>
|
||
A set of content filtering metadata for the prompt and response text.
|
||
This indicates which SafetyCategory(s) blocked a candidate from this response, the lowest HarmProbability that triggered a block, and the HarmThreshold setting for that category. This indicates the smallest change to the SafetySettings that would be necessary to unblock at least 1 response.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateTextResponse.SafetyFeedback">
|
||
<summary>
|
||
Returns any safety feedback related to content filtering.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateTextResponse.ToString">
|
||
<summary>
|
||
A convenience overload to easily access the responded text.
|
||
</summary>
|
||
<returns>The responded text information of first candidate.</returns>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerationConfig">
|
||
<summary>
|
||
Configuration options for model generation and outputs. Not all parameters may be configurable for every model.
|
||
Ref: https://ai.google.dev/api/rest/v1beta/GenerationConfig
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.Temperature">
|
||
<summary>
|
||
Optional. Controls the randomness of predictions.
|
||
Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that expect a true or correct response, while higher temperatures can lead to more diverse or unexpected results. With a temperature of 0, the highest probability token is always selected.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.TopP">
|
||
<summary>
|
||
Optional. If specified, nucleus sampling will be used.
|
||
Top-p changes how the model selects tokens for output. Tokens are selected from most probable to least until the sum of their probabilities equals the top-p value. For example, if tokens A, B and C have a probability of .3, .2 and .1 and the top-p value is .5, then the model will select either A or B as the next token (using temperature).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.TopK">
|
||
<summary>
|
||
Optional. If specified, top-k sampling will be used.
|
||
Top-k changes how the model selects tokens for output. A top-k of 1 means that the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the three most probable tokens (using temperature).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.CandidateCount">
|
||
<summary>
|
||
Optional. Number of generated responses to return.
|
||
This value must be between [1, 8], inclusive. If unset, this will default to 1.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.MaxOutputTokens">
|
||
<summary>
|
||
Optional. The maximum number of tokens to include in a response candidate.
|
||
If unset, this will default to 1. Please note that this doesn't work for previous generation models (Gemini 1.0 family)
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.StopSequences">
|
||
<summary>
|
||
Optional. Stop sequences.
|
||
A stop sequence is a series of characters (including spaces) that stops response generation if the model encounters it. The sequence is not included as part of the response. You can add up to five stop sequences.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.ResponseMimeType">
|
||
<summary>
|
||
Optional. Output response mimetype of the generated candidate text.
|
||
Supported mimetype: `text/plain`: (default) Text output. `application/json`: JSON response in the candidates.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.ResponseSchema">
|
||
<summary>
|
||
Optional. Output response schema of the generated candidate text when response mime type can have schema.
|
||
</summary>
|
||
<remarks>
|
||
Schema can be objects, primitives or arrays and is a subset of [OpenAPI schema](https://spec.openapis.org/oas/v3.0.3#schema).
|
||
If set, a compatible response_mime_type must also be set. Compatible mimetypes: `application/json`: Schema for JSON response.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.PresencePenalty">
|
||
<summary>
|
||
Optional. Presence penalty applied to the next token's logprobs if the token has already been seen in the response.
|
||
</summary>
|
||
<remarks>
|
||
This penalty is binary on/off and not dependent on the number of times the token is used (after the first).
|
||
Use frequencyPenalty for a penalty that increases with each use. A positive penalty will discourage
|
||
the use of tokens that have already been used in the response, increasing the vocabulary. A negative
|
||
penalty will encourage the use of tokens that have already been used in the response, decreasing
|
||
the vocabulary.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.FrequencyPenalty">
|
||
<summary>
|
||
Optional. Frequency penalty applied to the next token's logprobs, multiplied by the number of times each token has been seen in the response so far.
|
||
</summary>
|
||
<remarks>
|
||
A positive penalty will discourage the use of tokens that have already been used, proportional to the number
|
||
of times the token has been used: The more a token is used, the more difficult it is for the model to use
|
||
that token again increasing the vocabulary of responses.
|
||
Caution: A negative penalty will encourage the model to reuse tokens proportional to the number of times
|
||
the token has been used. Small negative values will reduce the vocabulary of a response.
|
||
Larger negative values will cause the model to start repeating a common token until it hits the
|
||
maxOutputTokens limit: "...the the the the the...".
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.ResponseLogprobs">
|
||
<summary>
|
||
Optional. If true, export the logprobs results in response.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.Logprobs">
|
||
<summary>
|
||
Optional. Only valid if responseLogprobs=True.
|
||
This sets the number of top logprobs to return at each decoding step in the Candidate.logprobs_result.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.EnableEnhancedCivicAnswers">
|
||
<summary>
|
||
Optional. Enables enhanced civic answers. It may not be available for all models.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.ResponseModalities">
|
||
<summary>
|
||
Optional. The requested modalities of the response.
|
||
Represents the set of modalities that the model can return, and should be expected
|
||
in the response. This is an exact match to the modalities of the response.
|
||
A model may have multiple combinations of supported modalities. If the requested
|
||
modalities do not match any of the supported combinations, an error will be returned.
|
||
An empty list is equivalent to requesting only text.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.SpeechConfig">
|
||
<summary>
|
||
Optional. The speech generation config.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.MediaResolution">
|
||
<summary>
|
||
Optional. If specified, the media resolution specified will be used.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.Seed">
|
||
<summary>
|
||
Optional. Seed used in decoding. If not set, the request uses a randomly generated seed.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerationConfig.ThinkingConfig">
|
||
<summary>
|
||
Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenericMetadata">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenericMetadata.CreateTime">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenericMetadata.UpdateTime">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GoogleSearch">
|
||
<summary>
|
||
GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GoogleSearchRetrieval">
|
||
<summary>
|
||
Tool to retrieve public web data for grounding, powered by Google.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GoogleSearchRetrieval.DynamicRetrievalConfig">
|
||
<summary>
|
||
Specifies the dynamic retrieval configuration for the given source.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GoogleSearchRetrieval.DisableAttribution">
|
||
<summary>
|
||
Optional. Disable using the result from this tool in detecting grounding attribution.
|
||
</summary>
|
||
<remarks>This does not affect how the result is given to the model for generation.</remarks>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleSearchRetrieval.#ctor">
|
||
<summary>
|
||
Creates an instance of <see cref="T:Mscc.GenerativeAI.GoogleSearchRetrieval"/>
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GoogleSearchRetrieval.#ctor(Mscc.GenerativeAI.DynamicRetrievalConfigMode,System.Single)">
|
||
<summary>
|
||
Creates an instance of <see cref="T:Mscc.GenerativeAI.GoogleSearchRetrieval"/> with Mode and DynamicThreshold.
|
||
</summary>
|
||
<param name="mode">The mode of the predictor to be used in dynamic retrieval.</param>
|
||
<param name="dynamicThreshold">The threshold to be used in dynamic retrieval. If not set, a system default value is used.</param>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingAttributionSegment.StartIndex">
|
||
<summary>
|
||
Output only. Start index into the content.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingAttributionSegment.EndIndex">
|
||
<summary>
|
||
Output only. End index into the content.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingAttributionSegment.PartIndex">
|
||
<summary>
|
||
Output only. Part index into the content.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GroundingAttributionWeb">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingAttributionWeb.Title">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingAttributionWeb.Uri">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GroundingChunk">
|
||
<summary>
|
||
Grounding chunk.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingChunk.Web">
|
||
<summary>
|
||
Grounding chunk from the web.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GroundingMetadata">
|
||
<summary>
|
||
Metadata returned to client when grounding is enabled.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingMetadata.SearchEntryPoint">
|
||
<summary>
|
||
Optional. Google search entry for the following-up web searches.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingMetadata.GroundingAttributions">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingMetadata.WebSearchQueries">
|
||
<summary>
|
||
Web search queries for the following-up web search.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingMetadata.GroundingSupports">
|
||
<summary>
|
||
List of grounding support.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingMetadata.RetrievalMetadata">
|
||
<summary>
|
||
Metadata related to retrieval in the grounding flow.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingMetadata.GroundingChunks">
|
||
<summary>
|
||
List of supporting references retrieved from specified grounding source.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GroundingPassageId">
|
||
<summary>
|
||
Identifier for a part within a `GroundingPassage`.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingPassageId.PartIndex">
|
||
<summary>
|
||
Output only. Index of the part within the `GenerateAnswerRequest`'s `GroundingPassage.content`.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingPassageId.PassageId">
|
||
<summary>
|
||
Output only. ID of the passage matching the `GenerateAnswerRequest`'s `GroundingPassage.id`.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GroundingSupport">
|
||
<summary>
|
||
Grounding support.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingSupport.Segment">
|
||
<summary>
|
||
Segment of the content this support belongs to.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingSupport.GroundingChunkIndices">
|
||
<summary>
|
||
A list of indices (into 'grounding_chunk') specifying the citations associated with the claim.
|
||
</summary>
|
||
<remarks>
|
||
For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the retrieved content attributed to the claim.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GroundingSupport.ConfidenceScores">
|
||
<summary>
|
||
Confidence score of the support references. Ranges from 0 to 1. 1 is the most confident.
|
||
This list must have the same size as the grounding_chunk_indices.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.HttpBody">
|
||
<summary>
|
||
Message that represents an arbitrary HTTP body.
|
||
It should only be used for payload formats that can't be represented as JSON,
|
||
such as raw binary or an HTML page.
|
||
This message can be used both in streaming and non-streaming API methods in the
|
||
request as well as the response. It can be used as a top-level request field,
|
||
which is convenient if one wants to extract parameters from either the URL or
|
||
HTTP template into the request fields and also want access to the raw HTTP body.
|
||
Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); }
|
||
Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.HttpBody.ContentType">
|
||
<summary>
|
||
The HTTP Content-Type header value specifying the content type of the body.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.HttpBody.Data">
|
||
<summary>
|
||
The HTTP request/response body as raw binary.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.HttpBody.Extensions">
|
||
<summary>
|
||
Application specific response metadata. Must be set in the first response for streaming APIs.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.HyperParameters">
|
||
<summary>
|
||
Hyperparameters controlling the tuning process.
|
||
Read more at https://ai.google.dev/docs/model_tuning_guidance
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.HyperParameters.BatchSize">
|
||
<summary>
|
||
Immutable. The batch size hyperparameter for tuning. If not set, a default of 4 or 16 will be used based on the number of training examples.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.HyperParameters.LearningRate">
|
||
<summary>
|
||
Optional. Immutable. The learning rate hyperparameter for tuning. If not set, a default of 0.001 or 0.0002 will be calculated based on the number of training examples.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.HyperParameters.LearningRateMultiplier">
|
||
<summary>
|
||
Optional. Immutable. The learning rate multiplier is used to calculate a final learningRate based on the default (recommended) value. Actual learning rate := learningRateMultiplier * default learning rate Default learning rate is dependent on base model and dataset size. If not set, a default of 1.0 will be used.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.HyperParameters.EpochCount">
|
||
<summary>
|
||
Optional. Immutable. The number of training epochs. An epoch is one pass through the training data. If not set, a default of 5 will be used.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.HyperParameters.AdapterSize">
|
||
<summary>
|
||
Optional: The Adapter size to use for the tuning job.
|
||
</summary>
|
||
<remarks>
|
||
The adapter size influences the number of trainable parameters for the tuning job.
|
||
A larger adapter size implies that the model can learn more complex tasks,
|
||
but it requires a larger training dataset and longer training times.
|
||
</remarks>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.InlineData">
|
||
<summary>
|
||
Raw media bytes sent directly in the request.
|
||
Text should not be sent as raw bytes.
|
||
</summary>
|
||
<remarks>
|
||
Serialized bytes data of the image or video.
|
||
You can specify at most 1 image with inlineData. To specify up to 16 images, use fileData.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.InlineData.Data">
|
||
<summary>
|
||
The base64 encoding of the image, PDF, or video to include inline in the prompt.
|
||
</summary>
|
||
<remarks>
|
||
When including media inline, you must also specify MIMETYPE.
|
||
Size limit: 20MB
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.InlineData.MimeType">
|
||
<summary>
|
||
The IANA standard MIME type of the source data.
|
||
</summary>
|
||
<remarks>
|
||
The media type of the image, PDF, or video specified in the data or fileUri fields.
|
||
Acceptable values include the following:
|
||
"image/png", "image/jpeg", "image/heic", "image/heif", "image/webp".
|
||
application/pdf
|
||
video/mov
|
||
video/mpeg
|
||
video/mp4
|
||
video/mpg
|
||
video/avi
|
||
video/wmv
|
||
video/mpegps
|
||
video/flv
|
||
Maximum video length: 2 minutes. No limit on image resolution.
|
||
</remarks>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ListFilesResponse">
|
||
<summary>
|
||
Response from ListFiles method containing a paginated list of files.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ListFilesResponse.Files">
|
||
<summary>
|
||
The list of files.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ListFilesResponse.NextPageToken">
|
||
<summary>
|
||
A token, which can be sent as pageToken to retrieve the next page.
|
||
If this field is omitted, there are no more pages.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ListGeneratedFilesResponse">
|
||
<summary>
|
||
Response from ListFiles method containing a paginated list of generated files.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ListGeneratedFilesResponse.GeneratedFiles">
|
||
<summary>
|
||
The list of generated files.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ListGeneratedFilesResponse.NextPageToken">
|
||
<summary>
|
||
A token, which can be sent as pageToken to retrieve the next page.
|
||
If this field is omitted, there are no more pages.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.LogprobsResult">
|
||
<summary>
|
||
Logprobs Result
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.LogprobsResult.TopCandidates">
|
||
<summary>
|
||
Length = total number of decoding steps.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.LogprobsResult.ChosenCanditates">
|
||
<summary>
|
||
Length = total number of decoding steps.
|
||
The chosen candidates may or may not be in topCandidates.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.LogprobsResultCandidate">
|
||
<summary>
|
||
Candidate for the logprobs token and score.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.LogprobsResultCandidate.Token">
|
||
<summary>
|
||
The candidate’s token string value.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.LogprobsResultCandidate.TokenId">
|
||
<summary>
|
||
The candidate’s token id value.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.LogprobsResultCandidate.LogProbability">
|
||
<summary>
|
||
The candidate's log probability.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.Media">
|
||
<summary>
|
||
A reference to data stored on the filesystem, on GFS or in blobstore.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Filename">
|
||
<summary>
|
||
Original file name.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Inline">
|
||
<summary>
|
||
Media data, set if reference_type is INLINE
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.CompositeMedia">
|
||
<summary>
|
||
A composite media composed of one or more media objects, set if reference_type is COMPOSITE_MEDIA.
|
||
The media length field must be set to the sum of the lengths of all composite media objects.
|
||
Note: All composite media must have length specified.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.DownloadParameters">
|
||
<summary>
|
||
Parameters for a media download.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Token">
|
||
<summary>
|
||
A unique fingerprint/version id for the media data.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.ContentTypeInfo">
|
||
<summary>
|
||
Extended content type information provided for Scotty uploads.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Sha1Hash">
|
||
<summary>
|
||
Scotty-provided SHA1 hash for an upload.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Sha256Hash">
|
||
<summary>
|
||
Scotty-provided SHA256 hash for an upload.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Md5Hash">
|
||
<summary>
|
||
Scotty-provided MD5 hash for an upload.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.HashVerified">
|
||
<summary>
|
||
For Scotty uploads only.
|
||
If a user sends a hash code and the backend has requested that Scotty verify the upload against the client hash, Scotty will perform the check on behalf of the backend and will reject it if the hashes don't match. This is set to true if Scotty performed this verification.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.ContentType">
|
||
<summary>
|
||
MIME type of the data.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.DiffUploadRequest">
|
||
<summary>
|
||
Set if reference_type is DIFF_UPLOAD_REQUEST.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.DiffUploadResponse">
|
||
<summary>
|
||
Set if reference_type is DIFF_UPLOAD_RESPONSE.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.DiffChecksumsResponse">
|
||
<summary>
|
||
Set if reference_type is DIFF_CHECKSUMS_RESPONSE.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.DiffVersionResponse">
|
||
<summary>
|
||
Set if reference_type is DIFF_VERSION_RESPONSE.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.DiffDownloadResponse">
|
||
<summary>
|
||
Set if reference_type is DIFF_DOWNLOAD_RESPONSE.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Algorithm">
|
||
<summary>
|
||
Deprecated, use one of explicit hash type fields instead.
|
||
Algorithm used for calculating the hash.
|
||
As of 2011/01/21, "MD5" is the only possible value for this field.
|
||
New values may be added at any time.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.ReferenceType">
|
||
<summary>
|
||
Describes what the field reference contains.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.BigstoreObjectRef">
|
||
<summary>
|
||
Use object_id instead.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Timestamp">
|
||
<summary>
|
||
Time at which the media data was last updated, in milliseconds since UNIX epoch
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Path">
|
||
<summary>
|
||
Path to the data, set if reference_type is PATH
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Blobstore2Info">
|
||
<summary>
|
||
Blobstore v2 info, set if reference_type is BLOBSTORE_REF, and it refers to a v2 blob.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Hash">
|
||
<summary>
|
||
Deprecated, use one of explicit hash type fields instead. These two hash related fields will only be populated on Scotty based media uploads and will contain the content of the hash group in the NotificationRequest: Hex encoded hash value of the uploaded media.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.BlobRef">
|
||
<summary>
|
||
Blobstore v1 reference, set if reference_type is BLOBSTORE_REF This should be the byte representation of a blobstore.BlobRef. Since Blobstore is deprecating v1, use blobstore2_info instead. For now, any v2 blob will also be represented in this field as v1 BlobRef.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Length">
|
||
<summary>
|
||
Size of the data, in bytes
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.ObjectId">
|
||
<summary>
|
||
Reference to a TI Blob, set if reference_type is BIGSTORE_REF.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.IsPotentialRetry">
|
||
<summary>
|
||
|is_potential_retry| is set false only when Scotty is certain that it has not sent the request before. When a client resumes an upload, this field must be set true in agent calls, because Scotty cannot be certain that it has never sent the request before due to potential failure in the session state persistence.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.Crc32CHash">
|
||
<summary>
|
||
For Scotty Uploads: Scotty-provided hashes for uploads For Scotty Downloads: (WARNING: DO NOT USE WITHOUT PERMISSION FROM THE SCOTTY TEAM.) A Hash provided by the agent to be used to verify the data being downloaded. Currently only supported for inline payloads. Further, only crc32c_hash is currently supported.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.MediaId">
|
||
<summary>
|
||
Media id to forward to the operation GetMedia. Can be set if reference_type is GET_MEDIA.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Media.CosmoBinaryReference">
|
||
<summary>
|
||
A binary data reference for a media download. Serves as a technology-agnostic binary reference in some Google infrastructure. This value is a serialized storage_cosmo.BinaryReference proto. Storing it as bytes is a hack to get around the fact that the cosmo proto (as well as others it includes) doesn't support JavaScript. This prevents us from including the actual type of this field.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.Message">
|
||
<summary>
|
||
The base unit of structured text.
|
||
A Message includes an author and the content of the Message.
|
||
The author is used to tag messages when they are fed to the model as text.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Message.Author">
|
||
<summary>
|
||
Optional. The author of this Message.
|
||
This serves as a key for tagging the content of this Message when it is fed to the model as text.
|
||
The author can be any alphanumeric string.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Message.Content">
|
||
<summary>
|
||
Required. The text content of the structured Message.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Message.CitationMetadata">
|
||
<summary>
|
||
Output only. Citation information for model-generated content in this Message.
|
||
If this Message was generated as output from the model, this field may be populated with attribution
|
||
information for any text included in the content. This field is used only on output.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.MessagePrompt">
|
||
<summary>
|
||
All of the structured input text passed to the model as a prompt.
|
||
A MessagePrompt contains a structured set of fields that provide context for the conversation,
|
||
examples of user input/model output message pairs that prime the model to respond in different ways,
|
||
and the conversation history or list of messages representing the alternating turns of the conversation
|
||
between the user and the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.MessagePrompt.Context">
|
||
<summary>
|
||
Optional. Text that should be provided to the model first to ground the response.
|
||
If not empty, this context will be given to the model first before the examples and messages. When using a context be sure to provide it with every request to maintain continuity.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.MessagePrompt.Examples">
|
||
<summary>
|
||
Optional. Examples of what the model should generate.
|
||
This includes both user input and the response that the model should emulate.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.MessagePrompt.Messages">
|
||
<summary>
|
||
Required. A snapshot of the recent conversation history sorted chronologically.
|
||
Turns alternate between two authors.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.MetadataFilter">
|
||
<summary>
|
||
User provided filter to limit retrieval based on Chunk or Document level metadata values. Example (genre = drama OR genre = action): key = "document.custom_metadata.genre" conditions = [{stringValue = "drama", operation = EQUAL}, {stringValue = "action", operation = EQUAL}]
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.MetadataFilter.Key">
|
||
<summary>
|
||
Required. The key of the metadata to filter on.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.MetadataFilter.Conditions">
|
||
<summary>
|
||
Required. The Conditions for the given key that will trigger this filter. Multiple Conditions are joined by logical ORs.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.Condition">
|
||
<summary>
|
||
Filter condition applicable to a single key.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Condition.Operation">
|
||
<summary>
|
||
Required. Operator applied to the given key-value pair to trigger the condition.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Condition.StringValue">
|
||
<summary>
|
||
The string value to filter the metadata on.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Condition.NumericValue">
|
||
<summary>
|
||
The numeric value to filter the metadata on.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ModalityTokenCount">
|
||
<summary>
|
||
Represents token counting info for a single modality.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModalityTokenCount.Modality">
|
||
<summary>
|
||
The modality associated with this token count.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModalityTokenCount.TokenCount">
|
||
<summary>
|
||
Number of tokens.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ListModelsResponse">
|
||
<summary>
|
||
Response from ListModels method containing a paginated list of Models.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ListModelsResponse.Models">
|
||
<summary>
|
||
The list of Models.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ListModelsResponse.NextPageToken">
|
||
<summary>
|
||
A token, which can be sent as pageToken to retrieve the next page.
|
||
If this field is omitted, there are no more pages.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ModelResponse">
|
||
<summary>
|
||
Information about a Generative Language Model.
|
||
Ref: https://ai.google.dev/api/rest/v1beta/models
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.Name">
|
||
<summary>
|
||
Required. The resource name of the Model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.BaseModelId">
|
||
<summary>
|
||
The name of the base model, pass this to the generation request.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.Version">
|
||
<summary>
|
||
The version number of the model (Google AI).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.VersionId">
|
||
<summary>
|
||
The version Id of the model (Vertex AI).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.VersionAliases">
|
||
<summary>
|
||
User provided version aliases so that a model version can be referenced via
|
||
alias (i.e. projects/{project}/locations/{location}/models/{model_id}@{version_alias}
|
||
instead of auto-generated version id (i.e. projects/{project}/locations/{location}/models/{model_id}@{version_id}).
|
||
</summary>
|
||
<remarks>
|
||
The format is a-z{0,126}[a-z0-9] to distinguish from version_id.
|
||
A default version alias will be created for the first version of the model,
|
||
and there must be exactly one default version alias for a model.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.DisplayName">
|
||
<summary>
|
||
The human-readable name of the model. E.g. "Chat Bison".
|
||
The name can be up to 128 characters long and can consist of any UTF-8 characters.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.Description">
|
||
<summary>
|
||
A short description of the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.InputTokenLimit">
|
||
<summary>
|
||
Maximum number of input tokens allowed for this model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.OutputTokenLimit">
|
||
<summary>
|
||
Maximum number of output tokens available for this model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.SupportedGenerationMethods">
|
||
<summary>
|
||
The model's supported generation methods.
|
||
The method names are defined as Pascal case strings, such as generateMessage which correspond to API methods.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.Temperature">
|
||
<summary>
|
||
Controls the randomness of the output.
|
||
Values can range over [0.0,1.0], inclusive. A value closer to 1.0 will produce responses that are more varied, while a value closer to 0.0 will typically result in less surprising responses from the model. This value specifies the default to be used by the backend while making the call to the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.MaxTemperature">
|
||
<summary>
|
||
The maximum temperature this model can use.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.TopP">
|
||
<summary>
|
||
For Nucleus sampling.
|
||
Nucleus sampling considers the smallest set of tokens whose probability sum is at least topP. This value specifies the default to be used by the backend while making the call to the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.TopK">
|
||
<summary>
|
||
For Top-k sampling.
|
||
Top-k sampling considers the set of topK most probable tokens. This value specifies the default to be used by the backend while making the call to the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.State">
|
||
<summary>
|
||
Output only. The state of the tuned model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.CreateTime">
|
||
<summary>
|
||
Output only. The timestamp when this model was created.
|
||
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.UpdateTime">
|
||
<summary>
|
||
Output only. The timestamp when this model was updated.
|
||
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.TuningTask">
|
||
<summary>
|
||
Required. The tuning task that creates the tuned model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.TunedModelSource">
|
||
<summary>
|
||
Optional. TunedModel to use as the starting point for training the new model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.BaseModel">
|
||
<summary>
|
||
The name of the base model, pass this to the generation request.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.ETag">
|
||
<summary>
|
||
The ETag of the item.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.Labels">
|
||
<summary>
|
||
Optional. The labels with user-defined metadata for the request.
|
||
</summary>
|
||
<remarks>
|
||
It is used for billing and reporting only.
|
||
Label keys and values can be no longer than 63 characters (Unicode codepoints) and
|
||
can only contain lowercase letters, numeric characters, underscores, and dashes.
|
||
International characters are allowed. Label values are optional. Label keys must start with a letter.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.VersionCreateTime">
|
||
<summary>
|
||
Output only. The timestamp when this model was created.
|
||
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.VersionUpdateTime">
|
||
<summary>
|
||
Output only. The timestamp when this model was updated.
|
||
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.ModelSourceInfo">
|
||
<summary>
|
||
Information about the source of the model. Example: "sourceType": "GENIE".
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ModelResponse.BaseModelSource">
|
||
<summary>
|
||
Information about the source of the base model. Example: "genieSource": {}.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TunedModelSource">
|
||
<summary>
|
||
Tuned model as a source for training a new model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TunedModelSource.TunedModel">
|
||
<summary>
|
||
Immutable. The name of the TunedModel to use as the starting point for training the new model. Example: tunedModels/my-tuned-model
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TunedModelSource.BaseModel">
|
||
<summary>
|
||
Output only. The name of the base Model this TunedModel was tuned from. Example: models/text-bison-001
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ObjectId">
|
||
<summary>
|
||
This is a copy of the tech.blob.ObjectId proto, which could not be used directly here due to transitive closure issues with JavaScript support; see http://b/8801763.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ObjectId.ObjectName">
|
||
<summary>
|
||
The name of the object.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ObjectId.BucketName">
|
||
<summary>
|
||
The name of the bucket to which this object belongs.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ObjectId.Generation">
|
||
<summary>
|
||
Generation of the object. Generations are monotonically increasing across writes, allowing them to be compared to determine which generation is newer. If this is omitted in a request, then you are requesting the live object. See http://go/bigstore-versions
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.Operation">
|
||
<summary>
|
||
This resource represents a long-running operation that is the result of a network API call.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Operation.Name">
|
||
<summary>
|
||
The server-assigned name, which is only unique within the same service that originally returns it.
|
||
If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Operation.Done">
|
||
<summary>
|
||
If the value is `false`, it means the operation is still in progress.
|
||
If `true`, the operation is completed, and either `error` or `response` is available.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Operation.Error">
|
||
<summary>
|
||
The error result of the operation in case of failure or cancellation.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Operation.Metadata">
|
||
<summary>
|
||
Service-specific metadata associated with the operation.
|
||
</summary>
|
||
<remarks>
|
||
It typically contains progress information and common metadata such as create time.
|
||
Some services might not provide such metadata. Any method that returns a long-running operation
|
||
should document the metadata type, if any.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Operation.Response">
|
||
<summary>
|
||
The normal, successful response of the operation.
|
||
</summary>
|
||
<remarks>
|
||
If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`.
|
||
If the original method is standard `Get`/`Create`/`Update`, the response should be the resource.
|
||
For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name.
|
||
For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
|
||
</remarks>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.Part">
|
||
<summary>
|
||
A datatype containing media that is part of a multi-part Content message.
|
||
A part of a turn in a conversation with the model with a fixed MIME type.
|
||
It has one of the following mutually exclusive fields:
|
||
1. text
|
||
2. inline_data
|
||
3. file_data
|
||
4. functionResponse
|
||
5. functionCall
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.Part.#ctor(System.String)">
|
||
<summary>
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.Part"/> class with the given text.
</summary>
<param name="text">A text part of a conversation with the model.</param>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Part.Text">
|
||
<summary>
|
||
A text part of a conversation with the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Part.TextData">
|
||
<remarks/>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Part.InlineData">
|
||
<summary>
|
||
Raw media bytes sent directly in the request.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Part.FileData">
|
||
<summary>
|
||
URI based data.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Part.FunctionResponse">
|
||
<summary>
|
||
The result output of a FunctionCall that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the function is used as context to the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Part.FunctionCall">
|
||
<summary>
|
||
A predicted FunctionCall returned from the model that contains a string representing the FunctionDeclaration.name with the arguments and their values.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Part.VideoMetadata">
|
||
<summary>
|
||
Optional. For video input, the start and end offset of the video in Duration format.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Part.ExecutableCode">
|
||
<summary>
|
||
Code generated by the model that is meant to be executed.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Part.CodeExecutionResult">
|
||
<summary>
|
||
Result of executing the ExecutableCode.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Part.Thought">
|
||
<summary>
|
||
Optional. Indicates if the part is thought from the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Part.ETag">
|
||
<summary>
|
||
The ETag of the item.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TuningPart">
|
||
<summary>
|
||
A datatype containing data that is part of a multi-part `TuningContent` message.
|
||
This is a subset of the Part used for model inference, with limited type support.
|
||
A `Part` consists of data which has an associated datatype.
|
||
A `Part` can only contain one of the accepted types in `Part.data`.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.PrebuiltVoiceConfig">
|
||
<summary>
|
||
The configuration for the prebuilt speaker to use.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.PrebuiltVoiceConfig.VoiceName">
|
||
<summary>
|
||
The name of the preset voice to use.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.PredictLongRunningRequest">
|
||
<summary>
|
||
Request message for [PredictionService.PredictLongRunning].
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.PredictLongRunningRequest.Instances">
|
||
<summary>
|
||
Required. The instances that are the input to the prediction call.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.PredictLongRunningRequest.Parameters">
|
||
<summary>
|
||
Optional. The parameters that govern the prediction call.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.PredictRequest">
|
||
<summary>
|
||
Request message for PredictionService.Predict.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.PredictRequest.Instances">
|
||
<summary>
|
||
Required. The instances that are the input to the prediction call.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.PredictRequest.Parameters">
|
||
<summary>
|
||
Optional. The parameters that govern the prediction call.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.PredictResponse">
|
||
<summary>
|
||
Response message for [PredictionService.Predict].
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.PredictResponse.Predictions">
|
||
<summary>
|
||
The outputs of the prediction call.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.PromptFeedback">
|
||
<summary>
|
||
A set of the feedback metadata the prompt specified in GenerateContentRequest.content.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.PromptFeedback.BlockReason">
|
||
<summary>
|
||
Output only. Optional. If set, the prompt was blocked and no candidates are returned. Rephrase your prompt.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.PromptFeedback.SafetyRatings">
|
||
<summary>
|
||
Output only. Ratings for safety of the prompt. There is at most one rating per category.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.PromptFeedback.BlockReasonMessage">
|
||
<summary>
|
||
Output only. A readable block reason message.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.QueryCorpusRequest">
|
||
<summary>
|
||
Request for querying a `Corpus`.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.QueryCorpusRequest.Query">
|
||
<summary>
|
||
Required. Query string to perform semantic search.
|
||
</summary>
|
||
</member>
|
||
<!-- Badly formed XML comment ignored for member "P:Mscc.GenerativeAI.QueryCorpusRequest.MetadataFilters" -->
|
||
<member name="P:Mscc.GenerativeAI.QueryCorpusRequest.ResultsCount">
|
||
<summary>
|
||
Optional. The maximum number of `Chunk`s to return.
|
||
The service may return fewer `Chunk`s. If unspecified, at most 10 `Chunk`s will be returned. The maximum specified result count is 100.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.QueryCorpusResponse">
|
||
<summary>
|
||
Response from `QueryCorpus` containing a list of relevant chunks.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.QueryCorpusResponse.RelevantChunks">
|
||
<summary>
|
||
The relevant chunks.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.RelevantChunk">
|
||
<summary>
|
||
The information for a chunk relevant to a query.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.RelevantChunk.Chunk">
|
||
<summary>
|
||
<see cref="P:Mscc.GenerativeAI.RelevantChunk.Chunk"/> associated with the query.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.RelevantChunk.ChunkRelevanceScore">
|
||
<summary>
|
||
<see cref="P:Mscc.GenerativeAI.RelevantChunk.Chunk"/> relevance to the query.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.RequestOptions.#ctor(Mscc.GenerativeAI.Retry,System.Nullable{System.TimeSpan})">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.RequestOptions"/> class
|
||
</summary>
|
||
<param name="retry">Refer to [retry docs](https://googleapis.dev/python/google-api-core/latest/retry.html) for details.</param>
|
||
<param name="timeout">In seconds (or provide a [TimeToDeadlineTimeout](https://googleapis.dev/python/google-api-core/latest/timeout.html) object).</param>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ResponseFormat">
|
||
<summary>
|
||
Defines the format of the response.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ResponseFormat.Type">
|
||
<summary>
|
||
Required. Type of the response.
|
||
Can be either:
|
||
- "text": Format the response as text.
- "json_object": Format the response as a JSON object.
- "json_schema": Format the response as a JSON object following the given schema.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ResponseFormat.JsonSchema">
|
||
<summary>
|
||
Optional. The JSON schema to follow. Only used if type is "json_schema".
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ResponseFormatSchema">
|
||
<summary>
|
||
Schema for the response.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ResponseFormatSchema.Name">
|
||
<summary>
|
||
Required. Name of the object type represented by the schema.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ResponseFormatSchema.Description">
|
||
<summary>
|
||
Optional. Description of the object represented by the schema.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ResponseFormatSchema.Strict">
|
||
<summary>
|
||
Optional. Whether the schema validation is strict.
|
||
If true, the model will fail if the schema is not valid.
|
||
NOTE: This parameter is currently ignored.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ResponseFormatSchema.Schema">
|
||
<summary>
|
||
Optional. The JSON schema to follow.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.Retrieval">
|
||
<summary>
|
||
Defines a retrieval tool that model can call to access external knowledge.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Retrieval.DisableAttribution">
|
||
<summary>
|
||
Optional. Disable using the result from this tool in detecting grounding attribution.
|
||
</summary>
|
||
<remarks>This does not affect how the result is given to the model for generation.</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Retrieval.VertexAiSearch">
|
||
<summary>
|
||
Optional. Set to use data source powered by Vertex AI Search.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.RetrievalMetadata">
|
||
<summary>
|
||
Metadata related to retrieval in the grounding flow.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.RetrievalMetadata.GoogleSearchDynamicRetrievalScore">
|
||
<summary>
|
||
Optional. Score indicating how likely information from google search could help answer the prompt.
|
||
</summary>
|
||
<remarks>
|
||
The score is in the range [0, 1], where 0 is the least likely and 1 is the most likely.
|
||
This score is only populated when google search grounding and dynamic retrieval is enabled.
|
||
It will be compared to the threshold to determine whether to trigger google search.
|
||
</remarks>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.SafetyFeedback">
|
||
<summary>
|
||
Safety feedback for an entire request.
|
||
This field is populated if content in the input and/or response is blocked due to safety settings.
|
||
SafetyFeedback may not exist for every HarmCategory. Each SafetyFeedback will return the safety settings
|
||
used by the request as well as the lowest HarmProbability that should be allowed in order to return a result.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SafetyFeedback.Rating">
|
||
<summary>
|
||
Safety rating evaluated from content.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SafetyFeedback.Setting">
|
||
<summary>
|
||
Safety settings applied to the request.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.SafetyRating">
|
||
<summary>
|
||
Safety rating for a piece of content.
|
||
Ref: https://ai.google.dev/api/rest/v1beta/SafetyRating
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SafetyRating.Category">
|
||
<summary>
|
||
Output only. Required. The category for this rating.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SafetyRating.Probability">
|
||
<summary>
|
||
Output only. Required. The probability of harm for this content.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SafetyRating.Blocked">
|
||
<summary>
|
||
Output only. Indicates whether the content was filtered out because of this rating.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SafetyRating.ProbabilityScore">
|
||
<summary>
|
||
Output only. Harm probability scoring in the content.
|
||
Vertex AI only
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SafetyRating.Severity">
|
||
<summary>
|
||
Output only. Harm severity levels in the content.
|
||
Vertex AI only
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SafetyRating.SeverityScore">
|
||
<summary>
|
||
Output only. Harm severity scoring in the content.
|
||
Vertex AI only
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.SafetySetting">
|
||
<summary>
|
||
Safety setting, affecting the safety-blocking behavior.
|
||
Represents a safety setting that can be used to control the model's behavior.
|
||
It instructs the model to avoid certain responses given safety measurements based on category.
|
||
Ref: https://ai.google.dev/api/rest/v1beta/SafetySetting
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SafetySetting.Category">
|
||
<summary>
|
||
Required. The category for this setting.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SafetySetting.Threshold">
|
||
<summary>
|
||
Required. Controls the probability threshold at which harm is blocked.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.Schema">
|
||
<summary>
|
||
The Schema object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. Represents a select subset of an OpenAPI 3.0 schema object.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Schema.Type">
|
||
<summary>
|
||
Required. Data type.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Schema.Format">
|
||
<summary>
|
||
Optional. The format of the data.
|
||
This is used only for primitive datatypes.
|
||
Supported formats:
|
||
for NUMBER type: float, double
|
||
for INTEGER type: int32, int64
|
||
for STRING type: enum, date-time
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Schema.Description">
|
||
<summary>
|
||
Optional. A brief description of the parameter. This could contain examples of use. Parameter description may be formatted as Markdown.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Schema.Nullable">
|
||
<summary>
|
||
Optional. Indicates if the value may be null.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Schema.Items">
|
||
<summary>
|
||
Optional. Schema of the elements of Type.ARRAY.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Schema.MaxItems">
|
||
<summary>
|
||
Optional. Maximum number of the elements for Type.ARRAY.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Schema.MinItems">
|
||
<summary>
|
||
Optional. Minimum number of the elements for Type.ARRAY.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Schema.Enum">
|
||
<summary>
|
||
Optional. Possible values of the element of Type.STRING with enum format.
|
||
For example we can define an Enum Direction as :
|
||
{type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH", "WEST"]}
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Schema.Properties">
|
||
<summary>
|
||
Optional. Properties of Type.OBJECT.
|
||
An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Schema.PropertyOrdering">
|
||
<summary>
|
||
Optional. The order of the properties. Not a standard field in open api spec. Used to determine the order of the properties in the response.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Schema.Required">
|
||
<summary>
|
||
Optional. Required properties of Type.OBJECT.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.SdkListModelsResponse">
|
||
<summary>
|
||
Response for list models.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SdkListModelsResponse.Data">
|
||
<summary>
|
||
Output only. A list of the requested embeddings.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SdkListModelsResponse.Object">
|
||
<summary>
|
||
Output only. Always "list", required by the SDK.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.SdkModel">
|
||
<summary>
|
||
The model object.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SdkModel.Id">
|
||
<summary>
|
||
Output only. Id of the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SdkModel.Object">
|
||
<summary>
|
||
Output only. Always "model", required by the SDK.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SdkModel.Created">
|
||
<summary>
|
||
Output only. The Unix timestamp (in seconds) when the model was created.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SdkModel.OwnedBy">
|
||
<summary>
|
||
Output only. The organization that owns the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SdkModel.Deleted">
|
||
<summary>
|
||
Output only. Optional. An indicator whether a fine-tuned model has been deleted.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.SearchEntryPoint">
|
||
<summary>
|
||
Google search entry point.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SearchEntryPoint.RenderedContent">
|
||
<summary>
|
||
Optional. Web content snippet that can be embedded in a web page or an app webview.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SearchEntryPoint.SdkBlob">
|
||
<summary>
|
||
Optional. Base64 encoded JSON representing array of tuple.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.Segment">
|
||
<summary>
|
||
Segment of the content.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Segment.Text">
|
||
<summary>
|
||
Output only. The text corresponding to the segment from the response.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Segment.StartIndex">
|
||
<summary>
|
||
Output only. Start index in the given Part, measured in bytes. Offset from the start of the Part, inclusive, starting at zero.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Segment.PartIndex">
|
||
<summary>
|
||
Output only. The index of a Part object within its parent Content object.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Segment.EndIndex">
|
||
<summary>
|
||
Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.SemanticRetrieverChunk">
|
||
<summary>
|
||
Identifier for a `Chunk` retrieved via Semantic Retriever specified in the `GenerateAnswerRequest` using `SemanticRetrieverConfig`.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SemanticRetrieverChunk.Chunk">
|
||
<summary>
|
||
Output only. Name of the `Chunk` containing the attributed text. Example: `corpora/123/documents/abc/chunks/xyz`
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SemanticRetrieverChunk.Source">
|
||
<summary>
|
||
Output only. Name of the source matching the request's `SemanticRetrieverConfig.source`. Example: `corpora/123` or `corpora/123/documents/abc`
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.SpeechConfig">
|
||
<summary>
|
||
The speech generation config.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SpeechConfig.VoiceConfig">
|
||
<summary>
|
||
The configuration for the speaker to use.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.Status">
|
||
<summary>
|
||
The `Status` type defines a logical error model that is suitable for
|
||
different programming environments, including REST APIs and RPC APIs.
|
||
</summary>
|
||
<remarks>
|
||
It is used by [gRPC](https://github.com/grpc).
|
||
Each `Status` message contains three pieces of data: error code,
|
||
error message, and error details. You can find out more about
|
||
this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Status.Code">
|
||
<summary>
|
||
The status code, which should be an enum value of google.rpc.Code.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Status.Message">
|
||
<summary>
|
||
A developer-facing error message, which should be in English.
|
||
Any user-facing error message should be localized and sent in
|
||
the google.rpc.Status.details field, or localized by the client.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Status.Details">
|
||
<summary>
|
||
A list of messages that carry the error details.
|
||
There is a common set of message types for APIs to use.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.StreamOptions">
|
||
<summary>
|
||
Options for streaming requests.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.StreamOptions.IncludeUsage">
|
||
<summary>
|
||
Optional. If set, include usage statistics in the response.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.SupervisedTuningSpec">
|
||
<summary>
Specification for supervised fine-tuning of a model.
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SupervisedTuningSpec.TrainingDatasetUri">
|
||
<summary>
|
||
Cloud Storage URI of your training dataset.
|
||
</summary>
|
||
<remarks>
|
||
The dataset must be formatted as a JSONL file. For best results,
|
||
provide at least 100 to 500 examples.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SupervisedTuningSpec.ValidationDatasetUri">
|
||
<summary>
|
||
Optional: The Cloud Storage URI of your validation dataset file.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.SupervisedTuningSpec.HyperParameters">
|
||
<summary>
|
||
Immutable. Hyperparameters controlling the tuning process. If not provided, default values will be used.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TextCompletion">
|
||
<summary>
|
||
Output text returned from a model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TextCompletion.Output">
|
||
<summary>
|
||
Output only. The generated text returned from the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TextCompletion.SafetyRatings">
|
||
<summary>
|
||
Ratings for the safety of a response.
|
||
There is at most one rating per category.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TextCompletion.CitationMetadata">
|
||
<summary>
|
||
Output only. Citation information for model-generated output in this TextCompletion.
|
||
This field may be populated with attribution information for any text included in the output.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.AbstractText">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.AbstractText.Text">
|
||
<summary>
|
||
Required. The prompt text.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TextData">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TextPrompt">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ThinkingConfig">
|
||
<summary>
|
||
Config for thinking features.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ThinkingConfig.IncludeThoughts">
|
||
<summary>
|
||
Indicates whether to include thoughts in the response.
|
||
If true, thoughts are returned only when available.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TokensInfo">
|
||
<summary>
|
||
Tokens info with a list of tokens and the corresponding list of token ids.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TokensInfo.TokenIds">
|
||
<summary>
|
||
A list of token ids from the input.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TokensInfo.Tokens">
|
||
<summary>
|
||
A list of tokens from the input.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TokensInfo.Role">
|
||
<summary>
|
||
Optional. Optional fields for the role from the corresponding Content.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.Tool">
|
||
<summary>
|
||
Defines a tool that model can call to access external knowledge.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Tool.FunctionDeclarations">
|
||
<summary>
|
||
Optional. One or more function declarations to be passed to the model along
|
||
with the current user query. Model may decide to call a subset of these
|
||
functions by populating [FunctionCall][content.part.function_call] in the
|
||
response. User should provide a
|
||
[FunctionResponse][content.part.function_response] for each function call
|
||
in the next turn. Based on the function responses, Model will generate the
|
||
final response back to the user. Maximum 64 function declarations can be
|
||
provided.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Tool.CodeExecution">
|
||
<summary>
|
||
Optional. Enables the model to execute code as part of generation.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Tool.Retrieval">
|
||
<summary>
|
||
Optional. Retrieval tool type. System will always execute the provided retrieval tool(s)
|
||
to get external knowledge to answer the prompt. Retrieval results are presented
|
||
to the model for generation.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Tool.GoogleSearchRetrieval">
|
||
<summary>
|
||
Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Tool.GoogleSearch">
|
||
<summary>
|
||
Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ToolConfig">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ToolConfig.FunctionCallingConfig">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TopCandidates">
|
||
<summary>
|
||
Candidates with top log probabilities at each decoding step.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TopCandidates.Candidates">
|
||
<summary>
|
||
Sorted by log probability in descending order.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TrainingData">
|
||
<summary>
|
||
Dataset for training or validation.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TrainingData.Examples">
|
||
<summary>
|
||
Optional. Inline examples.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TuningExamples">
|
||
<summary>
|
||
A set of tuning examples. Can be training or validation data.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningExamples.Examples">
|
||
<summary>
|
||
Required. The examples. Example input can be for text or discuss, but all examples in a set must be of the same type.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningExamples.MultiturnExamples">
|
||
<summary>
|
||
Content examples. For multiturn conversations.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TuningExample">
|
||
<summary>
|
||
A single example for tuning.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningExample.TextInput">
|
||
<summary>
|
||
Optional. Text model input.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningExample.Output">
|
||
<summary>
|
||
Required. The expected model output.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TuningMultiturnExample">
|
||
<summary>
|
||
A tuning example with multiturn input.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningMultiturnExample.Contents">
|
||
<summary>
|
||
Each Content represents a turn in the conversation.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningMultiturnExample.SystemInstruction">
|
||
<summary>
|
||
Optional. Developer set system instructions. Currently, text only.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TunedModel">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TunedModel.Model">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TunedModel.Endpoint">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TuningContent">
|
||
<summary>
|
||
The structured datatype containing multi-part content of an example message. This is a subset of the Content proto used during model inference with limited type support. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningContent.Parts">
|
||
<summary>
|
||
Ordered `Parts` that constitute a single message. Parts may have different MIME types.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningContent.Role">
|
||
<summary>
|
||
Optional. The producer of the content. Must be either 'user' or 'model'.
|
||
Useful to set for multi-turn conversations, otherwise can be left blank or unset.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningContent.PartTypes">
|
||
<summary>
|
||
Ordered Parts that constitute a single message. Parts may have different MIME types.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningContent.ETag">
|
||
<summary>
|
||
The ETag of the item.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.TuningContent.#ctor">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.TuningContent"/> class.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.TuningContent.#ctor(System.String)">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.TuningContent"/> class.
|
||
</summary>
|
||
<param name="text">String to process.</param>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.TuningContent.#ctor(Mscc.GenerativeAI.FileData)">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.TuningContent"/> class.
|
||
</summary>
|
||
<param name="file">File to process.</param>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TuningJob">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.Name">
|
||
<summary>
|
||
Name of the tuned model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.TunedModelDisplayName">
|
||
<summary>
|
||
Display name of the tuned model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.BaseModel">
|
||
<summary>
|
||
Name of the foundation model to tune.
|
||
</summary>
|
||
<remarks>
|
||
Supported values: gemini-1.5-pro-002, gemini-1.5-flash-002, and gemini-1.0-pro-002.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.CreateTime">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.StartTime">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.EndTime">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.UpdateTime">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.TunedModel">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.Experiment">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.TuningDataStats">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.State">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.SupervisedTuningSpec">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.Error">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.Endpoint">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningJob.HasEnded">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TuningSnapshot">
|
||
<summary>
|
||
Record for a single tuning step.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningSnapshot.Step">
|
||
<summary>
|
||
Output only. The tuning step.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningSnapshot.Epoch">
|
||
<summary>
|
||
Output only. The epoch this step was part of.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningSnapshot.MeanLoss">
|
||
<summary>
|
||
Output only. The mean loss of the training examples for this step.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningSnapshot.ComputeTime">
|
||
<summary>
|
||
Output only. The timestamp when this metric was computed.
|
||
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.TuningTask">
|
||
<summary>
|
||
Tuning tasks that create tuned models.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningTask.StartTime">
|
||
<summary>
|
||
Output only. The timestamp when tuning this model started.
|
||
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningTask.CompleteTime">
|
||
<summary>
|
||
Output only. The timestamp when tuning this model completed.
|
||
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningTask.Snapshots">
|
||
<summary>
|
||
Output only. Metrics collected during tuning.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningTask.TrainingData">
|
||
<summary>
|
||
Required. Input only. Immutable. The model training data.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.TuningTask.Hyperparameters">
|
||
<summary>
|
||
Immutable. Hyperparameters controlling the tuning process. If not provided, default values will be used.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.UploadMediaRequest">
|
||
<summary>
|
||
Instance to upload a local file to create a File resource.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.UploadMediaRequest.File">
|
||
<summary>
|
||
Optional. Metadata for the file to create.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.UploadMediaResponse">
|
||
<summary>
|
||
Information about an uploaded file via File API
|
||
Ref: https://ai.google.dev/api/rest/v1beta/files
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.UploadMediaResponse.File">
|
||
<summary>
|
||
Metadata for the created file.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.UsageMetadata.PromptTokenCount">
|
||
<summary>
|
||
Number of tokens in the request.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.UsageMetadata.CandidatesTokenCount">
|
||
<summary>
|
||
Number of tokens in the response(s).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.UsageMetadata.TotalTokenCount">
|
||
<summary>
|
||
Total number of tokens across both the request and the response(s).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.UsageMetadata.CachedContentTokenCount">
|
||
<summary>
|
||
Number of tokens in the cached content.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.UsageMetadata.ToolUsePromptTokenCount">
|
||
<summary>
|
||
Output only. Number of tokens present in tool-use prompt(s).
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.UsageMetadata.PromptTokensDetails">
|
||
<summary>
|
||
Output only. List of modalities that were processed in the request input.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.UsageMetadata.CandidatesTokensDetails">
|
||
<summary>
|
||
Output only. List of modalities that were returned in the response.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.UsageMetadata.CacheTokensDetails">
|
||
<summary>
|
||
Output only. List of modalities of the cached content in the request input.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.UsageMetadata.ToolUsePromptTokensDetails">
|
||
<summary>
|
||
Output only. List of modalities that were processed for tool-use request inputs.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.VertexAISearch">
|
||
<summary>
|
||
Retrieve from Vertex AI Search datastore for grounding.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.VertexAISearch.Datastore">
|
||
<summary>
|
||
Fully-qualified Vertex AI Search's datastore resource ID.
|
||
</summary>
|
||
<remarks>
|
||
Format: projects/{project_id}/locations/{location}/collections/default_collection/dataStores/{data_store_id}
|
||
See https://cloud.google.com/vertex-ai-search-and-conversation
|
||
</remarks>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.VideoMetadata">
|
||
<summary>
|
||
Optional. For video input, the start and end offset of the video in Duration format.
|
||
</summary>
|
||
<remarks>
|
||
For example, to specify a 10 second clip starting at 1:00,
|
||
set "start_offset": { "seconds": 60 } and "end_offset": { "seconds": 70 }.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.VideoMetadata.VideoDuration">
|
||
<summary>
|
||
Duration of the video.
|
||
</summary>
|
||
<remarks>
|
||
A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.VideoMetadata.StartOffset">
|
||
<summary>
|
||
Starting offset of a video.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.VideoMetadata.EndOffset">
|
||
<summary>
|
||
Ending offset of a video. Should be larger than the <see cref="P:Mscc.GenerativeAI.VideoMetadata.StartOffset"/>.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.VoiceConfig">
|
||
<summary>
|
||
The configuration for the voice to use.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.VoiceConfig.PrebuiltVoiceConfig">
|
||
<summary>
|
||
The configuration for the prebuilt voice to use.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.WebChunk">
|
||
<summary>
|
||
Chunk from the web.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.WebChunk.Uri">
|
||
<summary>
|
||
URI reference of the chunk.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.WebChunk.Title">
|
||
<summary>
|
||
Title of the chunk.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.EditConfig">
|
||
<summary>
|
||
Edit config object for model versions 006 and greater. All editConfig subfields are optional. If not specified, the default editing mode is inpainting.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.EditConfig.EditMode">
|
||
<summary>
|
||
Optional. Describes the editing mode for the request. One editing mode per request.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.EditConfig.GuidanceScale">
|
||
<summary>
|
||
Optional. Controls how much the model adheres to the text prompt. Large values increase output and prompt alignment, but may compromise image quality.
|
||
</summary>
|
||
<remarks>Values: 0-500 - Default: 60</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.EditConfig.MaskMode">
|
||
<summary>
|
||
Optional.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.EditConfig.MaskDilation">
|
||
<summary>
|
||
Optional. Determines the dilation percentage of the mask provided.
|
||
</summary>
|
||
<remarks>0.03 (3%) is the default value of shortest side. Minimum: 0, Maximum: 1</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.EditConfig.ProductPosition">
|
||
<summary>
|
||
Optional. Defines whether the detected product should stay fixed or be repositioned. If you set this field, you must also set "editMode": "product-image".
|
||
</summary>
|
||
<remarks>Values:
|
||
reposition - Lets the model move the location of the detected product or object. (default value)
|
||
fixed - The model maintains the original positioning of the detected product or object
|
||
If the input image is not square, the model defaults to reposition.
|
||
</remarks>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GeneratedImage">
|
||
<summary>
|
||
An output image.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GeneratedImage.Image">
|
||
<summary>
|
||
The output image data.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GeneratedImage.RaiFilteredReason">
|
||
<summary>
|
||
Responsible AI filter reason if the image is filtered out of the response.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GeneratedImage.EnhancedPrompt">
|
||
<summary>
|
||
The rewritten prompt used for the image generation if the prompt enhancer is enabled.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerateImagesRequest">
|
||
<summary>
|
||
Request for image generation.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.GenerateImagesRequest.#ctor(System.String,System.Nullable{System.Int32})">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.GenerateImagesRequest"/> class.
|
||
</summary>
|
||
<param name="prompt">The text prompt guides what images the model generates.</param>
|
||
<param name="sampleCount">The number of generated images.</param>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="prompt"/> is <see langword="null"/>.</exception>
|
||
<exception cref="T:System.ArgumentOutOfRangeException">Thrown when the <paramref name="sampleCount"/> is less than 1 or greater than 8.</exception>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.GenerateImagesResponse">
|
||
<summary>
|
||
Response for image generation.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateImagesResponse.Images">
|
||
<summary>
|
||
Output only. A list of the generated images.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateImagesResponse.GeneratedImages">
|
||
<summary>
|
||
List of generated images.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateImagesResponse.Model">
|
||
<summary>
|
||
Output only. Model used to generate the images.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.GenerateImagesResponse.Object">
|
||
<summary>
|
||
Output only. Always \"image\", required by the SDK.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.Image">
|
||
<summary>
|
||
An image generated by the model.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Image.BytesBase64Encoded">
|
||
<summary>
|
||
A base64 encoded string of one (generated) image. (20 MB)
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Image.MimeType">
|
||
<summary>
|
||
The IANA standard MIME type of the image.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Image.GcsUri">
|
||
<summary>
|
||
Exists if storageUri is provided. The Cloud Storage uri where the generated images are stored.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Image.ImageBytes">
|
||
<summary>
|
||
The image bytes data.
|
||
<see cref="T:Mscc.GenerativeAI.Image"/> can contain a value for this field or the `GcsUri` field but not both.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.Image.B64Json">
|
||
<summary>
|
||
The base64-encoded JSON of the generated image.
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ImageGenerationParameters">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.SampleCount">
|
||
<summary>
|
||
The number of generated images.
|
||
</summary>
|
||
<remarks>Accepted integer values: 1-8 (v.002), 1-4 (v.005, v.006). Default value: 4.</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.NumberOfImages">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.StorageUri">
|
||
<summary>
|
||
Optional. Cloud Storage uri where to store the generated images.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.Seed">
|
||
<summary>
|
||
Optional. Pseudo random seed for reproducible generated outcome; setting the seed lets you generate deterministic output.
|
||
</summary>
|
||
<remarks>Version 006 model only: To use the seed field you must also set "addWatermark": false in the list of parameters.</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.Language">
|
||
<summary>
|
||
Optional. The text prompt for guiding the response.
|
||
</summary>
|
||
<remarks>en (default), de, fr, it, es</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.NegativePrompt">
|
||
<summary>
|
||
Optional. Description of what to discourage in the generated images.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.GuidanceScale">
|
||
<summary>
|
||
Optional. For model version 006 and greater use editConfig.guidanceScale.
|
||
</summary>
|
||
<remarks>
|
||
Controls how much the model adheres to the text prompt.
|
||
Large values increase output and prompt alignment, but may compromise image quality.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.DisablePersonFace">
|
||
<summary>
|
||
Optional. Whether to disable the person/face safety filter (so that person/face can be included in the generated images).
|
||
</summary>
|
||
<remarks>Deprecated (v.006 only): Use personGeneration instead.</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.Mode">
|
||
<summary>
|
||
Optional. With input prompt, image, mask - backgroundEditing mode enables background editing.
|
||
</summary>
|
||
<remarks>Values:
|
||
backgroundEditing
|
||
upscale
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.SampleImageSize">
|
||
<summary>
|
||
Optional. Sample image size when mode is set to upscale. This field is no longer required when upscaling. Use upscaleConfig.upscaleFactor to set the upscaled image size.
|
||
</summary>
|
||
<remarks>2048 or 4096</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.AspectRatio">
|
||
<summary>
|
||
Optional. The aspect ratio of the generated image.
|
||
</summary>
|
||
<remarks>Value: 1:1, 9:16*, 16:9*, 3:4*, or 4:3*</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.IncludeRaiReason">
|
||
<summary>
|
||
Optional. Whether to enable the Responsible AI filtered reason or error code for blocked output in the response content.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.IncludeSafetyAttributes">
|
||
<summary>
|
||
Optional. Whether to enable rounded Responsible AI scores for a list of safety attributes in responses for unfiltered input and output.
|
||
</summary>
|
||
<remarks>Safety attribute categories: "Death, Harm and Tragedy", "Firearms and Weapons", "Hate", "Health", "Illicit Drugs", "Politics", "Porn", "Religion and Belief", "Toxic", "Violence", "Vulgarity", "War and Conflict".</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.PersonGeneration">
|
||
<summary>
|
||
Optional. The safety setting that controls the type of people or face generation allowed.
|
||
</summary>
|
||
<remarks>"personGeneration": "allow_all" is not available in Imagen 2 Editing and is only available to approved users‡ in Imagen 2 Generation.
|
||
Values:
|
||
allow_all: Allow generation of people of all ages.
|
||
allow_adult (default): Allow generation of adults only.
|
||
dont_allow: Disables the inclusion of people or faces in images.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.SafetyFilterLevel">
|
||
<summary>
|
||
Optional. The safety setting that controls safety filter thresholds.
|
||
</summary>
|
||
<remarks>Values:
|
||
block_most: The highest threshold resulting in most requests blocked.
|
||
block_some (default): The medium threshold that balances blocks for potentially harmful and benign content.
|
||
block_few: Reduces the number of requests blocked due to safety filters. This setting might increase objectionable content generated by Imagen.
|
||
</remarks>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.AddWatermark">
|
||
<summary>
|
||
Defines whether the image will include a SynthID. For more information, see Identifying AI-generated content with SynthID.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.EditConfig">
|
||
<summary>
|
||
Edit config object for model versions 006 and greater. All editConfig subfields are optional. If not specified, the default editing mode is inpainting.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.UpscaleConfig">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.OutputOptions">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.EnhancePrompt">
|
||
<summary>
|
||
Whether to use the prompt rewriting logic.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.OutputGcsUri">
|
||
<summary>
|
||
Cloud Storage URI used to store the generated images.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.OutputMimeType">
|
||
<summary>
|
||
MIME type of the generated image.
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationParameters.OutputCompressionQuality">
|
||
<summary>
|
||
Compression quality of the generated image (for `image/jpeg` only).
|
||
</summary>
|
||
</member>
|
||
<member name="T:Mscc.GenerativeAI.ImageGenerationRequest">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationRequest.Instances">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="P:Mscc.GenerativeAI.ImageGenerationRequest.Parameters">
|
||
<summary>
|
||
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.ImageGenerationRequest.#ctor">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImageGenerationRequest"/> class.
|
||
</summary>
|
||
</member>
|
||
<member name="M:Mscc.GenerativeAI.ImageGenerationRequest.#ctor(System.String,System.Nullable{System.Int32})">
|
||
<summary>
|
||
Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImageGenerationRequest"/> class.
|
||
</summary>
|
||
<param name="prompt">The text prompt guides what images the model generates.</param>
|
||
<param name="sampleCount">The number of generated images.</param>
|
||
<exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="prompt"/> is <see langword="null"/>.</exception>
|
||
<exception cref="T:System.ArgumentOutOfRangeException">Thrown when the <paramref name="sampleCount"/> is less than 1 or greater than 8.</exception>
|
||
</member>
|
||
        <member name="T:Mscc.GenerativeAI.ImageGenerationResponse">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageGenerationResponse.Predictions">
            <summary>
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.ImageTextParameters">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageTextParameters.SampleCount">
            <summary>
            The number of generated images.
            </summary>
            <remarks>Accepted integer values: 1-3</remarks>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageTextParameters.StorageUri">
            <summary>
            Optional. Cloud Storage uri where to store the generated images.
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageTextParameters.Seed">
            <summary>
            Optional. The seed for random number generator (RNG). If RNG seed is the same for requests with the inputs, the prediction results will be the same.
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageTextParameters.Language">
            <summary>
            Optional. The text prompt for guiding the response.
            </summary>
            <remarks>en (default), de, fr, it, es</remarks>
        </member>
        <member name="T:Mscc.GenerativeAI.ImageTextRequest">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageTextRequest.Instances">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageTextRequest.Parameters">
            <summary>
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.ImageTextRequest.#ctor">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImageTextRequest"/> class.
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.ImageTextRequest.#ctor(System.String,System.String,System.Nullable{System.Int32},System.String,System.String)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.ImageTextRequest"/> class.
            </summary>
            <param name="base64Image">The base64 encoded image to process.</param>
            <param name="question">The question to ask about the image.</param>
            <param name="sampleCount">The number of predictions.</param>
            <param name="language">Language of predicted text. Defaults to "en".</param>
            <param name="storageUri">Optional. Cloud Storage uri where to store the generated predictions.</param>
            <exception cref="T:System.ArgumentNullException">Thrown when the <paramref name="base64Image"/> is <see langword="null"/>.</exception>
            <exception cref="T:System.ArgumentOutOfRangeException">Thrown when the <paramref name="sampleCount"/> is less than 1 or greater than 3.</exception>
            <exception cref="T:System.NotSupportedException">Thrown when the <paramref name="language"/> is not supported.</exception>
        </member>
        <member name="T:Mscc.GenerativeAI.ImageTextResponse">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageTextResponse.Predictions">
            <summary>
            List of text strings representing captions, sorted by confidence.
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageTextResponse.DeployedModelId">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageTextResponse.Model">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageTextResponse.ModelDisplayName">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.ImageTextResponse.ModelVersionId">
            <summary>
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.Instance">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.Instance.Prompt">
            <summary>
            The text prompt guides what images the model generates. This field is required for both generation and editing.
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.Instance.Image">
            <summary>
            Optional. Input image for editing.
            </summary>
            <remarks>Base64 encoded image (20 MB)</remarks>
        </member>
        <member name="P:Mscc.GenerativeAI.Instance.Mask">
            <summary>
            Optional. Mask image for mask-based editing.
            </summary>
            <remarks>Base64 input image with 1s and 0s where 1 indicates regions to keep (PNG) (20 MB)</remarks>
        </member>
        <member name="T:Mscc.GenerativeAI.Mask">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.Mask.Image">
            <summary>
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.MaskMode">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.MaskMode.MaskType">
            <summary>
            Optional. Prompts the model to generate a mask instead of you needing to provide one. Consequently, when you provide this parameter you can omit a mask object.
            </summary>
            <remarks>Values:
            background: Automatically generates a mask to all regions except primary object, person, or subject in the image
            foreground: Automatically generates a mask to the primary object, person, or subject in the image
            semantic: Use automatic segmentation to create a mask area for one or more of the segmentation classes. Set the segmentation classes using the classes parameter and the corresponding class_id values. You can specify up to 5 classes.
            </remarks>
        </member>
        <member name="P:Mscc.GenerativeAI.MaskMode.Classes">
            <summary>
            Optional. Determines the classes of objects that will be segmented in an automatically generated mask image.
            If you use this field, you must also set "maskType": "semantic".
            See <a href="https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/image-generation#segment-ids">Segmentation class IDs</a>
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.OutputOptions">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.OutputOptions.MimeType">
            <summary>
            Optional. The IANA standard MIME type of the image.
            </summary>
            <remarks>Values:
            image/jpeg
            image/png
            </remarks>
        </member>
        <member name="P:Mscc.GenerativeAI.OutputOptions.CompressionQuality">
            <summary>
            Optional. The compression quality of the output image if encoding in image/jpeg.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.UpscaleConfig">
            <summary>
            </summary>
        </member>
        <member name="P:Mscc.GenerativeAI.UpscaleConfig.UpscaleFactor">
            <summary>
            Optional. When upscaling, the factor to which the image will be upscaled. If not specified, the upscale factor will be determined from the longer side of the input image and sampleImageSize.
            </summary>
        </member>
        <member name="T:Mscc.GenerativeAI.ResponseSchemaJsonConverter">
            <summary>
            Custom JSON converter to serialize and deserialize JSON schema.
            </summary>
        </member>
        <member name="M:Mscc.GenerativeAI.ResponseSchemaJsonConverter.Read(System.Text.Json.Utf8JsonReader@,System.Type,System.Text.Json.JsonSerializerOptions)">
            <inheritdoc cref="T:System.Text.Json.Serialization.JsonConverter"/>
        </member>
        <member name="M:Mscc.GenerativeAI.ResponseSchemaJsonConverter.Write(System.Text.Json.Utf8JsonWriter,System.Object,System.Text.Json.JsonSerializerOptions)">
            <inheritdoc cref="T:System.Text.Json.Serialization.JsonConverter"/>
        </member>
        <member name="T:Mscc.GenerativeAI.VertexAI">
            <summary>
            Entry point to access Gemini API running in Vertex AI.
            </summary>
            <remarks>
            See <a href="https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/overview">Model reference</a>.
            See also https://cloud.google.com/nodejs/docs/reference/vertexai/latest/vertexai/vertexinit
            </remarks>
        </member>
        <member name="M:Mscc.GenerativeAI.VertexAI.#ctor(Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.VertexAI"/> class with access to Vertex AI Gemini API.
            The default constructor attempts to read <c>.env</c> file and environment variables.
            Sets default values, if available.
            </summary>
            <remarks>The following environment variables are used:
            <list type="table">
            <item><term>GOOGLE_PROJECT_ID</term>
            <description>Identifier of the Google Cloud project.</description></item>
            <item><term>GOOGLE_REGION</term>
            <description>Identifier of the Google Cloud region to use (default: "us-central1").</description></item>
            </list>
            </remarks>
        </member>
        <member name="M:Mscc.GenerativeAI.VertexAI.#ctor(System.String,System.String,Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.VertexAI"/> class with access to Vertex AI Gemini API.
            </summary>
            <param name="projectId">Identifier of the Google Cloud project.</param>
            <param name="region">Optional. Region to use (default: "us-central1").</param>
            <param name="logger">Optional. Logger instance used for logging.</param>
            <exception cref="T:System.ArgumentNullException">Thrown when <paramref name="projectId"/> is <see langword="null"/>.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.VertexAI.#ctor(System.String,Microsoft.Extensions.Logging.ILogger)">
            <summary>
            Initializes a new instance of the <see cref="T:Mscc.GenerativeAI.VertexAI"/> class with access to Vertex AI Gemini API.
            </summary>
            <param name="apiKey">API key for Vertex AI in express mode.</param>
            <param name="logger">Optional. Logger instance used for logging.</param>
            <exception cref="T:System.ArgumentNullException">Thrown when <paramref name="apiKey"/> is <see langword="null"/>.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.VertexAI.GenerativeModel(System.String,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting},System.Collections.Generic.List{Mscc.GenerativeAI.Tool},Mscc.GenerativeAI.Content)">
            <summary>
            Create a generative model on Vertex AI to use.
            </summary>
            <param name="model">Model to use (default: "gemini-1.5-pro")</param>
            <param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
            <param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
            <param name="tools">Optional. A list of Tools the model may use to generate the next response.</param>
            <param name="systemInstruction">Optional. </param>
            <returns>Generative model instance.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when "projectId" or "region" is <see langword="null"/>.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.VertexAI.GenerativeModel(Mscc.GenerativeAI.CachedContent,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting})">
            <summary>
            Create a generative model on Vertex AI to use.
            </summary>
            <param name="cachedContent">Content that has been preprocessed.</param>
            <param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
            <param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
            <returns>Generative model instance.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when <paramref name="cachedContent"/> is null.</exception>
            <exception cref="T:System.ArgumentNullException">Thrown when "projectId" or "region" is <see langword="null"/>.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.VertexAI.GenerativeModel(Mscc.GenerativeAI.TuningJob,Mscc.GenerativeAI.GenerationConfig,System.Collections.Generic.List{Mscc.GenerativeAI.SafetySetting})">
            <summary>
            Create a generative model on Vertex AI to use.
            </summary>
            <param name="tuningJob">Tuning Job to use with the model.</param>
            <param name="generationConfig">Optional. Configuration options for model generation and outputs.</param>
            <param name="safetySettings">Optional. A list of unique SafetySetting instances for blocking unsafe content.</param>
            <returns>Generative model instance.</returns>
            <exception cref="T:System.ArgumentNullException">Thrown when <paramref name="tuningJob"/> is null.</exception>
            <exception cref="T:System.ArgumentNullException">Thrown when "projectId" or "region" is <see langword="null"/>.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.VertexAI.GetModel(System.String)">
            <inheritdoc cref="T:Mscc.GenerativeAI.IGenerativeAI"/>
        </member>
        <member name="M:Mscc.GenerativeAI.VertexAI.SupervisedTuningJob(System.String)">
            <summary>
            </summary>
            <param name="model">Model to use.</param>
            <returns></returns>
            <exception cref="T:System.ArgumentNullException">Thrown when "projectId" or "region" is <see langword="null"/>.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.VertexAI.ImageGenerationModel(System.String)">
            <summary>
            </summary>
            <param name="model">Model to use (default: "imagegeneration")</param>
            <returns></returns>
            <exception cref="T:System.ArgumentNullException">Thrown when "projectId" or "region" is <see langword="null"/>.</exception>
        </member>
        <member name="M:Mscc.GenerativeAI.VertexAI.ImageTextModel(System.String)">
            <summary>
            </summary>
            <param name="model">Model to use (default: "imagetext")</param>
            <returns></returns>
            <exception cref="T:System.ArgumentNullException">Thrown when "projectId" or "region" is <see langword="null"/>.</exception>
        </member>
    </members>
</doc>