ChatCompletion
Run RAG chat with retrievers/tools
ChatCompletion
Run RAG chat with retrievers/tools
yaml
type: io.kestra.plugin.ai.rag.ChatCompletion
Examples
yaml
id: rag
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
drop: true
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
- id: parallel
type: io.kestra.plugin.core.flow.Parallel
tasks:
- id: chat_without_rag
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
messages:
- type: USER
content: Which features were released in Kestra 0.24?
- id: chat_with_rag
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
embeddingProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
systemMessage: You are a helpful assistant that can answer questions about Kestra.
prompt: Which features were released in Kestra 0.24?
pluginDefaults:
- type: io.kestra.plugin.ai.provider.GoogleGemini
values:
apiKey: "{{ secret('GEMINI_API_KEY') }}"
modelName: gemini-2.5-flash
yaml
id: rag_with_websearch_content_retriever
namespace: company.ai
tasks:
- id: chat_with_rag_and_websearch_content_retriever
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.TavilyWebSearch
apiKey: "{{ secret('TAVILY_API_KEY') }}"
systemMessage: You are a helpful assistant that can answer questions about Kestra.
prompt: What is the latest release of Kestra?
yaml
id: chat_with_memory
namespace: company.ai
inputs:
- id: first
type: STRING
defaults: Hello, my name is John and I'm from Paris
- id: second
type: STRING
defaults: What's my name and where do I live?
tasks:
- id: first
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
embeddingProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
memory:
type: io.kestra.plugin.ai.memory.KestraKVStore
ttl: PT1M
systemMessage: You are a helpful assistant, answer concisely
prompt: "{{ inputs.first }}"
- id: second
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
embeddingProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
memory:
type: io.kestra.plugin.ai.memory.KestraKVStore
systemMessage: You are a helpful assistant, answer concisely
prompt: "{{ inputs.second }}"
pluginDefaults:
- type: io.kestra.plugin.ai.provider.GoogleGemini
values:
apiKey: "{{ secret('GEMINI_API_KEY') }}"
modelName: gemini-2.5-flash
yaml
id: chat_with_structured_output
namespace: company.ai
tasks:
- id: categorize_releases
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.MistralAI
apiKey: "{{ secret('MISTRAL_API_KEY') }}"
modelName: open-mistral-7b
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.TavilyWebSearch
apiKey: "{{ secret('TAVILY_API_KEY') }}"
maxResults: 8
chatConfiguration:
responseFormat:
type: JSON
jsonSchema:
type: object
required: ["releases"]
properties:
releases:
type: array
minItems: 1
items:
type: object
additionalProperties: false
required: ["version", "date", "semver"]
properties:
version:
type: string
description: "Release tag, e.g., 0.24.0"
date:
type: string
description: "Release date"
semver:
type: string
enum: ["MINOR", "PATCH"]
summary:
type: string
description: "Short plain-text summary (optional)"
systemMessage: |
You are a release analyst. Use the Tavily web retriever to find recent Kestra releases.
Determine each release's SemVer category:
- MINOR: new features, no major breaking changes (y in x.Y.z)
- PATCH: bug fixes/patches only (z in x.y.Z)
Return ONLY valid JSON matching the schema. No prose, no extra keys.
prompt: |
Find most recent Kestra releases (within the last ~6 months).
Output their version, release date, semver category, and a one-line summary.
Properties
chatProvider *RequiredNon-dynamic
Definitions
Use Amazon Bedrock models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AmazonBedrock
accessKeyId: "{{ secret('AWS_ACCESS_KEY') }}"
secretAccessKey: "{{ secret('AWS_SECRET_KEY') }}"
modelName: anthropic.claude-3-sonnet-20240229-v1:0
thinkingBudgetTokens: 1024
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accessKeyId*Requiredstring
modelName*Requiredstring
secretAccessKey*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AmazonBedrockio.kestra.plugin.langchain4j.provider.AmazonBedrockbaseUrlstring
caPemstring
clientPemstring
modelTypestring
Default
COHEREPossible Values
COHERETITANUse Anthropic Claude models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Anthropic
apiKey: "{{ secret('ANTHROPIC_API_KEY') }}"
modelName: claude-3-haiku-20240307
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: false
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Anthropicio.kestra.plugin.langchain4j.provider.AnthropicbaseUrlstring
caPemstring
clientPemstring
maxTokensintegerstring
Use Azure OpenAI deployments
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AzureOpenAI
apiKey: "{{ secret('AZURE_API_KEY') }}"
endpoint: https://your-resource.openai.azure.com/
modelName: gpt-4o
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AzureOpenAIio.kestra.plugin.langchain4j.provider.AzureOpenAIapiKeystring
baseUrlstring
caPemstring
clientIdstring
clientPemstring
clientSecretstring
serviceVersionstring
tenantIdstring
Use DashScope (Qwen) models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DashScope
apiKey: "{{ secret('DASHSCOPE_API_KEY') }}"
modelName: qwen-plus
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://dashscope-intl.aliyuncs.com/api/v1caPemstring
clientPemstring
enableSearchbooleanstring
maxTokensintegerstring
repetitionPenaltynumberstring
Use DeepSeek models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DeepSeek
apiKey: "{{ secret('DEEPSEEK_API_KEY') }}"
modelName: deepseek-chat
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.DeepSeekio.kestra.plugin.langchain4j.provider.DeepSeekbaseUrlstring
Default
https://api.deepseek.com/v1caPemstring
clientPemstring
Use GitHub Models via Azure AI Inference
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GitHubModels
gitHubToken: "{{ secret('GITHUB_TOKEN') }}"
modelName: gpt-4o-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely.
- type: USER
content: "{{ inputs.prompt }}"
gitHubToken*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Google Gemini models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
clientPem: "{{ secret('CLIENT_PEM') }}"
caPem: "{{ secret('CA_PEM') }}"
baseUrl: "https://internal.gemini.company.com/endpoint"
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleGeminiio.kestra.plugin.langchain4j.provider.GoogleGeminibaseUrlstring
caPemstring
clientPemstring
embeddingModelConfiguration
io.kestra.plugin.ai.provider.GoogleGemini-EmbeddingModelConfiguration
maxRetriesintegerstring
outputDimensionalityintegerstring
taskTypestring
Possible Values
RETRIEVAL_QUERYRETRIEVAL_DOCUMENTSEMANTIC_SIMILARITYCLASSIFICATIONCLUSTERINGQUESTION_ANSWERINGFACT_VERIFICATIONtimeoutstring
titleMetadataKeystring
Use Google Vertex AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleVertexAI
endpoint: your-vertex-ai-endpoint
location: your-google-cloud-region
project: your-google-cloud-project-id
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
location*Requiredstring
modelName*Requiredstring
project*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleVertexAIio.kestra.plugin.langchain4j.provider.GoogleVertexAIbaseUrlstring
caPemstring
clientPemstring
Use Hugging Face Inference endpoints
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.HuggingFace
apiKey: "{{ secret('HUGGING_FACE_API_KEY') }}"
modelName: HuggingFaceTB/SmolLM3-3B:hf-inference
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://router.huggingface.co/v1caPemstring
clientPemstring
Use LocalAI OpenAI-compatible server
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.LocalAI
modelName: gemma-3-1b-it
baseUrl: http://localhost:8080/v1
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
baseUrl*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.LocalAIio.kestra.plugin.langchain4j.provider.LocalAIcaPemstring
clientPemstring
Use Mistral models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.MistralAI
apiKey: "{{ secret('MISTRAL_API_KEY') }}"
modelName: open-mistral-7b
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.MistralAIio.kestra.plugin.langchain4j.provider.MistralAIbaseUrlstring
caPemstring
clientPemstring
Use OCI Generative AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OciGenAI
region: "{{ secret('OCI_GENAI_MODEL_REGION_PROPERTY') }}"
compartmentId: "{{ secret('OCI_GENAI_COMPARTMENT_ID_PROPERTY') }}"
authProvider: "{{ secret('OCI_GENAI_CONFIG_PROFILE_PROPERTY') }}"
modelName: oracle.chat.gpt-3.5
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
compartmentId*Requiredstring
modelName*Requiredstring
region*Requiredstring
type*Requiredobject
authProviderstring
baseUrlstring
caPemstring
clientPemstring
Use local Ollama models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Ollama
modelName: llama3
endpoint: http://localhost:11434
thinkingEnabled: true
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Ollamaio.kestra.plugin.langchain4j.provider.OllamabaseUrlstring
caPemstring
clientPemstring
Use OpenAI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenAI
apiKey: "{{ secret('OPENAI_API_KEY') }}"
modelName: gpt-5-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenAIio.kestra.plugin.langchain4j.provider.OpenAIbaseUrlstring
Default
https://api.openai.com/v1caPemstring
clientPemstring
Use OpenRouter models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenRouter
apiKey: "{{ secret('OPENROUTER_API_KEY') }}"
baseUrl: https://openrouter.ai/api/v1
modelName: x-ai/grok-beta
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenRouterio.kestra.plugin.langchain4j.provider.OpenRouterbaseUrlstring
caPemstring
clientPemstring
Use IBM watsonx.ai models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WatsonxAI
apiKey: "{{ secret('WATSONX_API_KEY') }}"
projectId: "{{ secret('WATSONX_PROJECT_ID') }}"
modelName: ibm/granite-3-3-8b-instruct
baseUrl: "https://api.eu-de.dataplatform.cloud.ibm.com/wx"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
projectId*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Cloudflare Workers AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WorkersAI
accountId: "{{ secret('WORKERS_AI_ACCOUNT_ID') }}"
apiKey: "{{ secret('WORKERS_AI_API_KEY') }}"
modelName: "@cf/meta/llama-2-7b-chat-fp16"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accountId*Requiredstring
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.WorkersAIio.kestra.plugin.langchain4j.provider.WorkersAIbaseUrlstring
caPemstring
clientPemstring
Use ZhiPu AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.ZhiPuAI
apiKey: "{{ secret('ZHIPU_API_KEY') }}"
modelName: glm-4.5-flash
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://open.bigmodel.cn/caPemstring
clientPemstring
maxRetriesintegerstring
maxTokenintegerstring
stopsarray
SubTypestring
prompt *Requiredstring
chatConfiguration Non-dynamic
Default
{} Definitions
io.kestra.plugin.ai.domain.ChatConfiguration
logRequestsbooleanstring
logResponsesbooleanstring
maxTokenintegerstring
promptCachingbooleanstring
responseFormat
io.kestra.plugin.ai.domain.ChatConfiguration-ResponseFormat
jsonSchemaobject
jsonSchemaDescriptionstring
strictJsonbooleanstring
Default
falsetypestring
Default
TEXTPossible Values
TEXTJSONreturnThinkingbooleanstring
seedintegerstring
temperaturenumberstring
thinkingBudgetTokensintegerstring
thinkingEnabledbooleanstring
topKintegerstring
topPnumberstring
contentRetrieverConfiguration Non-dynamic
Default
{
"maxResults": 3,
"minScore": 0
} Definitions
io.kestra.plugin.ai.rag.ChatCompletion-ContentRetrieverConfiguration
maxResultsinteger
Default
3minScorenumber
Default
0contentRetrievers
Definitions
Retrieve context from an embedding store
Example
yaml
id: agent_with_rag
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
drop: true
fromDocuments:
- content: Paris is the capital of France with a population of over 2.1 million people
- content: The Eiffel Tower is the most famous landmark in Paris at 330 meters tall
- id: agent
type: io.kestra.plugin.ai.agent.AIAgent
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.0-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.EmbeddingStoreRetriever
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
embeddingProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
maxResults: 3
minScore: 0.0
prompt: What is the capital of France and how many people live there?
yaml
id: multi_store_rag
namespace: company.ai
tasks:
- id: agent
type: io.kestra.plugin.ai.agent.AIAgent
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.0-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.EmbeddingStoreRetriever
embeddings:
type: io.kestra.plugin.ai.embeddings.Pinecone
pineconeApiKey: "{{ secret('PINECONE_API_KEY') }}"
index: technical-docs
embeddingProvider:
type: io.kestra.plugin.ai.provider.OpenAI
apiKey: "{{ secret('OPENAI_API_KEY') }}"
modelName: text-embedding-3-small
- type: io.kestra.plugin.ai.retriever.EmbeddingStoreRetriever
embeddings:
type: io.kestra.plugin.ai.embeddings.Qdrant
host: localhost
port: 6333
collectionName: business-docs
embeddingProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
- type: io.kestra.plugin.ai.retriever.TavilyWebSearch
apiKey: "{{ secret('TAVILY_API_KEY') }}"
prompt: What are the latest trends in data orchestration?
embeddingProvider*Required
Use Amazon Bedrock models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AmazonBedrock
accessKeyId: "{{ secret('AWS_ACCESS_KEY') }}"
secretAccessKey: "{{ secret('AWS_SECRET_KEY') }}"
modelName: anthropic.claude-3-sonnet-20240229-v1:0
thinkingBudgetTokens: 1024
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accessKeyId*Requiredstring
modelName*Requiredstring
secretAccessKey*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AmazonBedrockio.kestra.plugin.langchain4j.provider.AmazonBedrockbaseUrlstring
caPemstring
clientPemstring
modelTypestring
Default
COHEREPossible Values
COHERETITANUse Anthropic Claude models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Anthropic
apiKey: "{{ secret('ANTHROPIC_API_KEY') }}"
modelName: claude-3-haiku-20240307
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: false
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Anthropicio.kestra.plugin.langchain4j.provider.AnthropicbaseUrlstring
caPemstring
clientPemstring
maxTokensintegerstring
Use Azure OpenAI deployments
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AzureOpenAI
apiKey: "{{ secret('AZURE_API_KEY') }}"
endpoint: https://your-resource.openai.azure.com/
modelName: gpt-4o
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AzureOpenAIio.kestra.plugin.langchain4j.provider.AzureOpenAIapiKeystring
baseUrlstring
caPemstring
clientIdstring
clientPemstring
clientSecretstring
serviceVersionstring
tenantIdstring
Use DashScope (Qwen) models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DashScope
apiKey: "{{ secret('DASHSCOPE_API_KEY') }}"
modelName: qwen-plus
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://dashscope-intl.aliyuncs.com/api/v1caPemstring
clientPemstring
enableSearchbooleanstring
maxTokensintegerstring
repetitionPenaltynumberstring
Use DeepSeek models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DeepSeek
apiKey: "{{ secret('DEEPSEEK_API_KEY') }}"
modelName: deepseek-chat
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.DeepSeekio.kestra.plugin.langchain4j.provider.DeepSeekbaseUrlstring
Default
https://api.deepseek.com/v1caPemstring
clientPemstring
Use GitHub Models via Azure AI Inference
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GitHubModels
gitHubToken: "{{ secret('GITHUB_TOKEN') }}"
modelName: gpt-4o-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely.
- type: USER
content: "{{ inputs.prompt }}"
gitHubToken*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Google Gemini models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
clientPem: "{{ secret('CLIENT_PEM') }}"
caPem: "{{ secret('CA_PEM') }}"
baseUrl: "https://internal.gemini.company.com/endpoint"
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleGeminiio.kestra.plugin.langchain4j.provider.GoogleGeminibaseUrlstring
caPemstring
clientPemstring
embeddingModelConfiguration
io.kestra.plugin.ai.provider.GoogleGemini-EmbeddingModelConfiguration
maxRetriesintegerstring
outputDimensionalityintegerstring
taskTypestring
Possible Values
RETRIEVAL_QUERYRETRIEVAL_DOCUMENTSEMANTIC_SIMILARITYCLASSIFICATIONCLUSTERINGQUESTION_ANSWERINGFACT_VERIFICATIONtimeoutstring
titleMetadataKeystring
Use Google Vertex AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleVertexAI
endpoint: your-vertex-ai-endpoint
location: your-google-cloud-region
project: your-google-cloud-project-id
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
location*Requiredstring
modelName*Requiredstring
project*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleVertexAIio.kestra.plugin.langchain4j.provider.GoogleVertexAIbaseUrlstring
caPemstring
clientPemstring
Use Hugging Face Inference endpoints
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.HuggingFace
apiKey: "{{ secret('HUGGING_FACE_API_KEY') }}"
modelName: HuggingFaceTB/SmolLM3-3B:hf-inference
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://router.huggingface.co/v1caPemstring
clientPemstring
Use LocalAI OpenAI-compatible server
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.LocalAI
modelName: gemma-3-1b-it
baseUrl: http://localhost:8080/v1
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
baseUrl*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.LocalAIio.kestra.plugin.langchain4j.provider.LocalAIcaPemstring
clientPemstring
Use Mistral models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.MistralAI
apiKey: "{{ secret('MISTRAL_API_KEY') }}"
modelName: open-mistral-7b
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.MistralAIio.kestra.plugin.langchain4j.provider.MistralAIbaseUrlstring
caPemstring
clientPemstring
Use OCI Generative AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OciGenAI
region: "{{ secret('OCI_GENAI_MODEL_REGION_PROPERTY') }}"
compartmentId: "{{ secret('OCI_GENAI_COMPARTMENT_ID_PROPERTY') }}"
authProvider: "{{ secret('OCI_GENAI_CONFIG_PROFILE_PROPERTY') }}"
modelName: oracle.chat.gpt-3.5
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
compartmentId*Requiredstring
modelName*Requiredstring
region*Requiredstring
type*Requiredobject
authProviderstring
baseUrlstring
caPemstring
clientPemstring
Use local Ollama models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Ollama
modelName: llama3
endpoint: http://localhost:11434
thinkingEnabled: true
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Ollamaio.kestra.plugin.langchain4j.provider.OllamabaseUrlstring
caPemstring
clientPemstring
Use OpenAI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenAI
apiKey: "{{ secret('OPENAI_API_KEY') }}"
modelName: gpt-5-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenAIio.kestra.plugin.langchain4j.provider.OpenAIbaseUrlstring
Default
https://api.openai.com/v1caPemstring
clientPemstring
Use OpenRouter models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenRouter
apiKey: "{{ secret('OPENROUTER_API_KEY') }}"
baseUrl: https://openrouter.ai/api/v1
modelName: x-ai/grok-beta
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenRouterio.kestra.plugin.langchain4j.provider.OpenRouterbaseUrlstring
caPemstring
clientPemstring
Use IBM watsonx.ai models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WatsonxAI
apiKey: "{{ secret('WATSONX_API_KEY') }}"
projectId: "{{ secret('WATSONX_PROJECT_ID') }}"
modelName: ibm/granite-3-3-8b-instruct
baseUrl: "https://api.eu-de.dataplatform.cloud.ibm.com/wx"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
projectId*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Cloudflare Workers AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WorkersAI
accountId: "{{ secret('WORKERS_AI_ACCOUNT_ID') }}"
apiKey: "{{ secret('WORKERS_AI_API_KEY') }}"
modelName: "@cf/meta/llama-2-7b-chat-fp16"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accountId*Requiredstring
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.WorkersAIio.kestra.plugin.langchain4j.provider.WorkersAIbaseUrlstring
caPemstring
clientPemstring
Use ZhiPu AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.ZhiPuAI
apiKey: "{{ secret('ZHIPU_API_KEY') }}"
modelName: glm-4.5-flash
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://open.bigmodel.cn/caPemstring
clientPemstring
maxRetriesintegerstring
maxTokenintegerstring
stopsarray
SubTypestring
embeddings*Required
Store embeddings in Chroma
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Chroma
baseUrl: http://localhost:8000
collectionName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
baseUrl*Requiredstring
collectionName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Chromaio.kestra.plugin.langchain4j.embeddings.ChromaStore embeddings in Elasticsearch
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Elasticsearch
connection:
hosts:
- http://localhost:9200
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
connection*Required
io.kestra.plugin.ai.embeddings.Elasticsearch-ElasticsearchConnection
hosts*Requiredarray
SubTypestring
Min items
1basicAuth
headersarray
SubTypestring
pathPrefixstring
strictDeprecationModebooleanstring
trustAllSslbooleanstring
indexName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Elasticsearchio.kestra.plugin.langchain4j.embeddings.ElasticsearchPrototype embeddings in Kestra KV
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
drop: true
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.KestraKVStoreio.kestra.plugin.langchain4j.embeddings.KestraKVStorekvNamestring
Default
{{ flow.id }}-embedding-storeStore embeddings in MariaDB
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.MariaDB
username: "{{ secret('MARIADB_USERNAME') }}"
password: "{{ secret('MARIADB_PASSWORD') }}"
databaseUrl: "{{ secret('MARIADB_DATABASE_URL') }}"
tableName: embeddings
fieldName: id
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
createTable*Requiredbooleanstring
databaseUrl*Requiredstring
fieldName*Requiredstring
password*Requiredstring
tableName*Requiredstring
type*Requiredobject
username*Requiredstring
columnDefinitionsarray
SubTypestring
indexesarray
SubTypestring
metadataStorageModestring
Default
COLUMN_PER_KEYStore embeddings in Milvus
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Milvus
# Use either `uri` or `host`/`port`:
# For gRPC (typical): milvus://localhost:19530
# For HTTP: http://localhost:9091
uri: "http://localhost:9091"
token: "{{ secret('MILVUS_TOKEN') }}" # omit if auth is disabled
collectionName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
token*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Milvusio.kestra.plugin.langchain4j.embeddings.MilvusautoFlushOnDeletebooleanstring
autoFlushOnInsertbooleanstring
collectionNamestring
consistencyLevelstring
databaseNamestring
hoststring
idFieldNamestring
indexTypestring
metadataFieldNamestring
metricTypestring
passwordstring
portintegerstring
retrieveEmbeddingsOnSearchbooleanstring
textFieldNamestring
uristring
usernamestring
vectorFieldNamestring
Store embeddings in MongoDB Atlas
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.MongoDBAtlas
username: "{{ secret('MONGODB_ATLAS_USERNAME') }}"
password: "{{ secret('MONGODB_ATLAS_PASSWORD') }}"
host: "{{ secret('MONGODB_ATLAS_HOST') }}"
database: "{{ secret('MONGODB_ATLAS_DATABASE') }}"
collectionName: embeddings
indexName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
collectionName*Requiredstring
host*Requiredstring
indexName*Requiredstring
scheme*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.MongoDBAtlasio.kestra.plugin.langchain4j.embeddings.MongoDBAtlascreateIndexbooleanstring
databasestring
metadataFieldNamesarray
SubTypestring
optionsobject
passwordstring
usernamestring
Store embeddings with pgvector
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.PGVector
host: localhost
port: 5432
user: "{{ secret('POSTGRES_USER') }}"
password: "{{ secret('POSTGRES_PASSWORD') }}"
database: postgres
table: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
database*Requiredstring
host*Requiredstring
password*Requiredstring
port*Requiredintegerstring
table*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.PGVectorio.kestra.plugin.langchain4j.embeddings.PGVectoruser*Requiredstring
useIndexbooleanstring
Default
falseStore embeddings in Pinecone
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Pinecone
apiKey: "{{ secret('PINECONE_API_KEY') }}"
cloud: AWS
region: us-east-1
index: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
apiKey*Requiredstring
cloud*Requiredstring
index*Requiredstring
region*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Pineconeio.kestra.plugin.langchain4j.embeddings.Pineconenamespacestring
Store embeddings in Qdrant
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Qdrant
apiKey: "{{ secret('QDRANT_API_KEY') }}"
host: localhost
port: 6334
collectionName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
apiKey*Requiredstring
collectionName*Requiredstring
host*Requiredstring
port*Requiredintegerstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Qdrantio.kestra.plugin.langchain4j.embeddings.QdrantStore embeddings in Redis
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Redis
host: localhost
port: 6379
indexName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
host*Requiredstring
port*Requiredintegerstring
type*Requiredobject
indexNamestring
Default
embedding-indexStore embeddings in Alibaba Tablestore
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Tablestore
endpoint: "{{ secret('TABLESTORE_ENDPOINT') }}"
instanceName: "{{ secret('TABLESTORE_INSTANCE_NAME') }}"
accessKeyId: "{{ secret('TABLESTORE_ACCESS_KEY_ID') }}"
accessKeySecret: "{{ secret('TABLESTORE_ACCESS_KEY_SECRET') }}"
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
accessKeyId*Requiredstring
accessKeySecret*Requiredstring
endpoint*Requiredstring
instanceName*Requiredstring
type*Requiredobject
metadataSchemaListarray
com.alicloud.openservices.tablestore.model.search.FieldSchema
analyzerstring
Possible Values
SingleWordMaxWordMinWordSplitFuzzyanalyzerParameter
dateFormatsarray
SubTypestring
enableHighlightingboolean
enableSortAndAggboolean
fieldNamestring
fieldTypestring
Possible Values
LONGDOUBLEBOOLEANKEYWORDTEXTNESTEDGEO_POINTDATEVECTORFUZZY_KEYWORDIPJSONUNKNOWNindexboolean
indexOptionsstring
Possible Values
DOCSFREQSPOSITIONSOFFSETSisArrayboolean
jsonTypestring
Possible Values
FLATTENNESTEDsourceFieldNamesarray
SubTypestring
storeboolean
subFieldSchemasarray
vectorOptions
Store embeddings in Weaviate
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Weaviate
apiKey: "{{ secret('WEAVIATE_API_KEY') }}" # omit for local/no-auth
scheme: https # http | https
host: your-cluster-id.weaviate.network # no protocol
# port: 443 # optional; usually omit
drop: true
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
apiKey*Requiredstring
host*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Weaviateio.kestra.plugin.langchain4j.embeddings.WeaviateavoidDupsbooleanstring
consistencyLevelstring
Possible Values
ONEQUORUMALLgrpcPortintegerstring
metadataFieldNamestring
metadataKeysarray
SubTypestring
objectClassstring
portintegerstring
schemestring
securedGrpcbooleanstring
useGrpcForInsertsbooleanstring
type*Requiredobject
maxResultsintegerstring
Default
3minScorenumberstring
Default
0.0Retrieve web results with Google Custom Search
Example
yaml
id: rag
namespace: company.ai
tasks:
- id: chat_with_rag_and_websearch_content_retriever
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.GoogleCustomWebSearch
apiKey: "{{ secret('GOOGLE_SEARCH_API_KEY') }}"
csi: "{{ secret('GOOGLE_SEARCH_CSI') }}"
prompt: What is the latest release of Kestra?
apiKey*Requiredstring
csi*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.retriever.GoogleCustomWebSearchio.kestra.plugin.langchain4j.retriever.GoogleCustomWebSearchmaxResultsintegerstring
Default
3Retrieve context from SQL (experimental)
Example
yaml
id: rag
namespace: company.ai
tasks:
- id: chat_with_rag_and_sql_retriever
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.0-flash
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.SqlDatabaseRetriever
databaseType: POSTGRESQL
jdbcUrl: "jdbc:postgresql://localhost:5432/mydb"
username: "{{ secret('DB_USER') }}"
password: "{{ secret('DB_PASSWORD') }}"
prompt: "What are the top 5 customers by revenue?"
databaseType*Requiredobject
password*Requiredstring
provider*Required
Use Amazon Bedrock models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AmazonBedrock
accessKeyId: "{{ secret('AWS_ACCESS_KEY') }}"
secretAccessKey: "{{ secret('AWS_SECRET_KEY') }}"
modelName: anthropic.claude-3-sonnet-20240229-v1:0
thinkingBudgetTokens: 1024
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accessKeyId*Requiredstring
modelName*Requiredstring
secretAccessKey*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AmazonBedrockio.kestra.plugin.langchain4j.provider.AmazonBedrockbaseUrlstring
caPemstring
clientPemstring
modelTypestring
Default
COHEREPossible Values
COHERETITANUse Anthropic Claude models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Anthropic
apiKey: "{{ secret('ANTHROPIC_API_KEY') }}"
modelName: claude-3-haiku-20240307
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: false
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Anthropicio.kestra.plugin.langchain4j.provider.AnthropicbaseUrlstring
caPemstring
clientPemstring
maxTokensintegerstring
Use Azure OpenAI deployments
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AzureOpenAI
apiKey: "{{ secret('AZURE_API_KEY') }}"
endpoint: https://your-resource.openai.azure.com/
modelName: gpt-4o-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AzureOpenAIio.kestra.plugin.langchain4j.provider.AzureOpenAIapiKeystring
baseUrlstring
caPemstring
clientIdstring
clientPemstring
clientSecretstring
serviceVersionstring
tenantIdstring
Use DashScope (Qwen) models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DashScope
apiKey: "{{ secret('DASHSCOPE_API_KEY') }}"
modelName: qwen-plus
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://dashscope-intl.aliyuncs.com/api/v1caPemstring
clientPemstring
enableSearchbooleanstring
maxTokensintegerstring
repetitionPenaltynumberstring
Use DeepSeek models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DeepSeek
apiKey: "{{ secret('DEEPSEEK_API_KEY') }}"
modelName: deepseek-chat
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.DeepSeekio.kestra.plugin.langchain4j.provider.DeepSeekbaseUrlstring
Default
https://api.deepseek.com/v1caPemstring
clientPemstring
Use GitHub Models via Azure AI Inference
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GitHubModels
gitHubToken: "{{ secret('GITHUB_TOKEN') }}"
modelName: gpt-4o-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely.
- type: USER
content: "{{ inputs.prompt }}"
gitHubToken*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Google Gemini models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
clientPem: "{{ secret('CLIENT_PEM') }}"
caPem: "{{ secret('CA_PEM') }}"
baseUrl: "https://internal.gemini.company.com/endpoint"
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleGeminiio.kestra.plugin.langchain4j.provider.GoogleGeminibaseUrlstring
caPemstring
clientPemstring
embeddingModelConfiguration
io.kestra.plugin.ai.provider.GoogleGemini-EmbeddingModelConfiguration
maxRetriesintegerstring
outputDimensionalityintegerstring
taskTypestring
Possible Values
RETRIEVAL_QUERYRETRIEVAL_DOCUMENTSEMANTIC_SIMILARITYCLASSIFICATIONCLUSTERINGQUESTION_ANSWERINGFACT_VERIFICATIONtimeoutstring
titleMetadataKeystring
Use Google Vertex AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleVertexAI
modelName: gemini-2.5-flash
endpoint: your-vertex-ai-endpoint
location: your-google-cloud-region
project: your-google-cloud-project-id
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
location*Requiredstring
modelName*Requiredstring
project*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleVertexAIio.kestra.plugin.langchain4j.provider.GoogleVertexAIbaseUrlstring
caPemstring
clientPemstring
Use Hugging Face Inference endpoints
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.HuggingFace
apiKey: "{{ secret('HUGGING_FACE_API_KEY') }}"
modelName: HuggingFaceTB/SmolLM3-3B:hf-inference
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://router.huggingface.co/v1caPemstring
clientPemstring
Use LocalAI OpenAI-compatible server
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.LocalAI
modelName: gemma-3-1b-it
baseUrl: http://localhost:8080/v1
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
baseUrl*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.LocalAIio.kestra.plugin.langchain4j.provider.LocalAIcaPemstring
clientPemstring
Use Mistral models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.MistralAI
apiKey: "{{ secret('MISTRAL_API_KEY') }}"
modelName: mistral:7b
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.MistralAIio.kestra.plugin.langchain4j.provider.MistralAIbaseUrlstring
caPemstring
clientPemstring
Use OCI Generative AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OciGenAI
region: "{{ secret('OCI_GENAI_MODEL_REGION_PROPERTY') }}"
compartmentId: "{{ secret('OCI_GENAI_COMPARTMENT_ID_PROPERTY') }}"
authProvider: "{{ secret('OCI_GENAI_CONFIG_PROFILE_PROPERTY') }}"
modelName: oracle.chat.gpt-3.5
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
compartmentId*Requiredstring
modelName*Requiredstring
region*Requiredstring
type*Requiredobject
authProviderstring
baseUrlstring
caPemstring
clientPemstring
Use local Ollama models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Ollama
modelName: llama3
endpoint: http://localhost:11434
thinkingEnabled: true
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Ollamaio.kestra.plugin.langchain4j.provider.OllamabaseUrlstring
caPemstring
clientPemstring
Use OpenAI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenAI
apiKey: "{{ secret('OPENAI_API_KEY') }}"
modelName: gpt-5-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenAIio.kestra.plugin.langchain4j.provider.OpenAIbaseUrlstring
Default
https://api.openai.com/v1caPemstring
clientPemstring
Use OpenRouter models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenRouter
apiKey: "{{ secret('OPENROUTER_API_KEY') }}"
baseUrl: https://openrouter.ai/api/v1
modelName: x-ai/grok-beta
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenRouterio.kestra.plugin.langchain4j.provider.OpenRouterbaseUrlstring
caPemstring
clientPemstring
Use IBM watsonx.ai models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WatsonxAI
apiKey: "{{ secret('WATSONX_API_KEY') }}"
projectId: "{{ secret('WATSONX_PROJECT_ID') }}"
modelName: ibm/granite-3-3-8b-instruct
baseUrl: "https://api.eu-de.dataplatform.cloud.ibm.com/wx"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
projectId*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Cloudflare Workers AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WorkersAI
accountId: "{{ secret('WORKERS_AI_ACCOUNT_ID') }}"
apiKey: "{{ secret('WORKERS_AI_API_KEY') }}"
modelName: "@cf/meta/llama-2-7b-chat-fp16"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accountId*Requiredstring
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.WorkersAIio.kestra.plugin.langchain4j.provider.WorkersAIbaseUrlstring
caPemstring
clientPemstring
Use ZhiPu AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.ZhiPuAI
apiKey: "{{ secret('ZHIPU_API_KEY') }}"
modelName: glm-4.5-flash
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://open.bigmodel.cn/caPemstring
clientPemstring
maxRetriesintegerstring
maxTokenintegerstring
stopsarray
SubTypestring
type*Requiredobject
username*Requiredstring
configuration
Default
{}io.kestra.plugin.ai.domain.ChatConfiguration
logRequestsbooleanstring
logResponsesbooleanstring
maxTokenintegerstring
promptCachingbooleanstring
responseFormat
io.kestra.plugin.ai.domain.ChatConfiguration-ResponseFormat
jsonSchemaobject
jsonSchemaDescriptionstring
strictJsonbooleanstring
Default
falsetypestring
Default
TEXTPossible Values
TEXTJSONreturnThinkingbooleanstring
seedintegerstring
temperaturenumberstring
thinkingBudgetTokensintegerstring
thinkingEnabledbooleanstring
topKintegerstring
topPnumberstring
driverstring
jdbcUrlstring
maxPoolSizeintegerstring
Default
2Retrieve web results with Tavily
Example
yaml
id: rag
namespace: company.ai
tasks:
- id: chat_with_rag_and_websearch_content_retriever
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.TavilyWebSearch
apiKey: "{{ secret('TAVILY_API_KEY') }}"
prompt: What is the latest release of Kestra?
apiKey*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.retriever.TavilyWebSearchio.kestra.plugin.langchain4j.retriever.TavilyWebSearchmaxResultsintegerstring
Default
3embeddingProvider Non-dynamic
Definitions
Use Amazon Bedrock models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AmazonBedrock
accessKeyId: "{{ secret('AWS_ACCESS_KEY') }}"
secretAccessKey: "{{ secret('AWS_SECRET_KEY') }}"
modelName: anthropic.claude-3-sonnet-20240229-v1:0
thinkingBudgetTokens: 1024
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accessKeyId*Requiredstring
modelName*Requiredstring
secretAccessKey*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AmazonBedrockio.kestra.plugin.langchain4j.provider.AmazonBedrockbaseUrlstring
caPemstring
clientPemstring
modelTypestring
Default
COHEREPossible Values
COHERETITANUse Anthropic Claude models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Anthropic
apiKey: "{{ secret('ANTHROPIC_API_KEY') }}"
modelName: claude-3-haiku-20240307
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: false
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Anthropicio.kestra.plugin.langchain4j.provider.AnthropicbaseUrlstring
caPemstring
clientPemstring
maxTokensintegerstring
Use Azure OpenAI deployments
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AzureOpenAI
apiKey: "{{ secret('AZURE_API_KEY') }}"
endpoint: https://your-resource.openai.azure.com/
modelName: gpt-4o-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AzureOpenAIio.kestra.plugin.langchain4j.provider.AzureOpenAIapiKeystring
baseUrlstring
caPemstring
clientIdstring
clientPemstring
clientSecretstring
serviceVersionstring
tenantIdstring
Use DashScope (Qwen) models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DashScope
apiKey: "{{ secret('DASHSCOPE_API_KEY') }}"
modelName: qwen-plus
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://dashscope-intl.aliyuncs.com/api/v1caPemstring
clientPemstring
enableSearchbooleanstring
maxTokensintegerstring
repetitionPenaltynumberstring
Use DeepSeek models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DeepSeek
apiKey: "{{ secret('DEEPSEEK_API_KEY') }}"
modelName: deepseek-chat
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.DeepSeekio.kestra.plugin.langchain4j.provider.DeepSeekbaseUrlstring
Default
https://api.deepseek.com/v1caPemstring
clientPemstring
Use GitHub Models via Azure AI Inference
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GitHubModels
gitHubToken: "{{ secret('GITHUB_TOKEN') }}"
modelName: gpt-4o-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely.
- type: USER
content: "{{ inputs.prompt }}"
gitHubToken*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Google Gemini models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
clientPem: "{{ secret('CLIENT_PEM') }}"
caPem: "{{ secret('CA_PEM') }}"
baseUrl: "https://internal.gemini.company.com/endpoint"
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleGeminiio.kestra.plugin.langchain4j.provider.GoogleGeminibaseUrlstring
caPemstring
clientPemstring
embeddingModelConfiguration
io.kestra.plugin.ai.provider.GoogleGemini-EmbeddingModelConfiguration
maxRetriesintegerstring
outputDimensionalityintegerstring
taskTypestring
Possible Values
RETRIEVAL_QUERYRETRIEVAL_DOCUMENTSEMANTIC_SIMILARITYCLASSIFICATIONCLUSTERINGQUESTION_ANSWERINGFACT_VERIFICATIONtimeoutstring
titleMetadataKeystring
Use Google Vertex AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleVertexAI
modelName: gemini-2.5-flash
endpoint: your-vertex-ai-endpoint
location: your-google-cloud-region
project: your-google-cloud-project-id
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
location*Requiredstring
modelName*Requiredstring
project*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleVertexAIio.kestra.plugin.langchain4j.provider.GoogleVertexAIbaseUrlstring
caPemstring
clientPemstring
Use Hugging Face Inference endpoints
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.HuggingFace
apiKey: "{{ secret('HUGGING_FACE_API_KEY') }}"
modelName: HuggingFaceTB/SmolLM3-3B:hf-inference
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://router.huggingface.co/v1caPemstring
clientPemstring
Use LocalAI OpenAI-compatible server
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.LocalAI
modelName: gemma-3-1b-it
baseUrl: http://localhost:8080/v1
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
baseUrl*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.LocalAIio.kestra.plugin.langchain4j.provider.LocalAIcaPemstring
clientPemstring
Use Mistral models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.MistralAI
apiKey: "{{ secret('MISTRAL_API_KEY') }}"
modelName: open-mistral-7b
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.MistralAIio.kestra.plugin.langchain4j.provider.MistralAIbaseUrlstring
caPemstring
clientPemstring
Use OCI Generative AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OciGenAI
region: "{{ secret('OCI_GENAI_MODEL_REGION_PROPERTY') }}"
compartmentId: "{{ secret('OCI_GENAI_COMPARTMENT_ID_PROPERTY') }}"
authProvider: "{{ secret('OCI_GENAI_CONFIG_PROFILE_PROPERTY') }}"
modelName: oracle.chat.gpt-3.5
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
compartmentId*Requiredstring
modelName*Requiredstring
region*Requiredstring
type*Requiredobject
authProviderstring
baseUrlstring
caPemstring
clientPemstring
Use local Ollama models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Ollama
modelName: llama3
endpoint: http://localhost:11434
thinkingEnabled: true
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Ollamaio.kestra.plugin.langchain4j.provider.OllamabaseUrlstring
caPemstring
clientPemstring
Use OpenAI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenAI
apiKey: "{{ secret('OPENAI_API_KEY') }}"
modelName: gpt-5-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenAIio.kestra.plugin.langchain4j.provider.OpenAIbaseUrlstring
Default
https://api.openai.com/v1caPemstring
clientPemstring
Use OpenRouter models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenRouter
apiKey: "{{ secret('OPENROUTER_API_KEY') }}"
baseUrl: https://openrouter.ai/api/v1
modelName: x-ai/grok-beta
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenRouterio.kestra.plugin.langchain4j.provider.OpenRouterbaseUrlstring
caPemstring
clientPemstring
Use IBM watsonx.ai models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WatsonxAI
apiKey: "{{ secret('WATSONX_API_KEY') }}"
projectId: "{{ secret('WATSONX_PROJECT_ID') }}"
modelName: ibm/granite-3-3-8b-instruct
baseUrl: "https://api.eu-de.dataplatform.cloud.ibm.com/wx"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
projectId*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Cloudflare Workers AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WorkersAI
accountId: "{{ secret('WORKERS_AI_ACCOUNT_ID') }}"
apiKey: "{{ secret('WORKERS_AI_API_KEY') }}"
modelName: "@cf/meta/llama-2-7b-chat-fp16"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accountId*Requiredstring
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.WorkersAIio.kestra.plugin.langchain4j.provider.WorkersAIbaseUrlstring
caPemstring
clientPemstring
Use ZhiPu AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.ZhiPuAI
apiKey: "{{ secret('ZHIPU_API_KEY') }}"
modelName: glm-4.5-flash
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://open.bigmodel.cn/caPemstring
clientPemstring
maxRetriesintegerstring
maxTokenintegerstring
stopsarray
SubTypestring
embeddings Non-dynamic
Definitions
Store embeddings in Chroma
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Chroma
baseUrl: http://localhost:8000
collectionName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
baseUrl*Requiredstring
collectionName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Chromaio.kestra.plugin.langchain4j.embeddings.ChromaStore embeddings in Elasticsearch
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Elasticsearch
connection:
hosts:
- http://localhost:9200
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
connection*Required
io.kestra.plugin.ai.embeddings.Elasticsearch-ElasticsearchConnection
hosts*Requiredarray
SubTypestring
Min items
1basicAuth
io.kestra.plugin.ai.embeddings.Elasticsearch-ElasticsearchConnection-BasicAuth
passwordstring
usernamestring
headersarray
SubTypestring
pathPrefixstring
strictDeprecationModebooleanstring
trustAllSslbooleanstring
indexName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Elasticsearchio.kestra.plugin.langchain4j.embeddings.ElasticsearchPrototype embeddings in Kestra KV
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
drop: true
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.KestraKVStoreio.kestra.plugin.langchain4j.embeddings.KestraKVStorekvNamestring
Default
{{ flow.id }}-embedding-storeStore embeddings in MariaDB
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.MariaDB
username: "{{ secret('MARIADB_USERNAME') }}"
password: "{{ secret('MARIADB_PASSWORD') }}"
databaseUrl: "{{ secret('MARIADB_DATABASE_URL') }}"
tableName: embeddings
fieldName: id
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
createTable*Requiredbooleanstring
databaseUrl*Requiredstring
fieldName*Requiredstring
password*Requiredstring
tableName*Requiredstring
type*Requiredobject
username*Requiredstring
columnDefinitionsarray
SubTypestring
indexesarray
SubTypestring
metadataStorageModestring
Default
COLUMN_PER_KEYStore embeddings in Milvus
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Milvus
# Use either `uri` or `host`/`port`:
# For gRPC (typical): milvus://localhost:19530
# For HTTP: http://localhost:9091
uri: "http://localhost:9091"
token: "{{ secret('MILVUS_TOKEN') }}" # omit if auth is disabled
collectionName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
token*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Milvusio.kestra.plugin.langchain4j.embeddings.MilvusautoFlushOnDeletebooleanstring
autoFlushOnInsertbooleanstring
collectionNamestring
consistencyLevelstring
databaseNamestring
hoststring
idFieldNamestring
indexTypestring
metadataFieldNamestring
metricTypestring
passwordstring
portintegerstring
retrieveEmbeddingsOnSearchbooleanstring
textFieldNamestring
uristring
usernamestring
vectorFieldNamestring
Store embeddings in MongoDB Atlas
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.MongoDBAtlas
username: "{{ secret('MONGODB_ATLAS_USERNAME') }}"
password: "{{ secret('MONGODB_ATLAS_PASSWORD') }}"
host: "{{ secret('MONGODB_ATLAS_HOST') }}"
database: "{{ secret('MONGODB_ATLAS_DATABASE') }}"
collectionName: embeddings
indexName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
collectionName*Requiredstring
host*Requiredstring
indexName*Requiredstring
scheme*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.MongoDBAtlasio.kestra.plugin.langchain4j.embeddings.MongoDBAtlascreateIndexbooleanstring
databasestring
metadataFieldNamesarray
SubTypestring
optionsobject
passwordstring
usernamestring
Store embeddings with pgvector
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.PGVector
host: localhost
port: 5432
user: "{{ secret('POSTGRES_USER') }}"
password: "{{ secret('POSTGRES_PASSWORD') }}"
database: postgres
table: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
database*Requiredstring
host*Requiredstring
password*Requiredstring
port*Requiredintegerstring
table*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.PGVectorio.kestra.plugin.langchain4j.embeddings.PGVectoruser*Requiredstring
useIndexbooleanstring
Default
falseStore embeddings in Pinecone
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Pinecone
apiKey: "{{ secret('PINECONE_API_KEY') }}"
cloud: AWS
region: us-east-1
index: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
apiKey*Requiredstring
cloud*Requiredstring
index*Requiredstring
region*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Pineconeio.kestra.plugin.langchain4j.embeddings.Pineconenamespacestring
Store embeddings in Qdrant
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Qdrant
apiKey: "{{ secret('QDRANT_API_KEY') }}"
host: localhost
port: 6334
collectionName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
apiKey*Requiredstring
collectionName*Requiredstring
host*Requiredstring
port*Requiredintegerstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Qdrantio.kestra.plugin.langchain4j.embeddings.QdrantStore embeddings in Redis
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Redis
host: localhost
port: 6379
indexName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
host*Requiredstring
port*Requiredintegerstring
type*Requiredobject
indexNamestring
Default
embedding-indexStore embeddings in Alibaba Tablestore
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Tablestore
endpoint: "{{ secret('TABLESTORE_ENDPOINT') }}"
instanceName: "{{ secret('TABLESTORE_INSTANCE_NAME') }}"
accessKeyId: "{{ secret('TABLESTORE_ACCESS_KEY_ID') }}"
accessKeySecret: "{{ secret('TABLESTORE_ACCESS_KEY_SECRET') }}"
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
accessKeyId*Requiredstring
accessKeySecret*Requiredstring
endpoint*Requiredstring
instanceName*Requiredstring
type*Requiredobject
metadataSchemaListarray
com.alicloud.openservices.tablestore.model.search.FieldSchema
analyzerstring
Possible Values
SingleWordMaxWordMinWordSplitFuzzyanalyzerParameter
com.alicloud.openservices.tablestore.model.search.analysis.AnalyzerParameter
dateFormatsarray
SubTypestring
enableHighlightingboolean
enableSortAndAggboolean
fieldNamestring
fieldTypestring
Possible Values
LONGDOUBLEBOOLEANKEYWORDTEXTNESTEDGEO_POINTDATEVECTORFUZZY_KEYWORDIPJSONUNKNOWNindexboolean
indexOptionsstring
Possible Values
DOCSFREQSPOSITIONSOFFSETSisArrayboolean
jsonTypestring
Possible Values
FLATTENNESTEDsourceFieldNamesarray
SubTypestring
storeboolean
subFieldSchemasarray
com.alicloud.openservices.tablestore.model.search.FieldSchema
analyzerstring
Possible Values
SingleWordMaxWordMinWordSplitFuzzyanalyzerParameter
dateFormatsarray
SubTypestring
enableHighlightingboolean
enableSortAndAggboolean
fieldNamestring
fieldTypestring
Possible Values
LONGDOUBLEBOOLEANKEYWORDTEXTNESTEDGEO_POINTDATEVECTORFUZZY_KEYWORDIPJSONUNKNOWNindexboolean
indexOptionsstring
Possible Values
DOCSFREQSPOSITIONSOFFSETSisArrayboolean
jsonTypestring
Possible Values
FLATTENNESTEDsourceFieldNamesarray
SubTypestring
storeboolean
subFieldSchemasarray
vectorOptions
vectorOptions
com.alicloud.openservices.tablestore.model.search.vector.VectorOptions
dataTypestring
dimensioninteger
metricTypestring
Possible Values
EUCLIDEANCOSINEDOT_PRODUCTStore embeddings in Weaviate
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Weaviate
apiKey: "{{ secret('WEAVIATE_API_KEY') }}" # omit for local/no-auth
scheme: https # http | https
host: your-cluster-id.weaviate.network # no protocol
# port: 443 # optional; usually omit
drop: true
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
apiKey*Requiredstring
host*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Weaviateio.kestra.plugin.langchain4j.embeddings.WeaviateavoidDupsbooleanstring
consistencyLevelstring
Possible Values
ONEQUORUMALLgrpcPortintegerstring
metadataFieldNamestring
metadataKeysarray
SubTypestring
objectClassstring
portintegerstring
schemestring
securedGrpcbooleanstring
useGrpcForInsertsbooleanstring
guardrails Non-dynamic
Definitions
io.kestra.plugin.ai.domain.Guardrails
inputarray
io.kestra.plugin.ai.domain.GuardrailRule
expression*Requiredstring
Min length
1message*Requiredstring
Min length
1outputarray
io.kestra.plugin.ai.domain.GuardrailRule
expression*Requiredstring
Min length
1message*Requiredstring
Min length
1memory Non-dynamic
Definitions
Persist chat memory in Kestra KV
Example
yaml
id: chat_with_memory
namespace: company.ai
inputs:
- id: first
type: STRING
defaults: Hello, my name is John and I'm from Paris
- id: second
type: STRING
defaults: What's my name and where am I from?
tasks:
- id: first
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddingProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
memory:
type: io.kestra.plugin.ai.memory.KestraKVStore
systemMessage: You are a helpful assistant, answer concisely
prompt: "{{ inputs.first }}"
- id: second
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddingProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
memory:
type: io.kestra.plugin.ai.memory.KestraKVStore
drop: AFTER_TASKRUN
systemMessage: You are a helpful assistant, answer concisely
prompt: "{{ inputs.second }}"
type*Requiredobject
Possible Values
io.kestra.plugin.ai.memory.KestraKVStoreio.kestra.plugin.ai.memory.KestraKVMemoryio.kestra.plugin.langchain4j.memory.KestraKVMemorydropstring
Default
NEVERPossible Values
NEVERBEFORE_TASKRUNAFTER_TASKRUNmemoryIdstring
Default
{{ labels.system.correlationId }}messagesintegerstring
Default
10ttlstring
Default
PT1HPersist chat memory in PostgreSQL
Example
yaml
id: chat_with_memory
namespace: company.ai
tasks:
- id: first
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
memory:
type: io.kestra.plugin.ai.memory.PostgreSQL
host: localhost
port: 5432
database: ai_memory
user: postgres
password: secret
tableName: my_custom_memory_table
systemMessage: You are a helpful assistant, answer concisely
prompt: "{{ inputs.first }}"
database*Requiredstring
host*Requiredstring
password*Requiredstring
type*Requiredobject
user*Requiredstring
dropstring
Default
NEVERPossible Values
NEVERBEFORE_TASKRUNAFTER_TASKRUNmemoryIdstring
Default
{{ labels.system.correlationId }}messagesintegerstring
Default
10portintegerstring
Default
5432tableNamestring
Default
chat_memoryttlstring
Default
PT1HPersist chat memory in Redis
Example
yaml
id: chat_with_memory
namespace: company.ai
inputs:
- id: first
type: STRING
defaults: Hello, my name is John and I'm from Paris
- id: second
type: STRING
defaults: What's my name and where am I from?
tasks:
- id: first
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddingProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
memory:
type: io.kestra.plugin.ai.memory.Redis
host: localhost
port: 6379
systemMessage: You are a helpful assistant, answer concisely
prompt: "{{ inputs.first }}"
- id: second
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddingProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
memory:
type: io.kestra.plugin.ai.memory.Redis
host: localhost
port: 6379
drop: AFTER_TASKRUN
systemMessage: You are a helpful assistant, answer concisely
prompt: "{{ inputs.second }}"
host*Requiredstring
type*Requiredobject
dropstring
Default
NEVERPossible Values
NEVERBEFORE_TASKRUNAFTER_TASKRUNmemoryIdstring
Default
{{ labels.system.correlationId }}messagesintegerstring
Default
10portintegerstring
Default
6379ttlstring
Default
PT1HsystemMessage string
tools Non-dynamic
Definitions
Invoke remote AI agent over A2A
Example
yaml
id: ai-agent-with-agent-tools
namespace: company.ai
inputs:
- id: prompt
type: STRING
defaults: |
Each flow can produce outputs that can be consumed by other flows. This is a list property, so that your flow can produce as many outputs as you need.
Each output needs to have an ID (the name of the output), a type (the same types you know from inputs, e.g., STRING, URI, or JSON), and a value, which is the actual output value that will be stored in internal storage and passed to other flows when needed.
tasks:
- id: ai-agent
type: io.kestra.plugin.ai.agent.AIAgent
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
systemMessage: Summarize the user message, then translate it into French using the provided tool.
prompt: "{{ inputs.prompt }}"
tools:
- type: io.kestra.plugin.ai.tool.A2AClient
description: Translation expert
serverUrl: "http://localhost:10000"
description*Requiredstring
serverUrl*Requiredstring
type*Requiredobject
namestring
Default
toolExpose a nested AI Agent as a tool
Example
yaml
id: ai-agent-with-agent-tools
namespace: company.ai
inputs:
- id: prompt
type: STRING
defaults: |
Each flow can produce outputs that can be consumed by other flows. This is a list property, so that your flow can produce as many outputs as you need.
Each output needs to have an ID (the name of the output), a type (the same types you know from inputs, e.g., STRING, URI, or JSON), and a value, which is the actual output value that will be stored in internal storage and passed to other flows when needed.
tasks:
- id: ai-agent
type: io.kestra.plugin.ai.agent.AIAgent
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
systemMessage: Summarize the user message, then translate it into French using the provided tool.
prompt: "{{ inputs.prompt }}"
tools:
- type: io.kestra.plugin.ai.tool.AIAgent
description: Translation expert
systemMessage: You are an expert in translating text between multiple languages
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash-lite
apiKey: "{{ secret('GEMINI_API_KEY') }}"
description*Requiredstring
provider*Required
Use Amazon Bedrock models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AmazonBedrock
accessKeyId: "{{ secret('AWS_ACCESS_KEY') }}"
secretAccessKey: "{{ secret('AWS_SECRET_KEY') }}"
modelName: anthropic.claude-3-sonnet-20240229-v1:0
thinkingBudgetTokens: 1024
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accessKeyId*Requiredstring
modelName*Requiredstring
secretAccessKey*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AmazonBedrockio.kestra.plugin.langchain4j.provider.AmazonBedrockbaseUrlstring
caPemstring
clientPemstring
modelTypestring
Default
COHEREPossible Values
COHERETITANUse Anthropic Claude models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Anthropic
apiKey: "{{ secret('ANTHROPIC_API_KEY') }}"
modelName: claude-3-haiku-20240307
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: false
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Anthropicio.kestra.plugin.langchain4j.provider.AnthropicbaseUrlstring
caPemstring
clientPemstring
maxTokensintegerstring
Use Azure OpenAI deployments
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AzureOpenAI
apiKey: "{{ secret('AZURE_API_KEY') }}"
endpoint: https://your-resource.openai.azure.com/
modelName: gpt-4o
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AzureOpenAIio.kestra.plugin.langchain4j.provider.AzureOpenAIapiKeystring
baseUrlstring
caPemstring
clientIdstring
clientPemstring
clientSecretstring
serviceVersionstring
tenantIdstring
Use DashScope (Qwen) models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DashScope
apiKey: "{{ secret('DASHSCOPE_API_KEY') }}"
modelName: qwen-plus
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://dashscope-intl.aliyuncs.com/api/v1caPemstring
clientPemstring
enableSearchbooleanstring
maxTokensintegerstring
repetitionPenaltynumberstring
Use DeepSeek models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DeepSeek
apiKey: "{{ secret('DEEPSEEK_API_KEY') }}"
modelName: deepseek-chat
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.DeepSeekio.kestra.plugin.langchain4j.provider.DeepSeekbaseUrlstring
Default
https://api.deepseek.com/v1caPemstring
clientPemstring
Use GitHub Models via Azure AI Inference
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GitHubModels
gitHubToken: "{{ secret('GITHUB_TOKEN') }}"
modelName: gpt-4o-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely.
- type: USER
content: "{{ inputs.prompt }}"
gitHubToken*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Google Gemini models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
clientPem: "{{ secret('CLIENT_PEM') }}"
caPem: "{{ secret('CA_PEM') }}"
baseUrl: "https://internal.gemini.company.com/endpoint"
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleGeminiio.kestra.plugin.langchain4j.provider.GoogleGeminibaseUrlstring
caPemstring
clientPemstring
embeddingModelConfiguration
io.kestra.plugin.ai.provider.GoogleGemini-EmbeddingModelConfiguration
maxRetriesintegerstring
outputDimensionalityintegerstring
taskTypestring
Possible Values
RETRIEVAL_QUERYRETRIEVAL_DOCUMENTSEMANTIC_SIMILARITYCLASSIFICATIONCLUSTERINGQUESTION_ANSWERINGFACT_VERIFICATIONtimeoutstring
titleMetadataKeystring
Use Google Vertex AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleVertexAI
endpoint: your-vertex-ai-endpoint
location: your-google-cloud-region
project: your-google-cloud-project-id
modelName: gemini-2.0-flash
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
location*Requiredstring
modelName*Requiredstring
project*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleVertexAIio.kestra.plugin.langchain4j.provider.GoogleVertexAIbaseUrlstring
caPemstring
clientPemstring
Use Hugging Face Inference endpoints
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.HuggingFace
apiKey: "{{ secret('HUGGING_FACE_API_KEY') }}"
modelName: HuggingFaceTB/SmolLM3-3B:hf-inference
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://router.huggingface.co/v1caPemstring
clientPemstring
Use LocalAI OpenAI-compatible server
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.LocalAI
modelName: gemma-3-1b-it
baseUrl: http://localhost:8080/v1
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
baseUrl*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.LocalAIio.kestra.plugin.langchain4j.provider.LocalAIcaPemstring
clientPemstring
Use Mistral models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.MistralAI
apiKey: "{{ secret('MISTRAL_API_KEY') }}"
modelName: open-mistral-7b
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.MistralAIio.kestra.plugin.langchain4j.provider.MistralAIbaseUrlstring
caPemstring
clientPemstring
Use OCI Generative AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OciGenAI
region: "{{ secret('OCI_GENAI_MODEL_REGION_PROPERTY') }}"
compartmentId: "{{ secret('OCI_GENAI_COMPARTMENT_ID_PROPERTY') }}"
authProvider: "{{ secret('OCI_GENAI_CONFIG_PROFILE_PROPERTY') }}"
modelName: oracle.chat.gpt-3.5
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
compartmentId*Requiredstring
modelName*Requiredstring
region*Requiredstring
type*Requiredobject
authProviderstring
baseUrlstring
caPemstring
clientPemstring
Use local Ollama models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Ollama
modelName: llama3
endpoint: http://localhost:11434
thinkingEnabled: true
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Ollamaio.kestra.plugin.langchain4j.provider.OllamabaseUrlstring
caPemstring
clientPemstring
Use OpenAI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenAI
apiKey: "{{ secret('OPENAI_API_KEY') }}"
modelName: gpt-5-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenAIio.kestra.plugin.langchain4j.provider.OpenAIbaseUrlstring
Default
https://api.openai.com/v1caPemstring
clientPemstring
Use OpenRouter models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenRouter
apiKey: "{{ secret('OPENROUTER_API_KEY') }}"
baseUrl: https://openrouter.ai/api/v1
modelName: x-ai/grok-beta
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenRouterio.kestra.plugin.langchain4j.provider.OpenRouterbaseUrlstring
caPemstring
clientPemstring
Use IBM watsonx.ai models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WatsonxAI
apiKey: "{{ secret('WATSONX_API_KEY') }}"
projectId: "{{ secret('WATSONX_PROJECT_ID') }}"
modelName: ibm/granite-3-3-8b-instruct
baseUrl: "https://api.eu-de.dataplatform.cloud.ibm.com/wx"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
projectId*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Cloudflare Workers AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WorkersAI
accountId: "{{ secret('WORKERS_AI_ACCOUNT_ID') }}"
apiKey: "{{ secret('WORKERS_AI_API_KEY') }}"
modelName: "@cf/meta/llama-2-7b-chat-fp16"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accountId*Requiredstring
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.WorkersAIio.kestra.plugin.langchain4j.provider.WorkersAIbaseUrlstring
caPemstring
clientPemstring
Use ZhiPu AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.ZhiPuAI
apiKey: "{{ secret('ZHIPU_API_KEY') }}"
modelName: glm-4.5-flash
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://open.bigmodel.cn/caPemstring
clientPemstring
maxRetriesintegerstring
maxTokenintegerstring
stopsarray
SubTypestring
type*Requiredobject
configuration
Default
{}io.kestra.plugin.ai.domain.ChatConfiguration
logRequestsbooleanstring
logResponsesbooleanstring
maxTokenintegerstring
promptCachingbooleanstring
responseFormat
io.kestra.plugin.ai.domain.ChatConfiguration-ResponseFormat
jsonSchemaobject
jsonSchemaDescriptionstring
strictJsonbooleanstring
Default
falsetypestring
Default
TEXTPossible Values
TEXTJSONreturnThinkingbooleanstring
seedintegerstring
temperaturenumberstring
thinkingBudgetTokensintegerstring
thinkingEnabledbooleanstring
topKintegerstring
topPnumberstring
contentRetrievers
Retrieve context from an embedding store
Example
yaml
id: agent_with_rag
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
drop: true
fromDocuments:
- content: Paris is the capital of France with a population of over 2.1 million people
- content: The Eiffel Tower is the most famous landmark in Paris at 330 meters tall
- id: agent
type: io.kestra.plugin.ai.agent.AIAgent
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.0-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.EmbeddingStoreRetriever
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
embeddingProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
maxResults: 3
minScore: 0.0
prompt: What is the capital of France and how many people live there?
yaml
id: multi_store_rag
namespace: company.ai
tasks:
- id: agent
type: io.kestra.plugin.ai.agent.AIAgent
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.0-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.EmbeddingStoreRetriever
embeddings:
type: io.kestra.plugin.ai.embeddings.Pinecone
pineconeApiKey: "{{ secret('PINECONE_API_KEY') }}"
index: technical-docs
embeddingProvider:
type: io.kestra.plugin.ai.provider.OpenAI
apiKey: "{{ secret('OPENAI_API_KEY') }}"
modelName: text-embedding-3-small
- type: io.kestra.plugin.ai.retriever.EmbeddingStoreRetriever
embeddings:
type: io.kestra.plugin.ai.embeddings.Qdrant
host: localhost
port: 6333
collectionName: business-docs
embeddingProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
- type: io.kestra.plugin.ai.retriever.TavilyWebSearch
apiKey: "{{ secret('TAVILY_API_KEY') }}"
prompt: What are the latest trends in data orchestration?
embeddingProvider*Required
Use Amazon Bedrock models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AmazonBedrock
accessKeyId: "{{ secret('AWS_ACCESS_KEY') }}"
secretAccessKey: "{{ secret('AWS_SECRET_KEY') }}"
modelName: anthropic.claude-3-sonnet-20240229-v1:0
thinkingBudgetTokens: 1024
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accessKeyId*Requiredstring
modelName*Requiredstring
secretAccessKey*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AmazonBedrockio.kestra.plugin.langchain4j.provider.AmazonBedrockbaseUrlstring
caPemstring
clientPemstring
modelTypestring
Default
COHEREPossible Values
COHERETITANUse Anthropic Claude models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Anthropic
apiKey: "{{ secret('ANTHROPIC_API_KEY') }}"
modelName: claude-3-haiku-20240307
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: false
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Anthropicio.kestra.plugin.langchain4j.provider.AnthropicbaseUrlstring
caPemstring
clientPemstring
maxTokensintegerstring
Use Azure OpenAI deployments
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AzureOpenAI
apiKey: "{{ secret('AZURE_API_KEY') }}"
endpoint: https://your-resource.openai.azure.com/
modelName: gpt-4o
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AzureOpenAIio.kestra.plugin.langchain4j.provider.AzureOpenAIapiKeystring
baseUrlstring
caPemstring
clientIdstring
clientPemstring
clientSecretstring
serviceVersionstring
tenantIdstring
Use DashScope (Qwen) models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DashScope
apiKey: "{{ secret('DASHSCOPE_API_KEY') }}"
modelName: qwen-plus
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://dashscope-intl.aliyuncs.com/api/v1caPemstring
clientPemstring
enableSearchbooleanstring
maxTokensintegerstring
repetitionPenaltynumberstring
Use DeepSeek models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DeepSeek
apiKey: "{{ secret('DEEPSEEK_API_KEY') }}"
modelName: deepseek-chat
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.DeepSeekio.kestra.plugin.langchain4j.provider.DeepSeekbaseUrlstring
Default
https://api.deepseek.com/v1caPemstring
clientPemstring
Use GitHub Models via Azure AI Inference
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GitHubModels
gitHubToken: "{{ secret('GITHUB_TOKEN') }}"
modelName: gpt-4o-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely.
- type: USER
content: "{{ inputs.prompt }}"
gitHubToken*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Google Gemini models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
clientPem: "{{ secret('CLIENT_PEM') }}"
caPem: "{{ secret('CA_PEM') }}"
baseUrl: "https://internal.gemini.company.com/endpoint"
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleGeminiio.kestra.plugin.langchain4j.provider.GoogleGeminibaseUrlstring
caPemstring
clientPemstring
embeddingModelConfiguration
Use Google Vertex AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleVertexAI
endpoint: your-vertex-ai-endpoint
location: your-google-cloud-region
project: your-google-cloud-project-id
modelName: gemini-2.0-flash
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
location*Requiredstring
modelName*Requiredstring
project*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleVertexAIio.kestra.plugin.langchain4j.provider.GoogleVertexAIbaseUrlstring
caPemstring
clientPemstring
Use Hugging Face Inference endpoints
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.HuggingFace
apiKey: "{{ secret('HUGGING_FACE_API_KEY') }}"
modelName: HuggingFaceTB/SmolLM3-3B:hf-inference
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://router.huggingface.co/v1caPemstring
clientPemstring
Use LocalAI OpenAI-compatible server
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.LocalAI
modelName: gemma-3-1b-it
baseUrl: http://localhost:8080/v1
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
baseUrl*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.LocalAIio.kestra.plugin.langchain4j.provider.LocalAIcaPemstring
clientPemstring
Use Mistral models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.MistralAI
apiKey: "{{ secret('MISTRAL_API_KEY') }}"
modelName: open-mistral-7b
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.MistralAIio.kestra.plugin.langchain4j.provider.MistralAIbaseUrlstring
caPemstring
clientPemstring
Use OCI Generative AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OciGenAI
region: "{{ secret('OCI_GENAI_MODEL_REGION_PROPERTY') }}"
compartmentId: "{{ secret('OCI_GENAI_COMPARTMENT_ID_PROPERTY') }}"
authProvider: "{{ secret('OCI_GENAI_CONFIG_PROFILE_PROPERTY') }}"
modelName: oracle.chat.gpt-3.5
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
compartmentId*Requiredstring
modelName*Requiredstring
region*Requiredstring
type*Requiredobject
authProviderstring
baseUrlstring
caPemstring
clientPemstring
Use local Ollama models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Ollama
modelName: llama3
endpoint: http://localhost:11434
thinkingEnabled: true
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Ollamaio.kestra.plugin.langchain4j.provider.OllamabaseUrlstring
caPemstring
clientPemstring
Use OpenAI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenAI
apiKey: "{{ secret('OPENAI_API_KEY') }}"
modelName: gpt-5-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenAIio.kestra.plugin.langchain4j.provider.OpenAIbaseUrlstring
Default
https://api.openai.com/v1caPemstring
clientPemstring
Use OpenRouter models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenRouter
apiKey: "{{ secret('OPENROUTER_API_KEY') }}"
baseUrl: https://openrouter.ai/api/v1
modelName: x-ai/grok-beta
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenRouterio.kestra.plugin.langchain4j.provider.OpenRouterbaseUrlstring
caPemstring
clientPemstring
Use IBM watsonx.ai models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WatsonxAI
apiKey: "{{ secret('WATSONX_API_KEY') }}"
projectId: "{{ secret('WATSONX_PROJECT_ID') }}"
modelName: ibm/granite-3-3-8b-instruct
baseUrl: "https://api.eu-de.dataplatform.cloud.ibm.com/wx"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
projectId*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Cloudflare Workers AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WorkersAI
accountId: "{{ secret('WORKERS_AI_ACCOUNT_ID') }}"
apiKey: "{{ secret('WORKERS_AI_API_KEY') }}"
modelName: "@cf/meta/llama-2-7b-chat-fp16"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accountId*Requiredstring
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.WorkersAIio.kestra.plugin.langchain4j.provider.WorkersAIbaseUrlstring
caPemstring
clientPemstring
Use ZhiPu AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.ZhiPuAI
apiKey: "{{ secret('ZHIPU_API_KEY') }}"
modelName: glm-4.5-flash
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://open.bigmodel.cn/caPemstring
clientPemstring
maxRetriesintegerstring
maxTokenintegerstring
stopsarray
SubTypestring
embeddings*Required
Store embeddings in Chroma
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Chroma
baseUrl: http://localhost:8000
collectionName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
baseUrl*Requiredstring
collectionName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Chromaio.kestra.plugin.langchain4j.embeddings.ChromaStore embeddings in Elasticsearch
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Elasticsearch
connection:
hosts:
- http://localhost:9200
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
connection*Required
indexName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Elasticsearchio.kestra.plugin.langchain4j.embeddings.ElasticsearchPrototype embeddings in Kestra KV
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.KestraKVStore
drop: true
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.KestraKVStoreio.kestra.plugin.langchain4j.embeddings.KestraKVStorekvNamestring
Default
{{ flow.id }}-embedding-storeStore embeddings in MariaDB
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.MariaDB
username: "{{ secret('MARIADB_USERNAME') }}"
password: "{{ secret('MARIADB_PASSWORD') }}"
databaseUrl: "{{ secret('MARIADB_DATABASE_URL') }}"
tableName: embeddings
fieldName: id
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
createTable*Requiredbooleanstring
databaseUrl*Requiredstring
fieldName*Requiredstring
password*Requiredstring
tableName*Requiredstring
type*Requiredobject
username*Requiredstring
columnDefinitionsarray
SubTypestring
indexesarray
SubTypestring
metadataStorageModestring
Default
COLUMN_PER_KEYStore embeddings in Milvus
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Milvus
# Use either `uri` or `host`/`port`:
# For gRPC (typical): milvus://localhost:19530
# For HTTP: http://localhost:9091
uri: "http://localhost:9091"
token: "{{ secret('MILVUS_TOKEN') }}" # omit if auth is disabled
collectionName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
token*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Milvusio.kestra.plugin.langchain4j.embeddings.MilvusautoFlushOnDeletebooleanstring
autoFlushOnInsertbooleanstring
collectionNamestring
consistencyLevelstring
databaseNamestring
hoststring
idFieldNamestring
indexTypestring
metadataFieldNamestring
metricTypestring
passwordstring
portintegerstring
retrieveEmbeddingsOnSearchbooleanstring
textFieldNamestring
uristring
usernamestring
vectorFieldNamestring
Store embeddings in MongoDB Atlas
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.MongoDBAtlas
username: "{{ secret('MONGODB_ATLAS_USERNAME') }}"
password: "{{ secret('MONGODB_ATLAS_PASSWORD') }}"
host: "{{ secret('MONGODB_ATLAS_HOST') }}"
database: "{{ secret('MONGODB_ATLAS_DATABASE') }}"
collectionName: embeddings
indexName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
collectionName*Requiredstring
host*Requiredstring
indexName*Requiredstring
scheme*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.MongoDBAtlasio.kestra.plugin.langchain4j.embeddings.MongoDBAtlascreateIndexbooleanstring
databasestring
metadataFieldNamesarray
SubTypestring
optionsobject
passwordstring
usernamestring
Store embeddings with pgvector
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.PGVector
host: localhost
port: 5432
user: "{{ secret('POSTGRES_USER') }}"
password: "{{ secret('POSTGRES_PASSWORD') }}"
database: postgres
table: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
database*Requiredstring
host*Requiredstring
password*Requiredstring
port*Requiredintegerstring
table*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.PGVectorio.kestra.plugin.langchain4j.embeddings.PGVectoruser*Requiredstring
useIndexbooleanstring
Default
falseStore embeddings in Pinecone
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Pinecone
apiKey: "{{ secret('PINECONE_API_KEY') }}"
cloud: AWS
region: us-east-1
index: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
apiKey*Requiredstring
cloud*Requiredstring
index*Requiredstring
region*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Pineconeio.kestra.plugin.langchain4j.embeddings.Pineconenamespacestring
Store embeddings in Qdrant
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Qdrant
apiKey: "{{ secret('QDRANT_API_KEY') }}"
host: localhost
port: 6334
collectionName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
apiKey*Requiredstring
collectionName*Requiredstring
host*Requiredstring
port*Requiredintegerstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Qdrantio.kestra.plugin.langchain4j.embeddings.QdrantStore embeddings in Redis
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Redis
host: localhost
port: 6379
indexName: embeddings
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
host*Requiredstring
port*Requiredintegerstring
type*Requiredobject
indexNamestring
Default
embedding-indexStore embeddings in Alibaba Tablestore
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Tablestore
endpoint: "{{ secret('TABLESTORE_ENDPOINT') }}"
instanceName: "{{ secret('TABLESTORE_INSTANCE_NAME') }}"
accessKeyId: "{{ secret('TABLESTORE_ACCESS_KEY_ID') }}"
accessKeySecret: "{{ secret('TABLESTORE_ACCESS_KEY_SECRET') }}"
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
accessKeyId*Requiredstring
accessKeySecret*Requiredstring
endpoint*Requiredstring
instanceName*Requiredstring
type*Requiredobject
metadataSchemaListarray
Store embeddings in Weaviate
Example
yaml
id: document_ingestion
namespace: company.ai
tasks:
- id: ingest
type: io.kestra.plugin.ai.rag.IngestDocument
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-embedding-exp-03-07
apiKey: "{{ secret('GEMINI_API_KEY') }}"
embeddings:
type: io.kestra.plugin.ai.embeddings.Weaviate
apiKey: "{{ secret('WEAVIATE_API_KEY') }}" # omit for local/no-auth
scheme: https # http | https
host: your-cluster-id.weaviate.network # no protocol
# port: 443 # optional; usually omit
drop: true
fromExternalURLs:
- https://raw.githubusercontent.com/kestra-io/docs/refs/heads/main/content/blogs/release-0-24.md
apiKey*Requiredstring
host*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.embeddings.Weaviateio.kestra.plugin.langchain4j.embeddings.WeaviateavoidDupsbooleanstring
consistencyLevelstring
Possible Values
ONEQUORUMALLgrpcPortintegerstring
metadataFieldNamestring
metadataKeysarray
SubTypestring
objectClassstring
portintegerstring
schemestring
securedGrpcbooleanstring
useGrpcForInsertsbooleanstring
type*Requiredobject
maxResultsintegerstring
Default
3minScorenumberstring
Default
0.0Retrieve web results with Google Custom Search
Example
yaml
id: rag
namespace: company.ai
tasks:
- id: chat_with_rag_and_websearch_content_retriever
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.GoogleCustomWebSearch
apiKey: "{{ secret('GOOGLE_SEARCH_API_KEY') }}"
csi: "{{ secret('GOOGLE_SEARCH_CSI') }}"
prompt: What is the latest release of Kestra?
apiKey*Requiredstring
csi*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.retriever.GoogleCustomWebSearchio.kestra.plugin.langchain4j.retriever.GoogleCustomWebSearchmaxResultsintegerstring
Default
3Retrieve context from SQL (experimental)
Example
yaml
id: rag
namespace: company.ai
tasks:
- id: chat_with_rag_and_sql_retriever
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.0-flash
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.SqlDatabaseRetriever
databaseType: POSTGRESQL
jdbcUrl: "jdbc:postgresql://localhost:5432/mydb"
username: "{{ secret('DB_USER') }}"
password: "{{ secret('DB_PASSWORD') }}"
prompt: "What are the top 5 customers by revenue?"
databaseType*Requiredobject
password*Requiredstring
provider*Required
Use Amazon Bedrock models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AmazonBedrock
accessKeyId: "{{ secret('AWS_ACCESS_KEY') }}"
secretAccessKey: "{{ secret('AWS_SECRET_KEY') }}"
modelName: anthropic.claude-3-sonnet-20240229-v1:0
thinkingBudgetTokens: 1024
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accessKeyId*Requiredstring
modelName*Requiredstring
secretAccessKey*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AmazonBedrockio.kestra.plugin.langchain4j.provider.AmazonBedrockbaseUrlstring
caPemstring
clientPemstring
modelTypestring
Default
COHEREPossible Values
COHERETITANUse Anthropic Claude models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Anthropic
apiKey: "{{ secret('ANTHROPIC_API_KEY') }}"
modelName: claude-3-haiku-20240307
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: false
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Anthropicio.kestra.plugin.langchain4j.provider.AnthropicbaseUrlstring
caPemstring
clientPemstring
maxTokensintegerstring
Use Azure OpenAI deployments
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.AzureOpenAI
apiKey: "{{ secret('AZURE_API_KEY') }}"
endpoint: https://your-resource.openai.azure.com/
modelName: gpt-4o-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.AzureOpenAIio.kestra.plugin.langchain4j.provider.AzureOpenAIapiKeystring
baseUrlstring
caPemstring
clientIdstring
clientPemstring
clientSecretstring
serviceVersionstring
tenantIdstring
Use DashScope (Qwen) models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DashScope
apiKey: "{{ secret('DASHSCOPE_API_KEY') }}"
modelName: qwen-plus
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://dashscope-intl.aliyuncs.com/api/v1caPemstring
clientPemstring
enableSearchbooleanstring
maxTokensintegerstring
repetitionPenaltynumberstring
Use DeepSeek models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.DeepSeek
apiKey: "{{ secret('DEEPSEEK_API_KEY') }}"
modelName: deepseek-chat
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.DeepSeekio.kestra.plugin.langchain4j.provider.DeepSeekbaseUrlstring
Default
https://api.deepseek.com/v1caPemstring
clientPemstring
Use GitHub Models via Azure AI Inference
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GitHubModels
gitHubToken: "{{ secret('GITHUB_TOKEN') }}"
modelName: gpt-4o-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely.
- type: USER
content: "{{ inputs.prompt }}"
gitHubToken*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Google Gemini models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GOOGLE_API_KEY') }}"
modelName: gemini-2.5-flash
clientPem: "{{ secret('CLIENT_PEM') }}"
caPem: "{{ secret('CA_PEM') }}"
baseUrl: "https://internal.gemini.company.com/endpoint"
thinkingEnabled: true
thinkingBudgetTokens: 1024
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleGeminiio.kestra.plugin.langchain4j.provider.GoogleGeminibaseUrlstring
caPemstring
clientPemstring
embeddingModelConfiguration
Use Google Vertex AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.GoogleVertexAI
modelName: gemini-2.5-flash
endpoint: your-vertex-ai-endpoint
location: your-google-cloud-region
project: your-google-cloud-project-id
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
location*Requiredstring
modelName*Requiredstring
project*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.GoogleVertexAIio.kestra.plugin.langchain4j.provider.GoogleVertexAIbaseUrlstring
caPemstring
clientPemstring
Use Hugging Face Inference endpoints
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.HuggingFace
apiKey: "{{ secret('HUGGING_FACE_API_KEY') }}"
modelName: HuggingFaceTB/SmolLM3-3B:hf-inference
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://router.huggingface.co/v1caPemstring
clientPemstring
Use LocalAI OpenAI-compatible server
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.LocalAI
modelName: gemma-3-1b-it
baseUrl: http://localhost:8080/v1
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
baseUrl*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.LocalAIio.kestra.plugin.langchain4j.provider.LocalAIcaPemstring
clientPemstring
Use Mistral models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.MistralAI
apiKey: "{{ secret('MISTRAL_API_KEY') }}"
modelName: open-mistral-7b
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.MistralAIio.kestra.plugin.langchain4j.provider.MistralAIbaseUrlstring
caPemstring
clientPemstring
Use OCI Generative AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OciGenAI
region: "{{ secret('OCI_GENAI_MODEL_REGION_PROPERTY') }}"
compartmentId: "{{ secret('OCI_GENAI_COMPARTMENT_ID_PROPERTY') }}"
authProvider: "{{ secret('OCI_GENAI_CONFIG_PROFILE_PROPERTY') }}"
modelName: oracle.chat.gpt-3.5
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
compartmentId*Requiredstring
modelName*Requiredstring
region*Requiredstring
type*Requiredobject
authProviderstring
baseUrlstring
caPemstring
clientPemstring
Use local Ollama models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.Ollama
modelName: llama3
endpoint: http://localhost:11434
thinkingEnabled: true
returnThinking: true
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
endpoint*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.Ollamaio.kestra.plugin.langchain4j.provider.OllamabaseUrlstring
caPemstring
clientPemstring
Use OpenAI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenAI
apiKey: "{{ secret('OPENAI_API_KEY') }}"
modelName: gpt-5-mini
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenAIio.kestra.plugin.langchain4j.provider.OpenAIbaseUrlstring
Default
https://api.openai.com/v1caPemstring
clientPemstring
Use OpenRouter models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.OpenRouter
apiKey: "{{ secret('OPENROUTER_API_KEY') }}"
baseUrl: https://openrouter.ai/api/v1
modelName: x-ai/grok-beta
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.OpenRouterio.kestra.plugin.langchain4j.provider.OpenRouterbaseUrlstring
caPemstring
clientPemstring
Use IBM watsonx.ai models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WatsonxAI
apiKey: "{{ secret('WATSONX_API_KEY') }}"
projectId: "{{ secret('WATSONX_PROJECT_ID') }}"
modelName: ibm/granite-3-3-8b-instruct
baseUrl: "https://api.eu-de.dataplatform.cloud.ibm.com/wx"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
projectId*Requiredstring
type*Requiredobject
baseUrlstring
caPemstring
clientPemstring
Use Cloudflare Workers AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.WorkersAI
accountId: "{{ secret('WORKERS_AI_ACCOUNT_ID') }}"
apiKey: "{{ secret('WORKERS_AI_API_KEY') }}"
modelName: "@cf/meta/llama-2-7b-chat-fp16"
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
accountId*Requiredstring
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.provider.WorkersAIio.kestra.plugin.langchain4j.provider.WorkersAIbaseUrlstring
caPemstring
clientPemstring
Use ZhiPu AI models
Example
yaml
id: chat_completion
namespace: company.ai
inputs:
- id: prompt
type: STRING
tasks:
- id: chat_completion
type: io.kestra.plugin.ai.completion.ChatCompletion
provider:
type: io.kestra.plugin.ai.provider.ZhiPuAI
apiKey: "{{ secret('ZHIPU_API_KEY') }}"
modelName: glm-4.5-flash
messages:
- type: SYSTEM
content: You are a helpful assistant, answer concisely, avoid overly casual language or unnecessary verbosity.
- type: USER
content: "{{ inputs.prompt }}"
apiKey*Requiredstring
modelName*Requiredstring
type*Requiredobject
baseUrlstring
Default
https://open.bigmodel.cn/caPemstring
clientPemstring
maxRetriesintegerstring
maxTokenintegerstring
stopsarray
SubTypestring
type*Requiredobject
username*Requiredstring
configuration
Default
{}io.kestra.plugin.ai.domain.ChatConfiguration
logRequestsbooleanstring
logResponsesbooleanstring
maxTokenintegerstring
promptCachingbooleanstring
responseFormat
returnThinkingbooleanstring
seedintegerstring
temperaturenumberstring
thinkingBudgetTokensintegerstring
thinkingEnabledbooleanstring
topKintegerstring
topPnumberstring
driverstring
jdbcUrlstring
maxPoolSizeintegerstring
Default
2Retrieve web results with Tavily
Example
yaml
id: rag
namespace: company.ai
tasks:
- id: chat_with_rag_and_websearch_content_retriever
type: io.kestra.plugin.ai.rag.ChatCompletion
chatProvider:
type: io.kestra.plugin.ai.provider.GoogleGemini
modelName: gemini-2.5-flash
apiKey: "{{ secret('GEMINI_API_KEY') }}"
contentRetrievers:
- type: io.kestra.plugin.ai.retriever.TavilyWebSearch
apiKey: "{{ secret('TAVILY_API_KEY') }}"
prompt: What is the latest release of Kestra?
apiKey*Requiredstring
type*Requiredobject
Possible Values
io.kestra.plugin.ai.retriever.TavilyWebSearchio.kestra.plugin.langchain4j.retriever.TavilyWebSearchmaxResultsintegerstring
Default
3maxSequentialToolsInvocationsintegerstring
namestring
Default
toolsystemMessagestring
tools
Execute JavaScript via Judge0
Example
yaml
id: calculator_agent
namespace: company.ai
tasks:
- id: agent
type: io.kestra.plugin.ai.agent.AIAgent
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GEMINI_API_KEY') }}"
modelName: gemini-2.5-flash
prompt: What is the square root of 49506838032859?
tools:
- type: io.kestra.plugin.ai.tool.CodeExecution
apiKey: "{{ secret('RAPID_API_KEY') }}"
apiKey*Requiredstring
type*Requiredobject
Run MCP tools in Docker
Example
yaml
id: docker_mcp_client
namespace: company.ai
inputs:
- id: prompt
type: STRING
defaults: What is the current UTC time?
tasks:
- id: agent
type: io.kestra.plugin.ai.agent.AIAgent
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GEMINI_API_KEY') }}"
modelName: gemini-2.5-flash
prompt: "{{ inputs.prompt }}"
tools:
- type: io.kestra.plugin.ai.tool.DockerMcpClient
image: mcp/time
yaml
id: docker_mcp_client
namespace: company.ai
inputs:
- id: prompt
type: STRING
defaults: Create a file 'hello.txt' with the content "Hello World" in the /tmp directory.
tasks:
- id: agent
type: io.kestra.plugin.ai.agent.AIAgent
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GEMINI_API_KEY') }}"
modelName: gemini-2.5-flash
prompt: "{{ inputs.prompt }}"
tools:
- type: io.kestra.plugin.ai.tool.DockerMcpClient
image: mcp/filesystem
command: ["/tmp"]
# Mount the container path to the task working directory to access the generated file
binds: ["{{ workingDir }}:/tmp"]
outputFiles:
- hello.txt
image*Requiredstring
type*Requiredobject
apiVersionstring
bindsarray
SubTypestring
commandarray
SubTypestring
dockerCertPathstring
dockerConfigstring
dockerContextstring
dockerHoststring
dockerTlsVerifybooleanstring
envobject
SubTypestring
logEventsbooleanstring
Default
falseregistryEmailstring
registryPasswordstring
registryUrlstring
registryUsernamestring
Search the web with Google CSE
Example
yaml
id: agent_searching_web
namespace: company.ai
inputs:
- id: prompt
type: STRING
defaults: What is the latest Kestra release and what new features does it include?
tasks:
- id: agent
type: io.kestra.plugin.ai.agent.AIAgent
provider:
type: io.kestra.plugin.ai.provider.GoogleGemini
apiKey: "{{ secret('GEMINI_API_KEY') }}"
modelName: gemini-2.5-flash
prompt: "{{ inputs.prompt }}"
tools:
- type: io.kestra.plugin.ai.tool.GoogleCustomWebSearch
apiKey: "{{ secret('GOOGLE_SEARCH_API_KEY') }}"
csi: "{{ secret('GOOGLE_SEARCH_CSI') }}"
apiKey (string, required)
csi (string, required)
type (object, required)
Possible Values: io.kestra.plugin.ai.tool.GoogleCustomWebSearch, io.kestra.plugin.langchain4j.tool.GoogleCustomWebSearch
Execute Kestra flows from an agent
Example
yaml
id: agent_calling_flows_explicitly
namespace: company.ai
inputs:
  - id: use_case
    type: SELECT
    description: Your Orchestration Use Case
    defaults: Hello World
    values:
      - Business Automation
      - Business Processes
      - Data Engineering Pipeline
      - Data Warehouse and Analytics
      - Infrastructure Automation
      - Microservices and APIs
      - Hello World
tasks:
  - id: agent
    type: io.kestra.plugin.ai.agent.AIAgent
    prompt: Execute a flow that best matches the {{ inputs.use_case }} use case selected by the user
    provider:
      type: io.kestra.plugin.ai.provider.GoogleGemini
      modelName: gemini-2.5-flash
      apiKey: "{{ secret('GEMINI_API_KEY') }}"
    tools:
      - type: io.kestra.plugin.ai.tool.KestraFlow
        namespace: tutorial
        flowId: business-automation
        description: Business Automation
      - type: io.kestra.plugin.ai.tool.KestraFlow
        namespace: tutorial
        flowId: business-processes
        description: Business Processes
      - type: io.kestra.plugin.ai.tool.KestraFlow
        namespace: tutorial
        flowId: data-engineering-pipeline
        description: Data Engineering Pipeline
      - type: io.kestra.plugin.ai.tool.KestraFlow
        namespace: tutorial
        flowId: dwh-and-analytics
        description: Data Warehouse and Analytics
      - type: io.kestra.plugin.ai.tool.KestraFlow
        namespace: tutorial
        flowId: file-processing
        description: File Processing
      - type: io.kestra.plugin.ai.tool.KestraFlow
        namespace: tutorial
        flowId: hello-world
        description: Hello World
      - type: io.kestra.plugin.ai.tool.KestraFlow
        namespace: tutorial
        flowId: infrastructure-automation
        description: Infrastructure Automation
      - type: io.kestra.plugin.ai.tool.KestraFlow
        namespace: tutorial
        flowId: microservices-and-apis
        description: Microservices and APIs
yaml
id: agent_calling_flows_implicitly
namespace: company.ai
inputs:
  - id: use_case
    type: SELECT
    description: Your Orchestration Use Case
    defaults: Hello World
    values:
      - Business Automation
      - Business Processes
      - Data Engineering Pipeline
      - Data Warehouse and Analytics
      - Infrastructure Automation
      - Microservices and APIs
      - Hello World
tasks:
  - id: agent
    type: io.kestra.plugin.ai.agent.AIAgent
    prompt: |
      Execute a flow that best matches the {{ inputs.use_case }} use case selected by the user. Use the following mapping of use cases to flow IDs:
      - Business Automation: business-automation
      - Business Processes: business-processes
      - Data Engineering Pipeline: data-engineering-pipeline
      - Data Warehouse and Analytics: dwh-and-analytics
      - Infrastructure Automation: infrastructure-automation
      - Microservices and APIs: microservices-and-apis
      - Hello World: hello-world
      Remember that all those flows are in the tutorial namespace.
    provider:
      type: io.kestra.plugin.ai.provider.GoogleGemini
      modelName: gemini-2.5-flash
      apiKey: "{{ secret('GEMINI_API_KEY') }}"
    tools:
      - type: io.kestra.plugin.ai.tool.KestraFlow
type (object, required)
description (string)
flowId (string)
inheritLabels (boolean | string, default: false)
inputs (object)
labels (array | object)
namespace (string)
revision (integer | string)
scheduleDate (string)
Expose Kestra runnable tasks as tools
Example
yaml
id: call_a_kestra_task
namespace: company.ai
tasks:
  - id: agent
    type: io.kestra.plugin.ai.agent.AIAgent
    provider:
      type: io.kestra.plugin.ai.provider.GoogleGemini
      modelName: gemini-2.5-flash
      apiKey: "{{ secret('GEMINI_API_KEY') }}"
    tools:
      - type: io.kestra.plugin.ai.tool.KestraTask
        tasks:
          - id: log
            type: io.kestra.plugin.core.log.Log
            message: "..." # This is a placeholder; the agent will fill it.
    prompt: "Log the following message: 'Hello World!'"
tasks (array, required)
type (object, required)
Call MCP server over SSE
Example
yaml
id: mcp_client_sse
namespace: company.ai
inputs:
  - id: prompt
    type: STRING
    defaults: Find 2 restaurants in Lille, France with the best reviews
tasks:
  - id: agent
    type: io.kestra.plugin.ai.agent.AIAgent
    prompt: "{{ inputs.prompt }}"
    provider:
      type: io.kestra.plugin.ai.provider.GoogleGemini
      modelName: gemini-2.5-flash
      apiKey: "{{ secret('GEMINI_API_KEY') }}"
    tools:
      - type: io.kestra.plugin.ai.tool.SseMcpClient
        sseUrl: https://mcp.apify.com/?actors=compass/crawler-google-places
        timeout: PT5M
        headers:
          # Quoted: templated values should always be quoted so the secret expansion stays a string
          Authorization: "Bearer {{ secret('APIFY_API_TOKEN') }}"
sseUrl (string, required)
type (object, required)
Possible Values: io.kestra.plugin.ai.tool.SseMcpClient, io.kestra.plugin.ai.tool.HttpMcpClient, io.kestra.plugin.langchain4j.tool.HttpMcpClient
headers (object of string)
logRequests (boolean | string, default: false)
logResponses (boolean | string, default: false)
timeout (string)
Run MCP tools over stdio
Example
yaml
id: mcp_client_stdio
namespace: company.ai
inputs:
  - id: prompt
    type: STRING
    defaults: What is the current time in New York?
tasks:
  - id: agent
    type: io.kestra.plugin.ai.agent.AIAgent
    prompt: "{{ inputs.prompt }}"
    provider:
      type: io.kestra.plugin.ai.provider.GoogleGemini
      apiKey: "{{ secret('GEMINI_API_KEY') }}"
      modelName: gemini-2.5-flash
    tools:
      - type: io.kestra.plugin.ai.tool.StdioMcpClient
        command: ["docker", "run", "--rm", "-i", "mcp/time"]
command (array of string, required)
type (object, required)
Possible Values: io.kestra.plugin.ai.tool.StdioMcpClient, io.kestra.plugin.langchain4j.tool.StdioMcpClient
env (object of string)
logEvents (boolean | string, default: false)
Call MCP server over HTTP streaming
Example
yaml
id: mcp_client_sse
namespace: company.ai
inputs:
  - id: prompt
    type: STRING
    defaults: Find the 2 restaurants in Lille, France with the best reviews.
tasks:
  - id: agent
    type: io.kestra.plugin.ai.agent.AIAgent
    prompt: "{{ inputs.prompt }}"
    provider:
      type: io.kestra.plugin.ai.provider.GoogleGemini
      modelName: gemini-2.5-flash
      apiKey: "{{ secret('GEMINI_API_KEY') }}"
    tools:
      - type: io.kestra.plugin.ai.tool.StreamableHttpMcpClient
        url: https://mcp.apify.com/?actors=compass/crawler-google-places
        timeout: PT5M
        headers:
          # Quoted: templated values should always be quoted so the secret expansion stays a string
          Authorization: "Bearer {{ secret('APIFY_API_TOKEN') }}"
type (object, required)
url (string, required)
headers (object of string)
logRequests (boolean | string, default: false)
logResponses (boolean | string, default: false)
timeout (string)
Search the web with Tavily
Example
yaml
id: research_agent
namespace: company.ai
inputs:
  - id: prompt
    type: STRING
    defaults: What is the latest Kestra release and what new features does it include? (name 10 new features)
tasks:
  - id: agent
    type: io.kestra.plugin.ai.agent.AIAgent
    prompt: "{{ inputs.prompt }}"
    provider:
      type: io.kestra.plugin.ai.provider.GoogleGemini
      modelName: gemini-2.5-flash
      apiKey: "{{ secret('GEMINI_API_KEY') }}"
    tools:
      - type: io.kestra.plugin.ai.tool.TavilyWebSearch
        apiKey: "{{ secret('TAVILY_API_KEY') }}"
apiKey (string, required)
type (object, required)
Possible Values: io.kestra.plugin.ai.tool.TavilyWebSearch, io.kestra.plugin.langchain4j.tool.TavilyWebSearch

Outputs
finishReason (string) — Possible Values: STOP, LENGTH, TOOL_EXECUTION, CONTENT_FILTER, OTHER
guardrailViolated (boolean, default: false)
guardrailViolationMessage (string)
intermediateResponses (array of io.kestra.plugin.ai.domain.AIOutput-AIResponse)
  completion (string)
  finishReason (string) — Possible Values: STOP, LENGTH, TOOL_EXECUTION, CONTENT_FILTER, OTHER
  id (string)
  requestDuration (integer)
  tokenUsage (io.kestra.plugin.ai.domain.TokenUsage)
    inputTokenCount (integer)
    outputTokenCount (integer)
    totalTokenCount (integer)
  toolExecutionRequests (array of io.kestra.plugin.ai.domain.AIOutput-AIResponse-ToolExecutionRequest)
    arguments (object)
    id (string)
    name (string)
jsonOutput (object)
outputFiles (object of string)
requestDuration (integer)
sources (array of io.kestra.plugin.ai.domain.AIOutput-ContentSource)
  content (string)
  metadata (object)
textOutput (string)
thinking (string)
tokenUsage (io.kestra.plugin.ai.domain.TokenUsage)
  inputTokenCount (integer)
  outputTokenCount (integer)
  totalTokenCount (integer)
toolExecutions (array of io.kestra.plugin.ai.domain.AIOutput-ToolExecution)
  requestArguments (object)
  requestId (string)
  requestName (string)
  result (string)

Metrics
input.token.count (counter, unit: token)
output.token.count (counter, unit: token)
total.token.count (counter, unit: token)