feat(together-ai): add app with create completion and create chat completion actions

Ali BARIN
2025-01-22 15:14:15 +00:00
parent e686d3b067
commit bb204781e0
17 changed files with 433 additions and 0 deletions


@@ -0,0 +1,169 @@
import defineAction from '../../../../helpers/define-action.js';
const castFloatOrUndefined = (value) => {
return value === '' ? undefined : parseFloat(value);
};
export default defineAction({
name: 'Create chat completion',
key: 'createChatCompletion',
description: 'Queries a chat model.',
arguments: [
{
label: 'Model',
key: 'model',
type: 'dropdown',
required: true,
variables: true,
source: {
type: 'query',
name: 'getDynamicData',
arguments: [
{
name: 'key',
value: 'listModels',
},
],
},
},
{
label: 'Messages',
key: 'messages',
type: 'dynamic',
required: true,
description: 'A list of messages comprising the conversation so far.',
value: [{ role: 'system', content: '' }],
fields: [
{
label: 'Role',
key: 'role',
type: 'dropdown',
required: true,
description:
'The role of the message author. One of: system, user, or assistant.',
options: [
{
label: 'System',
value: 'system',
},
{
label: 'Assistant',
value: 'assistant',
},
{
label: 'User',
value: 'user',
},
],
},
{
label: 'Content',
key: 'content',
type: 'string',
required: true,
variables: true,
description:
'The content of the message, which can either be a simple string or a structured format.',
},
],
},
{
label: 'Temperature',
key: 'temperature',
type: 'string',
required: false,
variables: true,
description:
'A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output.',
},
{
label: 'Maximum tokens',
key: 'maxTokens',
type: 'string',
required: false,
variables: true,
description: 'The maximum number of tokens to generate.',
},
{
label: 'Stop sequences',
key: 'stopSequences',
type: 'dynamic',
required: false,
variables: true,
description:
'A list of string sequences that will truncate (stop) inference text output. For example, "</s>" will stop generation as soon as the model generates the given token.',
fields: [
{
label: 'Stop sequence',
key: 'stopSequence',
type: 'string',
required: false,
variables: true,
},
],
},
{
label: 'Top P',
key: 'topP',
type: 'string',
required: false,
variables: true,
description: `A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text.`,
},
{
label: 'Top K',
key: 'topK',
type: 'string',
required: false,
variables: true,
description: `An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.`,
},
{
label: 'Frequency Penalty',
key: 'frequencyPenalty',
type: 'string',
required: false,
variables: true,
description: `A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned.`,
},
{
label: 'Presence Penalty',
key: 'presencePenalty',
type: 'string',
required: false,
variables: true,
description: `A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics.`,
},
],
async run($) {
const nonEmptyStopSequences = $.step.parameters.stopSequences
.filter(({ stopSequence }) => stopSequence)
.map(({ stopSequence }) => stopSequence);
const messages = $.step.parameters.messages.map((message) => ({
role: message.role,
content: message.content,
}));
const payload = {
model: $.step.parameters.model,
messages,
temperature: castFloatOrUndefined($.step.parameters.temperature),
max_tokens: castFloatOrUndefined($.step.parameters.maxTokens),
stop: nonEmptyStopSequences,
top_p: castFloatOrUndefined($.step.parameters.topP),
top_k: castFloatOrUndefined($.step.parameters.topK),
presence_penalty: castFloatOrUndefined($.step.parameters.presencePenalty),
frequency_penalty: castFloatOrUndefined(
$.step.parameters.frequencyPenalty
),
};
const { data } = await $.http.post('/v1/chat/completions', payload);
$.setActionItem({
raw: data,
});
},
});
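
As an editorial aside (not part of the commit), here is a rough sketch of the request body this action assembles before POSTing to /v1/chat/completions; the model id, message text, and parameter values below are invented placeholders:

// Illustrative only: the payload shape produced by createChatCompletion.
// Model id, messages, and parameter values are hypothetical placeholders.
const examplePayload = {
  model: 'meta-llama/Llama-3-8b-chat-hf',
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Summarize the release notes.' },
  ],
  temperature: 0.7,
  max_tokens: 256,
  stop: ['</s>'],
  top_p: 0.9,
  top_k: 50,
};
// Optional parameters left empty in the step resolve to undefined and are
// dropped when the body is JSON-serialized.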


@@ -0,0 +1,131 @@
import defineAction from '../../../../helpers/define-action.js';
const castFloatOrUndefined = (value) => {
return value === '' ? undefined : parseFloat(value);
};
export default defineAction({
name: 'Create completion',
key: 'createCompletion',
description: 'Queries a language, code, or image model.',
arguments: [
{
label: 'Model',
key: 'model',
type: 'dropdown',
required: true,
variables: true,
source: {
type: 'query',
name: 'getDynamicData',
arguments: [
{
name: 'key',
value: 'listModels',
},
],
},
},
{
label: 'Prompt',
key: 'prompt',
type: 'string',
required: true,
variables: true,
description: 'A string providing context for the model to complete.',
},
{
label: 'Temperature',
key: 'temperature',
type: 'string',
required: false,
variables: true,
description:
'A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output.',
},
{
label: 'Maximum tokens',
key: 'maxTokens',
type: 'string',
required: false,
variables: true,
description: 'The maximum number of tokens to generate.',
},
{
label: 'Stop sequences',
key: 'stopSequences',
type: 'dynamic',
required: false,
variables: true,
description:
'A list of string sequences that will truncate (stop) inference text output. For example, "</s>" will stop generation as soon as the model generates the given token.',
fields: [
{
label: 'Stop sequence',
key: 'stopSequence',
type: 'string',
required: false,
variables: true,
},
],
},
{
label: 'Top P',
key: 'topP',
type: 'string',
required: false,
variables: true,
description: `A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text.`,
},
{
label: 'Top K',
key: 'topK',
type: 'string',
required: false,
variables: true,
description: `An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.`,
},
{
label: 'Frequency Penalty',
key: 'frequencyPenalty',
type: 'string',
required: false,
variables: true,
description: `A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned.`,
},
{
label: 'Presence Penalty',
key: 'presencePenalty',
type: 'string',
required: false,
variables: true,
description: `A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics.`,
},
],
async run($) {
const nonEmptyStopSequences = $.step.parameters.stopSequences
.filter(({ stopSequence }) => stopSequence)
.map(({ stopSequence }) => stopSequence);
const payload = {
model: $.step.parameters.model,
prompt: $.step.parameters.prompt,
temperature: castFloatOrUndefined($.step.parameters.temperature),
max_tokens: castFloatOrUndefined($.step.parameters.maxTokens),
stop: nonEmptyStopSequences,
top_p: castFloatOrUndefined($.step.parameters.topP),
top_k: castFloatOrUndefined($.step.parameters.topK),
presence_penalty: castFloatOrUndefined($.step.parameters.presencePenalty),
frequency_penalty: castFloatOrUndefined(
$.step.parameters.frequencyPenalty
),
};
const { data } = await $.http.post('/v1/completions', payload);
$.setActionItem({
raw: data,
});
},
});


@@ -0,0 +1,4 @@
import createCompletion from './create-completion/index.js';
import createChatCompletion from './create-chat-completion/index.js';
export default [createChatCompletion, createCompletion];


@@ -0,0 +1 @@
<svg fill="none" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32"><g clip-path="url(#clip0_542_18748)"><rect width="32" height="32" rx="5.64706" fill="#F1EFED"></rect><circle cx="22.8233" cy="9.64706" r="5.64706" fill="#D3D1D1"></circle><circle cx="22.8233" cy="22.8238" r="5.64706" fill="#D3D1D1"></circle><circle cx="9.64706" cy="22.8238" r="5.64706" fill="#D3D1D1"></circle><circle cx="9.64706" cy="9.64706" r="5.64706" fill="#0F6FFF"></circle></g><defs><clipPath id="clip0_542_18748"><rect width="32" height="32" fill="white"></rect></clipPath></defs></svg>



@@ -0,0 +1,34 @@
import verifyCredentials from './verify-credentials.js';
import isStillVerified from './is-still-verified.js';
export default {
fields: [
{
key: 'screenName',
label: 'Screen Name',
type: 'string',
required: true,
readOnly: false,
value: null,
placeholder: null,
description:
'Screen name of your connection, displayed in the Automatisch UI.',
clickToCopy: false,
},
{
key: 'apiKey',
label: 'API Key',
type: 'string',
required: true,
readOnly: false,
value: null,
placeholder: null,
description: 'Together AI API key of your account.',
docUrl: 'https://automatisch.io/docs/together-ai#api-key',
clickToCopy: false,
},
],
verifyCredentials,
isStillVerified,
};


@@ -0,0 +1,6 @@
const isStillVerified = async ($) => {
await $.http.get('/v1/models');
return true;
};
export default isStillVerified;


@@ -0,0 +1,5 @@
const verifyCredentials = async ($) => {
await $.http.get('/v1/models');
};
export default verifyCredentials;


@@ -0,0 +1,9 @@
const addAuthHeader = ($, requestConfig) => {
if ($.auth.data?.apiKey) {
requestConfig.headers.Authorization = `Bearer ${$.auth.data.apiKey}`;
}
return requestConfig;
};
export default addAuthHeader;
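
A minimal usage sketch of the hook above (not part of the diff); the $ auth context and token value are stand-ins:

// Hypothetical usage: how a beforeRequest hook is applied to an outgoing request config.
const $ = { auth: { data: { apiKey: 'tok_example' } } }; // placeholder auth context
const requestConfig = { headers: {} };
addAuthHeader($, requestConfig);
// requestConfig.headers.Authorization is now 'Bearer tok_example'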


@@ -0,0 +1,3 @@
import listModels from './list-models/index.js';
export default [listModels];


@@ -0,0 +1,17 @@
export default {
name: 'List models',
key: 'listModels',
async run($) {
const { data } = await $.http.get('/v1/models');
const models = data.map((model) => {
return {
value: model.id,
name: model.display_name,
};
});
return { data: models };
},
};
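
For reference (an aside, not commit content), the handler above assumes /v1/models returns a plain array of model objects rather than an OpenAI-style { data: [...] } wrapper; a sketch of the mapping against invented entries:

// Sketch of the mapping above run against an invented response body.
const data = [
  { id: 'meta-llama/Llama-3-8b-chat-hf', display_name: 'Llama 3 8B Chat' },
  { id: 'mistralai/Mixtral-8x7B-Instruct-v0.1', display_name: 'Mixtral 8x7B Instruct' },
];
const models = data.map((model) => ({ value: model.id, name: model.display_name }));
// => [{ value: 'meta-llama/...', name: 'Llama 3 8B Chat' }, ...]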


@@ -0,0 +1,20 @@
import defineApp from '../../helpers/define-app.js';
import addAuthHeader from './common/add-auth-header.js';
import auth from './auth/index.js';
import actions from './actions/index.js';
import dynamicData from './dynamic-data/index.js';
export default defineApp({
name: 'Together AI',
key: 'together-ai',
baseUrl: 'https://together.ai',
apiBaseUrl: 'https://api.together.xyz',
iconUrl: '{BASE_URL}/apps/together-ai/assets/favicon.svg',
authDocUrl: '{DOCS_URL}/apps/together-ai/connection',
primaryColor: '#000000',
supportsConnections: true,
beforeRequest: [addAuthHeader],
auth,
actions,
dynamicData,
});


@@ -59,6 +59,7 @@ exports[`App model > list should have list of applications keys 1`] = `
"stripe",
"telegram-bot",
"todoist",
"together-ai",
"trello",
"twilio",
"twitter",


@@ -508,6 +508,15 @@ export default defineConfig({
{ text: 'Connection', link: '/apps/todoist/connection' },
],
},
{
text: 'Together AI',
collapsible: true,
collapsed: true,
items: [
{ text: 'Actions', link: '/apps/together-ai/actions' },
{ text: 'Connection', link: '/apps/together-ai/connection' },
],
},
{
text: 'Trello',
collapsible: true,


@@ -0,0 +1,14 @@
---
favicon: /favicons/together-ai.svg
items:
- name: Create chat completion
desc: Queries a chat model.
- name: Create completion
desc: Queries a language, code, or image model.
---
<script setup>
import CustomListing from '../../components/CustomListing.vue'
</script>
<CustomListing />


@@ -0,0 +1,8 @@
# Together AI
1. Go to the [API Keys page](https://api.together.ai/settings/api-keys) on Together AI.
2. Copy your API key.
3. Paste the key into the `API Key` field in Automatisch.
4. Write any screen name to be displayed in Automatisch.
5. Click `Save`.
6. Start using the Together AI integration with Automatisch!
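
If you want to sanity-check the key outside Automatisch first (an editorial aside, not part of the doc above), a quick Node 18+ ES module sketch against the same models endpoint the connection check uses; TOGETHER_API_KEY is a placeholder environment variable name:

// Manual check of a Together AI API key; a 200 status means the key is accepted.
const response = await fetch('https://api.together.xyz/v1/models', {
  headers: { Authorization: `Bearer ${process.env.TOGETHER_API_KEY}` },
});
console.log(response.status);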


@@ -52,6 +52,7 @@ The following integrations are currently supported by Automatisch.
- [Stripe](/apps/stripe/triggers)
- [Telegram](/apps/telegram-bot/actions)
- [Todoist](/apps/todoist/triggers)
- [Together AI](/apps/together-ai/actions)
- [Trello](/apps/trello/actions)
- [Twilio](/apps/twilio/triggers)
- [Twitter](/apps/twitter/triggers)


@@ -0,0 +1 @@
<svg fill="none" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32"><g clip-path="url(#clip0_542_18748)"><rect width="32" height="32" rx="5.64706" fill="#F1EFED"></rect><circle cx="22.8233" cy="9.64706" r="5.64706" fill="#D3D1D1"></circle><circle cx="22.8233" cy="22.8238" r="5.64706" fill="#D3D1D1"></circle><circle cx="9.64706" cy="22.8238" r="5.64706" fill="#D3D1D1"></circle><circle cx="9.64706" cy="9.64706" r="5.64706" fill="#0F6FFF"></circle></g><defs><clipPath id="clip0_542_18748"><rect width="32" height="32" fill="white"></rect></clipPath></defs></svg>
