diff --git a/packages/backend/src/apps/together-ai/actions/create-chat-completion/index.js b/packages/backend/src/apps/together-ai/actions/create-chat-completion/index.js
new file mode 100644
index 00000000..30195690
--- /dev/null
+++ b/packages/backend/src/apps/together-ai/actions/create-chat-completion/index.js
@@ -0,0 +1,169 @@
+import defineAction from '../../../../helpers/define-action.js';
+
+const castFloatOrUndefined = (value) => {
+ return value === '' ? undefined : parseFloat(value);
+};
+
+export default defineAction({
+ name: 'Create chat completion',
+ key: 'createChatCompletion',
+ description: 'Queries a chat model.',
+ arguments: [
+ {
+ label: 'Model',
+ key: 'model',
+ type: 'dropdown',
+ required: true,
+ variables: true,
+ source: {
+ type: 'query',
+ name: 'getDynamicData',
+ arguments: [
+ {
+ name: 'key',
+ value: 'listModels',
+ },
+ ],
+ },
+ },
+ {
+ label: 'Messages',
+ key: 'messages',
+ type: 'dynamic',
+ required: true,
+ description: 'A list of messages comprising the conversation so far.',
+      value: [{ role: 'system', content: '' }],
+ fields: [
+ {
+ label: 'Role',
+ key: 'role',
+ type: 'dropdown',
+ required: true,
+          description:
+            'The role of the message author. One of: system, user, or assistant.',
+ options: [
+ {
+ label: 'System',
+ value: 'system',
+ },
+ {
+ label: 'Assistant',
+ value: 'assistant',
+ },
+ {
+ label: 'User',
+ value: 'user',
+ },
+ ],
+ },
+ {
+ label: 'Content',
+ key: 'content',
+ type: 'string',
+ required: true,
+ variables: true,
+ description:
+ 'The content of the message, which can either be a simple string or a structured format.',
+ },
+ ],
+ },
+ {
+ label: 'Temperature',
+ key: 'temperature',
+ type: 'string',
+ required: false,
+ variables: true,
+ description:
+ 'A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output.',
+ },
+ {
+ label: 'Maximum tokens',
+ key: 'maxTokens',
+ type: 'string',
+ required: false,
+ variables: true,
+ description: 'The maximum number of tokens to generate.',
+ },
+ {
+ label: 'Stop sequences',
+ key: 'stopSequences',
+ type: 'dynamic',
+ required: false,
+ variables: true,
+      description:
+        'A list of string sequences that will truncate (stop) inference text output. Generation stops as soon as the model produces any of the given sequences.',
+ fields: [
+ {
+ label: 'Stop sequence',
+ key: 'stopSequence',
+ type: 'string',
+ required: false,
+ variables: true,
+ },
+ ],
+ },
+ {
+ label: 'Top P',
+ key: 'topP',
+ type: 'string',
+ required: false,
+ variables: true,
+ description: `A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text.`,
+ },
+ {
+ label: 'Top K',
+ key: 'topK',
+ type: 'string',
+ required: false,
+ variables: true,
+ description: `An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.`,
+ },
+ {
+ label: 'Frequency Penalty',
+ key: 'frequencyPenalty',
+ type: 'string',
+ required: false,
+ variables: true,
+ description: `A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned.`,
+ },
+ {
+ label: 'Presence Penalty',
+ key: 'presencePenalty',
+ type: 'string',
+ required: false,
+ variables: true,
+ description: `A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics.`,
+ },
+ ],
+
+ async run($) {
+ const nonEmptyStopSequences = $.step.parameters.stopSequences
+ .filter(({ stopSequence }) => stopSequence)
+ .map(({ stopSequence }) => stopSequence);
+
+ const messages = $.step.parameters.messages.map((message) => ({
+ role: message.role,
+ content: message.content,
+ }));
+
+ const payload = {
+ model: $.step.parameters.model,
+ messages,
+ temperature: castFloatOrUndefined($.step.parameters.temperature),
+ max_tokens: castFloatOrUndefined($.step.parameters.maxTokens),
+ stop: nonEmptyStopSequences,
+ top_p: castFloatOrUndefined($.step.parameters.topP),
+ top_k: castFloatOrUndefined($.step.parameters.topK),
+ presence_penalty: castFloatOrUndefined($.step.parameters.presencePenalty),
+ frequency_penalty: castFloatOrUndefined(
+ $.step.parameters.frequencyPenalty
+ ),
+ };
+
+ const { data } = await $.http.post('/v1/chat/completions', payload);
+
+ $.setActionItem({
+ raw: data,
+ });
+ },
+});
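
For context, a minimal sketch of the request this action ends up issuing once the payload above is assembled — run outside Automatisch with Node 18+'s built-in `fetch`; the model id and `TOGETHER_API_KEY` are placeholders, and the response shape follows the OpenAI-compatible format Together AI exposes.

```js
// Rough equivalent of the HTTP call built in run() above (a sketch, not the
// Automatisch runtime). Model id and API key are placeholders.
const response = await fetch('https://api.together.xyz/v1/chat/completions', {
  method: 'POST',
  headers: {
    Authorization: `Bearer ${process.env.TOGETHER_API_KEY}`,
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    model: 'mistralai/Mixtral-8x7B-Instruct-v0.1', // example id from the model dropdown
    messages: [
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'Summarize the Automatisch project in one sentence.' },
    ],
    temperature: 0.7,
    max_tokens: 256,
  }),
});

const data = await response.json();
console.log(data.choices[0].message.content);
```
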
diff --git a/packages/backend/src/apps/together-ai/actions/create-completion/index.js b/packages/backend/src/apps/together-ai/actions/create-completion/index.js
new file mode 100644
index 00000000..702e5a0a
--- /dev/null
+++ b/packages/backend/src/apps/together-ai/actions/create-completion/index.js
@@ -0,0 +1,131 @@
+import defineAction from '../../../../helpers/define-action.js';
+
+const castFloatOrUndefined = (value) => {
+ return value === '' ? undefined : parseFloat(value);
+};
+
+export default defineAction({
+ name: 'Create completion',
+ key: 'createCompletion',
+ description: 'Queries a language, code, or image model.',
+ arguments: [
+ {
+ label: 'Model',
+ key: 'model',
+ type: 'dropdown',
+ required: true,
+ variables: true,
+ source: {
+ type: 'query',
+ name: 'getDynamicData',
+ arguments: [
+ {
+ name: 'key',
+ value: 'listModels',
+ },
+ ],
+ },
+ },
+ {
+ label: 'Prompt',
+ key: 'prompt',
+ type: 'string',
+ required: true,
+ variables: true,
+ description: 'A string providing context for the model to complete.',
+ },
+ {
+ label: 'Temperature',
+ key: 'temperature',
+ type: 'string',
+ required: false,
+ variables: true,
+ description:
+ 'A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output.',
+ },
+ {
+ label: 'Maximum tokens',
+ key: 'maxTokens',
+ type: 'string',
+ required: false,
+ variables: true,
+ description: 'The maximum number of tokens to generate.',
+ },
+ {
+ label: 'Stop sequences',
+ key: 'stopSequences',
+ type: 'dynamic',
+ required: false,
+ variables: true,
+      description:
+        'A list of string sequences that will truncate (stop) inference text output. Generation stops as soon as the model produces any of the given sequences.',
+ fields: [
+ {
+ label: 'Stop sequence',
+ key: 'stopSequence',
+ type: 'string',
+ required: false,
+ variables: true,
+ },
+ ],
+ },
+ {
+ label: 'Top P',
+ key: 'topP',
+ type: 'string',
+ required: false,
+ variables: true,
+ description: `A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text.`,
+ },
+ {
+ label: 'Top K',
+ key: 'topK',
+ type: 'string',
+ required: false,
+ variables: true,
+ description: `An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.`,
+ },
+ {
+ label: 'Frequency Penalty',
+ key: 'frequencyPenalty',
+ type: 'string',
+ required: false,
+ variables: true,
+ description: `A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned.`,
+ },
+ {
+ label: 'Presence Penalty',
+ key: 'presencePenalty',
+ type: 'string',
+ required: false,
+ variables: true,
+ description: `A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics.`,
+ },
+ ],
+
+ async run($) {
+ const nonEmptyStopSequences = $.step.parameters.stopSequences
+ .filter(({ stopSequence }) => stopSequence)
+ .map(({ stopSequence }) => stopSequence);
+
+ const payload = {
+ model: $.step.parameters.model,
+ prompt: $.step.parameters.prompt,
+ temperature: castFloatOrUndefined($.step.parameters.temperature),
+ max_tokens: castFloatOrUndefined($.step.parameters.maxTokens),
+ stop: nonEmptyStopSequences,
+ top_p: castFloatOrUndefined($.step.parameters.topP),
+ top_k: castFloatOrUndefined($.step.parameters.topK),
+ presence_penalty: castFloatOrUndefined($.step.parameters.presencePenalty),
+ frequency_penalty: castFloatOrUndefined(
+ $.step.parameters.frequencyPenalty
+ ),
+ };
+
+ const { data } = await $.http.post('/v1/completions', payload);
+
+ $.setActionItem({
+ raw: data,
+ });
+ },
+});
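
A small sketch of why `castFloatOrUndefined` maps empty inputs to `undefined`: when the payload is serialized to JSON (as axios does by default via `JSON.stringify`), keys whose value is `undefined` are dropped, so optional parameters the user leaves blank never reach the API. The model id below is illustrative only.

```js
// Sketch: blank optional fields become undefined and vanish on serialization.
const castFloatOrUndefined = (value) => (value === '' ? undefined : parseFloat(value));

const payload = {
  model: 'togethercomputer/RedPajama-INCITE-7B-Base', // illustrative model id
  prompt: 'Once upon a time',
  temperature: castFloatOrUndefined(''),   // undefined (field left blank)
  max_tokens: castFloatOrUndefined('128'), // 128
};

console.log(JSON.stringify(payload));
// {"model":"togethercomputer/RedPajama-INCITE-7B-Base","prompt":"Once upon a time","max_tokens":128}
```
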
diff --git a/packages/backend/src/apps/together-ai/actions/index.js b/packages/backend/src/apps/together-ai/actions/index.js
new file mode 100644
index 00000000..a8c2e257
--- /dev/null
+++ b/packages/backend/src/apps/together-ai/actions/index.js
@@ -0,0 +1,4 @@
+import createCompletion from './create-completion/index.js';
+import createChatCompletion from './create-chat-completion/index.js';
+
+export default [createChatCompletion, createCompletion];
diff --git a/packages/backend/src/apps/together-ai/assets/favicon.svg b/packages/backend/src/apps/together-ai/assets/favicon.svg
new file mode 100644
index 00000000..620ac88a
--- /dev/null
+++ b/packages/backend/src/apps/together-ai/assets/favicon.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/packages/backend/src/apps/together-ai/auth/index.js b/packages/backend/src/apps/together-ai/auth/index.js
new file mode 100644
index 00000000..4765e6ef
--- /dev/null
+++ b/packages/backend/src/apps/together-ai/auth/index.js
@@ -0,0 +1,34 @@
+import verifyCredentials from './verify-credentials.js';
+import isStillVerified from './is-still-verified.js';
+
+export default {
+ fields: [
+ {
+ key: 'screenName',
+ label: 'Screen Name',
+ type: 'string',
+ required: true,
+ readOnly: false,
+ value: null,
+ placeholder: null,
+ description:
+ 'Screen name of your connection to be used on Automatisch UI.',
+ clickToCopy: false,
+ },
+ {
+ key: 'apiKey',
+ label: 'API Key',
+ type: 'string',
+ required: true,
+ readOnly: false,
+ value: null,
+ placeholder: null,
+ description: 'Together AI API key of your account.',
+ docUrl: 'https://automatisch.io/docs/together-ai#api-key',
+ clickToCopy: false,
+ },
+ ],
+
+ verifyCredentials,
+ isStillVerified,
+};
diff --git a/packages/backend/src/apps/together-ai/auth/is-still-verified.js b/packages/backend/src/apps/together-ai/auth/is-still-verified.js
new file mode 100644
index 00000000..3e6c9095
--- /dev/null
+++ b/packages/backend/src/apps/together-ai/auth/is-still-verified.js
@@ -0,0 +1,6 @@
+const isStillVerified = async ($) => {
+ await $.http.get('/v1/models');
+ return true;
+};
+
+export default isStillVerified;
diff --git a/packages/backend/src/apps/together-ai/auth/verify-credentials.js b/packages/backend/src/apps/together-ai/auth/verify-credentials.js
new file mode 100644
index 00000000..7f43f884
--- /dev/null
+++ b/packages/backend/src/apps/together-ai/auth/verify-credentials.js
@@ -0,0 +1,5 @@
+const verifyCredentials = async ($) => {
+ await $.http.get('/v1/models');
+};
+
+export default verifyCredentials;
diff --git a/packages/backend/src/apps/together-ai/common/add-auth-header.js b/packages/backend/src/apps/together-ai/common/add-auth-header.js
new file mode 100644
index 00000000..f9f5acba
--- /dev/null
+++ b/packages/backend/src/apps/together-ai/common/add-auth-header.js
@@ -0,0 +1,9 @@
+const addAuthHeader = ($, requestConfig) => {
+ if ($.auth.data?.apiKey) {
+ requestConfig.headers.Authorization = `Bearer ${$.auth.data.apiKey}`;
+ }
+
+ return requestConfig;
+};
+
+export default addAuthHeader;
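
Since this hook is registered under `beforeRequest` in the app definition, every outgoing request config passes through it. A quick sketch of the behaviour in isolation, using a made-up context and key:

```js
import addAuthHeader from './add-auth-header.js';

// Hypothetical context and request config, for illustration only.
const $ = { auth: { data: { apiKey: 'together-test-key' } } };
const requestConfig = { headers: {} };

addAuthHeader($, requestConfig);
console.log(requestConfig.headers.Authorization); // "Bearer together-test-key"
```
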
diff --git a/packages/backend/src/apps/together-ai/dynamic-data/index.js b/packages/backend/src/apps/together-ai/dynamic-data/index.js
new file mode 100644
index 00000000..6db48046
--- /dev/null
+++ b/packages/backend/src/apps/together-ai/dynamic-data/index.js
@@ -0,0 +1,3 @@
+import listModels from './list-models/index.js';
+
+export default [listModels];
diff --git a/packages/backend/src/apps/together-ai/dynamic-data/list-models/index.js b/packages/backend/src/apps/together-ai/dynamic-data/list-models/index.js
new file mode 100644
index 00000000..28532967
--- /dev/null
+++ b/packages/backend/src/apps/together-ai/dynamic-data/list-models/index.js
@@ -0,0 +1,17 @@
+export default {
+ name: 'List models',
+ key: 'listModels',
+
+ async run($) {
+ const { data } = await $.http.get('/v1/models');
+
+ const models = data.map((model) => {
+ return {
+ value: model.id,
+ name: model.display_name,
+ };
+ });
+
+ return { data: models };
+ },
+};
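
The mapping assumes `GET /v1/models` returns an array of model objects carrying `id` and `display_name`. A sketch with an abridged, illustrative response:

```js
// Abridged, illustrative /v1/models payload — real responses carry more fields.
const data = [
  { id: 'mistralai/Mixtral-8x7B-Instruct-v0.1', display_name: 'Mixtral-8x7B Instruct' },
  { id: 'togethercomputer/RedPajama-INCITE-7B-Chat', display_name: 'RedPajama-INCITE Chat (7B)' },
];

// Same transform as the dynamic-data handler above.
const models = data.map((model) => ({ value: model.id, name: model.display_name }));
// [{ value: 'mistralai/Mixtral-8x7B-Instruct-v0.1', name: 'Mixtral-8x7B Instruct' }, ...]
```
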
diff --git a/packages/backend/src/apps/together-ai/index.js b/packages/backend/src/apps/together-ai/index.js
new file mode 100644
index 00000000..efddacc2
--- /dev/null
+++ b/packages/backend/src/apps/together-ai/index.js
@@ -0,0 +1,20 @@
+import defineApp from '../../helpers/define-app.js';
+import addAuthHeader from './common/add-auth-header.js';
+import auth from './auth/index.js';
+import actions from './actions/index.js';
+import dynamicData from './dynamic-data/index.js';
+
+export default defineApp({
+ name: 'Together AI',
+ key: 'together-ai',
+ baseUrl: 'https://together.ai',
+ apiBaseUrl: 'https://api.together.xyz',
+ iconUrl: '{BASE_URL}/apps/together-ai/assets/favicon.svg',
+ authDocUrl: '{DOCS_URL}/apps/together-ai/connection',
+ primaryColor: '#000000',
+ supportsConnections: true,
+ beforeRequest: [addAuthHeader],
+ auth,
+ actions,
+ dynamicData,
+});
diff --git a/packages/backend/src/models/__snapshots__/app.test.js.snap b/packages/backend/src/models/__snapshots__/app.test.js.snap
index 4a9b7efb..2d4e4e0d 100644
--- a/packages/backend/src/models/__snapshots__/app.test.js.snap
+++ b/packages/backend/src/models/__snapshots__/app.test.js.snap
@@ -61,6 +61,7 @@ exports[`App model > list should have list of applications keys 1`] = `
"stripe",
"telegram-bot",
"todoist",
+ "together-ai",
"trello",
"twilio",
"twitter",
diff --git a/packages/docs/pages/.vitepress/config.js b/packages/docs/pages/.vitepress/config.js
index 11fc33ec..a162b062 100644
--- a/packages/docs/pages/.vitepress/config.js
+++ b/packages/docs/pages/.vitepress/config.js
@@ -526,6 +526,15 @@ export default defineConfig({
{ text: 'Connection', link: '/apps/todoist/connection' },
],
},
+ {
+ text: 'Together AI',
+ collapsible: true,
+ collapsed: true,
+ items: [
+ { text: 'Actions', link: '/apps/together-ai/actions' },
+ { text: 'Connection', link: '/apps/together-ai/connection' },
+ ],
+ },
{
text: 'Trello',
collapsible: true,
diff --git a/packages/docs/pages/apps/together-ai/actions.md b/packages/docs/pages/apps/together-ai/actions.md
new file mode 100644
index 00000000..4152212d
--- /dev/null
+++ b/packages/docs/pages/apps/together-ai/actions.md
@@ -0,0 +1,14 @@
+---
+favicon: /favicons/together-ai.svg
+items:
+ - name: Create chat completion
+ desc: Queries a chat model.
+ - name: Create completion
+ desc: Queries a language, code, or image model.
+---
+
+
+
+
diff --git a/packages/docs/pages/apps/together-ai/connection.md b/packages/docs/pages/apps/together-ai/connection.md
new file mode 100644
index 00000000..e40983a5
--- /dev/null
+++ b/packages/docs/pages/apps/together-ai/connection.md
@@ -0,0 +1,8 @@
+# Together AI
+
+1. Go to the [API Keys page](https://api.together.ai/settings/api-keys) on Together AI.
+2. Copy your API key.
+3. Paste the key into the `API Key` field in Automatisch.
+4. Enter any screen name to be displayed in Automatisch.
+5. Click `Save`.
+6. Start using the Together AI integration with Automatisch!
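
To sanity-check a key outside Automatisch, the same endpoint the connection check calls can be queried directly — a minimal sketch (Node 18+, with `TOGETHER_API_KEY` standing in for your key):

```js
// Lists available models; a 200 status confirms the key is accepted.
const response = await fetch('https://api.together.xyz/v1/models', {
  headers: { Authorization: `Bearer ${process.env.TOGETHER_API_KEY}` },
});

console.log(response.status); // 200 when the key is valid
```
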
diff --git a/packages/docs/pages/guide/available-apps.md b/packages/docs/pages/guide/available-apps.md
index 33372e90..21c0e800 100644
--- a/packages/docs/pages/guide/available-apps.md
+++ b/packages/docs/pages/guide/available-apps.md
@@ -54,6 +54,7 @@ The following integrations are currently supported by Automatisch.
- [Stripe](/apps/stripe/triggers)
- [Telegram](/apps/telegram-bot/actions)
- [Todoist](/apps/todoist/triggers)
+- [Together AI](/apps/together-ai/actions)
- [Trello](/apps/trello/actions)
- [Twilio](/apps/twilio/triggers)
- [Twitter](/apps/twitter/triggers)
diff --git a/packages/docs/pages/public/favicons/together-ai.svg b/packages/docs/pages/public/favicons/together-ai.svg
new file mode 100644
index 00000000..620ac88a
--- /dev/null
+++ b/packages/docs/pages/public/favicons/together-ai.svg
@@ -0,0 +1 @@
+
\ No newline at end of file