This repository has been archived by the owner on Sep 16, 2024. It is now read-only.

initial commit for official api usage #250

Closed
wants to merge 1 commit into from
4 changes: 3 additions & 1 deletion .env.example
@@ -19,8 +19,10 @@ CHATGPT_API_MODEL=gpt-3.5-turbo
 # CHATGPT_TEMPERATURE=0.8
 # (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
 # CHATGPT_MAX_CONTEXT_TOKENS=4097
-# You might want to lower this to save money if using a paid model. Earlier messages will be dropped until the prompt is within the limit.
+# (Optional) You might want to lower this to save money if using a paid model. Earlier messages will be dropped until the prompt is within the limit.
 # CHATGPT_MAX_PROMPT_TOKENS=3097
+# (Optional) A custom name if you don't like ChatGPT
+# CHATGPT_NAME=Assistant HAL

 # Set data store settings
 KEYV_BACKEND=file
2 changes: 1 addition & 1 deletion package.json
@@ -18,13 +18,13 @@
     "@keyv/postgres": "^1.4.9",
     "@keyv/redis": "^2.8.0",
     "@keyv/sqlite": "^3.6.6",
-    "@waylaidwanderer/chatgpt-api": "^1.37.3",
     "dotenv": "^16.3.1",
     "hash.js": "^1.1.7",
     "keyv": "^4.5.4",
     "keyv-file": "^0.2.0",
     "markdown-it": "^13.0.2",
     "matrix-bot-sdk": "0.7.0",
+    "openai": "^4.17.4",
     "typescript": "^5.2.2",
     "znv": "^0.4.0",
     "zod": "^3.22.4"
6 changes: 4 additions & 2 deletions src/env.ts
@@ -40,6 +40,7 @@ export const {
   CHATGPT_TEMPERATURE,
   CHATGPT_MAX_CONTEXT_TOKENS,
   CHATGPT_MAX_PROMPT_TOKENS,
+  CHATGPT_NAME,
 } = parseEnv(process.env, {
   DATA_PATH: { schema: z.string().default("./storage"), description: "Set to /storage/ if using docker, ./storage if running without" },
   KEYV_BACKEND: { schema: z.enum(["file", "other"]).default("file"), description: "Set the Keyv backend to 'file' or 'other' if other set KEYV_URL" },
@@ -75,6 +76,7 @@ export const {
   CHATGPT_IGNORE_MEDIA: { schema: z.boolean().default(false), description: "Whether or not the bot should react to non-text messages" },
   CHATGPT_REVERSE_PROXY: { schema: z.string().default(""), description: "Change the api url to use another (OpenAI-compatible) API endpoint" },
   CHATGPT_TEMPERATURE: { schema: z.number().default(0.8), description: "Set the temperature for the model" },
   CHATGPT_MAX_CONTEXT_TOKENS: { schema: z.number().default(4097), description: "Davinci models have a max context length of 4097 tokens, but you may need to change this for other models." },
   CHATGPT_MAX_PROMPT_TOKENS: { schema: z.number().default(3097), description: "You might want to lower this to save money if using a paid model. Earlier messages will be dropped until the prompt is within the limit." },
+  CHATGPT_NAME: { schema: z.string().default("ChatGPT"), description: "A custom name you can use for your bot. Will only change the name within API communication, not the name of the bot shown in matrix." },
 });
11 changes: 6 additions & 5 deletions src/handlers.ts
@@ -1,4 +1,4 @@
-import ChatGPTClient from '@waylaidwanderer/chatgpt-api';
+import OpenAI from "openai";
 import { LogService, MatrixClient, UserID } from "matrix-bot-sdk";
 import { CHATGPT_CONTEXT, CHATGPT_TIMEOUT, CHATGPT_IGNORE_MEDIA, MATRIX_DEFAULT_PREFIX_REPLY, MATRIX_DEFAULT_PREFIX, MATRIX_BLACKLIST, MATRIX_WHITELIST, MATRIX_RICH_TEXT, MATRIX_PREFIX_DM, MATRIX_THREADS, MATRIX_ROOM_BLACKLIST, MATRIX_ROOM_WHITELIST } from "./env.js";
 import { RelatesTo, MessageEvent, StoredConversation, StoredConversationConfig } from "./interfaces.js";
@@ -11,7 +11,7 @@ export default class CommandHandler {
   private userId: string;
   private localpart: string;

-  constructor(private client: MatrixClient, private chatGPT: ChatGPTClient) { }
+  constructor(private client: MatrixClient, private openaiClient: OpenAI, private assistant: OpenAI.Beta.Assistant) { }

   public async start() {
     await this.prepareProfile(); // Populate the variables above (async)
@@ -124,18 +124,19 @@ export default class CommandHandler {
       return;
     }

-    const result = await sendChatGPTMessage(this.chatGPT, await bodyWithoutPrefix, storedConversation)
+    const result = await sendChatGPTMessage(this.openaiClient, this.assistant, await bodyWithoutPrefix, storedConversation)
       .catch((error) => {
         LogService.error(`OpenAI-API Error: ${error}`);
         sendError(this.client, `The bot has encountered an error, please contact your administrator (Error code ${error.status || "Unknown"}).`, roomId, event.event_id);
       });
     await Promise.all([
       this.client.setTyping(roomId, false, 500),
-      sendReply(this.client, roomId, this.getRootEventId(event), `${result.response}`, MATRIX_THREADS, MATRIX_RICH_TEXT)
+      // Unwrapping the response with direct array accesses is not nice; a defensive alternative is sketched after this diff.
+      sendReply(this.client, roomId, this.getRootEventId(event), `${result["data"][0].content[0].text.value}`, MATRIX_THREADS, MATRIX_RICH_TEXT)
     ]);

     const storedConfig = ((storedConversation !== undefined && storedConversation.config !== undefined) ? storedConversation.config : {})
-    const configString: string = JSON.stringify({conversationId: result.conversationId, messageId: result.messageId, config: storedConfig})
+    const configString: string = JSON.stringify({threadId: result["data"][0].thread_id, config: storedConfig})
     await this.client.storageProvider.storeValue('gpt-' + storageKey, configString);
     if ((storageKey === roomId) && (CHATGPT_CONTEXT === "both")) await this.client.storageProvider.storeValue('gpt-' + event.event_id, configString);
   } catch (err) {
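The chained array accesses flagged in the comment above will throw if a run produced no message or a non-text content part. A minimal defensive unwrap could look like the sketch below; the response shape is assumed from the Assistants beta (`messages.list()` returns the newest message first), and the helper name `extractAssistantText` is hypothetical:

```typescript
// Shape assumed from the Assistants beta messages.list() response;
// adjust to the SDK's actual exported types.
type AssistantMessagePage = {
  data: Array<{
    thread_id: string;
    content: Array<{ type: string; text?: { value: string } }>;
  }>;
};

// Hypothetical helper: return the newest text reply, or undefined instead of throwing.
function extractAssistantText(page: AssistantMessagePage): string | undefined {
  const newest = page.data[0]; // messages.list() returns the newest message first by default
  const textPart = newest?.content.find((part) => part.type === "text");
  return textPart?.text?.value;
}
```

The reply call could then fall back to an error notice whenever the helper returns `undefined`, instead of crashing the handler.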
56 changes: 33 additions & 23 deletions src/index.ts
@@ -1,6 +1,4 @@
-import ChatGPTClient from '@waylaidwanderer/chatgpt-api';
-import Keyv from 'keyv'
-import { KeyvFile } from 'keyv-file';
+import OpenAI, { ClientOptions } from 'openai';
 import {
   MatrixAuth, MatrixClient, AutojoinRoomsMixin, LogService, LogLevel, RichConsoleLogger,
   RustSdkCryptoStorageProvider, IStorageProvider, SimpleFsStorageProvider, ICryptoStorageProvider,
@@ -11,14 +9,15 @@ import {
   DATA_PATH, KEYV_URL, OPENAI_AZURE, OPENAI_API_KEY, MATRIX_HOMESERVER_URL, MATRIX_ACCESS_TOKEN, MATRIX_AUTOJOIN,
   MATRIX_BOT_PASSWORD, MATRIX_BOT_USERNAME, MATRIX_ENCRYPTION, MATRIX_THREADS, CHATGPT_CONTEXT,
   CHATGPT_API_MODEL, KEYV_BOT_STORAGE, KEYV_BACKEND, CHATGPT_PROMPT_PREFIX, MATRIX_WELCOME,
-  CHATGPT_REVERSE_PROXY, CHATGPT_TEMPERATURE, CHATGPT_MAX_CONTEXT_TOKENS, CHATGPT_MAX_PROMPT_TOKENS
+  CHATGPT_REVERSE_PROXY, CHATGPT_TEMPERATURE, CHATGPT_MAX_CONTEXT_TOKENS, CHATGPT_MAX_PROMPT_TOKENS,
+  CHATGPT_TIMEOUT, CHATGPT_NAME,
 } from './env.js'
 import CommandHandler from "./handlers.js"
 import { KeyvStorageProvider } from './storage.js'
 import { parseMatrixUsernamePretty, wrapPrompt } from './utils.js';

 LogService.setLogger(new RichConsoleLogger());
-// LogService.setLevel(LogLevel.DEBUG); // Shows the Matrix sync loop details - not needed most of the time
+LogService.setLevel(LogLevel.DEBUG); // Shows the Matrix sync loop details - not needed most of the time
 LogService.setLevel(LogLevel.INFO);
 // LogService.muteModule("Metrics");
 LogService.trace = LogService.debug;
@@ -34,11 +33,6 @@ if (KEYV_BOT_STORAGE) {
 let cryptoStore: ICryptoStorageProvider;
 if (MATRIX_ENCRYPTION) cryptoStore = new RustSdkCryptoStorageProvider(path.join(DATA_PATH, "encrypted")); // /storage/encrypted

-let cacheOptions // Options for the Keyv cache, see https://www.npmjs.com/package/keyv
-if (KEYV_BACKEND === 'file'){
-  cacheOptions = { store: new KeyvFile({ filename: path.join(DATA_PATH, `chatgpt-bot-api.json`) }) };
-} else { cacheOptions = { uri: KEYV_URL } }
-
 async function main() {
   if (!MATRIX_ACCESS_TOKEN){
     const botUsernameWithoutDomain = parseMatrixUsernamePretty(MATRIX_BOT_USERNAME);
@@ -57,20 +51,36 @@ async function main() {
     return;
   }

-  const clientOptions = { // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
-    modelOptions: {
-      model: CHATGPT_API_MODEL, // The model is set to gpt-3.5-turbo by default
-      temperature: CHATGPT_TEMPERATURE,
-    },
-    promptPrefix: wrapPrompt(CHATGPT_PROMPT_PREFIX),
-    debug: false,
-    azure: OPENAI_AZURE,
-    reverseProxyUrl: CHATGPT_REVERSE_PROXY,
-    maxContextTokens: CHATGPT_MAX_CONTEXT_TOKENS,
-    maxPromptTokens: CHATGPT_MAX_PROMPT_TOKENS
-  };
+  // const clientOptions = { // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
+  //   modelOptions: {
+  //     model: CHATGPT_API_MODEL, // The model is set to gpt-3.5-turbo by default
+  //     temperature: CHATGPT_TEMPERATURE,
+  //   },
+  //   promptPrefix: wrapPrompt(CHATGPT_PROMPT_PREFIX),
+  //   debug: false,
+  //   azure: OPENAI_AZURE,
+  //   reverseProxyUrl: CHATGPT_REVERSE_PROXY,
+  //   maxContextTokens: CHATGPT_MAX_CONTEXT_TOKENS,
+  //   maxPromptTokens: CHATGPT_MAX_PROMPT_TOKENS
+  // };

+  const clientOptions: ClientOptions = {
+    apiKey: OPENAI_API_KEY,
+    timeout: CHATGPT_TIMEOUT,
+    baseURL: CHATGPT_REVERSE_PROXY !== "" ? CHATGPT_REVERSE_PROXY : undefined,
+  };
+  const openaiClient = new OpenAI(clientOptions);

-  const chatgpt = new ChatGPTClient(OPENAI_API_KEY, clientOptions, cacheOptions);
+  const assistant = await openaiClient.beta.assistants.create({
+    instructions: wrapPrompt(CHATGPT_PROMPT_PREFIX),
+    tools: [
+      { type: 'code_interpreter' }
+    ],
+    model: CHATGPT_API_MODEL,
+    name: CHATGPT_NAME,
+    // TODO: I cannot figure out how to use temperature, max context tokens, and max prompt tokens.
+    // Maybe these parameters are not yet available in the beta assistants API? Not sure, will check later.
+    // (A possible per-run approach with a newer SDK is sketched after this diff.)
+  });

   // Automatically join rooms the bot is invited to
   if (MATRIX_AUTOJOIN) AutojoinRoomsMixin.setupOnClient(client);
@@ -97,7 +107,7 @@ async function main() {
   });

   // Prepare the command handler
-  const commands = new CommandHandler(client, chatgpt);
+  const commands = new CommandHandler(client, openaiClient, assistant);
   await commands.start();

   LogService.info("index", `Starting bot using ChatGPT model: ${CHATGPT_API_MODEL}`);
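On the TODO in the assistant setup: the `openai` release pinned in this PR (4.17.4) does not appear to expose sampling or token-budget settings on assistants, but later releases of the Assistants API accept them per run. A sketch under that assumption; the helper name `createTunedRun` and the field availability are unverified for 4.17.4:

```typescript
import OpenAI from "openai";
import { CHATGPT_TEMPERATURE, CHATGPT_MAX_PROMPT_TOKENS } from "./env.js";

// Sketch, assuming a newer openai-node release where runs accept sampling and
// token-budget options; these fields are not guaranteed to exist in 4.17.4.
async function createTunedRun(client: OpenAI, threadId: string, assistantId: string) {
  return client.beta.threads.runs.create(threadId, {
    assistant_id: assistantId,
    temperature: CHATGPT_TEMPERATURE,             // per-run sampling temperature
    max_prompt_tokens: CHATGPT_MAX_PROMPT_TOKENS, // cap on context fed into the run
    max_completion_tokens: 1024,                  // hypothetical cap on generated tokens
  });
}
```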
3 changes: 1 addition & 2 deletions src/interfaces.ts
@@ -74,7 +74,6 @@ export type StoredConversationConfig = {
 }

 export type StoredConversation = {
-  conversationId: string;
-  messageId: string;
+  threadId: string;
   config: StoredConversationConfig;
 }
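Entries already written by the previous `ChatGPTClient` code still carry `conversationId`/`messageId` rather than `threadId`, and nothing in this diff migrates them. A minimal guard that treats legacy entries as absent could look like this sketch; the helper name `asCurrentConversation` is hypothetical:

```typescript
import { StoredConversation } from "./interfaces.js";

// Hypothetical guard: entries written before this PR lack threadId,
// so treat them as "no stored conversation" and let a new thread be created.
function asCurrentConversation(raw: string | undefined): StoredConversation | undefined {
  if (!raw) return undefined;
  try {
    const parsed = JSON.parse(raw);
    return typeof parsed.threadId === "string" ? (parsed as StoredConversation) : undefined;
  } catch {
    return undefined; // corrupt entry: fall back to a fresh thread
  }
}
```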
69 changes: 60 additions & 9 deletions src/utils.ts
@@ -1,9 +1,7 @@
-import ChatGPTClient from '@waylaidwanderer/chatgpt-api';
+import OpenAI from 'openai';
 import Markdown from 'markdown-it';
-import { MatrixClient } from "matrix-bot-sdk";
+import { LogService, MatrixClient } from "matrix-bot-sdk";
 import { MessageEvent, StoredConversation } from "./interfaces.js";
-import { CHATGPT_TIMEOUT } from "./env.js";

 const md = Markdown();

 export function parseMatrixUsernamePretty(matrix_username: string): string {
@@ -76,11 +74,64 @@ export async function sendReply(client: MatrixClient, roomId: string, rootEventId
   await client.sendEvent(roomId, "m.room.message", finalContent);
 }

-export async function sendChatGPTMessage(chatgpt: ChatGPTClient, question: string, storedConversation: StoredConversation) {
-  // TODO: CHATGPT_TIMEOUT
-  return (storedConversation !== undefined) ?
-    await chatgpt.sendMessage(question, { conversationId: storedConversation.conversationId, parentMessageId: storedConversation.messageId }) :
-    await chatgpt.sendMessage(question);
-}
+/**
+ * The OpenAI Assistants API queues tasks as runs. While a run is queued or in progress, we have to poll until it has finished.
+ * (An iterative variant with backoff and an overall deadline is sketched after this diff.)
+ * @param {OpenAI} client the open-ai client to be used
+ * @param {string} runId the id of the current run to wait for
+ * @param {string} threadId the id of the current thread we're using
+ */
+async function waitForRun(client: OpenAI, runId: string, threadId: string) {
+  const run = await client.beta.threads.runs.retrieve(threadId, runId);
+  if (run.status === "queued" || run.status === "in_progress") {
+    await new Promise((resolve) => setTimeout(resolve, 250));
+    return waitForRun(client, runId, threadId);
+  } else if (run.status !== "completed") {
+    LogService.error(`Failed while getting the response from OpenAI API. Details: ${JSON.stringify(run)}`);
+    throw new Error("Failed getting response from OpenAI");
+  }
+  return;
+}

+/**
+ * Schedule a run in order to retrieve a response from the API
+ * @param {OpenAI} client the open-ai client to be used
+ * @param {string} threadId the id of the current thread we're using
+ * @param {OpenAI.Beta.Assistant} assistant the assistant to use
+ */
+export async function getResponse(client: OpenAI, threadId: string, assistant: OpenAI.Beta.Assistant) {
+  // create a run and wait until the response has finished generating
+  LogService.debug("Waiting for our response to finish generating...");
+  const run = await client.beta.threads.runs.create(threadId, {assistant_id: assistant.id});
+  await waitForRun(client, run.id, threadId);
+  const message = await client.beta.threads.messages.list(threadId);
+  LogService.debug(`Finished generating response and got message: ${JSON.stringify(message)}`);
+  return message;
+}

+/**
+ * Send a question to the OpenAI API and get its response
+ * @param {OpenAI} client the open-ai client to be used
+ * @param {OpenAI.Beta.Assistant} assistant the assistant to use
+ * @param {string} question the question to be asked
+ * @param {StoredConversation} storedConversation holds the threadId for context
+ */
+export async function sendChatGPTMessage(client: OpenAI, assistant: OpenAI.Beta.Assistant, question: string, storedConversation?: StoredConversation) {
+  let threadId = "";
+  if (storedConversation !== undefined) {
+    // use the existing threadId if we already have one
+    threadId = storedConversation.threadId;
+  } else {
+    // if no threadId exists, create a new thread
+    const thread = await client.beta.threads.create();
+    threadId = thread.id;
+  }
+  // add our question to the current thread
+  await client.beta.threads.messages.create(threadId, {
+    role: "user",
+    content: question
+  });
+  const response = await getResponse(client, threadId, assistant);
+  return response;
+}

 export function wrapPrompt(wrapped: string) {
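`waitForRun` above polls every 250 ms and never gives up; the `timeout: CHATGPT_TIMEOUT` passed to the client in index.ts only bounds individual HTTP calls, not the run as a whole. A sketch of an iterative variant with exponential backoff and an overall deadline, using the same run statuses as above; the default `deadlineMs` is illustrative:

```typescript
import OpenAI from 'openai';

// Sketch: poll a run with exponential backoff and an overall deadline,
// instead of recursing every 250 ms forever. deadlineMs is illustrative.
async function waitForRunWithDeadline(
  client: OpenAI,
  runId: string,
  threadId: string,
  deadlineMs = 120_000,
): Promise<void> {
  const startedAt = Date.now();
  let delayMs = 250;
  while (Date.now() - startedAt < deadlineMs) {
    const run = await client.beta.threads.runs.retrieve(threadId, runId);
    if (run.status === "completed") return;
    if (run.status !== "queued" && run.status !== "in_progress") {
      throw new Error(`Run ${runId} ended with status ${run.status}`);
    }
    await new Promise((resolve) => setTimeout(resolve, delayMs));
    delayMs = Math.min(delayMs * 2, 5_000); // back off up to 5 s between polls
  }
  throw new Error(`Run ${runId} did not finish within ${deadlineMs} ms`);
}
```

This also replaces the unbounded recursion with a loop, so a stalled run fails cleanly instead of polling forever.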