From 44d006b9deaee58dbd5fce1fd733aa6553fc5d95 Mon Sep 17 00:00:00 2001
From: Webifi
Date: Fri, 9 Jun 2023 23:15:17 -0500
Subject: [PATCH 1/2] Better error handling

---
 src/lib/Chat.svelte | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/lib/Chat.svelte b/src/lib/Chat.svelte
index b54a54a..2ad8ec3 100644
--- a/src/lib/Chat.svelte
+++ b/src/lib/Chat.svelte
@@ -392,7 +392,7 @@
 
     const signal = controller.signal
 
-    console.log('apikey', $apiKeyStorage)
+    // console.log('apikey', $apiKeyStorage)
 
     const fetchOptions = {
       method: 'POST',
@@ -408,7 +408,7 @@
       let errorResponse
       try {
         const errObj = await response.json()
-        errorResponse = errObj?.error?.code
+        errorResponse = errObj?.error?.message || errObj?.error?.code
         if (!errorResponse && response.choices && response.choices[0]) {
          errorResponse = response.choices[0]?.message?.content
         }

From 2b8eefe113855310b8a8d46120ce3f1c52efee4a Mon Sep 17 00:00:00 2001
From: Webifi
Date: Sat, 10 Jun 2023 09:58:13 -0500
Subject: [PATCH 2/2] Fix token limit issue in #161

---
 src/lib/Chat.svelte | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/src/lib/Chat.svelte b/src/lib/Chat.svelte
index 2ad8ec3..e240cb8 100644
--- a/src/lib/Chat.svelte
+++ b/src/lib/Chat.svelte
@@ -20,7 +20,10 @@
     type Request,
     type Message,
     type Chat,
-    type ChatCompletionOpts
+    type ChatCompletionOpts,
+
+    type Model
+
   } from './Types.svelte'
   import Prompts from './Prompts.svelte'
   import Messages from './Messages.svelte'
@@ -358,6 +361,7 @@
 
     // Update token count with actual
     promptTokenCount = countPromptTokens(messagePayload, model)
+    const maxAllowed = getModelMaxTokens(chatSettings.model as Model) - (promptTokenCount + 1)
 
     try {
       const request: Request = {
@@ -369,17 +373,22 @@
           if (typeof setting.apiTransform === 'function') {
             value = setting.apiTransform(chatId, setting, value)
           }
-          if (opts.maxTokens) {
-            if (key === 'max_tokens') value = opts.maxTokens // only as large as requested
+          if (key === 'max_tokens') {
+            if (opts.maxTokens) {
+              value = opts.maxTokens // only as large as requested
+            }
+            if (value > maxAllowed || value < 1) value = null
           }
-          if (opts.streaming || opts.summaryRequest) {
-            /*
-            Streaming goes insane with more than one completion.
-            Doesn't seem like there's any way to separate the jumbled mess of deltas for the
-            different completions.
-            Summary should only have one completion
-            */
-            if (key === 'n') value = 1
+          if (key === 'n') {
+            if (opts.streaming || opts.summaryRequest) {
+              /*
+              Streaming goes insane with more than one completion.
+              Doesn't seem like there's any way to separate the jumbled mess of deltas for the
+              different completions.
+              Summary should only have one completion
+              */
+              value = 1
+            }
           }
           if (value !== null) acc[key] = value
           return acc
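
Note (reviewer sketch, not part of the patch): the PATCH 2/2 hunks keep max_tokens from exceeding what the model's context window can still hold after the prompt, dropping out-of-range values so the API default applies. Below is a minimal standalone TypeScript sketch of that clamping rule. clampMaxTokens is a hypothetical helper name introduced only for illustration; it assumes getModelMaxTokens(model) returns the model's total token budget and countPromptTokens(messagePayload, model) produced the prompt count, as in the diff above.

    // Sketch only: mirrors the max_tokens handling added in PATCH 2/2.
    // "requested" plays the role of opts.maxTokens / the max_tokens setting value,
    // "promptTokens" the role of promptTokenCount, and "modelMax" the value
    // returned by getModelMaxTokens(chatSettings.model as Model).
    const clampMaxTokens = (
      requested: number | null,
      promptTokens: number,
      modelMax: number
    ): number | null => {
      // Leave room for the prompt itself, plus one token of headroom.
      const maxAllowed = modelMax - (promptTokens + 1)
      if (requested === null) return null
      // Out-of-range values are dropped (null) so the API default applies instead.
      if (requested > maxAllowed || requested < 1) return null
      return requested
    }

    // Example: a 4096-token model with a 3000-token prompt leaves 1095 tokens,
    // so a 2048-token request is dropped while a 1000-token request passes through.
    // clampMaxTokens(2048, 3000, 4096) === null
    // clampMaxTokens(1000, 3000, 4096) === 1000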