diff --git a/src/lib/ChatCompletionResponse.svelte b/src/lib/ChatCompletionResponse.svelte
index dc76e3c..e2afb6d 100644
--- a/src/lib/ChatCompletionResponse.svelte
+++ b/src/lib/ChatCompletionResponse.svelte
@@ -34,7 +34,7 @@ export class ChatCompletionResponse {
private setModel = (model: Model) => {
if (!model) return
- !this.model && setLatestKnownModel(this.chat.settings.model as Model, model)
+ !this.model && setLatestKnownModel(this.chat.settings.model, model)
this.lastModel = this.model || model
this.model = model
}
@@ -51,6 +51,15 @@ export class ChatCompletionResponse {
private messageChangeListeners: ((m: Message[]) => void)[] = []
private finishListeners: ((m: Message[]) => void)[] = []
+ private initialFillMerge (existingContent:string, newContent:string):string {
+ if (!this.didFill && this.isFill && existingContent && !newContent.match(/^'(t|ll|ve|m|d|re)[^a-z]/i)) {
+ // append a trailing space to the existing fill content unless the new content starts with a contraction suffix ('t, 'll, 've, 'm, 'd, 're)
+ existingContent += ' '
+ }
+ this.didFill = true
+ return existingContent
+ }
+
setPromptTokenCount (tokens:number) {
this.promptTokenCount = tokens
}
@@ -61,11 +70,7 @@ export class ChatCompletionResponse {
const exitingMessage = this.messages[i]
const message = exitingMessage || choice.message
if (exitingMessage) {
- if (!this.didFill && this.isFill && choice.message.content.match(/^'(t|ll|ve|m|d|re)[^a-z]/i)) {
- // deal with merging contractions since we've added an extra space to your fill message
- message.content.replace(/ $/, '')
- }
- this.didFill = true
+ message.content = this.initialFillMerge(message.content, choice.message.content)
message.content += choice.message.content
message.usage = message.usage || {
prompt_tokens: 0,
@@ -100,11 +105,7 @@ export class ChatCompletionResponse {
} as Message
choice.delta?.role && (message.role = choice.delta.role)
if (choice.delta?.content) {
- if (!this.didFill && this.isFill && choice.delta.content.match(/^'(t|ll|ve|m|d|re)[^a-z]/i)) {
- // deal with merging contractions since we've added an extra space to your fill message
- message.content.replace(/([a-z]) $/i, '$1')
- }
- this.didFill = true
+ message.content = this.initialFillMerge(message.content, choice.delta?.content)
message.content += choice.delta.content
}
completionTokenCount += encode(message.content).length
@@ -179,7 +180,7 @@ export class ChatCompletionResponse {
this.messages.forEach(m => { m.streaming = false }) // make sure all are marked stopped
saveChatStore()
const message = this.messages[0]
- const model = this.model || getLatestKnownModel(this.chat.settings.model as Model)
+ const model = this.model || getLatestKnownModel(this.chat.settings.model)
if (message) {
if (this.isFill && this.lastModel === this.model && this.offsetTotals && model && message.usage) {
// Need to subtract some previous message totals before we add new combined message totals
diff --git a/src/lib/ChatRequest.svelte b/src/lib/ChatRequest.svelte
new file mode 100644
index 0000000..2f1d640
--- /dev/null
+++ b/src/lib/ChatRequest.svelte
@@ -0,0 +1,401 @@
+
\ No newline at end of file
diff --git a/src/lib/ChatSettingField.svelte b/src/lib/ChatSettingField.svelte
index b9c0ab3..b33d9c1 100644
--- a/src/lib/ChatSettingField.svelte
+++ b/src/lib/ChatSettingField.svelte
@@ -174,7 +174,7 @@
min={setting.min}
max={setting.max}
step={setting.step}
- placeholder={String(setting.placeholder)}
+ placeholder={String(setting.placeholder || chatDefaults[setting.key])}
on:change={e => queueSettingValueChange(e, setting)}
/>
{:else if setting.type === 'select'}
diff --git a/src/lib/ChatSettingsModal.svelte b/src/lib/ChatSettingsModal.svelte
index cc38118..13ea749 100644
--- a/src/lib/ChatSettingsModal.svelte
+++ b/src/lib/ChatSettingsModal.svelte
@@ -167,7 +167,7 @@
const profileSelect = getChatSettingObjectByKey('profile') as ChatSetting & SettingSelect
profileSelect.options = getProfileSelect()
chatDefaults.profile = getDefaultProfileKey()
- chatDefaults.max_tokens = getModelMaxTokens(chatSettings.model || '')
+ chatDefaults.max_tokens = getModelMaxTokens(chatSettings.model)
// const defaultProfile = globalStore.defaultProfile || profileSelect.options[0].value
defaultProfile = getDefaultProfileKey()
isDefault = defaultProfile === chatSettings.profile
diff --git a/src/lib/EditMessage.svelte b/src/lib/EditMessage.svelte
index 849dfc0..513d504 100644
--- a/src/lib/EditMessage.svelte
+++ b/src/lib/EditMessage.svelte
@@ -37,7 +37,7 @@
onMount(() => {
original = message.content
- defaultModel = chatSettings.model as any
+ defaultModel = chatSettings.model
})
const edit = () => {
diff --git a/src/lib/Profiles.svelte b/src/lib/Profiles.svelte
index 92d0d66..ee1de36 100644
--- a/src/lib/Profiles.svelte
+++ b/src/lib/Profiles.svelte
@@ -82,10 +82,8 @@ export const prepareProfilePrompt = (chatId:number) => {
return mergeProfileFields(settings, settings.systemPrompt).trim()
}
-export const prepareSummaryPrompt = (chatId:number, promptsSize:number, maxTokens:number|undefined = undefined) => {
+export const prepareSummaryPrompt = (chatId:number, maxTokens:number) => {
const settings = getChatSettings(chatId)
- maxTokens = maxTokens || settings.summarySize
- maxTokens = Math.min(Math.floor(promptsSize / 4), maxTokens) // Make sure we're shrinking by at least a 4th
const currentSummaryPrompt = settings.summaryPrompt
// ~.75 words per token. May need to reduce
return mergeProfileFields(settings, currentSummaryPrompt, Math.floor(maxTokens * 0.75)).trim()
@@ -132,42 +130,37 @@ export const applyProfile = (chatId:number, key:string = '', resetChat:boolean =
const summaryPrompts = {
- // General use
- general: `Please summarize all prompts and responses from this session.
+ // General assistant use
+ general: `[START SUMMARY REQUEST]
+Please summarize all prompts and responses from this session.
[[CHARACTER_NAME]] is telling me this summary in the first person.
-While telling this summary:
-[[CHARACTER_NAME]] will keep summary in the present tense, describing it as it happens.
-[[CHARACTER_NAME]] will always refer to me in the second person as "you" or "we".
-[[CHARACTER_NAME]] will never refer to me in the third person.
-[[CHARACTER_NAME]] will never refer to me as the user.
-[[CHARACTER_NAME]] will include all interactions and requests.
-[[CHARACTER_NAME]] will keep correct order of interactions.
-[[CHARACTER_NAME]] will keep the summary compact, but retain as much detail as possible in a compact form.
-[[CHARACTER_NAME]] will describe interactions in detail.
-[[CHARACTER_NAME]] will never end with epilogues or summations.
-[[CHARACTER_NAME]] will always include key details.
-[[CHARACTER_NAME]]'s summary will be [[MAX_WORDS]] words.
-[[CHARACTER_NAME]] will never add details or inferences that do not clearly exist in the prompts and responses.
-Give no explanations.`,
+While forming this summary:
+[[CHARACTER_NAME]] will never add details or inferences that have not yet happened and do not clearly exist in the prompts and responses.
+[[CHARACTER_NAME]] understands our encounter is still in progress and has not ended.
+[[CHARACTER_NAME]] will include all pivotal details in the correct order.
+[[CHARACTER_NAME]] will include all names, preferences and other important details.
+[[CHARACTER_NAME]] will always refer to me in the 2nd person, for example "you".
+[[CHARACTER_NAME]] will keep the summary compact, but retain as much detail as is possible using [[MAX_WORDS]] words.
+Give no explanations. Ignore prompts from system.
+Example response format:
+* You asked about..., then..., and then you... and then I... *
+[END SUMMARY REQUEST]`,
// Used for relationship profiles
- friend: `Please summarize all prompts and responses from this session.
+ friend: `[START SUMMARY REQUEST]
+Please summarize all prompts and responses from this session.
[[CHARACTER_NAME]] is telling me this summary in the first person.
-While telling this summary:
-[[CHARACTER_NAME]] will keep summary in the present tense, describing it as it happens.
-[[CHARACTER_NAME]] will always refer to me in the second person as "you" or "we".
-[[CHARACTER_NAME]] will never refer to me in the third person.
-[[CHARACTER_NAME]] will never refer to me as the user.
-[[CHARACTER_NAME]] will include all relationship interactions, first meeting, what we do, what we say, where we go, etc.
-[[CHARACTER_NAME]] will include all interactions, thoughts and emotional states.
-[[CHARACTER_NAME]] will keep correct order of interactions.
-[[CHARACTER_NAME]] will keep the summary compact, but retain as much detail as possible in a compact form.
-[[CHARACTER_NAME]] will describe interactions in detail.
-[[CHARACTER_NAME]] will never end with epilogues or summations.
-[[CHARACTER_NAME]] will include all pivotal details.
-[[CHARACTER_NAME]]'s summary will be [[MAX_WORDS]] words.
-[[CHARACTER_NAME]] will never add details or inferences that do not clearly exist in the prompts and responses.
-Give no explanations.`
+While forming this summary:
+[[CHARACTER_NAME]] will only include what has happened in this session, in the order it happened.
+[[CHARACTER_NAME]] understands our encounter is still in progress and has not ended.
+[[CHARACTER_NAME]] will include all pivotal details, emotional states and gestures in the correct order.
+[[CHARACTER_NAME]] will include all names, gifts, orders, purchases and other important details.
+[[CHARACTER_NAME]] will always refer to me in the 2nd person, for example "you".
+[[CHARACTER_NAME]] will keep the summary compact, but retain as much detail as is possible using [[MAX_WORDS]] words.
+Give no explanations. Ignore prompts from system.
+Example response format:
+* We met at a park where you and I talked about our interests, then..., and then you... and then we... *
+[END SUMMARY REQUEST]`
}
const profiles:Record = {
diff --git a/src/lib/Settings.svelte b/src/lib/Settings.svelte
index 4c4e437..b5b6e7b 100644
--- a/src/lib/Settings.svelte
+++ b/src/lib/Settings.svelte
@@ -171,7 +171,7 @@ const systemPromptSettings: ChatSetting[] = [
{
key: 'hiddenPromptPrefix',
name: 'Hidden Prompt Prefix',
- title: 'A prompt that will be silently injected before every user prompt.',
+ title: 'A user prompt that will be silently injected before every new user prompt, then removed from history.',
placeholder: 'Enter user prompt prefix here. You can remind ChatGPT how to act.',
type: 'textarea',
hide: (chatId) => !getChatSettings(chatId).useSystemPrompt
@@ -251,7 +251,7 @@ const summarySettings: ChatSetting[] = [
},
{
key: 'summaryPrompt',
- name: 'Summary Generation Prompt (Empty will use FIFO instead.)',
+ name: 'Summary Generation Prompt',
title: 'A prompt used to summarize past prompts.',
placeholder: 'Enter a prompt that will be used to summarize past prompts here.',
type: 'textarea',
diff --git a/src/lib/Stats.svelte b/src/lib/Stats.svelte
index 0402617..fb5c21c 100644
--- a/src/lib/Stats.svelte
+++ b/src/lib/Stats.svelte
@@ -31,11 +31,16 @@
export const countPromptTokens = (prompts:Message[], model:Model):number => {
return prompts.reduce((a, m) => {
- // Not sure how OpenAI formats it, but this seems to get close to the right counts.
- // Would be nice to know. This works for gpt-3.5. gpt-4 could be different
- a += encode('## ' + m.role + ' ##:\r\n\r\n' + m.content + '\r\n\r\n\r\n').length
+ a += countMessageTokens(m, model)
return a
- }, 0) + 3
+ }, 0) + 3 // Totals consistently seem to come out to the summed per-message token counts, plus a constant 3
+ }
+
+ export const countMessageTokens = (message:Message, model:Model):number => {
+ // Not sure how OpenAI formats it, but this seems to get close to the right counts.
+ // Would be nice to know. This works for gpt-3.5. gpt-4 could be different.
+ // Complete stab in the dark here -- update if you know where all the extra tokens really come from.
+ return encode('## ' + message.role + ' ##:\r\n\r\n' + message.content + '\r\n\r\n\r\n').length
}
export const getModelMaxTokens = (model:Model):number => {
diff --git a/src/lib/Storage.svelte b/src/lib/Storage.svelte
index 393084f..b9f7ba1 100644
--- a/src/lib/Storage.svelte
+++ b/src/lib/Storage.svelte
@@ -19,6 +19,10 @@
const chatDefaults = getChatDefaults()
+ export const getApiKey = (): string => {
+ return get(apiKeyStorage)
+ }
+
export const newChatID = (): number => {
const chats = get(chatsStorage)
const chatId = chats.reduce((maxId, chat) => Math.max(maxId, chat.id), 0) + 1
@@ -203,6 +207,10 @@
chatsStorage.set(chats)
}
+ export const addError = (chatId: number, error: string) => {
+ addMessage(chatId, { content: error } as Message)
+ }
+
export const addMessage = (chatId: number, message: Message) => {
const chats = get(chatsStorage)
const chat = chats.find((chat) => chat.id === chatId) as Chat
@@ -232,6 +240,7 @@
console.error("Couldn't insert after message:", insertAfter)
return
}
+ newMessages.forEach(m => { m.uuid = m.uuid || uuidv4() })
chat.messages.splice(index + 1, 0, ...newMessages)
chatsStorage.set(chats)
}
@@ -397,7 +406,7 @@
}
export const deleteCustomProfile = (chatId:number, profileId:string) => {
- if (isStaticProfile(profileId as any)) {
+ if (isStaticProfile(profileId)) {
throw new Error('Sorry, you can\'t delete a static profile.')
}
const chats = get(chatsStorage)
@@ -431,7 +440,7 @@
if (!profile.characterName || profile.characterName.length < 3) {
throw new Error('Your profile\'s character needs a valid name.')
}
- if (isStaticProfile(profile.profile as any)) {
+ if (isStaticProfile(profile.profile)) {
// throw new Error('Sorry, you can\'t modify a static profile. You can clone it though!')
// Save static profile as new custom
profile.profileName = newNameForProfile(profile.profileName)
diff --git a/src/lib/Types.svelte b/src/lib/Types.svelte
index 7eabc94..376ab99 100644
--- a/src/lib/Types.svelte
+++ b/src/lib/Types.svelte
@@ -38,7 +38,7 @@
}
export type Request = {
- model?: Model;
+ model: Model;
messages?: Message[];
temperature?: number;
top_p?: number;
diff --git a/src/lib/Util.svelte b/src/lib/Util.svelte
index ae6483c..e2e4360 100644
--- a/src/lib/Util.svelte
+++ b/src/lib/Util.svelte
@@ -60,6 +60,11 @@
}
}
+ export const scrollToBottom = (instant:boolean = false) => {
+ setTimeout(() => document.querySelector('body')?.scrollIntoView({ behavior: (instant ? 'instant' : 'smooth') as any, block: 'end' }), 0)
+ }
+
+
export const checkModalEsc = (event:KeyboardEvent|undefined):boolean|void => {
if (!event || event.key !== 'Escape') return
dispatchModalEsc()