Openai o1 models command #364

Merged 5 commits on Sep 18, 2024

Changes from all commits
.husky/pre-commit (2 changes: 1 addition & 1 deletion)

@@ -2,4 +2,4 @@
 . "$(dirname -- "$0")/_/husky.sh"

 npm run lint
-tsc --noEmit
+npx tsc --noEmit
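A bare `tsc` resolves from the global PATH and fails on machines without a global TypeScript install; `npx tsc` runs the compiler pinned in the project's node_modules, so the pre-commit type check uses the same TypeScript version as the repo.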
.nvmrc (1 change: 1 addition & 0 deletions)

@@ -0,0 +1 @@
+v18
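Pinning Node 18 in .nvmrc lets contributors run `nvm use` (or an equivalent version manager) to match the Node version the project is tested against.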
package-lock.json (24 changes: 16 additions & 8 deletions)

Generated lockfile; changes are not rendered in the diff.
src/bot.ts (10 changes: 7 additions & 3 deletions)

@@ -34,7 +34,7 @@ import { WalletConnect } from './modules/walletconnect'
 import { BotPayments } from './modules/payment'
 import { BotSchedule } from './modules/schedule'
 import config from './config'
-import { commandsHelpText, FEEDBACK, LOVE, MODELS, SUPPORT, TERMS, LANG, ALIAS } from './constants'
+import { commandsHelpText, FEEDBACK, LOVE, SUPPORT, TERMS, LANG, ALIAS } from './constants'
 import prometheusRegister, { PrometheusMetrics } from './metrics/prometheus'

 import { chatService, statsService } from './database/services'
@@ -57,6 +57,7 @@ import { VoiceToVoiceGPTBot } from './modules/voice-to-voice-gpt'
 // import { VoiceCommand } from './modules/voice-command'
 import { createInitialSessionData } from './helpers'
 import { LlamaAgent } from './modules/subagents'
+import { llmModelManager } from './modules/llms/utils/llmModelsManager'

 Events.EventEmitter.defaultMaxListeners = 30

@@ -504,10 +505,13 @@ bot.command('support', async (ctx) => {
 })

 bot.command('models', async (ctx) => {
+  const models = llmModelManager.generateTelegramOutput()
+  console.log(models)
   writeCommandLog(ctx as OnMessageContext).catch(logErrorHandler)
-  return await ctx.reply(MODELS.text, {
+  return await ctx.reply(models, {
     parse_mode: 'Markdown',
-    link_preview_options: { is_disabled: true }
+    link_preview_options: { is_disabled: true },
+    message_thread_id: ctx.message?.message_thread_id
   })
 })
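The /models reply is now generated from a central model registry instead of the static MODELS constant, and message_thread_id keeps the reply in the originating forum topic. The manager's source is not part of this diff, so the following is only a minimal sketch of the role llmModelManager plays, with all field names and the output format assumed:

    // Hypothetical sketch; the real llmModelsManager is not shown in this PR.
    interface ModelEntry {
      id: string // e.g. 'gpt-4o' (assumed field names)
      displayName: string
      command: string // Telegram command that selects the model
    }

    class LlmModelManager {
      private readonly models = new Map<string, ModelEntry>()

      register (entry: ModelEntry): void {
        this.models.set(entry.id, entry)
      }

      getModel (id: string): ModelEntry | undefined {
        return this.models.get(id)
      }

      // One Markdown line per registered model, suitable for
      // ctx.reply(..., { parse_mode: 'Markdown' }).
      generateTelegramOutput (): string {
        return [...this.models.values()]
          .map(m => `*${m.displayName}*: /${m.command}`)
          .join('\n')
      }
    }

Registering a new model family (such as the o1 models this PR adds) then only touches the registry, not every command handler.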
src/helpers.ts (7 changes: 4 additions & 3 deletions)

@@ -1,5 +1,6 @@
 import config from './config'
-import { LlmsModelsEnum } from './modules/llms/utils/types'
+import { LlmModelsEnum } from './modules/llms/utils/llmModelsManager'
+import { type DalleImageSize } from './modules/llms/utils/types'
 import { type BotSessionData } from './modules/types'

 export function createInitialSessionData (): BotSessionData {
@@ -38,14 +39,14 @@ export function createInitialSessionData (): BotSessionData {
     },
     dalle: {
       numImages: config.openAi.dalle.sessionDefault.numImages,
-      imgSize: config.openAi.dalle.sessionDefault.imgSize,
+      imgSize: config.openAi.dalle.sessionDefault.imgSize as DalleImageSize,
       isEnabled: config.openAi.dalle.isEnabled,
       imgRequestQueue: [],
       isProcessingQueue: false,
       imageGenerated: [],
       isInscriptionLotteryEnabled: config.openAi.dalle.isInscriptionLotteryEnabled,
       imgInquiried: []
     },
-    currentModel: LlmsModelsEnum.GPT_4
+    currentModel: LlmModelsEnum.GPT_4O
   }
 }
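Two things change here: the default session model moves from GPT_4 to GPT_4O under the renamed enum, and the DALL-E image size is cast from the untyped config string to the DalleImageSize union. A minimal sketch of why the cast compiles, assuming DalleImageSize is a string-literal union (its real definition in utils/types is not shown in this diff):

    // Assumed shape; the actual union may list different sizes.
    type DalleImageSize = '256x256' | '512x512' | '1024x1024'

    const fromConfig: string = '1024x1024'
    // Without the assertion this fails to compile: string is wider than the union.
    const imgSize: DalleImageSize = fromConfig as DalleImageSize

The assertion is unchecked at runtime, so a typo in config would flow through silently; validating the string before narrowing would be safer.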
src/modules/llms/api/athropic.ts (8 changes: 4 additions & 4 deletions)

@@ -6,9 +6,9 @@ import { pino } from 'pino'
 import config from '../../../config'
 import { type OnCallBackQueryData, type OnMessageContext, type ChatConversation } from '../../types'
 import { type LlmCompletion } from './llmApi'
-import { LlmsModelsEnum } from '../utils/types'
 import { sleep } from '../../sd-images/utils'
 import { headers, headersStream } from './helper'
+import { LlmModelsEnum } from '../utils/llmModelsManager'

 const logger = pino({
   name: 'anthropic - llmsBot',
@@ -22,7 +22,7 @@ const API_ENDPOINT = config.llms.apiEndpoint // 'http://127.0.0.1:5000' // confi

 export const anthropicCompletion = async (
   conversation: ChatConversation[],
-  model = LlmsModelsEnum.CLAUDE_OPUS
+  model = LlmModelsEnum.CLAUDE_3_OPUS
 ): Promise<LlmCompletion> => {
   logger.info(`Handling ${model} completion`)
   const data = {
@@ -59,7 +59,7 @@

 export const anthropicStreamCompletion = async (
   conversation: ChatConversation[],
-  model = LlmsModelsEnum.CLAUDE_OPUS,
+  model = LlmModelsEnum.CLAUDE_3_OPUS,
   ctx: OnMessageContext | OnCallBackQueryData,
   msgId: number,
   limitTokens = true
@@ -158,7 +158,7 @@

 export const toolsChatCompletion = async (
   conversation: ChatConversation[],
-  model = LlmsModelsEnum.CLAUDE_OPUS
+  model = LlmModelsEnum.CLAUDE_3_OPUS
 ): Promise<LlmCompletion> => {
   logger.info(`Handling ${model} completion`)
   const input = {
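All three Anthropic entry points now take their default from the central enum, with CLAUDE_OPUS renamed to the more precise CLAUDE_3_OPUS. A hedged usage sketch (the exact shape of ChatConversation is an assumption; only its name is visible in this diff):

    // Hypothetical call site for the non-streaming completion.
    const completion = await anthropicCompletion([
      { role: 'user', content: 'Summarize this PR in one line.' }
    ]) // model defaults to LlmModelsEnum.CLAUDE_3_OPUS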
src/modules/llms/api/llmApi.ts (8 changes: 4 additions & 4 deletions)

@@ -2,9 +2,9 @@ import axios from 'axios'
 import config from '../../../config'
 import { type ChatConversation } from '../../types'
 import pino from 'pino'
-import { LlmsModels, LlmsModelsEnum } from '../utils/types'
+import { type ChatModel } from '../utils/types'
 import { headers } from './helper'
+import { llmModelManager, LlmModelsEnum } from '../utils/llmModelsManager'

 // import { type ChatModel } from '../../open-ai/types'

@@ -39,8 +39,8 @@ interface QueryUrlDocument {
   conversation?: ChatConversation[]
 }

-export const getChatModel = (modelName: string): ChatModel => {
-  return LlmsModels[modelName]
+export const getChatModel = (modelName: string): ChatModel | undefined => {
+  return llmModelManager.getModel(modelName) as ChatModel // LlmsModels[modelName]
 }

 export const getChatModelPrice = (
@@ -111,7 +111,7 @@ export const deleteCollection = async (collectionName: string): Promise<void> =>

 export const llmCompletion = async (
   conversation: ChatConversation[],
-  model = LlmsModelsEnum.BISON
+  model = LlmModelsEnum.CHAT_BISON
 ): Promise<LlmCompletion> => {
   const data = {
     model, // chat-bison@001 'chat-bison', // 'gpt-3.5-turbo',
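The widened return type is the behavioral change to note: llmModelManager.getModel can miss, and the `as ChatModel` assertion only silences the compiler; an unknown model name still yields undefined at runtime, which the new `ChatModel | undefined` signature now surfaces to callers. A minimal call-site sketch ('gpt-4o' is an illustrative name, not taken from this diff):

    const model = getChatModel('gpt-4o')
    if (model === undefined) {
      throw new Error('Unknown model: gpt-4o')
    }
    // TypeScript has narrowed `model` to ChatModel past the guard.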