From 1fcb6a09f382dff547e2677d753a7a2894a2be3e Mon Sep 17 00:00:00 2001 From: fegloff Date: Fri, 4 Oct 2024 23:01:58 -0500 Subject: [PATCH 1/5] add lumabot + luma logic --- package-lock.json | 15 +++ package.json | 1 + src/bot.ts | 11 ++ src/config.ts | 7 +- src/modules/llms/api/luma.ts | 47 ++++++++ src/modules/llms/lumaBot.ts | 173 +++++++++++++++++++++++++++++ src/modules/llms/utils/llmsData.ts | 15 +++ src/modules/llms/utils/types.ts | 7 +- 8 files changed, 273 insertions(+), 3 deletions(-) create mode 100644 src/modules/llms/api/luma.ts create mode 100644 src/modules/llms/lumaBot.ts diff --git a/package-lock.json b/package-lock.json index 094ada88..47771d28 100644 --- a/package-lock.json +++ b/package-lock.json @@ -37,6 +37,7 @@ "litllm": "^3.0.0", "lokijs": "^1.5.12", "lru-cache": "^10.0.0", + "lumaai": "^1.0.2", "marked": "^14.1.2", "moment": "^2.29.4", "moment-timezone": "^0.5.43", @@ -13830,6 +13831,20 @@ "node": "14 || >=16.14" } }, + "node_modules/lumaai": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/lumaai/-/lumaai-1.0.2.tgz", + "integrity": "sha512-0kaKKcDEoTck0rxv3p8jvu6LrJr+w8VTSAqhZ917Eq74CU8HVyyg3VqCPHv62G6dJaO8lGND9Px7V1bMM0pjPQ==", + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7" + } + }, "node_modules/make-dir": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", diff --git a/package.json b/package.json index d6326d0f..0c69df62 100644 --- a/package.json +++ b/package.json @@ -101,6 +101,7 @@ "litllm": "^3.0.0", "lokijs": "^1.5.12", "lru-cache": "^10.0.0", + "lumaai": "^1.0.2", "marked": "^14.1.2", "moment": "^2.29.4", "moment-timezone": "^0.5.43", diff --git a/src/bot.ts b/src/bot.ts index bf20d8b3..32e845a8 100644 --- a/src/bot.ts +++ b/src/bot.ts @@ -59,6 +59,7 @@ import { createInitialSessionData, addQuotePrefix, markdownToTelegramHtml } from import { LlamaAgent } from './modules/subagents' import { llmModelManager } from './modules/llms/utils/llmModelsManager' import { HmnyBot } from './modules/hmny' +import { LumaBot } from './modules/llms/lumaBot' Events.EventEmitter.defaultMaxListeners = 30 @@ -209,6 +210,7 @@ const schedule = new BotSchedule(bot) const llamaAgent = new LlamaAgent(payments, 'llamaService') const openAiBot = new OpenAIBot(payments, [llamaAgent]) const dalleBot = new DalleBot(payments) +const lumaBot = new LumaBot(payments) const claudeBot = new ClaudeBot(payments) const vertexBot = new VertexBot(payments, [llamaAgent]) const oneCountryBot = new OneCountryBot(payments) @@ -340,6 +342,7 @@ const PayableBots: Record = { dalleBot: { bot: dalleBot }, claudeBot: { bot: claudeBot }, vertexBot: { bot: vertexBot }, + lumaBot: { bot: lumaBot }, openAiBot: { enabled: (ctx: OnMessageContext) => ctx.session.dalle.isEnabled, bot: openAiBot @@ -446,6 +449,12 @@ const onCallback = async (ctx: OnCallBackQueryData): Promise => { // return // } + if (lumaBot.isSupportedEvent(ctx)) { + await lumaBot.onEvent(ctx, (e) => { + logger.error(e) + }) + } + if (dalleBot.isSupportedEvent(ctx)) { await dalleBot.onEvent(ctx, (e) => { logger.error(e) @@ -457,6 +466,8 @@ const onCallback = async (ctx: OnCallBackQueryData): Promise => { } } +bot.on('callback_query:data', onCallback) + bot.command(['start', 'help', 'menu'], async (ctx) => { const accountId = payments.getAccountId(ctx as OnMessageContext) const account = 
payments.getUserAccount(accountId) diff --git a/src/config.ts b/src/config.ts index 09d61672..daa86a63 100644 --- a/src/config.ts +++ b/src/config.ts @@ -32,8 +32,13 @@ export default { sessionTimeout: process.env.SESSION_TIMEOUT ? parseInt(process.env.SESSION_TIMEOUT) : 48, // in hours + luma: { + isEnabled: true, + videoUrl: '', + apiKey: process.env.LUMAAI_API_KEY + }, llms: { - apiEndpoint: process.env.LLMS_ENDPOINT, // // process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000', + apiEndpoint: 'http://127.0.0.1:5000', // // process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000', apiKey: process.env.LLMS_API_KEY ?? '', wordLimit: 50, model: 'chat-bison', diff --git a/src/modules/llms/api/luma.ts b/src/modules/llms/api/luma.ts new file mode 100644 index 00000000..89d64831 --- /dev/null +++ b/src/modules/llms/api/luma.ts @@ -0,0 +1,47 @@ +import axios from 'axios' +import { pino } from 'pino' +import { LumaAI } from 'lumaai' +import config from '../../../config' +import { headers } from './helper' + +const logger = pino({ + name: 'luma - LumaBot', + transport: { + target: 'pino-pretty', + options: { colorize: true } + } +}) + +const lumaClient = new LumaAI({ authToken: config.luma.apiKey }) + +const API_ENDPOINT = config.llms.apiEndpoint + +export interface LumaGenerationResponse { + gnerationId: string + generationInProgress: string + queueTime: string +} + +export const lumaGeneration = async ( + chatId: number, + prompt: string +): Promise => { + logger.info(`Handling luma generation for this prompt: "${prompt}"`) + const data = { + chat_id: chatId, + prompt + } + const url = `${API_ENDPOINT}/luma/generations` + const response = await axios.post(url, data, headers) + const respJson = response.data + return { + gnerationId: respJson.generation_id, + generationInProgress: respJson.in_progress, + queueTime: respJson.queue_time + } +} + +export const getGeneration = async (generationId: string): Promise => { + const generation = await lumaClient.generations.get(generationId) + return generation +} diff --git a/src/modules/llms/lumaBot.ts b/src/modules/llms/lumaBot.ts new file mode 100644 index 00000000..bc59bc91 --- /dev/null +++ b/src/modules/llms/lumaBot.ts @@ -0,0 +1,173 @@ +import { type BotPayments } from '../payment' +import { + type OnMessageContext, + type OnCallBackQueryData, + type ChatConversation, + RequestState +} from '../types' +import { + MAX_TRIES, + sendMessage +} from './utils/helpers' +import * as Sentry from '@sentry/node' +import { LlmsBase } from './llmsBase' +import config from '../../config' +import { now } from '../../utils/perf' +import { type ModelVersion } from './utils/llmModelsManager' +import { Callbacks } from './utils/types' +import { type LlmCompletion } from './api/llmApi' +import { getGeneration, lumaGeneration } from './api/luma' + +interface VideoGeneration { + msgId: number + generationId: string + prompt: string +} + +export class LumaBot extends LlmsBase { + private generationList: VideoGeneration[] + + constructor (payments: BotPayments) { + super(payments, 'LumaBot', 'luma') + this.generationList = [] + if (!config.luma.isEnabled) { + this.logger.warn('Luma AI is disabled in config') + } + } + + public getEstimatedPrice (ctx: any): number { + try { + return 0 + // const session = this.getSession(ctx) + // if (!this.commands) { + // throw new Error('Not command list found') + // } + // if ( + // ctx.hasCommand(this.commands) + // ) { + // const imageNumber = session.numImages + // const imageSize = session.imgSize + // const price = 
getDalleModelPrice(this.model, imageSize, true, imageNumber) // cents + // return price * PRICE_ADJUSTMENT + // } + // return 0 + } catch (e) { + Sentry.captureException(e) + this.logger.error(`getEstimatedPrice error ${e}`) + throw e + } + } + + public isSupportedEvent ( + ctx: OnMessageContext | OnCallBackQueryData + ): boolean { + const hasCommand = ctx.hasCommand(this.supportedCommands) + const chatPrefix = this.hasPrefix(ctx.message?.text ?? '') + if (chatPrefix !== '') { + return true + } + return hasCommand || this.isSupportedCallbackQuery(ctx) + } + + public isSupportedCallbackQuery ( + ctx: OnMessageContext | OnCallBackQueryData + ): boolean { + if (!ctx.callbackQuery?.data) { + return false + } + return ctx.callbackQuery?.data.startsWith(Callbacks.LumaDownloadVideo) + } + + async chatStreamCompletion ( + conversation: ChatConversation[], + model: ModelVersion, + ctx: OnMessageContext | OnCallBackQueryData, + msgId: number, + limitTokens: boolean + ): Promise { + throw new Error('chatStreamCompletion is not implemented for LumaAiBot') + } + + async chatCompletion ( + conversation: ChatConversation[], + model: ModelVersion + ): Promise { + throw new Error('chatCompletion is not implemented for LumaAiBot') + } + + public async onEvent ( + ctx: OnMessageContext | OnCallBackQueryData, + refundCallback: (reason?: string) => void + ): Promise { + ctx.transient.analytics.module = this.module + + const isSupportedEvent = this.isSupportedEvent(ctx) + if (!isSupportedEvent && ctx.chat?.type !== 'private') { + this.logger.warn(`### unsupported command ${ctx.message?.text}`) + return + } + + if (this.isSupportedCallbackQuery(ctx)) { + if (ctx.callbackQuery?.data) { + const data = ctx.callbackQuery.data.split(':') + await this.onHandleVideoDownload(ctx, data[1]) + return + } + } + + const model = this.getModelFromContext(ctx) + if (model) { + await this.onGeneration(ctx) + return + } + + ctx.transient.analytics.sessionState = RequestState.Error + await sendMessage(ctx, '### unsupported command').catch(async (e) => { + await this.onError(ctx, e, MAX_TRIES, '### unsupported command') + }) + ctx.transient.analytics.actualResponseTime = now() + } + + private async onHandleVideoDownload (ctx: OnMessageContext | OnCallBackQueryData, generationId: string): Promise { + try { + const generation = await getGeneration(generationId) + const videoUrl = generation.assets?.video + if (videoUrl && ctx.chatId) { + const videoGeneration = this.generationList.find(gen => gen.generationId === generationId) + if (videoGeneration) { + await ctx.api.deleteMessages(ctx.chatId, [ctx.msgId, videoGeneration.msgId]) + await ctx.replyWithVideo(videoUrl, { caption: videoGeneration.prompt }) + this.generationList = this.generationList.filter(gen => gen.generationId !== generationId) + } + } + await ctx.answerCallbackQuery('Video sent successfully') + } catch (error) { + console.error('Error in video download:', error) + await ctx.answerCallbackQuery('Error processing video. Please try again.') + } + } + + async onGeneration (ctx: OnMessageContext | OnCallBackQueryData): Promise { + try { + const chatId = ctx.chat?.id + if (chatId) { + const prompt = ctx.match + const response = await lumaGeneration(chatId, prompt as string) + const msgId = ( + await ctx.reply(`You are #${response.generationInProgress} in line for the video generation. The wait time is about ${response.queueTime} seconds.`, { + message_thread_id: + ctx.message?.message_thread_id ?? 
+ ctx.message?.reply_to_message?.message_thread_id + }) + ).message_id + this.generationList.push({ + generationId: response.gnerationId, + msgId, + prompt: prompt as string + }) + } + } catch (e: any) { + await this.onError(ctx, e) + } + } +} diff --git a/src/modules/llms/utils/llmsData.ts b/src/modules/llms/utils/llmsData.ts index 4d6a8356..adbf99af 100644 --- a/src/modules/llms/utils/llmsData.ts +++ b/src/modules/llms/utils/llmsData.ts @@ -179,6 +179,21 @@ export const llmData: LLMData = { '1024x1792': 0.12, '1792x1024': 0.12 } + }, + lumaai: { + provider: 'luma', + name: 'Luma AI', + fullName: 'Luma AI', + botName: 'LumaBot', + version: 'lumaai-1-0-2', + commands: ['luma', 'l'], + prefix: ['l. '], + apiSpec: 'https://docs.lumalabs.ai/docs/welcome', + price: { + '1024x1024': 0.8, + '1024x1792': 0.12, + '1792x1024': 0.12 + } } } } diff --git a/src/modules/llms/utils/types.ts b/src/modules/llms/utils/types.ts index 58561d56..157f31cd 100644 --- a/src/modules/llms/utils/types.ts +++ b/src/modules/llms/utils/types.ts @@ -1,4 +1,4 @@ -export type Provider = 'openai' | 'claude' | 'vertex' | 'palm' | 'jurassic' +export type Provider = 'openai' | 'claude' | 'vertex' | 'palm' | 'jurassic' | 'luma' export type ChargeType = 'TOKEN' | 'CHAR' export type DalleImageSize = '1024x1024' | '1024x1792' | '1792x1024' @@ -24,7 +24,6 @@ export interface ChatModel extends BaseModel { } export interface ImageModel extends BaseModel { - apiSpec: string price: ImagePrice } @@ -34,3 +33,7 @@ export interface LLMData { chatModels: Record imageModels: Record } + +export enum Callbacks { + LumaDownloadVideo = 'luma_dl' +} From 568f4d29cd34c175ed33daaf72926a6e8c1b46e5 Mon Sep 17 00:00:00 2001 From: fegloff Date: Mon, 7 Oct 2024 15:29:40 -0500 Subject: [PATCH 2/5] update config file --- src/config.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/config.ts b/src/config.ts index daa86a63..cfe8c93f 100644 --- a/src/config.ts +++ b/src/config.ts @@ -34,7 +34,6 @@ export default { : 48, // in hours luma: { isEnabled: true, - videoUrl: '', apiKey: process.env.LUMAAI_API_KEY }, llms: { From 695a9c9bbd80815d45ed70ac4250f0352d897ca2 Mon Sep 17 00:00:00 2001 From: fegloff Date: Mon, 7 Oct 2024 16:27:08 -0500 Subject: [PATCH 3/5] update vision command list --- src/modules/llms/utils/llmsData.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/modules/llms/utils/llmsData.ts b/src/modules/llms/utils/llmsData.ts index adbf99af..70673d7b 100644 --- a/src/modules/llms/utils/llmsData.ts +++ b/src/modules/llms/utils/llmsData.ts @@ -110,7 +110,7 @@ export const llmData: LLMData = { fullName: 'GPT-4 Vision', botName: 'OpenAIBot', version: 'gpt-4-vision-preview', - commands: ['vision, v'], + commands: ['vision', 'v'], prefix: ['v. 
'], apiSpec: 'https://platform.openai.com/docs/guides/vision', inputPrice: 0.03, From 15b290d064e9b8c253ab1cb0b90f02f5db9ccccf Mon Sep 17 00:00:00 2001 From: fegloff Date: Fri, 1 Nov 2024 17:28:06 -0500 Subject: [PATCH 4/5] update getEstimatedPrice + add loop + add generation delete api call and logic --- src/modules/llms/api/luma.ts | 36 +++++++++++++++++++++++++++++++++--- src/modules/llms/lumaBot.ts | 24 +++++++++--------------- 2 files changed, 42 insertions(+), 18 deletions(-) diff --git a/src/modules/llms/api/luma.ts b/src/modules/llms/api/luma.ts index 89d64831..4a78eaa5 100644 --- a/src/modules/llms/api/luma.ts +++ b/src/modules/llms/api/luma.ts @@ -1,4 +1,4 @@ -import axios from 'axios' +import axios, { AxiosError } from 'axios' import { pino } from 'pino' import { LumaAI } from 'lumaai' import config from '../../../config' @@ -24,12 +24,14 @@ export interface LumaGenerationResponse { export const lumaGeneration = async ( chatId: number, - prompt: string + prompt: string, + loop = true ): Promise => { logger.info(`Handling luma generation for this prompt: "${prompt}"`) const data = { chat_id: chatId, - prompt + prompt, + loop } const url = `${API_ENDPOINT}/luma/generations` const response = await axios.post(url, data, headers) @@ -45,3 +47,31 @@ export const getGeneration = async (generationId: string): Promise => { + try { + logger.info(`Deleting luma generation ${generationId}`) + const url = `${API_ENDPOINT}/luma/generations/${generationId}` + const response = await axios.delete(url, headers) + if (response.status === 204) { + logger.info(`Successfully deleted luma generation ${generationId}`) + return true + } + logger.warn(`Unexpected response status ${response.status} when deleting generation ${generationId}`) + return false + } catch (e) { + if (e instanceof AxiosError) { + const status = e.response?.status + if (status === 404) { + logger.warn(`Generation ${generationId} not found`) + } else if (status === 403) { + logger.error(`Unauthorized to delete generation ${generationId}`) + } else { + logger.error(`Error deleting generation ${generationId}: ${e.message}`) + } + } else { + logger.error(`Unexpected error deleting generation ${generationId}: ${e}`) + } + return false + } +} diff --git a/src/modules/llms/lumaBot.ts b/src/modules/llms/lumaBot.ts index bc59bc91..7372a339 100644 --- a/src/modules/llms/lumaBot.ts +++ b/src/modules/llms/lumaBot.ts @@ -7,6 +7,7 @@ import { } from '../types' import { MAX_TRIES, + PRICE_ADJUSTMENT, sendMessage } from './utils/helpers' import * as Sentry from '@sentry/node' @@ -16,7 +17,7 @@ import { now } from '../../utils/perf' import { type ModelVersion } from './utils/llmModelsManager' import { Callbacks } from './utils/types' import { type LlmCompletion } from './api/llmApi' -import { getGeneration, lumaGeneration } from './api/luma' +import { deleteGeneration, getGeneration, lumaGeneration } from './api/luma' interface VideoGeneration { msgId: number @@ -26,10 +27,13 @@ interface VideoGeneration { export class LumaBot extends LlmsBase { private generationList: VideoGeneration[] + protected supportedCommands: string[] + protected supportedPrefixes: string[] constructor (payments: BotPayments) { super(payments, 'LumaBot', 'luma') this.generationList = [] + if (!config.luma.isEnabled) { this.logger.warn('Luma AI is disabled in config') } @@ -37,20 +41,9 @@ export class LumaBot extends LlmsBase { public getEstimatedPrice (ctx: any): number { try { - return 0 - // const session = this.getSession(ctx) - // if (!this.commands) { - // throw new 
Error('Not command list found') - // } - // if ( - // ctx.hasCommand(this.commands) - // ) { - // const imageNumber = session.numImages - // const imageSize = session.imgSize - // const price = getDalleModelPrice(this.model, imageSize, true, imageNumber) // cents - // return price * PRICE_ADJUSTMENT - // } - // return 0 + // $0.0032 per frame or about $0.4 for 5s 24fps video at 1280×720p + // price in cents + return PRICE_ADJUSTMENT ? 40 * PRICE_ADJUSTMENT : 40 * 2 } catch (e) { Sentry.captureException(e) this.logger.error(`getEstimatedPrice error ${e}`) @@ -138,6 +131,7 @@ export class LumaBot extends LlmsBase { await ctx.api.deleteMessages(ctx.chatId, [ctx.msgId, videoGeneration.msgId]) await ctx.replyWithVideo(videoUrl, { caption: videoGeneration.prompt }) this.generationList = this.generationList.filter(gen => gen.generationId !== generationId) + await deleteGeneration(videoGeneration.generationId) } } await ctx.answerCallbackQuery('Video sent successfully') From 80d5954341d0cfe28c42a0bbdc446a253b11a355 Mon Sep 17 00:00:00 2001 From: fegloff Date: Mon, 4 Nov 2024 12:21:17 -0500 Subject: [PATCH 5/5] minor change --- src/modules/llms/utils/llmsData.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/modules/llms/utils/llmsData.ts b/src/modules/llms/utils/llmsData.ts index 2cdae7af..355473e9 100644 --- a/src/modules/llms/utils/llmsData.ts +++ b/src/modules/llms/utils/llmsData.ts @@ -238,6 +238,12 @@ export const llmData: LLMData = { system: config.openAi.chatGpt.chatCompletionContext, max_tokens: +config.openAi.chatGpt.maxTokens } + }, + luma: { + defaultParameters: { + system: config.openAi.chatGpt.chatCompletionContext, + max_tokens: +config.openAi.chatGpt.maxTokens + } } } }
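
For reference, the flow this series adds can be exercised end to end outside the bot. The sketch below is a minimal, hypothetical smoke test built only on the helpers introduced above (lumaGeneration, getGeneration, deleteGeneration); the script name, import path, chat id, prompt, and polling loop are assumptions for illustration, and it presumes a running LLMS backend exposing POST /luma/generations plus a configured LUMAAI_API_KEY. Note that LumaBot itself does not poll — it replies with the queue position, waits for a luma_dl:<generationId> callback, sends the video, and only then deletes the generation.

// luma-flow-sketch.ts — hypothetical smoke test for the Luma flow added above.
import { lumaGeneration, getGeneration, deleteGeneration } from './src/modules/llms/api/luma'

// Illustrative values only.
const CHAT_ID = 123456789
const PROMPT = 'a slow pan over a foggy harbor at dawn'

async function main (): Promise<void> {
  // 1. Enqueue the generation through the LLMS backend proxy
  //    (the same POST /luma/generations call LumaBot.onGeneration makes).
  const { gnerationId, generationInProgress, queueTime } = await lumaGeneration(CHAT_ID, PROMPT)
  console.log(`queued as #${generationInProgress}, ~${queueTime}s estimated wait`)

  // 2. Poll the Luma SDK until a video asset is attached. The bot itself does not
  //    poll; it waits for the luma_dl:<generationId> callback before fetching.
  let videoUrl: string | undefined
  for (let attempt = 0; attempt < 30 && !videoUrl; attempt++) {
    await new Promise(resolve => setTimeout(resolve, 10_000))
    const generation = await getGeneration(gnerationId)
    videoUrl = generation.assets?.video ?? undefined
  }
  if (!videoUrl) throw new Error('generation did not complete within the polling window')
  console.log(`video ready: ${videoUrl}`)

  // 3. Clean up server-side state, mirroring onHandleVideoDownload.
  await deleteGeneration(gnerationId)
}

main().catch(console.error)

One design point worth noting: in the bot flow, deleteGeneration runs only after the video has been delivered to the chat, so a failed send leaves the generation retrievable for a retry.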