diff --git a/package-lock.json b/package-lock.json
index 094ada8..47771d2 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -37,6 +37,7 @@
         "litllm": "^3.0.0",
         "lokijs": "^1.5.12",
         "lru-cache": "^10.0.0",
+        "lumaai": "^1.0.2",
         "marked": "^14.1.2",
         "moment": "^2.29.4",
         "moment-timezone": "^0.5.43",
@@ -13830,6 +13831,20 @@
         "node": "14 || >=16.14"
       }
     },
+    "node_modules/lumaai": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/lumaai/-/lumaai-1.0.2.tgz",
+      "integrity": "sha512-0kaKKcDEoTck0rxv3p8jvu6LrJr+w8VTSAqhZ917Eq74CU8HVyyg3VqCPHv62G6dJaO8lGND9Px7V1bMM0pjPQ==",
+      "dependencies": {
+        "@types/node": "^18.11.18",
+        "@types/node-fetch": "^2.6.4",
+        "abort-controller": "^3.0.0",
+        "agentkeepalive": "^4.2.1",
+        "form-data-encoder": "1.7.2",
+        "formdata-node": "^4.3.2",
+        "node-fetch": "^2.6.7"
+      }
+    },
     "node_modules/make-dir": {
       "version": "4.0.0",
       "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz",
diff --git a/package.json b/package.json
index d6326d0..0c69df6 100644
--- a/package.json
+++ b/package.json
@@ -101,6 +101,7 @@
     "litllm": "^3.0.0",
     "lokijs": "^1.5.12",
     "lru-cache": "^10.0.0",
+    "lumaai": "^1.0.2",
    "marked": "^14.1.2",
     "moment": "^2.29.4",
     "moment-timezone": "^0.5.43",
diff --git a/src/bot.ts b/src/bot.ts
index 61b90e1..8ae4c0b 100644
--- a/src/bot.ts
+++ b/src/bot.ts
@@ -59,6 +59,7 @@ import { createInitialSessionData, addQuotePrefix, markdownToTelegramHtml } from
 import { LlamaAgent } from './modules/subagents'
 import { llmModelManager } from './modules/llms/utils/llmModelsManager'
 import { HmnyBot } from './modules/hmny'
+import { LumaBot } from './modules/llms/lumaBot'
 import { XaiBot } from './modules/llms/xaiBot'
 
 Events.EventEmitter.defaultMaxListeners = 30
@@ -207,6 +208,7 @@ const schedule = new BotSchedule(bot)
 const llamaAgent = new LlamaAgent(payments, 'llamaService')
 const openAiBot = new OpenAIBot(payments, [llamaAgent])
 const dalleBot = new DalleBot(payments)
+const lumaBot = new LumaBot(payments)
 const claudeBot = new ClaudeBot(payments)
 const vertexBot = new VertexBot(payments, [llamaAgent])
 const xaiBot = new XaiBot(payments)
@@ -339,6 +341,7 @@ const PayableBots: Record<string, PayableBotConfig> = {
   dalleBot: { bot: dalleBot },
   claudeBot: { bot: claudeBot },
   vertexBot: { bot: vertexBot },
+  lumaBot: { bot: lumaBot },
   aixBot: { bot: xaiBot },
   openAiBot: {
     enabled: (ctx: OnMessageContext) => ctx.session.dalle.isEnabled,
@@ -446,6 +449,12 @@ const onCallback = async (ctx: OnCallBackQueryData): Promise<void> => {
   //   return
   // }
 
+  if (lumaBot.isSupportedEvent(ctx)) {
+    await lumaBot.onEvent(ctx, (e) => {
+      logger.error(e)
+    })
+  }
+
   if (dalleBot.isSupportedEvent(ctx)) {
     await dalleBot.onEvent(ctx, (e) => {
       logger.error(e)
@@ -457,6 +466,8 @@
   }
 }
 
+bot.on('callback_query:data', onCallback)
+
 bot.command(['start', 'help', 'menu'], async (ctx) => {
   const accountId = payments.getAccountId(ctx as OnMessageContext)
   const account = payments.getUserAccount(accountId)
diff --git a/src/config.ts b/src/config.ts
index 5bcbad4..110ddb0 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -32,8 +32,12 @@ export default {
   sessionTimeout: process.env.SESSION_TIMEOUT ?
     parseInt(process.env.SESSION_TIMEOUT) : 48, // in hours
+  luma: {
+    isEnabled: true,
+    apiKey: process.env.LUMAAI_API_KEY
+  },
   llms: {
-    apiEndpoint: process.env.LLMS_ENDPOINT, // // process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000',
+    apiEndpoint: process.env.LLMS_ENDPOINT ?? 'http://127.0.0.1:5000',
     apiKey: process.env.LLMS_API_KEY ?? '',
     wordLimit: 50,
     model: 'gpt-4o',
diff --git a/src/modules/llms/api/luma.ts b/src/modules/llms/api/luma.ts
new file mode 100644
index 0000000..4a78eaa
--- /dev/null
+++ b/src/modules/llms/api/luma.ts
@@ -0,0 +1,77 @@
+import axios, { AxiosError } from 'axios'
+import { pino } from 'pino'
+import { LumaAI } from 'lumaai'
+import config from '../../../config'
+import { headers } from './helper'
+
+const logger = pino({
+  name: 'luma - LumaBot',
+  transport: {
+    target: 'pino-pretty',
+    options: { colorize: true }
+  }
+})
+
+const lumaClient = new LumaAI({ authToken: config.luma.apiKey })
+
+const API_ENDPOINT = config.llms.apiEndpoint
+
+export interface LumaGenerationResponse {
+  generationId: string
+  generationInProgress: string
+  queueTime: string
+}
+
+export const lumaGeneration = async (
+  chatId: number,
+  prompt: string,
+  loop = true
+): Promise<LumaGenerationResponse> => {
+  logger.info(`Handling luma generation for this prompt: "${prompt}"`)
+  const data = {
+    chat_id: chatId,
+    prompt,
+    loop
+  }
+  const url = `${API_ENDPOINT}/luma/generations`
+  const response = await axios.post(url, data, headers)
+  const respJson = response.data
+  return {
+    generationId: respJson.generation_id,
+    generationInProgress: respJson.in_progress,
+    queueTime: respJson.queue_time
+  }
+}
+
+export const getGeneration = async (generationId: string): Promise<LumaAI.Generations.Generation> => {
+  const generation = await lumaClient.generations.get(generationId)
+  return generation
+}
+
+export const deleteGeneration = async (generationId: string): Promise<boolean> => {
+  try {
+    logger.info(`Deleting luma generation ${generationId}`)
+    const url = `${API_ENDPOINT}/luma/generations/${generationId}`
+    const response = await axios.delete(url, headers)
+    if (response.status === 204) {
+      logger.info(`Successfully deleted luma generation ${generationId}`)
+      return true
+    }
+    logger.warn(`Unexpected response status ${response.status} when deleting generation ${generationId}`)
+    return false
+  } catch (e) {
+    if (e instanceof AxiosError) {
+      const status = e.response?.status
+      if (status === 404) {
+        logger.warn(`Generation ${generationId} not found`)
+      } else if (status === 403) {
+        logger.error(`Unauthorized to delete generation ${generationId}`)
+      } else {
+        logger.error(`Error deleting generation ${generationId}: ${e.message}`)
+      }
+    } else {
+      logger.error(`Unexpected error deleting generation ${generationId}: ${e}`)
+    }
+    return false
+  }
+}
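Note: the backend route behind POST /luma/generations is not part of this diff. The sketch below is only an assumption of the contract lumaGeneration relies on, with field names inferred from the response parsing above (generation_id, in_progress, queue_time); the real backend may differ.

// Hypothetical shape of the backend response body (inferred, not in this patch):
interface LumaBackendResponse {
  generation_id: string // id later passed to lumaClient.generations.get()
  in_progress: string // caller's position in the generation queue
  queue_time: string // estimated wait time, in seconds
}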
diff --git a/src/modules/llms/lumaBot.ts b/src/modules/llms/lumaBot.ts
new file mode 100644
index 0000000..7372a33
--- /dev/null
+++ b/src/modules/llms/lumaBot.ts
@@ -0,0 +1,167 @@
+import { type BotPayments } from '../payment'
+import {
+  type OnMessageContext,
+  type OnCallBackQueryData,
+  type ChatConversation,
+  RequestState
+} from '../types'
+import {
+  MAX_TRIES,
+  PRICE_ADJUSTMENT,
+  sendMessage
+} from './utils/helpers'
+import * as Sentry from '@sentry/node'
+import { LlmsBase } from './llmsBase'
+import config from '../../config'
+import { now } from '../../utils/perf'
+import { type ModelVersion } from './utils/llmModelsManager'
+import { Callbacks } from './utils/types'
+import { type LlmCompletion } from './api/llmApi'
+import { deleteGeneration, getGeneration, lumaGeneration } from './api/luma'
+
+interface VideoGeneration {
+  msgId: number
+  generationId: string
+  prompt: string
+}
+
+export class LumaBot extends LlmsBase {
+  // Tracks in-flight generations so the download callback can find the
+  // original prompt and clean up the queue-position message.
+  private generationList: VideoGeneration[]
+  protected supportedCommands: string[]
+  protected supportedPrefixes: string[]
+
+  constructor (payments: BotPayments) {
+    super(payments, 'LumaBot', 'luma')
+    this.generationList = []
+
+    if (!config.luma.isEnabled) {
+      this.logger.warn('Luma AI is disabled in config')
+    }
+  }
+
+  public getEstimatedPrice (ctx: any): number {
+    try {
+      // $0.0032 per frame or about $0.4 for 5s 24fps video at 1280×720p
+      // price in cents
+      return PRICE_ADJUSTMENT ? 40 * PRICE_ADJUSTMENT : 40 * 2
+    } catch (e) {
+      Sentry.captureException(e)
+      this.logger.error(`getEstimatedPrice error ${e}`)
+      throw e
+    }
+  }
+
+  public isSupportedEvent (
+    ctx: OnMessageContext | OnCallBackQueryData
+  ): boolean {
+    const hasCommand = ctx.hasCommand(this.supportedCommands)
+    const chatPrefix = this.hasPrefix(ctx.message?.text ?? '')
+    if (chatPrefix !== '') {
+      return true
+    }
+    return hasCommand || this.isSupportedCallbackQuery(ctx)
+  }
+
+  public isSupportedCallbackQuery (
+    ctx: OnMessageContext | OnCallBackQueryData
+  ): boolean {
+    if (!ctx.callbackQuery?.data) {
+      return false
+    }
+    return ctx.callbackQuery?.data.startsWith(Callbacks.LumaDownloadVideo)
+  }
+
+  async chatStreamCompletion (
+    conversation: ChatConversation[],
+    model: ModelVersion,
+    ctx: OnMessageContext | OnCallBackQueryData,
+    msgId: number,
+    limitTokens: boolean
+  ): Promise<LlmCompletion> {
+    throw new Error('chatStreamCompletion is not implemented for LumaBot')
+  }
+
+  async chatCompletion (
+    conversation: ChatConversation[],
+    model: ModelVersion
+  ): Promise<LlmCompletion> {
+    throw new Error('chatCompletion is not implemented for LumaBot')
+  }
+
+  public async onEvent (
+    ctx: OnMessageContext | OnCallBackQueryData,
+    refundCallback: (reason?: string) => void
+  ): Promise<void> {
+    ctx.transient.analytics.module = this.module
+
+    const isSupportedEvent = this.isSupportedEvent(ctx)
+    if (!isSupportedEvent && ctx.chat?.type !== 'private') {
+      this.logger.warn(`### unsupported command ${ctx.message?.text}`)
+      return
+    }
+
+    if (this.isSupportedCallbackQuery(ctx)) {
+      if (ctx.callbackQuery?.data) {
+        const data = ctx.callbackQuery.data.split(':')
+        await this.onHandleVideoDownload(ctx, data[1])
+        return
+      }
+    }
+
+    const model = this.getModelFromContext(ctx)
+    if (model) {
+      await this.onGeneration(ctx)
+      return
+    }
+
+    ctx.transient.analytics.sessionState = RequestState.Error
+    await sendMessage(ctx, '### unsupported command').catch(async (e) => {
+      await this.onError(ctx, e, MAX_TRIES, '### unsupported command')
+    })
+    ctx.transient.analytics.actualResponseTime = now()
+  }
+
+  private async onHandleVideoDownload (ctx: OnMessageContext | OnCallBackQueryData, generationId: string): Promise<void> {
+    try {
+      const generation = await getGeneration(generationId)
+      const videoUrl = generation.assets?.video
+      if (videoUrl && ctx.chatId) {
+        const videoGeneration = this.generationList.find(gen => gen.generationId === generationId)
+        if (videoGeneration) {
+          await ctx.api.deleteMessages(ctx.chatId, [ctx.msgId, videoGeneration.msgId])
+          await ctx.replyWithVideo(videoUrl, { caption: videoGeneration.prompt })
+          this.generationList = this.generationList.filter(gen => gen.generationId !== generationId)
+          await deleteGeneration(videoGeneration.generationId)
+        }
+      }
+      await ctx.answerCallbackQuery('Video sent successfully')
+    } catch (error) {
+      console.error('Error in video download:', error)
+      await ctx.answerCallbackQuery('Error processing video. Please try again.')
+    }
+  }
+
+  async onGeneration (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+    try {
+      const chatId = ctx.chat?.id
+      if (chatId) {
+        const prompt = ctx.match
+        const response = await lumaGeneration(chatId, prompt as string)
+        const msgId = (
+          await ctx.reply(`You are #${response.generationInProgress} in line for the video generation. The wait time is about ${response.queueTime} seconds.`, {
+            message_thread_id:
+              ctx.message?.message_thread_id ??
+              ctx.message?.reply_to_message?.message_thread_id
+          })
+        ).message_id
+        this.generationList.push({
+          generationId: response.generationId,
+          msgId,
+          prompt: prompt as string
+        })
+      }
+    } catch (e: any) {
+      await this.onError(ctx, e)
+    }
+  }
+}
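Note: nothing in this diff sends the button that triggers onHandleVideoDownload; presumably a completion notifier outside this patch does. As a minimal sketch (assuming grammY's InlineKeyboard; the helper name is hypothetical), a button matching the callback-data format parsed above (`luma_dl:<generationId>`, split on ':') could be built like this:

import { InlineKeyboard } from 'grammy'
import { Callbacks } from './utils/types'

// Hypothetical helper: "Download video" button whose callback data
// onHandleVideoDownload splits on ':' to recover the generation id.
export const lumaDownloadKeyboard = (generationId: string): InlineKeyboard =>
  new InlineKeyboard().text('Download video', `${Callbacks.LumaDownloadVideo}:${generationId}`)

// Usage: await ctx.reply('Your video is ready', { reply_markup: lumaDownloadKeyboard(id) })

Telegram limits callback data to 64 bytes, which the 'luma_dl:' prefix plus a UUID fits comfortably.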
diff --git a/src/modules/llms/utils/llmsData.ts b/src/modules/llms/utils/llmsData.ts
index 80df59f..355473e 100644
--- a/src/modules/llms/utils/llmsData.ts
+++ b/src/modules/llms/utils/llmsData.ts
@@ -196,6 +196,21 @@ export const llmData: LLMData = {
         '1024x1792': 0.12,
         '1792x1024': 0.12
       }
+    },
+    lumaai: {
+      provider: 'luma',
+      name: 'Luma AI',
+      fullName: 'Luma AI',
+      botName: 'LumaBot',
+      version: 'lumaai-1-0-2',
+      commands: ['luma', 'l'],
+      prefix: ['l. '],
+      apiSpec: 'https://docs.lumalabs.ai/docs/welcome',
+      price: {
+        '1024x1024': 0.8,
+        '1024x1792': 0.12,
+        '1792x1024': 0.12
+      }
     }
   },
   providerParameters: {
@@ -223,6 +238,12 @@
         system: config.openAi.chatGpt.chatCompletionContext,
         max_tokens: +config.openAi.chatGpt.maxTokens
       }
+    },
+    luma: {
+      defaultParameters: {
+        system: config.openAi.chatGpt.chatCompletionContext,
+        max_tokens: +config.openAi.chatGpt.maxTokens
+      }
+    }
   }
 }
diff --git a/src/modules/llms/utils/types.ts b/src/modules/llms/utils/types.ts
index dabfc35..f88a0fb 100644
--- a/src/modules/llms/utils/types.ts
+++ b/src/modules/llms/utils/types.ts
@@ -1,4 +1,4 @@
-export type Provider = 'openai' | 'claude' | 'vertex' | 'xai' // | 'palm' | 'jurassic'
+export type Provider = 'openai' | 'claude' | 'vertex' | 'xai' | 'luma'
 export type ChargeType = 'TOKEN' | 'CHAR'
 
 export type DalleImageSize = '1024x1024' | '1024x1792' | '1792x1024'
@@ -36,7 +36,6 @@ export interface ChatModel extends BaseModel {
 }
 
 export interface ImageModel extends BaseModel {
-  apiSpec: string
   price: ImagePrice
 }
@@ -53,3 +52,7 @@ export interface ParseDate {
   year: number
   monthName: string
 }
+
+export enum Callbacks {
+  LumaDownloadVideo = 'luma_dl'
+}
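Note: config.luma.isEnabled is hardcoded to true in this patch, so a missing LUMAAI_API_KEY would only surface at runtime once the LumaAI client is first used. A minimal startup guard, as a sketch (not part of the patch), could fail fast instead:

import config from './config'

// Sketch only: fail at boot rather than on the first /luma request.
if (config.luma.isEnabled && config.luma.apiKey === undefined) {
  throw new Error('LUMAAI_API_KEY must be set when Luma is enabled')
}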