Luma video #366

Merged: 6 commits, Nov 4, 2024

15 changes: 15 additions & 0 deletions package-lock.json


1 change: 1 addition & 0 deletions package.json
@@ -101,6 +101,7 @@
"litllm": "^3.0.0",
"lokijs": "^1.5.12",
"lru-cache": "^10.0.0",
"lumaai": "^1.0.2",
"marked": "^14.1.2",
"moment": "^2.29.4",
"moment-timezone": "^0.5.43",
11 changes: 11 additions & 0 deletions src/bot.ts
@@ -59,6 +59,7 @@ import { createInitialSessionData, addQuotePrefix, markdownToTelegramHtml } from
import { LlamaAgent } from './modules/subagents'
import { llmModelManager } from './modules/llms/utils/llmModelsManager'
import { HmnyBot } from './modules/hmny'
import { LumaBot } from './modules/llms/lumaBot'
import { XaiBot } from './modules/llms/xaiBot'

Events.EventEmitter.defaultMaxListeners = 30
@@ -207,6 +208,7 @@ const schedule = new BotSchedule(bot)
const llamaAgent = new LlamaAgent(payments, 'llamaService')
const openAiBot = new OpenAIBot(payments, [llamaAgent])
const dalleBot = new DalleBot(payments)
const lumaBot = new LumaBot(payments)
const claudeBot = new ClaudeBot(payments)
const vertexBot = new VertexBot(payments, [llamaAgent])
const xaiBot = new XaiBot(payments)
@@ -339,6 +341,7 @@ const PayableBots: Record<string, PayableBotConfig> = {
dalleBot: { bot: dalleBot },
claudeBot: { bot: claudeBot },
vertexBot: { bot: vertexBot },
lumaBot: { bot: lumaBot },
aixBot: { bot: xaiBot },
openAiBot: {
enabled: (ctx: OnMessageContext) => ctx.session.dalle.isEnabled,
@@ -446,6 +449,12 @@ const onCallback = async (ctx: OnCallBackQueryData): Promise<void> => {
// return
// }

if (lumaBot.isSupportedEvent(ctx)) {
await lumaBot.onEvent(ctx, (e) => {
logger.error(e)
})
}

if (dalleBot.isSupportedEvent(ctx)) {
await dalleBot.onEvent(ctx, (e) => {
logger.error(e)
Expand All @@ -457,6 +466,8 @@ const onCallback = async (ctx: OnCallBackQueryData): Promise<void> => {
}
}

bot.on('callback_query:data', onCallback)

bot.command(['start', 'help', 'menu'], async (ctx) => {
const accountId = payments.getAccountId(ctx as OnMessageContext)
const account = payments.getUserAccount(accountId)
6 changes: 5 additions & 1 deletion src/config.ts
@@ -32,8 +32,12 @@ export default {
sessionTimeout: process.env.SESSION_TIMEOUT
? parseInt(process.env.SESSION_TIMEOUT)
: 48, // in hours
luma: {
isEnabled: true,
apiKey: process.env.LUMAAI_API_KEY
},
llms: {
apiEndpoint: process.env.LLMS_ENDPOINT, // // process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000',
apiEndpoint: process.env.LLMS_ENDPOINT ?? 'http://127.0.0.1:5000', // fall back to the local dev endpoint when unset
apiKey: process.env.LLMS_API_KEY ?? '',
wordLimit: 50,
model: 'gpt-4o',
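
Note: `config.luma.apiKey` comes straight from the environment and may be `undefined` at runtime. A minimal startup guard, hypothetical and not part of this diff, could fail fast before the LumaAI client is constructed:

```ts
// Hypothetical guard, assuming the config shape added above.
import config from './config'

if (config.luma.isEnabled && !config.luma.apiKey) {
  throw new Error('LUMAAI_API_KEY must be set when Luma is enabled')
}
```
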
77 changes: 77 additions & 0 deletions src/modules/llms/api/luma.ts
@@ -0,0 +1,77 @@
import axios, { AxiosError } from 'axios'
import { pino } from 'pino'
import { LumaAI } from 'lumaai'
import config from '../../../config'
import { headers } from './helper'

const logger = pino({
name: 'luma - LumaBot',
transport: {
target: 'pino-pretty',
options: { colorize: true }
}
})

const lumaClient = new LumaAI({ authToken: config.luma.apiKey })

const API_ENDPOINT = config.llms.apiEndpoint

export interface LumaGenerationResponse {
  generationId: string
  generationInProgress: string // queue position reported by the backend
  queueTime: string // estimated wait time in seconds
}

export const lumaGeneration = async (
chatId: number,
prompt: string,
loop = true
): Promise<LumaGenerationResponse> => {
logger.info(`Handling luma generation for this prompt: "${prompt}"`)
const data = {
chat_id: chatId,
prompt,
loop
}
const url = `${API_ENDPOINT}/luma/generations`
const response = await axios.post(url, data, headers)
const respJson = response.data
return {
generationId: respJson.generation_id,
generationInProgress: respJson.in_progress,
queueTime: respJson.queue_time
}
}

export const getGeneration = async (generationId: string): Promise<LumaAI.Generations.Generation> => {
const generation = await lumaClient.generations.get(generationId)
return generation
}

export const deleteGeneration = async (generationId: string): Promise<boolean> => {
try {
logger.info(`Deleting luma generation ${generationId}`)
const url = `${API_ENDPOINT}/luma/generations/${generationId}`
const response = await axios.delete(url, headers)
if (response.status === 204) {
logger.info(`Successfully deleted luma generation ${generationId}`)
return true
}
logger.warn(`Unexpected response status ${response.status} when deleting generation ${generationId}`)
return false
} catch (e) {
if (e instanceof AxiosError) {
const status = e.response?.status
if (status === 404) {
logger.warn(`Generation ${generationId} not found`)
} else if (status === 403) {
logger.error(`Unauthorized to delete generation ${generationId}`)
} else {
logger.error(`Error deleting generation ${generationId}: ${e.message}`)
}
} else {
logger.error(`Unexpected error deleting generation ${generationId}: ${e}`)
}
return false
}
}
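
For reviewers, a sketch of how these helpers compose: start a generation through the backend, then poll `getGeneration` until the video asset appears. The interval and retry cap below are assumptions, not values from this PR:

```ts
// Hypothetical polling loop built on the helpers above.
import { lumaGeneration, getGeneration } from './luma'

async function waitForVideo (chatId: number, prompt: string): Promise<string | undefined> {
  const { generationId, queueTime } = await lumaGeneration(chatId, prompt)
  console.log(`Queued generation ${generationId}, estimated wait ${queueTime}s`)
  for (let attempt = 0; attempt < 60; attempt++) { // assumed cap: ~5 minutes
    const generation = await getGeneration(generationId)
    if (generation.assets?.video) return generation.assets.video // video URL
    await new Promise(resolve => setTimeout(resolve, 5000)) // assumed 5 s interval
  }
  return undefined // timed out
}
```
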
167 changes: 167 additions & 0 deletions src/modules/llms/lumaBot.ts
@@ -0,0 +1,167 @@
import { type BotPayments } from '../payment'
import {
type OnMessageContext,
type OnCallBackQueryData,
type ChatConversation,
RequestState
} from '../types'
import {
MAX_TRIES,
PRICE_ADJUSTMENT,
sendMessage
} from './utils/helpers'
import * as Sentry from '@sentry/node'
import { LlmsBase } from './llmsBase'
import config from '../../config'
import { now } from '../../utils/perf'
import { type ModelVersion } from './utils/llmModelsManager'
import { Callbacks } from './utils/types'
import { type LlmCompletion } from './api/llmApi'
import { deleteGeneration, getGeneration, lumaGeneration } from './api/luma'

interface VideoGeneration {
msgId: number
generationId: string
prompt: string
}

export class LumaBot extends LlmsBase {
private generationList: VideoGeneration[]
protected supportedCommands: string[]
protected supportedPrefixes: string[]

constructor (payments: BotPayments) {
super(payments, 'LumaBot', 'luma')
this.generationList = []

if (!config.luma.isEnabled) {
this.logger.warn('Luma AI is disabled in config')
}
}

public getEstimatedPrice (ctx: any): number {
try {
// $0.0032 per frame or about $0.4 for 5s 24fps video at 1280×720p
// price in cents
return PRICE_ADJUSTMENT ? 40 * PRICE_ADJUSTMENT : 40 * 2
} catch (e) {
Sentry.captureException(e)
this.logger.error(`getEstimatedPrice error ${e}`)
throw e
}
}

public isSupportedEvent (
ctx: OnMessageContext | OnCallBackQueryData
): boolean {
const hasCommand = ctx.hasCommand(this.supportedCommands)
const chatPrefix = this.hasPrefix(ctx.message?.text ?? '')
if (chatPrefix !== '') {
return true
}
return hasCommand || this.isSupportedCallbackQuery(ctx)
}

public isSupportedCallbackQuery (
ctx: OnMessageContext | OnCallBackQueryData
): boolean {
if (!ctx.callbackQuery?.data) {
return false
}
return ctx.callbackQuery?.data.startsWith(Callbacks.LumaDownloadVideo)
}

async chatStreamCompletion (
conversation: ChatConversation[],
model: ModelVersion,
ctx: OnMessageContext | OnCallBackQueryData,
msgId: number,
limitTokens: boolean
): Promise<LlmCompletion> {
throw new Error('chatStreamCompletion is not implemented for LumaBot')
}

async chatCompletion (
conversation: ChatConversation[],
model: ModelVersion
): Promise<LlmCompletion> {
throw new Error('chatCompletion is not implemented for LumaBot')
}

public async onEvent (
ctx: OnMessageContext | OnCallBackQueryData,
refundCallback: (reason?: string) => void
): Promise<void> {
ctx.transient.analytics.module = this.module

const isSupportedEvent = this.isSupportedEvent(ctx)
if (!isSupportedEvent && ctx.chat?.type !== 'private') {
this.logger.warn(`### unsupported command ${ctx.message?.text}`)
return
}

if (this.isSupportedCallbackQuery(ctx)) {
if (ctx.callbackQuery?.data) {
const data = ctx.callbackQuery.data.split(':')
await this.onHandleVideoDownload(ctx, data[1])
return
}
}

const model = this.getModelFromContext(ctx)
if (model) {
await this.onGeneration(ctx)
return
}

ctx.transient.analytics.sessionState = RequestState.Error
await sendMessage(ctx, '### unsupported command').catch(async (e) => {
await this.onError(ctx, e, MAX_TRIES, '### unsupported command')
})
ctx.transient.analytics.actualResponseTime = now()
}

private async onHandleVideoDownload (ctx: OnMessageContext | OnCallBackQueryData, generationId: string): Promise<void> {
try {
const generation = await getGeneration(generationId)
const videoUrl = generation.assets?.video
if (videoUrl && ctx.chatId) {
const videoGeneration = this.generationList.find(gen => gen.generationId === generationId)
if (videoGeneration) {
await ctx.api.deleteMessages(ctx.chatId, [ctx.msgId, videoGeneration.msgId])
await ctx.replyWithVideo(videoUrl, { caption: videoGeneration.prompt })
this.generationList = this.generationList.filter(gen => gen.generationId !== generationId)
await deleteGeneration(videoGeneration.generationId)
}
}
await ctx.answerCallbackQuery('Video sent successfully')
} catch (error) {
this.logger.error(`Error in video download: ${error}`)
await ctx.answerCallbackQuery('Error processing video. Please try again.')
}
}

async onGeneration (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
try {
const chatId = ctx.chat?.id
if (chatId) {
const prompt = ctx.match
const response = await lumaGeneration(chatId, prompt as string)
const msgId = (
await ctx.reply(`You are #${response.generationInProgress} in line for the video generation. The wait time is about ${response.queueTime} seconds.`, {
message_thread_id:
ctx.message?.message_thread_id ??
ctx.message?.reply_to_message?.message_thread_id
})
).message_id
this.generationList.push({
generationId: response.gnerationId,
msgId,
prompt: prompt as string
})
}
} catch (e: any) {
await this.onError(ctx, e)
}
}
}
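
`onHandleVideoDownload` assumes the progress message carries an inline button whose callback data is `luma_dl:<generationId>`, which is what the `data.split(':')` above parses. A hypothetical sketch of building that button with grammY (the sending side is not shown in this diff):

```ts
// Hypothetical: construct the download button onHandleVideoDownload expects.
import { InlineKeyboard } from 'grammy'
import { Callbacks } from './utils/types'

const downloadKeyboard = (generationId: string): InlineKeyboard =>
  new InlineKeyboard().text('Download video', `${Callbacks.LumaDownloadVideo}:${generationId}`)

// Usage: await ctx.reply('Your video is ready', { reply_markup: downloadKeyboard(id) })
```
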
21 changes: 21 additions & 0 deletions src/modules/llms/utils/llmsData.ts
@@ -196,6 +196,21 @@ export const llmData: LLMData = {
'1024x1792': 0.12,
'1792x1024': 0.12
}
},
lumaai: {
provider: 'luma',
name: 'Luma AI',
fullName: 'Luma AI',
botName: 'LumaBot',
version: 'lumaai-1-0-2',
commands: ['luma', 'l'],
prefix: ['l. '],
apiSpec: 'https://docs.lumalabs.ai/docs/welcome',
price: {
'1024x1024': 0.8,
'1024x1792': 0.12,
'1792x1024': 0.12
}
}
},
providerParameters: {
@@ -223,6 +238,12 @@
system: config.openAi.chatGpt.chatCompletionContext,
max_tokens: +config.openAi.chatGpt.maxTokens
}
},
luma: {
defaultParameters: {
system: config.openAi.chatGpt.chatCompletionContext,
max_tokens: +config.openAi.chatGpt.maxTokens
}
}
}
}
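
The `commands` and `prefix` entries are what route `/luma <prompt>` and `l. <prompt>` to `LumaBot`. Roughly how the prefix match behaves (illustrative only; the real logic lives in `llmModelsManager`, which this diff does not touch):

```ts
// Illustrative sketch of prefix matching; returns '' when nothing matches,
// mirroring the hasPrefix convention used in LumaBot.isSupportedEvent.
const lumaPrefixes = ['l. ']

const matchLumaPrefix = (text: string): string =>
  lumaPrefixes.find(p => text.toLowerCase().startsWith(p)) ?? ''

matchLumaPrefix('l. a timelapse of clouds over the bay') // => 'l. '
matchLumaPrefix('draw a cat') // => ''
```
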
7 changes: 5 additions & 2 deletions src/modules/llms/utils/types.ts
@@ -1,4 +1,4 @@
export type Provider = 'openai' | 'claude' | 'vertex' | 'xai' // | 'palm' | 'jurassic'
export type Provider = 'openai' | 'claude' | 'vertex' | 'xai' | 'luma'
export type ChargeType = 'TOKEN' | 'CHAR'

export type DalleImageSize = '1024x1024' | '1024x1792' | '1792x1024'
@@ -36,7 +36,6 @@ export interface ChatModel extends BaseModel {
}

export interface ImageModel extends BaseModel {
apiSpec: string
price: ImagePrice
}

@@ -53,3 +52,7 @@ export interface ParseDate {
year: number
monthName: string
}

export enum Callbacks {
LumaDownloadVideo = 'luma_dl'
}