From 3d1406d377b944127db3c86a85f295376929ddda Mon Sep 17 00:00:00 2001 From: fegloff Date: Wed, 9 Oct 2024 14:11:26 -0500 Subject: [PATCH 01/14] add getTotalCreditPaymentUsers method --- src/database/stats.service.ts | 13 +++++++++++++ src/modules/schedule/index.ts | 1 + 2 files changed, 14 insertions(+) diff --git a/src/database/stats.service.ts b/src/database/stats.service.ts index 9c25136c..d5ff962a 100644 --- a/src/database/stats.service.ts +++ b/src/database/stats.service.ts @@ -246,4 +246,17 @@ export class StatsService { return await queryBuilder.execute() } + + public async getTotalCreditPaymentUsers (): Promise { + const queryBuilder = logRepository.createQueryBuilder('logs') + .select('distinct(logs.tgUserId)') + .where('logs.amountOne > 0') + .getCount() + + return await queryBuilder + } } + +// SELECT DISTINCT "tgUserId") as count_groups_paying_in_one +// FROM logs +// WHERE diff --git a/src/modules/schedule/index.ts b/src/modules/schedule/index.ts index 83eaced4..a9abc8c9 100644 --- a/src/modules/schedule/index.ts +++ b/src/modules/schedule/index.ts @@ -198,6 +198,7 @@ export class BotSchedule { statsService.getUniqueUsersCount() ]) + console.log('FCO:::::: JAJAJAJAJAJAJ', await statsService.getTotalCreditPaymentUsers()) const report = `\nBot fees: *${botFeesReport}*` + `\nWeekly bot fees collected: *${abbreviateNumber(botFeesWeekly)}*` + `\nDaily Active Users: *${abbreviateNumber(dau)}*` + From c8140a5d1464976967dd079661fa8a48127ebc45 Mon Sep 17 00:00:00 2001 From: fegloff Date: Wed, 9 Oct 2024 16:06:48 -0500 Subject: [PATCH 02/14] add getGroupsPayingInCreditsNotOne and fix no rows in generateReportEngagementByCommand --- src/database/stats.service.ts | 28 +++++++++++++++++++++++----- src/modules/schedule/index.ts | 16 +++++++++++----- 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/src/database/stats.service.ts b/src/database/stats.service.ts index d5ff962a..56a4f23f 100644 --- a/src/database/stats.service.ts +++ b/src/database/stats.service.ts @@ -247,13 +247,31 @@ export class StatsService { return await queryBuilder.execute() } - public async getTotalCreditPaymentUsers (): Promise { - const queryBuilder = logRepository.createQueryBuilder('logs') - .select('distinct(logs.tgUserId)') + public async getTotalOnePaymentUsers (): Promise { + const queryBuilder = await logRepository.createQueryBuilder('logs') + .select('COUNT(DISTINCT logs.tgUserId)', 'count') .where('logs.amountOne > 0') - .getCount() + .getRawOne() + + return queryBuilder ? parseInt(queryBuilder.count) : 0 + } + + public async getGroupsPayingInCreditsNotOne (): Promise { + const result = await logRepository.createQueryBuilder('l1') + .select('COUNT(DISTINCT l1.tgUserId)', 'count_groups_paying_in_credits_not_one') + .where('l1.amountCredits > 0') + .andWhere(qb => { + const subQuery = qb.subQuery() + .select('1') + .from('logs', 'l2') + .where('l2.groupId = l1.groupId') + .andWhere('l2.amountOne > 0') + .getQuery() + return 'NOT EXISTS (' + subQuery + ')' + }) + .getRawOne() - return await queryBuilder + return result ? parseInt(result.count_groups_paying_in_credits_not_one) : 0 } } diff --git a/src/modules/schedule/index.ts b/src/modules/schedule/index.ts index a9abc8c9..0f5fadf0 100644 --- a/src/modules/schedule/index.ts +++ b/src/modules/schedule/index.ts @@ -150,9 +150,10 @@ export class BotSchedule { public async generateReportEngagementByCommand (days: number): Promise { const dbRows = await statsService.getUserEngagementByCommand(days) - const cropIndex = dbRows.length >= 50 ? 
50 : dbRows.length - 1 - + if (dbRows.length === 0) { + return '' + } let otherCommandCount = 0 for (let i = cropIndex; i < dbRows.length; i++) { otherCommandCount += Number(dbRows[i].commandCount) @@ -182,7 +183,9 @@ export class BotSchedule { engagementByCommand, onetimeUsers, newUsers, - totalUsers + totalUsers, + totalOnePaidUsers, + totalCreditPaidUsers ] = await Promise.all([ this.getBotFeeReport(this.holderAddress), getBotFee(this.holderAddress, 7), @@ -195,15 +198,18 @@ export class BotSchedule { this.generateReportEngagementByCommand(7), statsService.getOnetimeUsers(), statsService.getNewUsers(7), - statsService.getUniqueUsersCount() + statsService.getUniqueUsersCount(), + statsService.getTotalOnePaymentUsers(), + statsService.getGroupsPayingInCreditsNotOne() ]) - console.log('FCO:::::: JAJAJAJAJAJAJ', await statsService.getTotalCreditPaymentUsers()) const report = `\nBot fees: *${botFeesReport}*` + `\nWeekly bot fees collected: *${abbreviateNumber(botFeesWeekly)}*` + `\nDaily Active Users: *${abbreviateNumber(dau)}*` + `\nTotal fees users pay in ONE: *${abbreviateNumber(totalOne)}*` + `\nTotal fees users pay in free credits: *${abbreviateNumber(totalCredits)}*` + + `\nTotal users pay in ONE: *${totalOnePaidUsers}*` + + `\nTotal users pay in credits: *${totalCreditPaidUsers}*` + `\nWeekly active users: *${abbreviateNumber(weeklyUsers)}*` + `\nWeekly new users: *${abbreviateNumber(newUsers)}*` + `\nWeekly user engagement (any commands): *${abbreviateNumber(totalMessages)}*` + From d1648f8a54955fefb325102a2929d5c77ef0aca2 Mon Sep 17 00:00:00 2001 From: fegloff Date: Thu, 10 Oct 2024 18:10:47 -0500 Subject: [PATCH 03/14] update paid user definition for allstats command --- src/database/stats.service.ts | 139 ++++++++++++++++++++++-------- src/modules/llms/utils/helpers.ts | 4 + src/modules/schedule/index.ts | 41 +++++---- 3 files changed, 130 insertions(+), 54 deletions(-) diff --git a/src/database/stats.service.ts b/src/database/stats.service.ts index 56a4f23f..307851dc 100644 --- a/src/database/stats.service.ts +++ b/src/database/stats.service.ts @@ -3,6 +3,7 @@ import { StatBotCommand } from './entities/StatBotCommand' import moment from 'moment-timezone' import { BotLog } from './entities/Log' import pino from 'pino' +import { isValidDate } from '../modules/llms/utils/helpers' const logger = pino({ name: 'StatsService', @@ -65,10 +66,24 @@ export class StatsService { } } - async getUniqueUsersCount (): Promise { + // added date for Amanda's monthly report => /allstats MM/DD/YYYY + async getUniqueUsersCount (date?: Date): Promise { + let whereClause = '' + const params: any[] = [] try { - const rows = await logRepository.query('select count(distinct("tgUserId")) from logs') - return rows.length ? +rows[0].count : 0 + if (date && isValidDate(date)) { + whereClause = 'WHERE (EXTRACT(YEAR FROM "createdAt") = $1) AND (EXTRACT(MONTH FROM "createdAt") = $2)' + const year = date.getFullYear().toString() + const month = (date.getMonth() + 1).toString() + params.push(year, month) + } + + const query = ` + select count(distinct("tgUserId")) from logs + ${whereClause} + ` + const result = await logRepository.query(query, params) + return result.length ? +result[0].count : 0 } catch (e) { logger.error(e) return 0 @@ -126,7 +141,8 @@ export class StatsService { } // Doesn't check last 7 days. 
- public async getOnetimeUsers (): Promise { + // added date for Amanda's monthly report => /allstats MM/DD/YYYY + public async getOnetimeUsers (date?: Date): Promise { try { const bufferDays = 7 const bufferDate = moment() @@ -134,14 +150,20 @@ export class StatsService { .set({ hour: 0, minute: 0, second: 0 }) .subtract(bufferDays, 'days') .unix() - const query = await logRepository + const query = logRepository .createQueryBuilder('logs') .select('count("tgUserId") AS row_count, "tgUserId", MAX("createdAt") AS max_created') .where(`"createdAt" < TO_TIMESTAMP(${bufferDate})`) - .groupBy('"tgUserId"') - .getRawMany() - const result = query.filter(row => row.row_count === '1') - return result.length + if (date && isValidDate(date)) { + const year = date.getFullYear().toString() + const month = (date.getMonth() + 1).toString() + query.andWhere(`EXTRACT(YEAR FROM "createdAt") = ${year}`) + query.andWhere(`EXTRACT(MONTH FROM "createdAt") = ${month}`) + } + query.groupBy('"tgUserId"') + const result = await query.getRawMany() + const filter = result.filter(row => row.row_count === '1') + return filter.length } catch (e) { logger.error(e) return 0 @@ -243,38 +265,83 @@ export class StatsService { public async getAllChatId (): Promise { const queryBuilder = logRepository.createQueryBuilder('logs') .select('distinct("groupId")') - return await queryBuilder.execute() } - public async getTotalOnePaymentUsers (): Promise { - const queryBuilder = await logRepository.createQueryBuilder('logs') - .select('COUNT(DISTINCT logs.tgUserId)', 'count') - .where('logs.amountOne > 0') - .getRawOne() + // to do port to queryBuilder + // added date for Amanda's monthly report => /allstats MM/DD/YYYY + public async getPaidUsers (date?: Date): Promise<{ users: number, freeCreditsBurned: number, amountCredits: number, amountOnes: number }> { + let whereClause = '' + const params: any[] = [] + + if (date && isValidDate(date)) { + whereClause = 'WHERE (EXTRACT(YEAR FROM "createdAt") = $1) AND (EXTRACT(MONTH FROM "createdAt") = $2)' + const year = date.getFullYear().toString() + const month = (date.getMonth() + 1).toString() + params.push(year, month) + } - return queryBuilder ? parseInt(queryBuilder.count) : 0 + const query = ` + SELECT + COUNT(*) as user_count, + COUNT(CASE WHEN total_credits > 100 THEN 1 END) as free_credits_burned, + SUM(GREATEST(total_credits - 100, 0)) as total_paid_credits, + SUM(total_ones) as total_ones_spent + FROM ( + SELECT + "tgUserId", + SUM("amountCredits") as total_credits, + SUM("amountOne") as total_ones + FROM logs + ${whereClause} + GROUP BY "tgUserId" + HAVING SUM("amountCredits") > 100 OR SUM("amountOne") > 0 + ) as subquery + ` + const result = await logRepository.query(query, params) + return { + users: result[0] ? parseInt(result[0].user_count) : 0, + freeCreditsBurned: (result[0] && !date) ? parseInt(result[0].free_credits_burned) : 0, + amountCredits: result[0] ? parseInt(result[0].total_paid_credits) : 0, + amountOnes: result[0] ? parseInt(result[0].total_ones_spent) : 0 + } } - public async getGroupsPayingInCreditsNotOne (): Promise { - const result = await logRepository.createQueryBuilder('l1') - .select('COUNT(DISTINCT l1.tgUserId)', 'count_groups_paying_in_credits_not_one') - .where('l1.amountCredits > 0') - .andWhere(qb => { - const subQuery = qb.subQuery() - .select('1') - .from('logs', 'l2') - .where('l2.groupId = l1.groupId') - .andWhere('l2.amountOne > 0') - .getQuery() - return 'NOT EXISTS (' + subQuery + ')' - }) - .getRawOne() - - return result ? 
parseInt(result.count_groups_paying_in_credits_not_one) : 0 + // to do port to queryBuilder + // added date for Amanda's monthly report => /allstats MM/DD/YYYY + public async getFreeCreditUsers (date?: Date): Promise<{ users: number, amountFreeCredits: number, amountFreeCreditsRemaining: number }> { + let whereClause = '' + const params: any[] = [] + + if (date && date instanceof Date && !isNaN(date.getTime())) { + whereClause = 'WHERE (EXTRACT(YEAR FROM "createdAt") = $1) AND (EXTRACT(MONTH FROM "createdAt") = $2)' + const year = date.getFullYear().toString() + const month = (date.getMonth() + 1).toString() + params.push(year, month) + } + + const query = ` + SELECT + COUNT(*) as users_with_free_credits, + SUM(total_credits) as total_free_credits_used, + 100 * COUNT(*) - SUM(total_credits) as remaining_free_credits + FROM ( + SELECT + "tgUserId", + SUM("amountCredits") as total_credits + FROM logs + ${whereClause} + GROUP BY "tgUserId" + HAVING SUM("amountCredits") > 0 AND SUM("amountCredits") <= 100 AND SUM("amountOne") = 0 + ) as subquery + ` + + const result = await logRepository.query(query, params) + + return { + users: result[0] ? parseInt(result[0].users_with_free_credits) : 0, + amountFreeCredits: result[0] ? parseInt(result[0].total_free_credits_used) : 0, + amountFreeCreditsRemaining: result[0] && !date ? parseInt(result[0].remaining_free_credits) : 0 + } } } - -// SELECT DISTINCT "tgUserId") as count_groups_paying_in_one -// FROM logs -// WHERE diff --git a/src/modules/llms/utils/helpers.ts b/src/modules/llms/utils/helpers.ts index 65c3d421..0e84b3d7 100644 --- a/src/modules/llms/utils/helpers.ts +++ b/src/modules/llms/utils/helpers.ts @@ -150,6 +150,10 @@ export const isValidUrl = (url: string): boolean => { return urlRegex.test(url) } +export function isValidDate (date: Date): boolean { + return date instanceof Date && !isNaN(date.getTime()) +} + // doesn't get all the special characters like ! 
export const hasUserPasswordRegex = (prompt: string): { password: string, user: string } => { const pattern = diff --git a/src/modules/schedule/index.ts b/src/modules/schedule/index.ts index 0f5fadf0..9ccfb154 100644 --- a/src/modules/schedule/index.ts +++ b/src/modules/schedule/index.ts @@ -10,6 +10,7 @@ import { statsService } from '../../database/services' import { abbreviateNumber, lessThan100, precise } from './utils' import { getOneRate } from './exchangeApi' import { getTradingVolume } from './subgraphAPI' +import { isValidDate } from '../llms/utils/helpers' enum SupportedCommands { BOT_STATS = 'botstats', @@ -170,13 +171,14 @@ export class BotSchedule { return '```\n' + rows.join('\n') + '\n```' } - public async generateFullReport (): Promise { + public async generateFullReport (date?: Date): Promise { + if (date && !isValidDate(date)) { + return 'Invalid date format' + } const [ botFeesReport, botFeesWeekly, dau, - totalOne, - totalCredits, weeklyUsers, totalMessages, totalSupportedMessages, @@ -184,38 +186,36 @@ export class BotSchedule { onetimeUsers, newUsers, totalUsers, - totalOnePaidUsers, - totalCreditPaidUsers + totalPaidUsers, + totalfreePaidUsers ] = await Promise.all([ this.getBotFeeReport(this.holderAddress), getBotFee(this.holderAddress, 7), statsService.getActiveUsers(0), - statsService.getTotalONE(), - statsService.getTotalFreeCredits(), statsService.getActiveUsers(7), statsService.getTotalMessages(7), statsService.getTotalMessages(7, true), this.generateReportEngagementByCommand(7), - statsService.getOnetimeUsers(), + statsService.getOnetimeUsers(date), statsService.getNewUsers(7), - statsService.getUniqueUsersCount(), - statsService.getTotalOnePaymentUsers(), - statsService.getGroupsPayingInCreditsNotOne() + statsService.getUniqueUsersCount(date), + statsService.getPaidUsers(date), + statsService.getFreeCreditUsers(date) ]) - const report = `\nBot fees: *${botFeesReport}*` + `\nWeekly bot fees collected: *${abbreviateNumber(botFeesWeekly)}*` + `\nDaily Active Users: *${abbreviateNumber(dau)}*` + - `\nTotal fees users pay in ONE: *${abbreviateNumber(totalOne)}*` + - `\nTotal fees users pay in free credits: *${abbreviateNumber(totalCredits)}*` + - `\nTotal users pay in ONE: *${totalOnePaidUsers}*` + - `\nTotal users pay in credits: *${totalCreditPaidUsers}*` + `\nWeekly active users: *${abbreviateNumber(weeklyUsers)}*` + `\nWeekly new users: *${abbreviateNumber(newUsers)}*` + `\nWeekly user engagement (any commands): *${abbreviateNumber(totalMessages)}*` + `\nWeekly user engagement (commands supported by bot): *${abbreviateNumber(totalSupportedMessages)}*` + - `\nTotal users: *${totalUsers}*` + + `\n\nTotal users: *${totalUsers}*` + `\nOne-time users: *${onetimeUsers}*` + + `\nTotal fees users pay in ONE: *${abbreviateNumber(totalPaidUsers.amountCredits + totalPaidUsers.amountOnes)}*` + + `\nTotal fees users pay in free credits: *${abbreviateNumber(totalfreePaidUsers.amountFreeCredits + (totalPaidUsers.freeCreditsBurned * 100))}*` + + `\nTotal free credits reamining: *${abbreviateNumber(totalfreePaidUsers.amountFreeCreditsRemaining)}*` + + `\nTotal users who paid in ONE: *${totalPaidUsers.users}*` + + `\nTotal users who paid in free credits: *${totalfreePaidUsers.users}*` + `\n\n${engagementByCommand}` return report } @@ -240,7 +240,12 @@ export class BotSchedule { } if (ctx.hasCommand(SupportedCommands.ALL_STATS)) { - const report = await this.generateFullReport() + let date + const input = ctx.match + if (input) { + date = new Date(input) + } + const report = 
await this.generateFullReport(date) await ctx.reply(report, { parse_mode: 'Markdown', message_thread_id: ctx.message?.message_thread_id From 1a4d43e3358b102f2fd9fd032a6ed56ac55bdde5 Mon Sep 17 00:00:00 2001 From: fegloff Date: Thu, 10 Oct 2024 22:43:07 -0500 Subject: [PATCH 04/14] update paidUsers and freeUsers queries for monthly report --- src/database/stats.service.ts | 133 +++++++++++++++++++++------------- src/modules/schedule/index.ts | 5 +- 2 files changed, 84 insertions(+), 54 deletions(-) diff --git a/src/database/stats.service.ts b/src/database/stats.service.ts index 307851dc..896fa7f1 100644 --- a/src/database/stats.service.ts +++ b/src/database/stats.service.ts @@ -111,7 +111,8 @@ export class StatsService { } } - public async getNewUsers (daysPeriod = 0): Promise { + // added date for Amanda's monthly report => /allstats MM/DD/YYYY + public async getNewUsers (daysPeriod = 0, date?: Date): Promise { try { const currentTime = moment() const dateStart = moment() @@ -122,14 +123,17 @@ export class StatsService { const dateEnd = currentTime.unix() const query = logRepository .createQueryBuilder('logs') - .select('distinct("FirstInsertTime")') + .select('distinct("first_insert_time")') .from(subQuery => subQuery - .select('"tgUserId", MIN("createdAt") AS "FirstInsertTime"') + .select('"tgUserId", MIN("createdAt") AS "first_insert_time"') .from(BotLog, 'logs') .groupBy('"tgUserId"'), 'first_inserts') - if (daysPeriod > 0) { - query.where(`"FirstInsertTime" BETWEEN TO_TIMESTAMP(${dateStart}) and TO_TIMESTAMP(${dateEnd})`) + if (date && isValidDate(date)) { + query.where('EXTRACT(YEAR FROM first_insert_time) = :year', { year: date.getFullYear() }) + .andWhere('EXTRACT(MONTH FROM first_insert_time) = :month', { month: date.getMonth() + 1 }) + } else if (daysPeriod > 0) { + query.where('first_insert_time BETWEEN TO_TIMESTAMP(:start) AND TO_TIMESTAMP(:end)', { start: dateStart, end: dateEnd }) } const result = await query.execute() // console.log(dateStart, dateEnd, result.length) @@ -269,75 +273,100 @@ export class StatsService { } // to do port to queryBuilder + // Paid User = A user who has spent more than 100 credits (first 100 are free). // added date for Amanda's monthly report => /allstats MM/DD/YYYY public async getPaidUsers (date?: Date): Promise<{ users: number, freeCreditsBurned: number, amountCredits: number, amountOnes: number }> { - let whereClause = '' - const params: any[] = [] - - if (date && isValidDate(date)) { - whereClause = 'WHERE (EXTRACT(YEAR FROM "createdAt") = $1) AND (EXTRACT(MONTH FROM "createdAt") = $2)' - const year = date.getFullYear().toString() - const month = (date.getMonth() + 1).toString() - params.push(year, month) + let yearCondition = '' + let monthCondition = '' + let params: any[] = [] + + if (date && date instanceof Date) { + const year = date.getFullYear() + const month = date.getMonth() + 1 // JavaScript months are 0-indexed + yearCondition = 'AND EXTRACT(YEAR FROM l."createdAt") = $1' + monthCondition = 'AND EXTRACT(MONTH FROM l."createdAt") = $2' + params = [year, month] } const query = ` + WITH paid_users AS ( + SELECT "tgUserId" + FROM logs + ${date ? 
'WHERE "createdAt" <= $3' : ''} + GROUP BY "tgUserId" + HAVING SUM("amountCredits") > 100 + ), + spending AS ( + SELECT + l."tgUserId", + SUM(l."amountCredits") as credits + FROM logs l + JOIN paid_users pu ON l."tgUserId" = pu."tgUserId" + WHERE 1=1 ${yearCondition} ${monthCondition} + GROUP BY l."tgUserId" + HAVING SUM(l."amountCredits") > 0 + ) SELECT - COUNT(*) as user_count, - COUNT(CASE WHEN total_credits > 100 THEN 1 END) as free_credits_burned, - SUM(GREATEST(total_credits - 100, 0)) as total_paid_credits, - SUM(total_ones) as total_ones_spent - FROM ( - SELECT - "tgUserId", - SUM("amountCredits") as total_credits, - SUM("amountOne") as total_ones - FROM logs - ${whereClause} - GROUP BY "tgUserId" - HAVING SUM("amountCredits") > 100 OR SUM("amountOne") > 0 - ) as subquery + COUNT(*) as user_count, + SUM(GREATEST(credits - 100, 0)) as credits_burned, + SUM(credits) as total_credits + FROM spending ` + if (date) { + params.push(date.toISOString()) + } const result = await logRepository.query(query, params) return { - users: result[0] ? parseInt(result[0].user_count) : 0, - freeCreditsBurned: (result[0] && !date) ? parseInt(result[0].free_credits_burned) : 0, - amountCredits: result[0] ? parseInt(result[0].total_paid_credits) : 0, - amountOnes: result[0] ? parseInt(result[0].total_ones_spent) : 0 + users: parseInt(result[0]?.user_count) || 0, + freeCreditsBurned: !date ? parseFloat(result[0]?.total_credits) - parseFloat(result[0]?.credits_burned) || 0 : 0, + amountCredits: parseFloat(result[0]?.total_credits) || 0, + amountOnes: 0 // TODO: implement this } } // to do port to queryBuilder // added date for Amanda's monthly report => /allstats MM/DD/YYYY public async getFreeCreditUsers (date?: Date): Promise<{ users: number, amountFreeCredits: number, amountFreeCreditsRemaining: number }> { - let whereClause = '' - const params: any[] = [] - - if (date && date instanceof Date && !isNaN(date.getTime())) { - whereClause = 'WHERE (EXTRACT(YEAR FROM "createdAt") = $1) AND (EXTRACT(MONTH FROM "createdAt") = $2)' - const year = date.getFullYear().toString() - const month = (date.getMonth() + 1).toString() - params.push(year, month) + let yearCondition = '' + let monthCondition = '' + let params: any[] = [] + + if (date && date instanceof Date) { + const year = date.getFullYear() + const month = date.getMonth() + 1 // JavaScript months are 0-indexed + yearCondition = 'AND EXTRACT(YEAR FROM l."createdAt") = $1' + monthCondition = 'AND EXTRACT(MONTH FROM l."createdAt") = $2' + // get the date with the last day of the month + const lastDayOfMonth = new Date(year, month, 0).getDate() + const endOfMonthDate = new Date(year, month - 1, lastDayOfMonth, 23, 59, 59, 999) + params = [year, month, endOfMonthDate.toISOString()] } const query = ` - SELECT - COUNT(*) as users_with_free_credits, - SUM(total_credits) as total_free_credits_used, - 100 * COUNT(*) - SUM(total_credits) as remaining_free_credits - FROM ( - SELECT - "tgUserId", - SUM("amountCredits") as total_credits + WITH paid_users AS ( + SELECT "tgUserId" FROM logs - ${whereClause} + ${date ? 
'WHERE "createdAt" <= $3' : ''} GROUP BY "tgUserId" - HAVING SUM("amountCredits") > 0 AND SUM("amountCredits") <= 100 AND SUM("amountOne") = 0 - ) as subquery + HAVING SUM("amountCredits") > 0 AND SUM("amountCredits") <= 100 + ), + spending AS ( + SELECT + l."tgUserId", + SUM(l."amountCredits") as credits + FROM logs l + JOIN paid_users pu ON l."tgUserId" = pu."tgUserId" + WHERE 1=1 ${yearCondition} ${monthCondition} + GROUP BY l."tgUserId" + HAVING SUM(l."amountCredits") > 0 + ) + SELECT + COUNT(*) as users_with_free_credits, + SUM(credits) as total_free_credits_used, + 100 * COUNT(*) - SUM(credits) as remaining_free_credits + FROM spending ` - const result = await logRepository.query(query, params) - return { users: result[0] ? parseInt(result[0].users_with_free_credits) : 0, amountFreeCredits: result[0] ? parseInt(result[0].total_free_credits_used) : 0, diff --git a/src/modules/schedule/index.ts b/src/modules/schedule/index.ts index 9ccfb154..872123f1 100644 --- a/src/modules/schedule/index.ts +++ b/src/modules/schedule/index.ts @@ -197,11 +197,12 @@ export class BotSchedule { statsService.getTotalMessages(7, true), this.generateReportEngagementByCommand(7), statsService.getOnetimeUsers(date), - statsService.getNewUsers(7), + statsService.getNewUsers(7, date), statsService.getUniqueUsersCount(date), statsService.getPaidUsers(date), statsService.getFreeCreditUsers(date) ]) + const report = `\nBot fees: *${botFeesReport}*` + `\nWeekly bot fees collected: *${abbreviateNumber(botFeesWeekly)}*` + `\nDaily Active Users: *${abbreviateNumber(dau)}*` + @@ -212,7 +213,7 @@ export class BotSchedule { `\n\nTotal users: *${totalUsers}*` + `\nOne-time users: *${onetimeUsers}*` + `\nTotal fees users pay in ONE: *${abbreviateNumber(totalPaidUsers.amountCredits + totalPaidUsers.amountOnes)}*` + - `\nTotal fees users pay in free credits: *${abbreviateNumber(totalfreePaidUsers.amountFreeCredits + (totalPaidUsers.freeCreditsBurned * 100))}*` + + `\nTotal fees users pay in free credits: *${abbreviateNumber(totalfreePaidUsers.amountFreeCredits + (totalPaidUsers.freeCreditsBurned))}*` + `\nTotal free credits reamining: *${abbreviateNumber(totalfreePaidUsers.amountFreeCreditsRemaining)}*` + `\nTotal users who paid in ONE: *${totalPaidUsers.users}*` + `\nTotal users who paid in free credits: *${totalfreePaidUsers.users}*` + From 67e1c1b0c8367da5c992bc687f72d180bb306ade Mon Sep 17 00:00:00 2001 From: fegloff Date: Fri, 11 Oct 2024 13:20:23 -0500 Subject: [PATCH 05/14] add date logic to getTotalOne method and totalOne stats to allstats command + update allstats command --- src/config.ts | 2 +- src/database/stats.service.ts | 68 +++++++++++++++++++++++-------- src/modules/llms/utils/helpers.ts | 12 ++++++ src/modules/llms/utils/types.ts | 6 +++ src/modules/schedule/index.ts | 32 +++++++++++---- 5 files changed, 92 insertions(+), 28 deletions(-) diff --git a/src/config.ts b/src/config.ts index 13905df0..afec235a 100644 --- a/src/config.ts +++ b/src/config.ts @@ -154,7 +154,7 @@ export default { maxChatsWhitelist: (process.env.CREDITS_CHATS_WHITELIST ?? '') .split(',') .map((item) => item.toString().toLowerCase()), - creditsAmount: '100' + creditsAmount: '100' // todo: handle multiple credits numbers (+ dateSince), considering future number change. }, betteruptime: { botHeartBitId: process.env.BOT_HEARTBIT_ID ?? '' }, telegramPayments: { token: process.env.TELEGRAM_PAYMENTS_TOKEN ?? 
'' }, diff --git a/src/database/stats.service.ts b/src/database/stats.service.ts index 896fa7f1..554aaca3 100644 --- a/src/database/stats.service.ts +++ b/src/database/stats.service.ts @@ -4,6 +4,7 @@ import moment from 'moment-timezone' import { BotLog } from './entities/Log' import pino from 'pino' import { isValidDate } from '../modules/llms/utils/helpers' +import config from '../config' const logger = pino({ name: 'StatsService', @@ -13,6 +14,8 @@ const logger = pino({ } }) +const FREE_CREDITS = config.credits.creditsAmount + const statBotCommandRepository = AppDataSource.getRepository(StatBotCommand) const logRepository = AppDataSource.getRepository(BotLog) @@ -46,9 +49,19 @@ export class StatsService { return await logRepository.save(paymentLog) } - async getTotalONE (): Promise { + async getTotalONE (date?: Date): Promise { + let whereClause = '' + const params: any[] = [] try { - const rows = await logRepository.query('select sum("amountOne") from logs') + if (date && isValidDate(date)) { + whereClause = 'WHERE (EXTRACT(YEAR FROM "createdAt") = $1) AND (EXTRACT(MONTH FROM "createdAt") = $2)' + const year = date.getFullYear().toString() + const month = (date.getMonth() + 1).toString() + params.push(year, month) + } + const query = `select sum("amountOne") from logs ${whereClause}` + + const rows = await logRepository.query(query, params) return rows.length ? +rows[0].sum : 0 } catch (e) { logger.error(e) @@ -112,15 +125,14 @@ export class StatsService { } // added date for Amanda's monthly report => /allstats MM/DD/YYYY - public async getNewUsers (daysPeriod = 0, date?: Date): Promise { + public async getNewUsers (daysPeriod: number, date?: Date): Promise<{ periodUsers: number, monthUsers: number }> { try { const currentTime = moment() const dateStart = moment() .tz('America/Los_Angeles') .set({ hour: 0, minute: 0, second: 0 }) - .subtract(daysPeriod, 'days') - .unix() - const dateEnd = currentTime.unix() + dateStart.subtract(daysPeriod, 'days') + const dateEnd = currentTime const query = logRepository .createQueryBuilder('logs') .select('distinct("first_insert_time")') @@ -130,17 +142,37 @@ export class StatsService { .from(BotLog, 'logs') .groupBy('"tgUserId"'), 'first_inserts') if (date && isValidDate(date)) { - query.where('EXTRACT(YEAR FROM first_insert_time) = :year', { year: date.getFullYear() }) - .andWhere('EXTRACT(MONTH FROM first_insert_time) = :month', { month: date.getMonth() + 1 }) - } else if (daysPeriod > 0) { - query.where('first_insert_time BETWEEN TO_TIMESTAMP(:start) AND TO_TIMESTAMP(:end)', { start: dateStart, end: dateEnd }) + const year = date.getFullYear() + const month = date.getMonth() + 1 + const monthStart = moment(date).startOf('month') + const monthEnd = moment(date).endOf('month') + query + .where('(first_insert_time BETWEEN :periodStart AND :periodEnd)', + { periodStart: dateStart.toDate(), periodEnd: dateEnd.toDate() }) + .orWhere('(EXTRACT(YEAR FROM first_insert_time) = :year AND EXTRACT(MONTH FROM first_insert_time) = :month)', + { year, month }) + + const result = await query.execute() + + const periodUsers = result.filter((r: { first_insert_time: moment.MomentInput }) => + moment(r.first_insert_time).isBetween(dateStart, dateEnd) + ).length + + const monthUsers = result.filter((r: { first_insert_time: moment.MomentInput }) => + moment(r.first_insert_time).isBetween(monthStart, monthEnd) + ).length + + return { periodUsers, monthUsers } + } else { + query.where('first_insert_time BETWEEN :start AND :end', + { start: dateStart.toDate(), end: 
dateEnd.toDate() }) + + const result = await query.execute() + return { periodUsers: result.length, monthUsers: 0 } } - const result = await query.execute() - // console.log(dateStart, dateEnd, result.length) - return result.length } catch (e) { logger.error(e) - return 0 + return { periodUsers: 0, monthUsers: 0 } } } @@ -294,7 +326,7 @@ export class StatsService { FROM logs ${date ? 'WHERE "createdAt" <= $3' : ''} GROUP BY "tgUserId" - HAVING SUM("amountCredits") > 100 + HAVING SUM("amountCredits") > ${FREE_CREDITS} ), spending AS ( SELECT @@ -308,7 +340,7 @@ export class StatsService { ) SELECT COUNT(*) as user_count, - SUM(GREATEST(credits - 100, 0)) as credits_burned, + SUM(GREATEST(credits - ${FREE_CREDITS}, 0)) as credits_burned, SUM(credits) as total_credits FROM spending ` @@ -348,7 +380,7 @@ export class StatsService { FROM logs ${date ? 'WHERE "createdAt" <= $3' : ''} GROUP BY "tgUserId" - HAVING SUM("amountCredits") > 0 AND SUM("amountCredits") <= 100 + HAVING SUM("amountCredits") > 0 AND SUM("amountCredits") <= ${FREE_CREDITS} ), spending AS ( SELECT @@ -363,7 +395,7 @@ export class StatsService { SELECT COUNT(*) as users_with_free_credits, SUM(credits) as total_free_credits_used, - 100 * COUNT(*) - SUM(credits) as remaining_free_credits + ${FREE_CREDITS} * COUNT(*) - SUM(credits) as remaining_free_credits FROM spending ` const result = await logRepository.query(query, params) diff --git a/src/modules/llms/utils/helpers.ts b/src/modules/llms/utils/helpers.ts index 0e84b3d7..b701c805 100644 --- a/src/modules/llms/utils/helpers.ts +++ b/src/modules/llms/utils/helpers.ts @@ -12,6 +12,7 @@ import { getChatModelPrice } from '../api/llmApi' import { childrenWords, sexWords } from '../../sd-images/words-blacklist' import config from '../../../config' +import { type ParseDate } from './types' export const PRICE_ADJUSTMENT = config.openAi.chatGpt.priceAdjustment @@ -154,6 +155,17 @@ export function isValidDate (date: Date): boolean { return date instanceof Date && !isNaN(date.getTime()) } +export function parseDate (date: Date): ParseDate | null { + if (!isValidDate(date)) { + return null + } + const month = date.getMonth() + 1 // getMonth() returns 0-11 + const year = date.getFullYear() + const monthName = new Intl.DateTimeFormat('en-US', { month: 'long' }).format(date) + + return { month, year, monthName } +} + // doesn't get all the special characters like ! 
export const hasUserPasswordRegex = (prompt: string): { password: string, user: string } => { const pattern = diff --git a/src/modules/llms/utils/types.ts b/src/modules/llms/utils/types.ts index 58561d56..0f2492f0 100644 --- a/src/modules/llms/utils/types.ts +++ b/src/modules/llms/utils/types.ts @@ -34,3 +34,9 @@ export interface LLMData { chatModels: Record imageModels: Record } + +export interface ParseDate { + month: number + year: number + monthName: string +} diff --git a/src/modules/schedule/index.ts b/src/modules/schedule/index.ts index 872123f1..02327e7f 100644 --- a/src/modules/schedule/index.ts +++ b/src/modules/schedule/index.ts @@ -10,7 +10,7 @@ import { statsService } from '../../database/services' import { abbreviateNumber, lessThan100, precise } from './utils' import { getOneRate } from './exchangeApi' import { getTradingVolume } from './subgraphAPI' -import { isValidDate } from '../llms/utils/helpers' +import { isValidDate, parseDate } from '../llms/utils/helpers' enum SupportedCommands { BOT_STATS = 'botstats', @@ -138,7 +138,7 @@ export class BotSchedule { 'Bot weekly earns, active users, new users: ' + `*${abbreviateNumber(+weeklyRevenue)}* ONE` + `, ${lessThan100(abbreviateNumber(weeklyUsers))}` + - `, ${lessThan100(abbreviateNumber(newUsers))}` + `, ${lessThan100(abbreviateNumber(newUsers.periodUsers))}` const oneBotMetrics = 'Bot total earns, users, messages: ' + @@ -172,8 +172,10 @@ export class BotSchedule { } public async generateFullReport (date?: Date): Promise { + let reportLabel = '' + if (date && !isValidDate(date)) { - return 'Invalid date format' + return 'Invalid date format. Please use MM/DD/YYYY' } const [ botFeesReport, @@ -187,7 +189,8 @@ export class BotSchedule { newUsers, totalUsers, totalPaidUsers, - totalfreePaidUsers + totalfreePaidUsers, + totalOne ] = await Promise.all([ this.getBotFeeReport(this.holderAddress), getBotFee(this.holderAddress, 7), @@ -197,22 +200,33 @@ export class BotSchedule { statsService.getTotalMessages(7, true), this.generateReportEngagementByCommand(7), statsService.getOnetimeUsers(date), - statsService.getNewUsers(7, date), + statsService.getNewUsers(90, date), statsService.getUniqueUsersCount(date), statsService.getPaidUsers(date), - statsService.getFreeCreditUsers(date) + statsService.getFreeCreditUsers(date), + statsService.getTotalONE(date) ]) + if (date) { + const dateParsed = parseDate(date) + reportLabel = `*${dateParsed?.monthName} - ${dateParsed?.year} stats*` + } else { + reportLabel = '*All-time stats*' + } + const report = `\nBot fees: *${botFeesReport}*` + `\nWeekly bot fees collected: *${abbreviateNumber(botFeesWeekly)}*` + `\nDaily Active Users: *${abbreviateNumber(dau)}*` + `\nWeekly active users: *${abbreviateNumber(weeklyUsers)}*` + - `\nWeekly new users: *${abbreviateNumber(newUsers)}*` + + `\nWeekly new users: *${abbreviateNumber(newUsers.periodUsers)}*` + `\nWeekly user engagement (any commands): *${abbreviateNumber(totalMessages)}*` + `\nWeekly user engagement (commands supported by bot): *${abbreviateNumber(totalSupportedMessages)}*` + - `\n\nTotal users: *${totalUsers}*` + + `\n\n${reportLabel}` + + `\nTotal users: *${totalUsers}*` + `\nOne-time users: *${onetimeUsers}*` + - `\nTotal fees users pay in ONE: *${abbreviateNumber(totalPaidUsers.amountCredits + totalPaidUsers.amountOnes)}*` + + `${date ? 
'\nTotal new users: *' + newUsers.monthUsers + '*' : ''}` + + `\nTotal fees users pay in ONE: *${abbreviateNumber(totalOne)}*` + + `\nTotal fees users pay in credits: *${abbreviateNumber(totalPaidUsers.amountCredits + totalPaidUsers.amountOnes)}*` + `\nTotal fees users pay in free credits: *${abbreviateNumber(totalfreePaidUsers.amountFreeCredits + (totalPaidUsers.freeCreditsBurned))}*` + `\nTotal free credits reamining: *${abbreviateNumber(totalfreePaidUsers.amountFreeCreditsRemaining)}*` + `\nTotal users who paid in ONE: *${totalPaidUsers.users}*` + From 3bc5b32627fd6ed9b17e47f7456aa0402708d591 Mon Sep 17 00:00:00 2001 From: fegloff Date: Mon, 14 Oct 2024 10:53:22 -0500 Subject: [PATCH 06/14] update user stats --- src/database/stats.service.ts | 77 +++++++++++++++++++---------------- src/modules/payment/index.ts | 1 + src/modules/schedule/index.ts | 2 +- 3 files changed, 45 insertions(+), 35 deletions(-) diff --git a/src/database/stats.service.ts b/src/database/stats.service.ts index 554aaca3..877f0355 100644 --- a/src/database/stats.service.ts +++ b/src/database/stats.service.ts @@ -5,6 +5,7 @@ import { BotLog } from './entities/Log' import pino from 'pino' import { isValidDate } from '../modules/llms/utils/helpers' import config from '../config' +import { Brackets, type SelectQueryBuilder } from 'typeorm' const logger = pino({ name: 'StatsService', @@ -126,49 +127,55 @@ export class StatsService { // added date for Amanda's monthly report => /allstats MM/DD/YYYY public async getNewUsers (daysPeriod: number, date?: Date): Promise<{ periodUsers: number, monthUsers: number }> { + // const validCommands: string[] = ['/start', '/help', '/voice-memo', '/openai', '/ask'] try { const currentTime = moment() const dateStart = moment() .tz('America/Los_Angeles') - .set({ hour: 0, minute: 0, second: 0 }) - dateStart.subtract(daysPeriod, 'days') + .subtract(daysPeriod, 'days') + .startOf('day') const dateEnd = currentTime - const query = logRepository + + const baseSubQuery = (subQuery: SelectQueryBuilder): SelectQueryBuilder => + subQuery + .select('"tgUserId", MIN("createdAt") AS "first_insert_time"') + .from(BotLog, 'logs') + .where(new Brackets(qb => { + qb.where('logs."isPrivate" = true') + .orWhere(new Brackets(qb2 => { + qb2.where('logs."groupId" < 0') + .andWhere('logs."isSupportedCommand" = true') + })) + })) + .groupBy('"tgUserId"') + + const baseQuery = logRepository .createQueryBuilder('logs') - .select('distinct("first_insert_time")') - .from(subQuery => - subQuery - .select('"tgUserId", MIN("createdAt") AS "first_insert_time"') - .from(BotLog, 'logs') - .groupBy('"tgUserId"'), 'first_inserts') + .select('COUNT(DISTINCT "first_insert_time")', 'count') + .from(baseSubQuery, 'first_inserts') + if (date && isValidDate(date)) { - const year = date.getFullYear() - const month = date.getMonth() + 1 const monthStart = moment(date).startOf('month') const monthEnd = moment(date).endOf('month') - query - .where('(first_insert_time BETWEEN :periodStart AND :periodEnd)', - { periodStart: dateStart.toDate(), periodEnd: dateEnd.toDate() }) - .orWhere('(EXTRACT(YEAR FROM first_insert_time) = :year AND EXTRACT(MONTH FROM first_insert_time) = :month)', - { year, month }) - - const result = await query.execute() - - const periodUsers = result.filter((r: { first_insert_time: moment.MomentInput }) => - moment(r.first_insert_time).isBetween(dateStart, dateEnd) - ).length - - const monthUsers = result.filter((r: { first_insert_time: moment.MomentInput }) => - moment(r.first_insert_time).isBetween(monthStart, 
monthEnd) - ).length - + const [periodUsers, monthUsers] = await Promise.all([ + baseQuery + .where('first_insert_time BETWEEN :periodStart AND :periodEnd', + { periodStart: dateStart.toDate(), periodEnd: dateEnd.toDate() }) + .getRawOne() + .then(result => parseInt(result.count, 10)), + baseQuery + .where('first_insert_time BETWEEN :monthStart AND :monthEnd', + { monthStart: monthStart.toDate(), monthEnd: monthEnd.toDate() }) + .getRawOne() + .then(result => parseInt(result.count, 10)) + ]) return { periodUsers, monthUsers } } else { - query.where('first_insert_time BETWEEN :start AND :end', - { start: dateStart.toDate(), end: dateEnd.toDate() }) - - const result = await query.execute() - return { periodUsers: result.length, monthUsers: 0 } + const { count } = await baseQuery + .where('first_insert_time BETWEEN :start AND :end', + { start: dateStart.toDate(), end: dateEnd.toDate() }) + .getRawOne() + return { periodUsers: parseInt(count, 10), monthUsers: 0 } } } catch (e) { logger.error(e) @@ -199,6 +206,7 @@ export class StatsService { query.groupBy('"tgUserId"') const result = await query.getRawMany() const filter = result.filter(row => row.row_count === '1') + // console.log('FCO::::: ', filter) return filter.length } catch (e) { logger.error(e) @@ -234,7 +242,7 @@ export class StatsService { } } - public async getUserEngagementByCommand (daysPeriod = 7): Promise { + public async getUserEngagementByCommand (daysPeriod = 7, date?: Date): Promise { try { const currentTime = moment() const dateStart = moment() @@ -245,7 +253,8 @@ export class StatsService { const dateEnd = currentTime.unix() const rows = await logRepository.createQueryBuilder('logs') .select('logs.command, count(logs.command) as "commandCount", SUM(logs.amountOne) as "oneAmount"') - .where(`logs.createdAt BETWEEN TO_TIMESTAMP(${dateStart}) and TO_TIMESTAMP(${dateEnd})`) + .where('logs.isSupportedCommand=true') + .andWhere(`logs.createdAt BETWEEN TO_TIMESTAMP(${dateStart}) and TO_TIMESTAMP(${dateEnd})`) .groupBy('logs.command') .orderBy('"commandCount"', 'DESC').execute() return rows diff --git a/src/modules/payment/index.ts b/src/modules/payment/index.ts index 26cb62c9..422fa12a 100644 --- a/src/modules/payment/index.ts +++ b/src/modules/payment/index.ts @@ -363,6 +363,7 @@ export class BotPayments { try { const accountId = this.getAccountId(ctx) let [command = ''] = text.split(' ') + if (!command) { if (audio ?? 
voice) { command = '/voice-memo' diff --git a/src/modules/schedule/index.ts b/src/modules/schedule/index.ts index 02327e7f..1de7cdc0 100644 --- a/src/modules/schedule/index.ts +++ b/src/modules/schedule/index.ts @@ -200,7 +200,7 @@ export class BotSchedule { statsService.getTotalMessages(7, true), this.generateReportEngagementByCommand(7), statsService.getOnetimeUsers(date), - statsService.getNewUsers(90, date), + statsService.getNewUsers(7, date), statsService.getUniqueUsersCount(date), statsService.getPaidUsers(date), statsService.getFreeCreditUsers(date), From 9a173759e76131fe3dbf5bd2103b3ea8a8a62350 Mon Sep 17 00:00:00 2001 From: fegloff Date: Tue, 15 Oct 2024 11:30:44 -0500 Subject: [PATCH 07/14] update user stats query to include isSupportedCommand --- src/database/stats.service.ts | 31 ++++++++++++++++++---------- src/modules/1country/api/relayApi.ts | 1 + src/modules/schedule/index.ts | 10 ++++----- 3 files changed, 26 insertions(+), 16 deletions(-) diff --git a/src/database/stats.service.ts b/src/database/stats.service.ts index 877f0355..008b2223 100644 --- a/src/database/stats.service.ts +++ b/src/database/stats.service.ts @@ -86,14 +86,14 @@ export class StatsService { const params: any[] = [] try { if (date && isValidDate(date)) { - whereClause = 'WHERE (EXTRACT(YEAR FROM "createdAt") = $1) AND (EXTRACT(MONTH FROM "createdAt") = $2)' + whereClause = ' AND (EXTRACT(YEAR FROM "createdAt") = $1) AND (EXTRACT(MONTH FROM "createdAt") = $2)' const year = date.getFullYear().toString() const month = (date.getMonth() + 1).toString() params.push(year, month) } const query = ` - select count(distinct("tgUserId")) from logs + select count(distinct("tgUserId")) from logs WHERE "isSupportedCommand" = true ${whereClause} ` const result = await logRepository.query(query, params) @@ -117,6 +117,7 @@ export class StatsService { .createQueryBuilder('logs') .select('count(distinct(logs."tgUserId"))') .where(`logs.createdAt BETWEEN TO_TIMESTAMP(${dateStart}) and TO_TIMESTAMP(${dateEnd})`) + .andWhere('logs."isSupportedCommand" = true') .execute() return rows.length ? 
+rows[0].count : 0 } catch (e) { @@ -197,6 +198,7 @@ export class StatsService { .createQueryBuilder('logs') .select('count("tgUserId") AS row_count, "tgUserId", MAX("createdAt") AS max_created') .where(`"createdAt" < TO_TIMESTAMP(${bufferDate})`) + .andWhere('logs.isSupportedCommand=true') if (date && isValidDate(date)) { const year = date.getFullYear().toString() const month = (date.getMonth() + 1).toString() @@ -206,7 +208,7 @@ export class StatsService { query.groupBy('"tgUserId"') const result = await query.getRawMany() const filter = result.filter(row => row.row_count === '1') - // console.log('FCO::::: ', filter) + return filter.length } catch (e) { logger.error(e) @@ -243,14 +245,21 @@ export class StatsService { } public async getUserEngagementByCommand (daysPeriod = 7, date?: Date): Promise { + let dateEnd, dateStart + try { - const currentTime = moment() - const dateStart = moment() - .tz('America/Los_Angeles') - .set({ hour: 0, minute: 0, second: 0 }) - .subtract(daysPeriod, 'days') - .unix() - const dateEnd = currentTime.unix() + if (date) { + dateStart = moment(date).startOf('month').unix() + dateEnd = moment(date).endOf('month').unix() + } else { + const currentTime = moment() + dateStart = moment() + .tz('America/Los_Angeles') + .set({ hour: 0, minute: 0, second: 0 }) + .subtract(daysPeriod, 'days') + .unix() + dateEnd = currentTime.unix() + } const rows = await logRepository.createQueryBuilder('logs') .select('logs.command, count(logs.command) as "commandCount", SUM(logs.amountOne) as "oneAmount"') .where('logs.isSupportedCommand=true') @@ -404,7 +413,7 @@ export class StatsService { SELECT COUNT(*) as users_with_free_credits, SUM(credits) as total_free_credits_used, - ${FREE_CREDITS} * COUNT(*) - SUM(credits) as remaining_free_credits + (${FREE_CREDITS} * COUNT(*)) - SUM(credits) as remaining_free_credits FROM spending ` const result = await logRepository.query(query, params) diff --git a/src/modules/1country/api/relayApi.ts b/src/modules/1country/api/relayApi.ts index 1fee4349..b6248239 100644 --- a/src/modules/1country/api/relayApi.ts +++ b/src/modules/1country/api/relayApi.ts @@ -86,6 +86,7 @@ export const relayApi = (): { async?: boolean }) => { const { data: { success, sld, mcJobId, nakedJobId, error } } = await base.post('/cert', { domain, address, async }) + console.log('CERT RESULT:::::', { success, sld, mcJobId, nakedJobId, error }) return { success, sld, diff --git a/src/modules/schedule/index.ts b/src/modules/schedule/index.ts index 1de7cdc0..2781eb4a 100644 --- a/src/modules/schedule/index.ts +++ b/src/modules/schedule/index.ts @@ -149,8 +149,8 @@ export class BotSchedule { return `${networkUsage}\n${assetsUpdate}\n${oneBotWeeklyMetrics}\n${oneBotMetrics}` } - public async generateReportEngagementByCommand (days: number): Promise { - const dbRows = await statsService.getUserEngagementByCommand(days) + public async generateReportEngagementByCommand (days: number, date?: Date): Promise { + const dbRows = await statsService.getUserEngagementByCommand(days, date) const cropIndex = dbRows.length >= 50 ? 
50 : dbRows.length - 1 if (dbRows.length === 0) { return '' @@ -198,7 +198,7 @@ export class BotSchedule { statsService.getActiveUsers(7), statsService.getTotalMessages(7), statsService.getTotalMessages(7, true), - this.generateReportEngagementByCommand(7), + this.generateReportEngagementByCommand(7, date), statsService.getOnetimeUsers(date), statsService.getNewUsers(7, date), statsService.getUniqueUsersCount(date), @@ -228,8 +228,8 @@ export class BotSchedule { `\nTotal fees users pay in ONE: *${abbreviateNumber(totalOne)}*` + `\nTotal fees users pay in credits: *${abbreviateNumber(totalPaidUsers.amountCredits + totalPaidUsers.amountOnes)}*` + `\nTotal fees users pay in free credits: *${abbreviateNumber(totalfreePaidUsers.amountFreeCredits + (totalPaidUsers.freeCreditsBurned))}*` + - `\nTotal free credits reamining: *${abbreviateNumber(totalfreePaidUsers.amountFreeCreditsRemaining)}*` + - `\nTotal users who paid in ONE: *${totalPaidUsers.users}*` + + `${!date ? '\nTotal free credits reamining: *' + abbreviateNumber(totalfreePaidUsers.amountFreeCreditsRemaining) + '*' : ''}` + + `\nTotal users who paid in credits: *${totalPaidUsers.users}*` + `\nTotal users who paid in free credits: *${totalfreePaidUsers.users}*` + `\n\n${engagementByCommand}` return report From d0e676bcee5e036b913e479751612b930c4b3895 Mon Sep 17 00:00:00 2001 From: fegloff Date: Mon, 21 Oct 2024 17:33:36 -0500 Subject: [PATCH 08/14] update error handler text + add provider's model parameter logic + add additional is supported model check --- src/bot.ts | 5 +- src/config.ts | 4 +- src/constants.ts | 2 +- src/modules/errorhandler.ts | 2 +- src/modules/llms/api/athropic.ts | 36 ++++--- src/modules/llms/api/openai.ts | 27 ++++-- src/modules/llms/api/vertex.ts | 16 +++- src/modules/llms/claudeBot.ts | 14 ++- src/modules/llms/llmsBase.ts | 28 ++++-- src/modules/llms/openaiBot.ts | 36 ++----- src/modules/llms/utils/llmModelsManager.ts | 17 +++- src/modules/llms/utils/llmsData.ts | 106 +++++++++++++-------- src/modules/llms/utils/types.ts | 15 ++- src/modules/llms/vertexBot.ts | 13 ++- 14 files changed, 201 insertions(+), 120 deletions(-) diff --git a/src/bot.ts b/src/bot.ts index e15c4a8a..7b3c7a40 100644 --- a/src/bot.ts +++ b/src/bot.ts @@ -185,10 +185,7 @@ bot.use(async (ctx: BotContext, next: NextFunction): Promise => { bot.use( session({ - initial: () => { - logger.info('Creating new session') - return createInitialSessionData() - }, + initial: createInitialSessionData, storage: enhanceStorage({ storage: new MemorySessionStorage>(), millisecondsToLive: config.sessionTimeout * 60 * 60 * 1000 // 48 hours diff --git a/src/config.ts b/src/config.ts index afec235a..1052dca8 100644 --- a/src/config.ts +++ b/src/config.ts @@ -36,7 +36,7 @@ export default { apiEndpoint: process.env.LLMS_ENDPOINT, // // process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000', apiKey: process.env.LLMS_API_KEY ?? '', wordLimit: 50, - model: 'chat-bison', + model: 'gpt-4o', minimumBalance: 0, isEnabled: Boolean(parseInt(process.env.LLMS_ENABLED ?? '1')), pdfUrl: process.env.PDF_URL ?? '', @@ -49,7 +49,7 @@ export default { telegramFileUrl: 'https://api.telegram.org/file/bot', completions: { temperature: - (parseInt(process.env.OPENAI_TEMPERATURE ?? '')) ?? + (parseFloat(process.env.OPENAI_TEMPERATURE ?? '')) ?? 
0.8 }, defaultPrompt: diff --git a/src/constants.ts b/src/constants.ts index 712b08f4..a357962c 100644 --- a/src/constants.ts +++ b/src/constants.ts @@ -287,7 +287,7 @@ export const PROMPTS = { } export const VOICE_MEMO_FORWARDING = { - enabled: 'Voice note forwarding is now active. The next voice note you send will be forwarded automatically. This setting will deactivate after forwarding one voice note.', + enabled: 'Voice note forwarding is now active. The next voice note you send will be processed automatically. This setting will deactivate after processing the voice note.', restricted: 'Sorry, voice note forwarding can only be enabled by admin users. If you need this feature, please contact an admin for assistance.' } diff --git a/src/modules/errorhandler.ts b/src/modules/errorhandler.ts index 31bf8a6b..13df149d 100644 --- a/src/modules/errorhandler.ts +++ b/src/modules/errorhandler.ts @@ -16,7 +16,7 @@ class ErrorHandler { private writeLog (ctx: OnMessageContext | OnCallBackQueryData, errorMessage: string, logger: Logger): void { const user = ctx.from.username ? ctx.from.username : '' const msg = ctx.message?.text - logger.error(`Error msg:: ${errorMessage} | from user ${user} | msg::${msg}`) + logger.error(`Error msg::: ${errorMessage} | from user ${user} | msg::: ${msg}`) } async onError ( diff --git a/src/modules/llms/api/athropic.ts b/src/modules/llms/api/athropic.ts index 35c3400a..ac0b4ae0 100644 --- a/src/modules/llms/api/athropic.ts +++ b/src/modules/llms/api/athropic.ts @@ -9,6 +9,7 @@ import { type LlmCompletion } from './llmApi' import { sleep } from '../../sd-images/utils' import { headers, headersStream } from './helper' import { LlmModelsEnum } from '../utils/llmModelsManager' +import { type ModelParameters } from '../utils/types' const logger = pino({ name: 'anthropic - llmsBot', @@ -22,16 +23,20 @@ const API_ENDPOINT = config.llms.apiEndpoint // 'http://127.0.0.1:5000' // confi export const anthropicCompletion = async ( conversation: ChatConversation[], - model = LlmModelsEnum.CLAUDE_3_OPUS + model = LlmModelsEnum.CLAUDE_3_OPUS, + parameters?: ModelParameters ): Promise => { logger.info(`Handling ${model} completion`) + parameters = parameters ?? { + system: config.openAi.chatGpt.chatCompletionContext, + max_tokens: +config.openAi.chatGpt.maxTokens + } const data = { model, stream: false, - system: config.openAi.chatGpt.chatCompletionContext, - max_tokens: +config.openAi.chatGpt.maxTokens, messages: conversation.filter(c => c.model === model) - .map(m => { return { content: m.content, role: m.role } }) + .map(m => { return { content: m.content, role: m.role } }), + ...parameters } const url = `${API_ENDPOINT}/anthropic/completions` const response = await axios.post(url, data, headers) @@ -62,14 +67,19 @@ export const anthropicStreamCompletion = async ( model = LlmModelsEnum.CLAUDE_3_OPUS, ctx: OnMessageContext | OnCallBackQueryData, msgId: number, - limitTokens = true + limitTokens = true, + parameters?: ModelParameters ): Promise => { logger.info(`Handling ${model} stream completion`) + parameters = parameters ?? { + system: config.openAi.chatGpt.chatCompletionContext, + max_tokens: +config.openAi.chatGpt.maxTokens + } const data = { model, stream: true, - system: config.openAi.chatGpt.chatCompletionContext, - max_tokens: limitTokens ? +config.openAi.chatGpt.maxTokens : undefined, + system: parameters.system, + max_tokens: limitTokens ? 
parameters.max_tokens : undefined, messages: conversation.filter(c => c.model === model && c.role !== 'system') // .map(m => { return { content: m.content, role: m.role } }) } let wordCount = 0 @@ -158,16 +168,20 @@ export const anthropicStreamCompletion = async ( export const toolsChatCompletion = async ( conversation: ChatConversation[], - model = LlmModelsEnum.CLAUDE_3_OPUS + model = LlmModelsEnum.CLAUDE_3_OPUS, + parameters?: ModelParameters ): Promise => { logger.info(`Handling ${model} completion`) + parameters = parameters ?? { + system: config.openAi.chatGpt.chatCompletionContext, + max_tokens: +config.openAi.chatGpt.maxTokens + } const input = { model, stream: false, - system: config.openAi.chatGpt.chatCompletionContext, - max_tokens: +config.openAi.chatGpt.maxTokens, messages: conversation.filter(c => c.model === model && c.role !== 'system') - .map(m => { return { content: m.content, role: m.role } }) + .map(m => { return { content: m.content, role: m.role } }), + ...parameters } const url = `${API_ENDPOINT}/anthropic/completions/tools` const response = await axios.post(url, input, headers) diff --git a/src/modules/llms/api/openai.ts b/src/modules/llms/api/openai.ts index 085271b2..c1b3ebe4 100644 --- a/src/modules/llms/api/openai.ts +++ b/src/modules/llms/api/openai.ts @@ -12,7 +12,8 @@ import { pino } from 'pino' import { type ImageModel, type ChatModel, - type DalleImageSize + type DalleImageSize, + type ModelParameters } from '../utils/types' import type fs from 'fs' import { type ChatCompletionMessageParam } from 'openai/resources/chat/completions' @@ -81,14 +82,19 @@ export async function alterGeneratedImg ( export async function chatCompletion ( conversation: ChatConversation[], model = config.openAi.chatGpt.model, - limitTokens = true + limitTokens = true, + parameters?: ModelParameters ): Promise { const messages = conversation.filter(c => c.model === model).map(m => { return { content: m.content, role: m.role } }) + parameters = parameters ?? { + max_completion_tokens: config.openAi.chatGpt.maxTokens, + temperature: config.openAi.dalle.completions.temperature + } const response = await openai.chat.completions.create({ model, - max_completion_tokens: limitTokens ? config.openAi.chatGpt.maxTokens : undefined, - temperature: model === LlmModelsEnum.O1 ? 1 : config.openAi.dalle.completions.temperature, - messages: messages as ChatCompletionMessageParam[] + messages: messages as ChatCompletionMessageParam[], + max_completion_tokens: limitTokens ? parameters.max_completion_tokens : undefined, + temperature: parameters.temperature }) const chatModel = getChatModel(model) if (response.usage?.prompt_tokens === undefined) { @@ -121,17 +127,22 @@ export const streamChatCompletion = async ( ctx: OnMessageContext | OnCallBackQueryData, model = LlmModelsEnum.GPT_4, msgId: number, - limitTokens = true + limitTokens = true, + parameters?: ModelParameters ): Promise => { let completion = '' let wordCountMinimum = 2 const messages = conversation.filter(c => c.model === model).map(m => { return { content: m.content, role: m.role } }) + parameters = parameters ?? { + max_completion_tokens: config.openAi.chatGpt.maxTokens, + temperature: config.openAi.dalle.completions.temperature || 0.8 + } const stream = await openai.chat.completions.create({ model, messages: messages as ChatCompletionMessageParam[], // OpenAI.Chat.Completions.CreateChatCompletionRequestMessage[], stream: true, - max_completion_tokens: limitTokens ? 
config.openAi.chatGpt.maxTokens : undefined, // max_tokens: - temperature: config.openAi.dalle.completions.temperature || 0.8 + max_completion_tokens: limitTokens ? parameters.max_completion_tokens : undefined, + temperature: parameters.temperature }) let wordCount = 0 if (!ctx.chat?.id) { diff --git a/src/modules/llms/api/vertex.ts b/src/modules/llms/api/vertex.ts index 13757949..f1ca2aa3 100644 --- a/src/modules/llms/api/vertex.ts +++ b/src/modules/llms/api/vertex.ts @@ -7,6 +7,7 @@ import { GrammyError } from 'grammy' import { pino } from 'pino' import { headers, headersStream } from './helper' import { LlmModelsEnum } from '../utils/llmModelsManager' +import { type ModelParameters } from '../utils/types' const API_ENDPOINT = config.llms.apiEndpoint // config.llms.apiEndpoint // 'http://127.0.0.1:5000' // config.llms.apiEndpoint @@ -20,7 +21,8 @@ const logger = pino({ export const vertexCompletion = async ( conversation: ChatConversation[], - model = config.llms.model + model = config.llms.model, + parameters?: ModelParameters ): Promise => { const data = { model, @@ -64,13 +66,19 @@ export const vertexStreamCompletion = async ( model = LlmModelsEnum.CLAUDE_3_OPUS, ctx: OnMessageContext | OnCallBackQueryData, msgId: number, - limitTokens = true + limitTokens = true, + parameters?: ModelParameters ): Promise => { + parameters = parameters ?? { + system: config.openAi.chatGpt.chatCompletionContext, + max_tokens: +config.openAi.chatGpt.maxTokens + } + const data = { model, stream: true, // Set stream to true to receive the completion as a stream - system: config.openAi.chatGpt.chatCompletionContext, - max_tokens: limitTokens ? +config.openAi.chatGpt.maxTokens : undefined, + system: parameters.system, + max_tokens: limitTokens ? parameters.max_tokens : undefined, messages: conversation.filter(c => c.model === model && c.role !== 'system') // .map(m => { return { parts: { text: m.content }, role: m.role !== 'user' ? 
'model' : 'user' } }) } diff --git a/src/modules/llms/claudeBot.ts b/src/modules/llms/claudeBot.ts index bfae09ba..eae43eab 100644 --- a/src/modules/llms/claudeBot.ts +++ b/src/modules/llms/claudeBot.ts @@ -9,6 +9,7 @@ import { type LlmCompletion } from './api/llmApi' import { anthropicCompletion, anthropicStreamCompletion, toolsChatCompletion } from './api/athropic' import { LlmsBase } from './llmsBase' import { type ModelVersion } from './utils/llmModelsManager' +import { type ModelParameters } from './utils/types' export class ClaudeBot extends LlmsBase { private readonly claudeModels: ModelVersion[] @@ -41,25 +42,28 @@ export class ClaudeBot extends LlmsBase { model: ModelVersion, ctx: OnMessageContext | OnCallBackQueryData, msgId: number, - limitTokens: boolean): Promise { + limitTokens: boolean, + parameters?: ModelParameters): Promise { return await anthropicStreamCompletion( conversation, model, ctx, msgId, - true // telegram messages has a character limit + true, // telegram messages has a character limit + parameters ) } async chatCompletion ( conversation: ChatConversation[], model: ModelVersion, - hasTools: boolean + hasTools: boolean, + parameters?: ModelParameters ): Promise { if (hasTools) { - return await toolsChatCompletion(conversation, model) + return await toolsChatCompletion(conversation, model, parameters) } - return await anthropicCompletion(conversation, model) + return await anthropicCompletion(conversation, model, parameters) } public async onEvent (ctx: OnMessageContext | OnCallBackQueryData): Promise { diff --git a/src/modules/llms/llmsBase.ts b/src/modules/llms/llmsBase.ts index ccadfaba..817417aa 100644 --- a/src/modules/llms/llmsBase.ts +++ b/src/modules/llms/llmsBase.ts @@ -27,7 +27,7 @@ import { import { type LlmCompletion, deleteCollection } from './api/llmApi' import * as Sentry from '@sentry/node' import { now } from '../../utils/perf' -import { type ChatModel, type LLMModel } from './utils/types' +import { type ModelParameters, type ChatModel, type LLMModel } from './utils/types' import { ErrorHandler } from '../errorhandler' import { SubagentBase } from '../subagents/subagentBase' import { @@ -104,17 +104,23 @@ export abstract class LlmsBase implements PayableBot { public abstract getEstimatedPrice (ctx: any): number + private isSupportedModel (model: string): boolean { + return !!this.supportedModels.find(v => v.version === model) + } + protected abstract chatStreamCompletion ( conversation: ChatConversation[], model: ModelVersion, ctx: OnMessageContext | OnCallBackQueryData, msgId: number, - limitTokens: boolean): Promise + limitTokens: boolean, + parameters?: ModelParameters): Promise protected abstract chatCompletion ( conversation: ChatConversation[], model: ModelVersion, - usesTools: boolean + usesTools: boolean, + parameters?: ModelParameters ): Promise // protected abstract hasPrefix (prompt: string): string @@ -228,10 +234,10 @@ export abstract class LlmsBase implements PayableBot { try { const msg = session.requestQueue.shift() const prompt = msg?.content as string - const model = msg?.model + const model = this.isSupportedModel(msg?.model ?? ctx.session.currentModel) ? msg?.model ?? 
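The new isSupportedModel guard above lets onChatRequestHandler recover when the queued message or the session carries a model this bot does not serve, falling back to the bot's first supported version. A self-contained sketch of that selection logic (the model versions used here are illustrative):

interface SupportedModel { version: string }

function resolveModelVersion (
  requested: string | undefined,
  sessionModel: string,
  supportedModels: SupportedModel[]
): string {
  const candidate = requested ?? sessionModel
  const isSupported = supportedModels.some(m => m.version === candidate)
  // Fall back to the bot's first supported model instead of failing the request.
  return isSupported ? candidate : supportedModels[0].version
}

const supported = [{ version: 'claude-3-5-sonnet-20240620' }, { version: 'claude-3-opus-20240229' }]
console.log(resolveModelVersion(undefined, 'gpt-4o', supported)) // -> 'claude-3-5-sonnet-20240620'
console.log(resolveModelVersion('claude-3-opus-20240229', 'gpt-4o', supported)) // -> 'claude-3-opus-20240229'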
ctx.session.currentModel : this.supportedModels[0].version let agentCompletions: string[] = [] const { chatConversation } = session - const minBalance = await getMinBalance(ctx, msg?.model as string) + const minBalance = await getMinBalance(ctx, model) let enhancedPrompt = '' if (await this.hasBalance(ctx, minBalance)) { if (!prompt) { @@ -276,7 +282,7 @@ export abstract class LlmsBase implements PayableBot { chatConversation.push(chat) const payload = { conversation: chatConversation, - model: model ?? config.llms.model, + model, ctx } let result: { price: number, chat: ChatConversation[] } = { price: 0, chat: [] } @@ -336,11 +342,13 @@ export abstract class LlmsBase implements PayableBot { if (isTypingEnabled) { ctx.chatAction = 'typing' } + const parameters = this.modelManager.getModelParameters(model) const completion = await this.chatStreamCompletion(conversation, model, ctx, msgId, - true // telegram messages has a character limit + true, // telegram messages has a character limit + parameters ) if (isTypingEnabled) { ctx.chatAction = null @@ -363,7 +371,8 @@ export abstract class LlmsBase implements PayableBot { } } } else { - const response = await this.chatCompletion(conversation, model, usesTools) + const parameters = this.modelManager.getModelParameters(model) + const response = await this.chatCompletion(conversation, model, usesTools, parameters) conversation.push({ role: 'assistant', content: response.completion?.content ?? '', @@ -394,7 +403,8 @@ export abstract class LlmsBase implements PayableBot { await ctx.reply('...', { message_thread_id: ctx.message?.message_thread_id }) ).message_id ctx.chatAction = 'typing' - const response = await this.chatCompletion(conversation, model, usesTools) + const parameters = this.modelManager.getModelParameters(model) + const response = await this.chatCompletion(conversation, model, usesTools, parameters) if (response.completion) { if (model === this.modelsEnum.O1) { const msgs = splitTelegramMessage(response.completion.content as string) diff --git a/src/modules/llms/openaiBot.ts b/src/modules/llms/openaiBot.ts index defe68b9..09c2d083 100644 --- a/src/modules/llms/openaiBot.ts +++ b/src/modules/llms/openaiBot.ts @@ -23,6 +23,7 @@ import { } from './api/openai' import { type SubagentBase } from '../subagents' import { type ModelVersion } from './utils/llmModelsManager' +import { type ModelParameters } from './utils/types' export class OpenAIBot extends LlmsBase { private readonly gpt4oPrefix: string[] @@ -69,22 +70,26 @@ export class OpenAIBot extends LlmsBase { model: ModelVersion, ctx: OnMessageContext | OnCallBackQueryData, msgId: number, - limitTokens: boolean + limitTokens = true, // boolean, + parameters?: ModelParameters ): Promise { return await streamChatCompletion( conversation, ctx, model, msgId, - true // telegram messages has a character limit + true, // telegram messages has a character limit + parameters ) } async chatCompletion ( conversation: ChatConversation[], - model: ModelVersion + model: ModelVersion, + usesTools: boolean, + parameters?: ModelParameters ): Promise { - return await chatCompletion(conversation, model, model !== this.modelsEnum.O1) // limitTokens doesn't apply for o1-preview + return await chatCompletion(conversation, model, model !== this.modelsEnum.O1, parameters) // limitTokens doesn't apply for o1-preview } hasPrefix (prompt: string): string { @@ -130,35 +135,12 @@ export class OpenAIBot extends LlmsBase { return } - // if (ctx.hasCommand(this.commandsEnum.ASK35)) { - // this.updateSessionModel(ctx, 
this.modelsEnum.GPT_35_TURBO) - // await this.onChat(ctx, this.modelsEnum.GPT_35_TURBO, true, false) - // return - // } - - // if (ctx.hasCommand(this.commandsEnum.GPT4)) { - // this.updateSessionModel(ctx, this.modelsEnum.GPT_4) - // await this.onChat(ctx, this.modelsEnum.GPT_4, true, false) - // return - // } - - // if (ctx.hasCommand([this.commandsEnum.O1, this.commandsEnum.ASK1])) { - // this.updateSessionModel(ctx, this.modelsEnum.O1) - // await this.onChat(ctx, this.modelsEnum.O1, false, false) - // return - // } - const model = this.getModelFromContext(ctx) if (model) { this.updateSessionModel(ctx, model.version) await this.onChat(ctx, model.version, this.getStreamOption(model.version), false) return } - // if (ctx.hasCommand(this.commandsEnum.ASK32)) { - // this.updateSessionModel(ctx, this.modelsEnum.GPT_4_32K) - // await this.onChat(ctx, this.modelsEnum.GPT_4_32K, true, false) - // return - // } if (ctx.hasCommand(SupportedCommands.last)) { await this.onLast(ctx) diff --git a/src/modules/llms/utils/llmModelsManager.ts b/src/modules/llms/utils/llmModelsManager.ts index 3e7a9966..336ba73c 100644 --- a/src/modules/llms/utils/llmModelsManager.ts +++ b/src/modules/llms/utils/llmModelsManager.ts @@ -3,7 +3,8 @@ import { type Provider, type LLMData, type LLMModel, - type ImageModel + type ImageModel, + type ModelParameters } from './types' export class LLMModelsManager { @@ -113,6 +114,20 @@ export class LLMModelsManager { }) as any } + getModelParameters (modelVersion: string): ModelParameters { + const model = this.getModel(modelVersion) + if (!model) { + throw new Error(`Model ${modelVersion} not found`) + } + const providerParams = llmData.providerParameters[model.provider] + const modelOverrides = providerParams.modelOverrides?.[model.name] ?? {} + + return { + ...providerParams.defaultParameters, + ...modelOverrides + } + } + isValidModel (model: string): model is (typeof this.modelsEnum)[keyof typeof this.modelsEnum] { return Object.values(this.modelsEnum).includes(model) } diff --git a/src/modules/llms/utils/llmsData.ts b/src/modules/llms/utils/llmsData.ts index 4b5a4f60..51739f31 100644 --- a/src/modules/llms/utils/llmsData.ts +++ b/src/modules/llms/utils/llmsData.ts @@ -1,7 +1,22 @@ +import config from '../../../config' import { type LLMData } from './types' export const llmData: LLMData = { chatModels: { + 'gemini-15': { + provider: 'vertex', + name: 'gemini-15', + fullName: 'gemini-1.5-pro-latest', + botName: 'VertexBot', + version: 'gemini-1.5-pro-latest', + commands: ['gemini15', 'g15'], + apiSpec: 'https://deepmind.google/technologies/gemini/pro/', + inputPrice: 0.0025, + outputPrice: 0.0075, + maxContextTokens: 1048576, + chargeType: 'CHAR', + stream: true + }, 'gemini-10': { provider: 'vertex', name: 'gemini-10', @@ -17,18 +32,19 @@ export const llmData: LLMData = { chargeType: 'CHAR', stream: true }, - 'gemini-15': { - provider: 'vertex', - name: 'gemini-15', - fullName: 'gemini-1.5-pro-latest', - botName: 'VertexBot', - version: 'gemini-1.5-pro-latest', - commands: ['gemini15', 'g15'], - apiSpec: 'https://deepmind.google/technologies/gemini/pro/', - inputPrice: 0.0025, - outputPrice: 0.0075, - maxContextTokens: 1048576, - chargeType: 'CHAR', + 'claude-35-sonnet': { + provider: 'claude', + name: 'claude-35-sonnet', + fullName: 'Claude Sonnet 3.5', + botName: 'ClaudeBot', + version: 'claude-3-5-sonnet-20240620', + commands: ['sonnet', 'claudes', 's', 'stool'], + prefix: ['s. 
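getModelParameters resolves a model's runtime settings in two layers: provider-wide defaults first, then per-model overrides keyed by the model's name rather than its version. A self-contained sketch of that merge, with made-up numbers in place of the config values:

interface ModelParameters {
  temperature?: number
  max_tokens?: number
  max_completion_tokens?: number
  system?: string
}

interface ProviderParameters {
  defaultParameters: ModelParameters
  modelOverrides?: Record<string, Partial<ModelParameters>>
}

const providerParameters: Record<string, ProviderParameters> = {
  openai: {
    defaultParameters: { temperature: 0.8, max_completion_tokens: 800 },
    modelOverrides: { o1: { temperature: 1 } } // keyed by model *name*, not version
  }
}

function getModelParameters (provider: string, modelName: string): ModelParameters {
  const providerParams = providerParameters[provider]
  if (!providerParams) {
    throw new Error(`Provider ${provider} not found`)
  }
  const overrides = providerParams.modelOverrides?.[modelName] ?? {}
  // Spread order matters: the model overrides win over the provider defaults.
  return { ...providerParams.defaultParameters, ...overrides }
}

console.log(getModelParameters('openai', 'o1')) // -> { temperature: 1, max_completion_tokens: 800 }
console.log(getModelParameters('openai', 'gpt-4o')) // -> { temperature: 0.8, max_completion_tokens: 800 }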
'], + apiSpec: 'https://www.anthropic.com/news/claude-3-5-sonnet', + inputPrice: 0.003, + outputPrice: 0.015, + maxContextTokens: 8192, + chargeType: 'TOKEN', stream: true }, 'claude-3-opus': { @@ -46,21 +62,6 @@ export const llmData: LLMData = { chargeType: 'TOKEN', stream: true }, - 'claude-35-sonnet': { - provider: 'claude', - name: 'claude-35-sonnet', - fullName: 'Claude Sonnet 3.5', - botName: 'ClaudeBot', - version: 'claude-3-5-sonnet-20240620', - commands: ['sonnet', 'claudes', 's', 'stool'], - prefix: ['s. '], - apiSpec: 'https://www.anthropic.com/news/claude-3-5-sonnet', - inputPrice: 0.003, - outputPrice: 0.015, - maxContextTokens: 8192, - chargeType: 'TOKEN', - stream: true - }, 'claude-3-haiku': { provider: 'claude', name: 'claude-3-haiku', @@ -76,6 +77,21 @@ export const llmData: LLMData = { chargeType: 'TOKEN', stream: true }, + 'gpt-4o': { + provider: 'openai', + name: 'gpt-4o', + fullName: 'GPT-4o', + botName: 'OpenAIBot', + version: 'gpt-4o', + commands: ['gpto', 'ask', 'chat', 'gpt', 'a'], + prefix: ['a. ', '. '], + apiSpec: 'https://platform.openai.com/docs/models/gpt-4o', + inputPrice: 0.005, + outputPrice: 0.0015, + maxContextTokens: 128000, + chargeType: 'TOKEN', + stream: true + }, 'gpt-4': { provider: 'openai', name: 'gpt-4', @@ -119,21 +135,6 @@ export const llmData: LLMData = { chargeType: 'TOKEN', stream: true }, - 'gpt-4o': { - provider: 'openai', - name: 'gpt-4o', - fullName: 'GPT-4o', - botName: 'OpenAIBot', - version: 'gpt-4o', - commands: ['gpto', 'ask', 'chat', 'gpt', 'a'], - prefix: ['a. ', '. '], - apiSpec: 'https://platform.openai.com/docs/models/gpt-4o', - inputPrice: 0.005, - outputPrice: 0.0015, - maxContextTokens: 128000, - chargeType: 'TOKEN', - stream: true - }, o1: { provider: 'openai', name: 'o1', @@ -180,5 +181,26 @@ export const llmData: LLMData = { '1792x1024': 0.12 } } + }, + providerParameters: { + openai: { + defaultParameters: { + temperature: config.openAi.dalle.completions.temperature, + max_completion_tokens: +config.openAi.chatGpt.maxTokens + }, + modelOverrides: { o1: { temperature: 1 } } // uses model name, not model version + }, + claude: { + defaultParameters: { + system: config.openAi.chatGpt.chatCompletionContext, + max_tokens: +config.openAi.chatGpt.maxTokens + } + }, + vertex: { + defaultParameters: { + system: config.openAi.chatGpt.chatCompletionContext, + max_tokens: +config.openAi.chatGpt.maxTokens + } + } } } diff --git a/src/modules/llms/utils/types.ts b/src/modules/llms/utils/types.ts index 0f2492f0..816662bb 100644 --- a/src/modules/llms/utils/types.ts +++ b/src/modules/llms/utils/types.ts @@ -1,4 +1,4 @@ -export type Provider = 'openai' | 'claude' | 'vertex' | 'palm' | 'jurassic' +export type Provider = 'openai' | 'claude' | 'vertex' // | 'palm' | 'jurassic' export type ChargeType = 'TOKEN' | 'CHAR' export type DalleImageSize = '1024x1024' | '1024x1792' | '1792x1024' @@ -15,6 +15,18 @@ interface BaseModel { prefix?: string[] apiSpec: string } +export interface ModelParameters { + temperature?: number + max_tokens?: number + max_completion_tokens?: number + system?: string +} + +export interface ProviderParameters { + defaultParameters: ModelParameters + modelOverrides?: Record> +} + export interface ChatModel extends BaseModel { inputPrice: number outputPrice: number @@ -33,6 +45,7 @@ export type LLMModel = ChatModel | ImageModel export interface LLMData { chatModels: Record imageModels: Record + providerParameters: Record } export interface ParseDate { diff --git a/src/modules/llms/vertexBot.ts 
b/src/modules/llms/vertexBot.ts index 384b79f4..9c675d72 100644 --- a/src/modules/llms/vertexBot.ts +++ b/src/modules/llms/vertexBot.ts @@ -14,6 +14,7 @@ import { LlmsBase } from './llmsBase' import { vertexCompletion, vertexStreamCompletion } from './api/vertex' import { type SubagentBase } from '../subagents' import { type ModelVersion } from './utils/llmModelsManager' +import { type ModelParameters } from './utils/types' export class VertexBot extends LlmsBase { constructor (payments: BotPayments, subagents?: SubagentBase[]) { @@ -43,20 +44,24 @@ export class VertexBot extends LlmsBase { model: ModelVersion, ctx: OnMessageContext | OnCallBackQueryData, msgId: number, - limitTokens: boolean): Promise { + limitTokens: boolean, + parameters?: ModelParameters): Promise { return await vertexStreamCompletion(conversation, model, ctx, msgId, - true // telegram messages has a character limit + true, // telegram messages has a character limit + parameters ) } async chatCompletion ( conversation: ChatConversation[], - model: ModelVersion + model: ModelVersion, + usesTools: boolean, + parameters?: ModelParameters ): Promise { - return await vertexCompletion(conversation, model) + return await vertexCompletion(conversation, model, parameters) } public async onEvent (ctx: OnMessageContext | OnCallBackQueryData): Promise { From e3f8fb24be7548f4c543be05477daceeea13d628 Mon Sep 17 00:00:00 2001 From: fegloff Date: Mon, 21 Oct 2024 19:49:32 -0500 Subject: [PATCH 09/14] fix model o1 stream error --- src/modules/llms/llmsBase.ts | 15 ++++---- src/modules/llms/utils/helpers.ts | 57 ++++++++++++++++++++++++++++++- 2 files changed, 65 insertions(+), 7 deletions(-) diff --git a/src/modules/llms/llmsBase.ts b/src/modules/llms/llmsBase.ts index 817417aa..a265517b 100644 --- a/src/modules/llms/llmsBase.ts +++ b/src/modules/llms/llmsBase.ts @@ -234,10 +234,12 @@ export abstract class LlmsBase implements PayableBot { try { const msg = session.requestQueue.shift() const prompt = msg?.content as string - const model = this.isSupportedModel(msg?.model ?? ctx.session.currentModel) ? msg?.model ?? ctx.session.currentModel : this.supportedModels[0].version + const modelVersion = this.isSupportedModel(msg?.model ?? ctx.session.currentModel) ? msg?.model ?? ctx.session.currentModel : this.supportedModels[0].version + const model = this.modelManager.getModel(modelVersion) as ChatModel + stream = model?.stream ?? 
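Patch 09 also lets a model's own stream flag from the model data override the stream option passed in by the command handler, so non-streaming models such as o1 never hit the streaming path. A sketch of that override with illustrative model entries (the real values come from llmsData via modelManager.getModel()):

interface ChatModelInfo { version: string, stream: boolean }

const chatModels: Record<string, ChatModelInfo> = {
  'gpt-4o': { version: 'gpt-4o', stream: true },
  'o1-preview': { version: 'o1-preview', stream: false }
}

function effectiveStream (requestedStream: boolean, modelVersion: string): boolean {
  const model = chatModels[modelVersion]
  // Prefer the model's own capability; keep the caller's choice only when the model is unknown.
  return model?.stream ?? requestedStream
}

console.log(effectiveStream(true, 'o1-preview')) // -> false
console.log(effectiveStream(true, 'gpt-4o')) // -> true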
stream let agentCompletions: string[] = [] const { chatConversation } = session - const minBalance = await getMinBalance(ctx, model) + const minBalance = await getMinBalance(ctx, modelVersion) let enhancedPrompt = '' if (await this.hasBalance(ctx, minBalance)) { if (!prompt) { @@ -266,23 +268,23 @@ export abstract class LlmsBase implements PayableBot { continue } } - if (chatConversation.length === 0 && model !== this.modelsEnum.O1) { + if (chatConversation.length === 0 && modelVersion !== this.modelsEnum.O1) { chatConversation.push({ role: 'system', content: config.openAi.chatGpt.chatCompletionContext, - model + model: modelVersion }) } // const hasCode = hasCodeSnippet(ctx) const chat: ChatConversation = { content: enhancedPrompt || prompt, role: 'user', - model + model: modelVersion } chatConversation.push(chat) const payload = { conversation: chatConversation, - model, + model: modelVersion, ctx } let result: { price: number, chat: ChatConversation[] } = { price: 0, chat: [] } @@ -407,6 +409,7 @@ export abstract class LlmsBase implements PayableBot { const response = await this.chatCompletion(conversation, model, usesTools, parameters) if (response.completion) { if (model === this.modelsEnum.O1) { + console.log(response.completion) const msgs = splitTelegramMessage(response.completion.content as string) await ctx.api.editMessageText( ctx.chat.id, diff --git a/src/modules/llms/utils/helpers.ts b/src/modules/llms/utils/helpers.ts index b701c805..0e0b747d 100644 --- a/src/modules/llms/utils/helpers.ts +++ b/src/modules/llms/utils/helpers.ts @@ -309,7 +309,7 @@ export const hasCodeSnippet = (ctx: OnMessageContext | OnCallBackQueryData): boo return entities.length > 0 } -export const splitTelegramMessage = (text: string): string[] => { +export const splitTelegramMessage2 = (text: string): string[] => { const maxLength = 4096 const result: string[] = [] @@ -350,3 +350,58 @@ export const splitTelegramMessage = (text: string): string[] => { return result } + +// Find all Markdown entities and their positions +export const splitTelegramMessage = (text: string): string[] => { + const maxLength = 4096 + const result: string[] = [] + + // Regex to match start of Markdown entities + const entityStartPatterns = [ + /\*\*/g, // bold + /__/g, // italic + /```/g, // code block + /`/g, // inline code + /\[/g // link start + ] + + // Function to find the last safe split position + const findSafeSplitPosition = (text: string, endIndex: number): number => { + // First try to find the last space before endIndex + const lastSpace = text.lastIndexOf(' ', endIndex) + if (lastSpace === -1) return endIndex + + // Check for any entity starts between lastSpace and endIndex + const textSegment = text.slice(0, lastSpace) + + for (const pattern of entityStartPatterns) { + pattern.lastIndex = 0 // Reset regex state + const matches = [...textSegment.matchAll(pattern)] + if (matches.length % 2 === 1) { + // If we have an odd number of entity markers, find the last one + const lastEntityStart = matches[matches.length - 1].index + // Return position just after the last complete entity + return lastEntityStart + } + } + + return lastSpace + } + + let startIndex = 0 + while (startIndex < text.length) { + let endIndex = Math.min(startIndex + maxLength, text.length) + if (endIndex < text.length) { + endIndex = findSafeSplitPosition(text.slice(startIndex, endIndex), maxLength) + } + + const chunk = text.slice(startIndex, startIndex + endIndex).trim() + result.push(chunk) + startIndex += endIndex + // Skip whitespace between 
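This splitter exists because Telegram rejects messages longer than 4096 characters and a cut inside an unclosed Markdown entity breaks parsing on send. A usage sketch, assuming the helper is imported from utils/helpers by a sibling module under src/modules/llms:

import { splitTelegramMessage } from './utils/helpers'

// A completion that is longer than one Telegram message and contains a fenced code block.
const longReply = '*Result*\n```\n' + 'const x = 1\n'.repeat(600) + '```\n' + 'done '.repeat(300)

for (const chunk of splitTelegramMessage(longReply)) {
  // Every chunk stays within the 4096-character limit, and the split point is nudged
  // backwards so a chunk avoids ending inside an unclosed *, _, `, ``` or [ entity.
  console.log(chunk.length <= 4096, chunk.slice(0, 20))
}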
chunks + while (startIndex < text.length && /\s/.test(text[startIndex])) { + startIndex++ + } + } + return result +} From 4560c3e9bab8a6167ebc035ca80f0ae9304e30ba Mon Sep 17 00:00:00 2001 From: fegloff Date: Mon, 21 Oct 2024 19:59:20 -0500 Subject: [PATCH 10/14] update markdown entities for o1 completions --- src/modules/llms/utils/helpers.ts | 94 +++++++++++++++---------------- 1 file changed, 47 insertions(+), 47 deletions(-) diff --git a/src/modules/llms/utils/helpers.ts b/src/modules/llms/utils/helpers.ts index 0e0b747d..8e8da85f 100644 --- a/src/modules/llms/utils/helpers.ts +++ b/src/modules/llms/utils/helpers.ts @@ -309,48 +309,6 @@ export const hasCodeSnippet = (ctx: OnMessageContext | OnCallBackQueryData): boo return entities.length > 0 } -export const splitTelegramMessage2 = (text: string): string[] => { - const maxLength = 4096 - const result: string[] = [] - - // Regular expression to match Markdown entities - const markdownRegex = /(\*\*|__|\[.*?\]\(.*?\)|```[\s\S]*?```|`[^`\n]+`)/g - - // Function to find the end index that avoids splitting Markdown entities - const findEndIndex = (startIndex: number, chunk: string): number => { - const matches = [...chunk.matchAll(markdownRegex)] - if (matches.length === 0) return startIndex + maxLength - - const lastMatch = matches[matches.length - 1] - const lastMatchEnd = lastMatch.index + lastMatch[0].length - return lastMatchEnd > chunk.length ? startIndex + lastMatch.index : startIndex + maxLength - } - - let startIndex = 0 - while (startIndex < text.length) { - let endIndex = findEndIndex(startIndex, text.slice(startIndex, startIndex + maxLength)) - endIndex = Math.min(endIndex, text.length) // Ensure endIndex is within bounds - - // Find a natural break point if necessary - if (endIndex < text.length) { - const lastSpaceIndex = text.slice(startIndex, endIndex).lastIndexOf(' ') - if (lastSpaceIndex > 0) { - endIndex = startIndex + lastSpaceIndex - } - } - - result.push(text.slice(startIndex, endIndex).trim()) - startIndex = endIndex - - // Move past any spaces or special characters that might cause issues - while (startIndex < text.length && /\s/.test(text[startIndex])) { - startIndex++ - } - } - - return result -} - // Find all Markdown entities and their positions export const splitTelegramMessage = (text: string): string[] => { const maxLength = 4096 @@ -358,11 +316,11 @@ export const splitTelegramMessage = (text: string): string[] => { // Regex to match start of Markdown entities const entityStartPatterns = [ - /\*\*/g, // bold - /__/g, // italic - /```/g, // code block - /`/g, // inline code - /\[/g // link start + /\*/g, // bold text (single asterisk) + /_/g, // italic text (single underscore) + /```/g, // pre-formatted code block (triple backtick) + /`/g, // inline fixed-width code (single backtick) + /\[/g // inline URL or user mention ] // Function to find the last safe split position @@ -405,3 +363,45 @@ export const splitTelegramMessage = (text: string): string[] => { } return result } + +// export const splitTelegramMessage = (text: string): string[] => { +// const maxLength = 4096 +// const result: string[] = [] + +// // Regular expression to match Markdown entities +// const markdownRegex = /(\*\*|__|\[.*?\]\(.*?\)|```[\s\S]*?```|`[^`\n]+`)/g + +// // Function to find the end index that avoids splitting Markdown entities +// const findEndIndex = (startIndex: number, chunk: string): number => { +// const matches = [...chunk.matchAll(markdownRegex)] +// if (matches.length === 0) return startIndex + maxLength + +// const 
lastMatch = matches[matches.length - 1] +// const lastMatchEnd = lastMatch.index + lastMatch[0].length +// return lastMatchEnd > chunk.length ? startIndex + lastMatch.index : startIndex + maxLength +// } + +// let startIndex = 0 +// while (startIndex < text.length) { +// let endIndex = findEndIndex(startIndex, text.slice(startIndex, startIndex + maxLength)) +// endIndex = Math.min(endIndex, text.length) // Ensure endIndex is within bounds + +// // Find a natural break point if necessary +// if (endIndex < text.length) { +// const lastSpaceIndex = text.slice(startIndex, endIndex).lastIndexOf(' ') +// if (lastSpaceIndex > 0) { +// endIndex = startIndex + lastSpaceIndex +// } +// } + +// result.push(text.slice(startIndex, endIndex).trim()) +// startIndex = endIndex + +// // Move past any spaces or special characters that might cause issues +// while (startIndex < text.length && /\s/.test(text[startIndex])) { +// startIndex++ +// } +// } + +// return result +// } From f81fdc43e77973f948deece1ba4862bb804a601e Mon Sep 17 00:00:00 2001 From: fegloff Date: Wed, 23 Oct 2024 22:16:30 -0500 Subject: [PATCH 11/14] update sonnet model version to 20241022 --- src/modules/llms/utils/llmsData.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/modules/llms/utils/llmsData.ts b/src/modules/llms/utils/llmsData.ts index 51739f31..5d19d32a 100644 --- a/src/modules/llms/utils/llmsData.ts +++ b/src/modules/llms/utils/llmsData.ts @@ -37,7 +37,7 @@ export const llmData: LLMData = { name: 'claude-35-sonnet', fullName: 'Claude Sonnet 3.5', botName: 'ClaudeBot', - version: 'claude-3-5-sonnet-20240620', + version: 'claude-3-5-sonnet-20241022', commands: ['sonnet', 'claudes', 's', 'stool'], prefix: ['s. '], apiSpec: 'https://www.anthropic.com/news/claude-3-5-sonnet', From cecf31de0a875ee95be5dcd250df1e40203ff946 Mon Sep 17 00:00:00 2001 From: fegloff Date: Thu, 24 Oct 2024 23:52:32 -0500 Subject: [PATCH 12/14] add xai model + refactor system message logic on llmsBase --- src/bot.ts | 3 ++ src/modules/llms/api/athropic.ts | 41 +++++++++++++++ src/modules/llms/api/openai.ts | 16 +++++- src/modules/llms/claudeBot.ts | 2 - src/modules/llms/llmsBase.ts | 7 --- src/modules/llms/openaiBot.ts | 2 - src/modules/llms/utils/llmsData.ts | 21 ++++++++ src/modules/llms/utils/types.ts | 2 +- src/modules/llms/xaiBot.ts | 83 ++++++++++++++++++++++++++++++ 9 files changed, 163 insertions(+), 14 deletions(-) create mode 100644 src/modules/llms/xaiBot.ts diff --git a/src/bot.ts b/src/bot.ts index 7b3c7a40..61b90e19 100644 --- a/src/bot.ts +++ b/src/bot.ts @@ -59,6 +59,7 @@ import { createInitialSessionData, addQuotePrefix, markdownToTelegramHtml } from import { LlamaAgent } from './modules/subagents' import { llmModelManager } from './modules/llms/utils/llmModelsManager' import { HmnyBot } from './modules/hmny' +import { XaiBot } from './modules/llms/xaiBot' Events.EventEmitter.defaultMaxListeners = 30 @@ -208,6 +209,7 @@ const openAiBot = new OpenAIBot(payments, [llamaAgent]) const dalleBot = new DalleBot(payments) const claudeBot = new ClaudeBot(payments) const vertexBot = new VertexBot(payments, [llamaAgent]) +const xaiBot = new XaiBot(payments) const oneCountryBot = new OneCountryBot(payments) const translateBot = new TranslateBot() const telegramPayments = new TelegramPayments(payments) @@ -337,6 +339,7 @@ const PayableBots: Record = { dalleBot: { bot: dalleBot }, claudeBot: { bot: claudeBot }, vertexBot: { bot: vertexBot }, + aixBot: { bot: xaiBot }, openAiBot: { enabled: (ctx: OnMessageContext) => 
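Registering the new provider in bot.ts takes only a constructor call and an entry in the PayableBots map, where each entry carries the bot instance plus an optional enabled predicate. A trimmed-down sketch of that registration pattern; the types and the gating predicate here are illustrative stand-ins, not the real bot.ts definitions:

// Illustrative shapes only; the real OnMessageContext and bot types live in src/modules/types.
interface Ctx { session: { dalle: { isEnabled: boolean } } }
interface PayableBotLike { onEvent: (ctx: Ctx) => Promise<void> }
interface PayableBotConfig { bot: PayableBotLike, enabled?: (ctx: Ctx) => boolean }

const xaiBot: PayableBotLike = { onEvent: async () => { /* handle /gk, /grok */ } }
const openAiBot: PayableBotLike = { onEvent: async () => { /* handle /ask, /gpt, ... */ } }

const PayableBots: Record<string, PayableBotConfig> = {
  xaiBot: { bot: xaiBot }, // new entry: no predicate, so it is always eligible
  openAiBot: { bot: openAiBot, enabled: ctx => ctx.session.dalle.isEnabled } // gated entry, for illustration
}

console.log(Object.keys(PayableBots))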
ctx.session.dalle.isEnabled, bot: openAiBot diff --git a/src/modules/llms/api/athropic.ts b/src/modules/llms/api/athropic.ts index ac0b4ae0..149091ff 100644 --- a/src/modules/llms/api/athropic.ts +++ b/src/modules/llms/api/athropic.ts @@ -62,6 +62,47 @@ export const anthropicCompletion = async ( } } +export const xaiCompletion = async ( + conversation: ChatConversation[], + model = LlmModelsEnum.GROK, + parameters?: ModelParameters +): Promise => { + logger.info(`Handling ${model} completion`) + parameters = parameters ?? { + system: config.openAi.chatGpt.chatCompletionContext, + max_tokens: +config.openAi.chatGpt.maxTokens + } + const data = { + model, + stream: false, + messages: conversation.filter(c => c.model === model) + .map(m => { return { content: m.content, role: m.role } }), + ...parameters + } + const url = `${API_ENDPOINT}/xai/completions` + const response = await axios.post(url, data, headers) + const respJson = JSON.parse(response.data) + if (response) { + const totalInputTokens = respJson.usage.input_tokens + const totalOutputTokens = respJson.usage.output_tokens + const completion = respJson.content + return { + completion: { + content: completion[0].text, + role: 'assistant', + model + }, + usage: totalOutputTokens + totalInputTokens, + price: 0 + } + } + return { + completion: undefined, + usage: 0, + price: 0 + } +} + export const anthropicStreamCompletion = async ( conversation: ChatConversation[], model = LlmModelsEnum.CLAUDE_3_OPUS, diff --git a/src/modules/llms/api/openai.ts b/src/modules/llms/api/openai.ts index c1b3ebe4..c2aaed65 100644 --- a/src/modules/llms/api/openai.ts +++ b/src/modules/llms/api/openai.ts @@ -79,13 +79,25 @@ export async function alterGeneratedImg ( } } +const prepareConversation = (conversation: ChatConversation[], model: string): ChatConversation[] => { + const messages = conversation.filter(c => c.model === model).map(m => { return { content: m.content, role: m.role } }) + if (messages.length !== 1 || model === LlmModelsEnum.O1) { + return messages + } + const systemMessage = { + role: 'system', + content: config.openAi.chatGpt.chatCompletionContext + } + return [systemMessage, ...messages] +} + export async function chatCompletion ( conversation: ChatConversation[], model = config.openAi.chatGpt.model, limitTokens = true, parameters?: ModelParameters ): Promise { - const messages = conversation.filter(c => c.model === model).map(m => { return { content: m.content, role: m.role } }) + const messages = prepareConversation(conversation, model) parameters = parameters ?? { max_completion_tokens: config.openAi.chatGpt.maxTokens, temperature: config.openAi.dalle.completions.temperature @@ -132,7 +144,7 @@ export const streamChatCompletion = async ( ): Promise => { let completion = '' let wordCountMinimum = 2 - const messages = conversation.filter(c => c.model === model).map(m => { return { content: m.content, role: m.role } }) + const messages = prepareConversation(conversation, model) parameters = parameters ?? 
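prepareConversation replaces the system message that llmsBase used to push into the session: the system prompt is injected only on a conversation's first turn (exactly one message for the selected model) and never for o1. A standalone sketch of that rule with placeholder strings (the real code compares against LlmModelsEnum.O1 and reads the prompt from config):

interface ChatMessage { role: string, content: string, model?: string }

const SYSTEM_PROMPT = 'You are a helpful assistant.' // stands in for config.openAi.chatGpt.chatCompletionContext
const O1 = 'o1-preview' // illustrative o1 version string

function prepareConversation (conversation: ChatMessage[], model: string): ChatMessage[] {
  const messages = conversation
    .filter(c => c.model === model)
    .map(m => ({ content: m.content, role: m.role }))
  // Only the very first turn gets the system prompt, and o1 never does.
  if (messages.length !== 1 || model === O1) {
    return messages
  }
  return [{ role: 'system', content: SYSTEM_PROMPT }, ...messages]
}

console.log(prepareConversation([{ role: 'user', content: 'hi', model: 'gpt-4o' }], 'gpt-4o'))
// -> system prompt followed by the user message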
{ max_completion_tokens: config.openAi.chatGpt.maxTokens, temperature: config.openAi.dalle.completions.temperature || 0.8 diff --git a/src/modules/llms/claudeBot.ts b/src/modules/llms/claudeBot.ts index eae43eab..d71a7726 100644 --- a/src/modules/llms/claudeBot.ts +++ b/src/modules/llms/claudeBot.ts @@ -12,8 +12,6 @@ import { type ModelVersion } from './utils/llmModelsManager' import { type ModelParameters } from './utils/types' export class ClaudeBot extends LlmsBase { - private readonly claudeModels: ModelVersion[] - constructor (payments: BotPayments) { super(payments, 'ClaudeBot', 'llms') } diff --git a/src/modules/llms/llmsBase.ts b/src/modules/llms/llmsBase.ts index a265517b..0a6b17eb 100644 --- a/src/modules/llms/llmsBase.ts +++ b/src/modules/llms/llmsBase.ts @@ -268,13 +268,6 @@ export abstract class LlmsBase implements PayableBot { continue } } - if (chatConversation.length === 0 && modelVersion !== this.modelsEnum.O1) { - chatConversation.push({ - role: 'system', - content: config.openAi.chatGpt.chatCompletionContext, - model: modelVersion - }) - } // const hasCode = hasCodeSnippet(ctx) const chat: ChatConversation = { content: enhancedPrompt || prompt, diff --git a/src/modules/llms/openaiBot.ts b/src/modules/llms/openaiBot.ts index 09c2d083..497d7e1e 100644 --- a/src/modules/llms/openaiBot.ts +++ b/src/modules/llms/openaiBot.ts @@ -26,8 +26,6 @@ import { type ModelVersion } from './utils/llmModelsManager' import { type ModelParameters } from './utils/types' export class OpenAIBot extends LlmsBase { - private readonly gpt4oPrefix: string[] - constructor (payments: BotPayments, subagents?: SubagentBase[]) { super(payments, 'OpenAIBot', 'chatGpt', subagents) // this.gpt4oPrefix = this.modelManager.getPrefixByModel(this.modelsEnum.GPT_4O) ?? [] diff --git a/src/modules/llms/utils/llmsData.ts b/src/modules/llms/utils/llmsData.ts index 5d19d32a..d2dd0a2a 100644 --- a/src/modules/llms/utils/llmsData.ts +++ b/src/modules/llms/utils/llmsData.ts @@ -163,6 +163,21 @@ export const llmData: LLMData = { maxContextTokens: 128000, chargeType: 'TOKEN', stream: false + }, + grok: { + provider: 'xai', // using grok through claude api + name: 'grok', + fullName: 'Grok', + botName: 'xAIBot', + version: 'grok-beta', + commands: ['gk', 'grok'], + prefix: ['gk. 
'], + apiSpec: 'https://docs.x.ai/api#introduction', + inputPrice: 0.00500, + outputPrice: 0.01500, + maxContextTokens: 131072, + chargeType: 'TOKEN', + stream: false } }, imageModels: { @@ -196,6 +211,12 @@ export const llmData: LLMData = { max_tokens: +config.openAi.chatGpt.maxTokens } }, + xai: { + defaultParameters: { + system: config.openAi.chatGpt.chatCompletionContext, + max_tokens: +config.openAi.chatGpt.maxTokens + } + }, vertex: { defaultParameters: { system: config.openAi.chatGpt.chatCompletionContext, diff --git a/src/modules/llms/utils/types.ts b/src/modules/llms/utils/types.ts index 816662bb..dabfc35a 100644 --- a/src/modules/llms/utils/types.ts +++ b/src/modules/llms/utils/types.ts @@ -1,4 +1,4 @@ -export type Provider = 'openai' | 'claude' | 'vertex' // | 'palm' | 'jurassic' +export type Provider = 'openai' | 'claude' | 'vertex' | 'xai' // | 'palm' | 'jurassic' export type ChargeType = 'TOKEN' | 'CHAR' export type DalleImageSize = '1024x1024' | '1024x1792' | '1792x1024' diff --git a/src/modules/llms/xaiBot.ts b/src/modules/llms/xaiBot.ts new file mode 100644 index 00000000..35540f15 --- /dev/null +++ b/src/modules/llms/xaiBot.ts @@ -0,0 +1,83 @@ +import { type BotPayments } from '../payment' +import { + type OnMessageContext, + type OnCallBackQueryData, + type ChatConversation +} from '../types' +import { SupportedCommands } from './utils/helpers' +import { type LlmCompletion } from './api/llmApi' +import { xaiCompletion } from './api/athropic' +import { LlmsBase } from './llmsBase' +import { type ModelVersion } from './utils/llmModelsManager' +import { type ModelParameters } from './utils/types' + +export class XaiBot extends LlmsBase { + private readonly claudeModels: ModelVersion[] + + constructor (payments: BotPayments) { + super(payments, 'xAIBot', 'llms') + } + + public getEstimatedPrice (ctx: any): number { + return 0 + } + + public isSupportedEvent ( + ctx: OnMessageContext | OnCallBackQueryData + ): boolean { + const hasCommand = ctx.hasCommand(this.supportedCommands) + + if (ctx.hasCommand(SupportedCommands.new) && this.checkModel(ctx)) { + return true + } + const chatPrefix = this.hasPrefix(ctx.message?.text ?? '') + if (chatPrefix !== '') { + return true + } + return hasCommand + } + + async chatStreamCompletion ( + conversation: ChatConversation[], + model: ModelVersion, + ctx: OnMessageContext | OnCallBackQueryData, + msgId: number, + limitTokens: boolean, + parameters?: ModelParameters): Promise { + return { + completion: undefined, + usage: 0, + price: 0, + inputTokens: 0, + outputTokens: 0 + } + } + + async chatCompletion ( + conversation: ChatConversation[], + model: ModelVersion, + hasTools: boolean, + parameters?: ModelParameters + ): Promise { + return await xaiCompletion(conversation, model, parameters) + } + + public async onEvent (ctx: OnMessageContext | OnCallBackQueryData): Promise { + ctx.transient.analytics.module = this.module + const isSupportedEvent = this.isSupportedEvent(ctx) + if (!isSupportedEvent && ctx.chat?.type !== 'private') { + this.logger.warn(`### unsupported command ${ctx.message?.text}`) + return + } + + const model = this.getModelFromContext(ctx) + if (!model) { + this.logger.warn(`### unsupported model for command ${ctx.message?.text}`) + return + } + this.updateSessionModel(ctx, model.version) + + const usesTools = ctx.hasCommand([this.commandsEnum.CTOOL, this.commandsEnum.STOOL]) + await this.onChat(ctx, model.version, usesTools ? 
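With the grok entry and the xai provider parameters in place, Grok flows through the same ChatConversation pipeline as the Anthropic models; only the /xai/completions endpoint differs. A hedged usage sketch; the import paths mirror the ones used in this series, and the helper name askGrok is made up:

import { xaiCompletion } from './api/athropic' // the file in this repo is spelled athropic.ts
import { LlmModelsEnum } from './utils/llmModelsManager'
import { type ChatConversation } from '../types'

async function askGrok (prompt: string): Promise<unknown> {
  // Assumes the models manager exposes GROK once the llmsData entry above is loaded.
  const model = LlmModelsEnum.GROK
  const conversation: ChatConversation[] = [{ role: 'user', content: prompt, model }]
  // Parameters are omitted, so xaiCompletion falls back to the xai defaults from llmsData.
  const result = await xaiCompletion(conversation, model)
  return result.completion?.content
}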
false : this.getStreamOption(model.version), usesTools) + } +} From a6bdd3d46c0c1883fb632cc9d19f94d07c65008c Mon Sep 17 00:00:00 2001 From: fegloff Date: Thu, 24 Oct 2024 23:58:59 -0500 Subject: [PATCH 13/14] update claude and vertex commands and prefix --- src/modules/llms/utils/llmsData.ts | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/modules/llms/utils/llmsData.ts b/src/modules/llms/utils/llmsData.ts index d2dd0a2a..7c103605 100644 --- a/src/modules/llms/utils/llmsData.ts +++ b/src/modules/llms/utils/llmsData.ts @@ -9,7 +9,8 @@ export const llmData: LLMData = { fullName: 'gemini-1.5-pro-latest', botName: 'VertexBot', version: 'gemini-1.5-pro-latest', - commands: ['gemini15', 'g15'], + commands: ['gemini15', 'g'], + prefix: ['g. '], apiSpec: 'https://deepmind.google/technologies/gemini/pro/', inputPrice: 0.0025, outputPrice: 0.0075, @@ -23,8 +24,8 @@ export const llmData: LLMData = { botName: 'VertexBot', fullName: 'gemini-1.0-pro', version: 'gemini-1.0-pro', - commands: ['gemini', 'g'], - prefix: ['g. '], + commands: ['gemini', 'g10'], + prefix: ['g10. '], apiSpec: 'https://deepmind.google/technologies/gemini/pro/', inputPrice: 0.000125, outputPrice: 0.000375, @@ -38,8 +39,8 @@ export const llmData: LLMData = { fullName: 'Claude Sonnet 3.5', botName: 'ClaudeBot', version: 'claude-3-5-sonnet-20241022', - commands: ['sonnet', 'claudes', 's', 'stool'], - prefix: ['s. '], + commands: ['sonnet', 'claude', 's', 'stool', 'c', 'ctool'], + prefix: ['s. ', 'c. '], apiSpec: 'https://www.anthropic.com/news/claude-3-5-sonnet', inputPrice: 0.003, outputPrice: 0.015, @@ -53,8 +54,8 @@ export const llmData: LLMData = { fullName: 'Claude Opus', botName: 'ClaudeBot', version: 'claude-3-opus-20240229', - commands: ['claude', 'opus', 'c', 'o', 'ctool'], - prefix: ['c. '], + commands: ['opus', 'o', 'otool'], + prefix: ['o. '], apiSpec: 'https://www.anthropic.com/news/claude-3-family', inputPrice: 0.015, outputPrice: 0.075, From 36955f5567eedc58b46fd908a9070cb6c4209011 Mon Sep 17 00:00:00 2001 From: fegloff Date: Sat, 26 Oct 2024 19:50:02 -0500 Subject: [PATCH 14/14] fix chatCompletion token metrics + complete grok model integration --- src/modules/llms/api/athropic.ts | 8 ++++++-- src/modules/llms/api/openai.ts | 2 +- src/modules/llms/llmsBase.ts | 1 - src/modules/llms/utils/llmsData.ts | 8 ++++---- src/modules/llms/xaiBot.ts | 4 ---- src/modules/payment/index.ts | 1 - 6 files changed, 11 insertions(+), 13 deletions(-) diff --git a/src/modules/llms/api/athropic.ts b/src/modules/llms/api/athropic.ts index 149091ff..b2fba812 100644 --- a/src/modules/llms/api/athropic.ts +++ b/src/modules/llms/api/athropic.ts @@ -52,7 +52,9 @@ export const anthropicCompletion = async ( model }, usage: totalOutputTokens + totalInputTokens, - price: 0 + price: 0, + inputTokens: parseInt(totalInputTokens, 10), + outputTokens: parseInt(totalOutputTokens, 10) } } return { @@ -93,7 +95,9 @@ export const xaiCompletion = async ( model }, usage: totalOutputTokens + totalInputTokens, - price: 0 + price: 0, + inputTokens: parseInt(totalInputTokens, 10), + outputTokens: parseInt(totalOutputTokens, 10) } } return { diff --git a/src/modules/llms/api/openai.ts b/src/modules/llms/api/openai.ts index c2aaed65..8663df91 100644 --- a/src/modules/llms/api/openai.ts +++ b/src/modules/llms/api/openai.ts @@ -127,7 +127,7 @@ export async function chatCompletion ( content: response.choices[0].message?.content ?? 
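Patch 14 makes the non-streaming completions report real inputTokens/outputTokens instead of a hard-coded usage figure, so downstream accounting can price a call from actual counts. A small consumer sketch; the per-1K-token price convention is an assumption for illustration (rates borrowed from the inputPrice/outputPrice fields), not the bot's billing code:

interface UsageAwareCompletion {
  usage: number
  inputTokens?: number
  outputTokens?: number
}

// Assumed convention: inputPrice / outputPrice are USD per 1K tokens (e.g. grok: 0.005 / 0.015).
function estimateUsd (res: UsageAwareCompletion, inputPrice: number, outputPrice: number): number {
  const inTokens = res.inputTokens ?? 0
  const outTokens = res.outputTokens ?? 0
  return (inTokens / 1000) * inputPrice + (outTokens / 1000) * outputPrice
}

console.log(estimateUsd({ usage: 1500, inputTokens: 1000, outputTokens: 500 }, 0.005, 0.015))
// -> 0.0125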
'Error - no completion available', role: 'assistant' }, - usage: 2010, // response.usage?.total_tokens, + usage: response.usage?.total_tokens, // 2010 price: price * config.openAi.chatGpt.priceAdjustment, inputTokens, outputTokens diff --git a/src/modules/llms/llmsBase.ts b/src/modules/llms/llmsBase.ts index 0a6b17eb..bf2f8e43 100644 --- a/src/modules/llms/llmsBase.ts +++ b/src/modules/llms/llmsBase.ts @@ -402,7 +402,6 @@ export abstract class LlmsBase implements PayableBot { const response = await this.chatCompletion(conversation, model, usesTools, parameters) if (response.completion) { if (model === this.modelsEnum.O1) { - console.log(response.completion) const msgs = splitTelegramMessage(response.completion.content as string) await ctx.api.editMessageText( ctx.chat.id, diff --git a/src/modules/llms/utils/llmsData.ts b/src/modules/llms/utils/llmsData.ts index 7c103605..80df59fd 100644 --- a/src/modules/llms/utils/llmsData.ts +++ b/src/modules/llms/utils/llmsData.ts @@ -171,11 +171,11 @@ export const llmData: LLMData = { fullName: 'Grok', botName: 'xAIBot', version: 'grok-beta', - commands: ['gk', 'grok'], - prefix: ['gk. '], + commands: ['gk', 'grok', 'x'], + prefix: ['gk. ', 'x. '], apiSpec: 'https://docs.x.ai/api#introduction', - inputPrice: 0.00500, - outputPrice: 0.01500, + inputPrice: 0.005, + outputPrice: 0.015, maxContextTokens: 131072, chargeType: 'TOKEN', stream: false diff --git a/src/modules/llms/xaiBot.ts b/src/modules/llms/xaiBot.ts index 35540f15..6a40ddb3 100644 --- a/src/modules/llms/xaiBot.ts +++ b/src/modules/llms/xaiBot.ts @@ -4,7 +4,6 @@ import { type OnCallBackQueryData, type ChatConversation } from '../types' -import { SupportedCommands } from './utils/helpers' import { type LlmCompletion } from './api/llmApi' import { xaiCompletion } from './api/athropic' import { LlmsBase } from './llmsBase' @@ -27,9 +26,6 @@ export class XaiBot extends LlmsBase { ): boolean { const hasCommand = ctx.hasCommand(this.supportedCommands) - if (ctx.hasCommand(SupportedCommands.new) && this.checkModel(ctx)) { - return true - } const chatPrefix = this.hasPrefix(ctx.message?.text ?? '') if (chatPrefix !== '') { return true diff --git a/src/modules/payment/index.ts b/src/modules/payment/index.ts index 422fa12a..c0a46567 100644 --- a/src/modules/payment/index.ts +++ b/src/modules/payment/index.ts @@ -441,7 +441,6 @@ export class BotPayments { from.username }] credits total: ${totalCreditsAmount.toFixed()}, to withdraw: ${totalPayAmount.toFixed()}, total balance after: ${totalBalanceDelta.toFixed()}` ) - if (totalBalanceDelta.gte(0)) { const { userPayment, userCredits: userCreditsAfter } = await chatService.withdrawCredits(accountId, totalPayAmount) this.logger.info(`[${from.id} @${