####################################
#### Server & DB Configurations ####
####################################
# Cache Configs
CACHE_STORE=database # Defaults to database. Other available cache stores: redis and filesystem
REDIS_URL= # Redis URL - can be a local Redis instance or cloud-hosted Redis. Also supports rediss:// URLs
PGLITE_DATA_DIR= # e.g. ../pgLite/ for a directory, or memory:// for in-memory storage
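# Illustrative example (commented out): a Redis-backed cache might look like the
# following, assuming a Redis instance is reachable on the default local port.
# CACHE_STORE=redis
# REDIS_URL=redis://localhost:6379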
# Eliza Port Config
SERVER_PORT=3000
# Supabase Configuration
SUPABASE_URL=
SUPABASE_ANON_KEY=
###############################
#### Client Configurations ####
###############################
# Discord Configuration
DISCORD_APPLICATION_ID=
DISCORD_API_TOKEN= # Bot token
DISCORD_VOICE_CHANNEL_ID= # The ID of the voice channel the bot should join (optional)
# Farcaster Neynar Configuration
FARCASTER_FID= # The FID associated with the account you are sending casts from
FARCASTER_NEYNAR_API_KEY= # Neynar API key: https://neynar.com/
FARCASTER_NEYNAR_SIGNER_UUID= # Signer for the account you are sending casts from. Create a signer here: https://dev.neynar.com/app
FARCASTER_DRY_RUN=false # Set to true if you want to run the bot without actually publishing casts
FARCASTER_POLL_INTERVAL=120 # How often (in seconds) the bot should check for farcaster interactions (replies and mentions)
# Telegram Configuration
TELEGRAM_BOT_TOKEN=
# Twitter/X Configuration
TWITTER_DRY_RUN=false
TWITTER_USERNAME= # Account username
TWITTER_PASSWORD= # Account password
TWITTER_EMAIL= # Account email
TWITTER_2FA_SECRET=
TWITTER_POLL_INTERVAL=120 # How often (in seconds) the bot should check for interactions
TWITTER_SEARCH_ENABLE=FALSE # Enable timeline search. WARNING: this greatly increases your chance of getting banned
TWITTER_TARGET_USERS= # Comma-separated list of Twitter usernames to interact with
TWITTER_RETRY_LIMIT= # Maximum retry attempts for Twitter login
TWITTER_SPACES_ENABLE=false # Enable or disable Twitter Spaces logic
# Post Interval Settings (in minutes)
POST_INTERVAL_MIN= # Default: 90
POST_INTERVAL_MAX= # Default: 180
POST_IMMEDIATELY= # Default: false
# Twitter action processing configuration
ACTION_INTERVAL= # Interval in minutes between action processing runs (default: 5 minutes)
ENABLE_ACTION_PROCESSING=false # Set to true to enable the action processing loop
MAX_ACTIONS_PROCESSING=1 # Maximum number of actions (e.g., retweets, likes) to process in a single cycle. Helps prevent excessive or uncontrolled actions.
ACTION_TIMELINE_TYPE=foryou # Type of timeline to interact with. Options: "foryou" or "following". Default: "foryou"
# CONFIGURATION FOR APPROVING TWEETS BEFORE THEY GET POSTED
TWITTER_APPROVAL_DISCORD_CHANNEL_ID= # Channel ID where the Discord bot listens for and sends approval messages
TWITTER_APPROVAL_DISCORD_BOT_TOKEN= # Discord bot token (this can be a different bot token from DISCORD_API_TOKEN)
TWITTER_APPROVAL_ENABLED= # Enable or disable Twitter approval logic. Default: false
TWITTER_APPROVAL_CHECK_INTERVAL=60000 # Interval in milliseconds between approval checks. Default: 60000 (60 seconds)
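# Illustrative example (commented out, placeholder values): a minimal dry-run
# Twitter/X setup for testing without publishing anything.
# TWITTER_DRY_RUN=true
# TWITTER_USERNAME=example_handle
# TWITTER_PASSWORD=example_password
# TWITTER_EMAIL=example@example.com
# TWITTER_POLL_INTERVAL=120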
# WhatsApp Cloud API Configuration
WHATSAPP_ACCESS_TOKEN= # Permanent access token from Facebook Developer Console
WHATSAPP_PHONE_NUMBER_ID= # Phone number ID from WhatsApp Business API
WHATSAPP_BUSINESS_ACCOUNT_ID= # Business Account ID from Facebook Business Manager
WHATSAPP_WEBHOOK_VERIFY_TOKEN= # Custom string for webhook verification
WHATSAPP_API_VERSION=v17.0 # WhatsApp API version (default: v17.0)
# Direct Client Setting
EXPRESS_MAX_PAYLOAD= # Default: 100kb
#######################################
#### Model Provider Configurations ####
#######################################
# OpenAI Configuration
OPENAI_API_KEY= # OpenAI API key, starting with sk-
OPENAI_API_URL= # OpenAI API Endpoint (optional), Default: https://api.openai.com/v1
SMALL_OPENAI_MODEL= # Default: gpt-4o-mini
MEDIUM_OPENAI_MODEL= # Default: gpt-4o
LARGE_OPENAI_MODEL= # Default: gpt-4o
EMBEDDING_OPENAI_MODEL= # Default: text-embedding-3-small
IMAGE_OPENAI_MODEL= # Default: dall-e-3
USE_OPENAI_EMBEDDING= # Set to TRUE for OpenAI/1536, leave blank for local
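# Illustrative example (commented out): using OpenAI with its hosted embeddings,
# relying on the documented defaults above for model names (key is a placeholder).
# OPENAI_API_KEY=sk-...
# USE_OPENAI_EMBEDDING=TRUE
# EMBEDDING_OPENAI_MODEL=text-embedding-3-small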
# Eternal AI's Decentralized Inference API
ETERNALAI_URL=
ETERNALAI_MODEL= # Default: "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16"
ETERNALAI_CHAIN_ID=45762 # Default: 45762
ETERNALAI_API_KEY=
ETERNALAI_LOG=false # Default: false
# Hyperbolic Configuration
HYPERBOLIC_API_KEY= # Hyperbolic API Key
HYPERBOLIC_MODEL=
IMAGE_HYPERBOLIC_MODEL= # Default: FLUX.1-dev
SMALL_HYPERBOLIC_MODEL= # Default: meta-llama/Llama-3.2-3B-Instruct
MEDIUM_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-70B-Instruct
LARGE_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-405B-Instruct
# Infera Configuration
INFERA_API_KEY= # Visit api.infera.org/docs and use /signup_user to obtain an API key
INFERA_MODEL= # Default: llama3.2:latest
INFERA_SERVER_URL= # Default: https://api.infera.org/
SMALL_INFERA_MODEL= # Recommended: llama3.2:latest
MEDIUM_INFERA_MODEL= # Recommended: mistral-nemo:latest
LARGE_INFERA_MODEL= # Recommended: mistral-small:latest
# Venice Configuration
VENICE_API_KEY= # Generate from the Venice settings page
SMALL_VENICE_MODEL= # Default: llama-3.3-70b
MEDIUM_VENICE_MODEL= # Default: llama-3.3-70b
LARGE_VENICE_MODEL= # Default: llama-3.1-405b
IMAGE_VENICE_MODEL= # Default: fluently-xl
# Nineteen.ai Configuration
NINETEEN_AI_API_KEY= # Get a free API key from https://nineteen.ai/app/api
SMALL_NINETEEN_AI_MODEL= # Default: unsloth/Llama-3.2-3B-Instruct
MEDIUM_NINETEEN_AI_MODEL= # Default: unsloth/Meta-Llama-3.1-8B-Instruct
LARGE_NINETEEN_AI_MODEL= # Default: hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4
IMAGE_NINETEEN_AI_MODE= # Default: dataautogpt3/ProteusV0.4-Lightning
# Akash Chat API Configuration docs: https://chatapi.akash.network/documentation
AKASH_CHAT_API_KEY= # Get from https://chatapi.akash.network/
SMALL_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-2-3B-Instruct
MEDIUM_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-3-70B-Instruct
LARGE_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-1-405B-Instruct-FP8
# Livepeer configuration
LIVEPEER_GATEWAY_URL= # Free inference gateways and docs: https://livepeer-eliza.com/
LIVEPEER_IMAGE_MODEL= # Default: ByteDance/SDXL-Lightning
# Speech Synthesis
ELEVENLABS_XI_API_KEY= # API key from ElevenLabs
# Transcription Provider
TRANSCRIPTION_PROVIDER= # Default: local (possible values: openai, deepgram, local)
# ElevenLabs Settings
ELEVENLABS_MODEL_ID=eleven_multilingual_v2
ELEVENLABS_VOICE_ID=21m00Tcm4TlvDq8ikWAM
ELEVENLABS_VOICE_STABILITY=0.5
ELEVENLABS_VOICE_SIMILARITY_BOOST=0.9
ELEVENLABS_VOICE_STYLE=0.66
ELEVENLABS_VOICE_USE_SPEAKER_BOOST=false
ELEVENLABS_OPTIMIZE_STREAMING_LATENCY=4
ELEVENLABS_OUTPUT_FORMAT=pcm_16000
# OpenRouter Configuration
OPENROUTER_API_KEY= # OpenRouter API Key
OPENROUTER_MODEL= # Default: uses hermes 70b/405b
SMALL_OPENROUTER_MODEL=
MEDIUM_OPENROUTER_MODEL=
LARGE_OPENROUTER_MODEL=
# REDPILL Configuration (https://docs.red-pill.ai/get-started/supported-models)
REDPILL_API_KEY= # REDPILL API Key
REDPILL_MODEL=
SMALL_REDPILL_MODEL= # Default: gpt-4o-mini
MEDIUM_REDPILL_MODEL= # Default: gpt-4o
LARGE_REDPILL_MODEL= # Default: gpt-4o
# Grok Configuration
GROK_API_KEY= # GROK/xAI API Key
SMALL_GROK_MODEL= # Default: grok-2-1212
MEDIUM_GROK_MODEL= # Default: grok-2-1212
LARGE_GROK_MODEL= # Default: grok-2-1212
EMBEDDING_GROK_MODEL= # Default: grok-2-1212
# Ollama Configuration
OLLAMA_SERVER_URL= # Default: localhost:11434
OLLAMA_MODEL=
USE_OLLAMA_EMBEDDING= # Set to TRUE for OLLAMA/1024, leave blank for local
OLLAMA_EMBEDDING_MODEL= # Default: mxbai-embed-large
SMALL_OLLAMA_MODEL= # Default: llama3.2
MEDIUM_OLLAMA_MODEL= # Default: hermes3
LARGE_OLLAMA_MODEL= # Default: hermes3:70b
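# Illustrative example (commented out): a fully local Ollama setup using the
# documented defaults above.
# OLLAMA_SERVER_URL=localhost:11434
# USE_OLLAMA_EMBEDDING=TRUE
# OLLAMA_EMBEDDING_MODEL=mxbai-embed-large
# SMALL_OLLAMA_MODEL=llama3.2
# MEDIUM_OLLAMA_MODEL=hermes3
# LARGE_OLLAMA_MODEL=hermes3:70b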
# Google Configuration
GOOGLE_MODEL=
SMALL_GOOGLE_MODEL= # Default: gemini-1.5-flash-latest
MEDIUM_GOOGLE_MODEL= # Default: gemini-1.5-flash-latest
LARGE_GOOGLE_MODEL= # Default: gemini-1.5-pro-latest
EMBEDDING_GOOGLE_MODEL= # Default: text-embedding-004
# Mistral Configuration
MISTRAL_MODEL=
SMALL_MISTRAL_MODEL= # Default: mistral-small-latest
MEDIUM_MISTRAL_MODEL= # Default: mistral-large-latest
LARGE_MISTRAL_MODEL= # Default: mistral-large-latest
# Groq Configuration
GROQ_API_KEY= # Starts with gsk_
SMALL_GROQ_MODEL= # Default: llama-3.1-8b-instant
MEDIUM_GROQ_MODEL= # Default: llama-3.3-70b-versatile
LARGE_GROQ_MODEL= # Default: llama-3.2-90b-vision-preview
EMBEDDING_GROQ_MODEL= # Default: llama-3.1-8b-instant
# LlamaLocal Configuration
LLAMALOCAL_PATH= # Default: "", i.e. the current directory in plugin-node/dist/, which gets destroyed and recreated on every build
# NanoGPT Configuration
SMALL_NANOGPT_MODEL= # Default: gpt-4o-mini
MEDIUM_NANOGPT_MODEL= # Default: gpt-4o
LARGE_NANOGPT_MODEL= # Default: gpt-4o
# Anthropic Configuration
ANTHROPIC_API_KEY= # For Claude
SMALL_ANTHROPIC_MODEL= # Default: claude-3-haiku-20240307
MEDIUM_ANTHROPIC_MODEL= # Default: claude-3-5-sonnet-20241022
LARGE_ANTHROPIC_MODEL= # Default: claude-3-5-sonnet-20241022
# Heurist Configuration
HEURIST_API_KEY= # Get from https://heurist.ai/dev-access
SMALL_HEURIST_MODEL= # Default: meta-llama/llama-3-70b-instruct
MEDIUM_HEURIST_MODEL= # Default: meta-llama/llama-3-70b-instruct
LARGE_HEURIST_MODEL= # Default: meta-llama/llama-3.1-405b-instruct
HEURIST_IMAGE_MODEL= # Default: PepeXL
# Gaianet Configuration
GAIANET_MODEL=
GAIANET_SERVER_URL=
SMALL_GAIANET_MODEL= # Default: llama3b
SMALL_GAIANET_SERVER_URL= # Default: https://llama3b.gaia.domains/v1
MEDIUM_GAIANET_MODEL= # Default: llama
MEDIUM_GAIANET_SERVER_URL= # Default: https://llama8b.gaia.domains/v1
LARGE_GAIANET_MODEL= # Default: qwen72b
LARGE_GAIANET_SERVER_URL= # Default: https://qwen72b.gaia.domains/v1
GAIANET_EMBEDDING_MODEL=
USE_GAIANET_EMBEDDING= # Set to TRUE for GAIANET/768, leave blank for local
# Volcengine Configuration
VOLENGINE_API_URL= # Volcengine API Endpoint, Default: https://open.volcengineapi.com/api/v3/
VOLENGINE_MODEL=
SMALL_VOLENGINE_MODEL= # Default: doubao-lite-128k
MEDIUM_VOLENGINE_MODEL= # Default: doubao-pro-128k
LARGE_VOLENGINE_MODEL= # Default: doubao-pro-256k
VOLENGINE_EMBEDDING_MODEL= # Default: doubao-embedding
# DeepSeek Configuration
DEEPSEEK_API_URL= # Default: https://api.deepseek.com
SMALL_DEEPSEEK_MODEL= # Default: deepseek-chat
MEDIUM_DEEPSEEK_MODEL= # Default: deepseek-chat
LARGE_DEEPSEEK_MODEL= # Default: deepseek-chat
# fal.ai Configuration
FAL_API_KEY=
FAL_AI_LORA_PATH=
# LetzAI Configuration
LETZAI_API_KEY= # LetzAI API Key
LETZAI_MODELS= # List of LetzAI models to add to each prompt, e.g.: "@modelname1, @modelname2"
# Galadriel Configuration
GALADRIEL_API_KEY=gal-* # Get from https://dashboard.galadriel.com/
SMALL_GALADRIEL_MODEL= # Default: gpt-4o-mini
MEDIUM_GALADRIEL_MODEL= # Default: gpt-4o
LARGE_GALADRIEL_MODEL= # Default: gpt-4o
GALADRIEL_FINE_TUNE_API_KEY= # Use an OpenAI key to use a fine-tuned model with the verified inference endpoint
# Remaining Provider Configurations
GOOGLE_GENERATIVE_AI_API_KEY= # Gemini API key
ALI_BAILIAN_API_KEY= # Ali Bailian API Key
NANOGPT_API_KEY= # NanoGPT API Key
TOGETHER_API_KEY= # Together API Key
######################################
#### Crypto Plugin Configurations ####
######################################
# CoinMarketCap / CMC
COINMARKETCAP_API_KEY=
# CoinGecko
COINGECKO_API_KEY=
COINGECKO_PRO_API_KEY=
# EVM
EVM_PRIVATE_KEY=
EVM_PROVIDER_URL=
# Avalanche
AVALANCHE_PRIVATE_KEY=
AVALANCHE_PUBLIC_KEY=
# Arthera
ARTHERA_PRIVATE_KEY=
# Solana
SOLANA_PRIVATE_KEY=
SOLANA_PUBLIC_KEY=
SOLANA_CLUSTER= # Default: devnet. Solana Cluster: 'devnet' | 'testnet' | 'mainnet-beta'
SOLANA_ADMIN_PRIVATE_KEY= # This wallet is used to verify NFTs
SOLANA_ADMIN_PUBLIC_KEY= # This wallet is used to verify NFTs
SOLANA_VERIFY_TOKEN= # Authentication token for calling the verification API
# Fallback Wallet Configuration (deprecated)
WALLET_PRIVATE_KEY=
WALLET_PUBLIC_KEY=
BIRDEYE_API_KEY=
# Solana Configuration
SOL_ADDRESS=So11111111111111111111111111111111111111112
SLIPPAGE=1
BASE_MINT=So11111111111111111111111111111111111111112
SOLANA_RPC_URL=https://api.mainnet-beta.solana.com
HELIUS_API_KEY=
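# Illustrative example (commented out): switching to devnet for testing, assuming
# the public devnet RPC endpoint.
# SOLANA_CLUSTER=devnet
# SOLANA_RPC_URL=https://api.devnet.solana.com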
# Abstract Configuration
ABSTRACT_ADDRESS=
ABSTRACT_PRIVATE_KEY=
ABSTRACT_RPC_URL=https://api.testnet.abs.xyz
# Starknet Configuration
STARKNET_ADDRESS=
STARKNET_PRIVATE_KEY=
STARKNET_RPC_URL=
# Lens Network Configuration
LENS_ADDRESS=
LENS_PRIVATE_KEY=
# Coinbase
COINBASE_COMMERCE_KEY= # From Coinbase developer portal
COINBASE_API_KEY= # From Coinbase developer portal
COINBASE_PRIVATE_KEY= # From Coinbase developer portal
COINBASE_GENERATED_WALLET_ID= # Not your address but the wallet ID from generating a wallet through the plugin
COINBASE_GENERATED_WALLET_HEX_SEED= # Not your address but the wallet hex seed from generating a wallet through the plugin and calling export
COINBASE_NOTIFICATION_URI= # For the webhook plugin: the URI you want webhooks sent to. For testing, you can use https://webhook.site
# Coinbase Charity Configuration
IS_CHARITABLE=false # Set to true to enable charity donations
CHARITY_ADDRESS_BASE=0x1234567890123456789012345678901234567890
CHARITY_ADDRESS_SOL=pWvDXKu6CpbKKvKQkZvDA66hgsTB6X2AgFxksYogHLV
CHARITY_ADDRESS_ETH=0x750EF1D7a0b4Ab1c97B7A623D7917CcEb5ea779C
CHARITY_ADDRESS_ARB=0x1234567890123456789012345678901234567890
CHARITY_ADDRESS_POL=0x1234567890123456789012345678901234567890
# thirdweb
THIRDWEB_SECRET_KEY= # Create key on thirdweb developer dashboard: https://thirdweb.com/
# Conflux Configuration
CONFLUX_CORE_PRIVATE_KEY=
CONFLUX_CORE_SPACE_RPC_URL=
CONFLUX_ESPACE_PRIVATE_KEY=
CONFLUX_ESPACE_RPC_URL=
CONFLUX_MEME_CONTRACT_ADDRESS=
# ZeroG
ZEROG_INDEXER_RPC=
ZEROG_EVM_RPC=
ZEROG_PRIVATE_KEY=
ZEROG_FLOW_ADDRESS=
# TEE Configuration
# TEE_MODE options:
# - LOCAL: Uses simulator at localhost:8090 (for local development)
# - DOCKER: Uses simulator at host.docker.internal:8090 (for docker development)
# - PRODUCTION: No simulator, uses production endpoints
# Defaults to OFF if not specified
TEE_MODE=OFF # LOCAL | DOCKER | PRODUCTION
WALLET_SECRET_SALT= # ONLY define this if you want to use the TEE plugin; otherwise it will throw errors
ENABLE_TEE_LOG=false # Set to true to enable TEE logging; only available when running Eliza in a TEE
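# Illustrative example (commented out, placeholder salt): local TEE development
# against the simulator described above.
# TEE_MODE=LOCAL
# WALLET_SECRET_SALT=example_salt
# ENABLE_TEE_LOG=true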
# Flow Blockchain Configuration
FLOW_ADDRESS=
FLOW_PRIVATE_KEY= # Private key for SHA3-256 + P256 ECDSA
FLOW_NETWORK= # Default: mainnet
FLOW_ENDPOINT_URL= # Default: https://mainnet.onflow.org
# ICP
INTERNET_COMPUTER_PRIVATE_KEY=
INTERNET_COMPUTER_ADDRESS=
#Cloudflare AI Gateway
CLOUDFLARE_GW_ENABLED= # Set to true to enable Cloudflare AI Gateway
CLOUDFLARE_AI_ACCOUNT_ID= # Cloudflare AI Account ID - found in the Cloudflare Dashboard under AI Gateway
CLOUDFLARE_AI_GATEWAY_ID= # Cloudflare AI Gateway ID - found in the Cloudflare Dashboard under AI Gateway
# Aptos
APTOS_PRIVATE_KEY= # Aptos private key
APTOS_NETWORK= # Must be one of mainnet, testnet
# MultiversX
MVX_PRIVATE_KEY= # MultiversX private key
MVX_NETWORK= # Must be one of mainnet, devnet, testnet
# NEAR
NEAR_WALLET_SECRET_KEY= # NEAR Wallet Secret Key
NEAR_WALLET_PUBLIC_KEY= # NEAR Wallet Public Key
NEAR_ADDRESS=
NEAR_SLIPPAGE=1
NEAR_RPC_URL=https://rpc.testnet.near.org
NEAR_NETWORK=testnet # or mainnet
# ZKsync Era Configuration
ZKSYNC_ADDRESS=
ZKSYNC_PRIVATE_KEY=
# Avail DA Configuration
AVAIL_ADDRESS=
AVAIL_SEED=
AVAIL_APP_ID=0
AVAIL_RPC_URL=wss://avail-turing.public.blastapi.io/ # (Default) Testnet: wss://avail-turing.public.blastapi.io/ | Mainnet: wss://avail-mainnet.public.blastapi.io/
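# Illustrative example (commented out): pointing Avail at mainnet using the
# endpoint noted above.
# AVAIL_RPC_URL=wss://avail-mainnet.public.blastapi.io/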
# Marlin
TEE_MARLIN= # Set "yes" to enable the plugin
TEE_MARLIN_ATTESTATION_ENDPOINT= # Optional, default "http://127.0.0.1:1350"
# Ton
TON_PRIVATE_KEY= # TON mnemonic seed phrase, joined with an empty string
TON_RPC_URL= # TON RPC URL
# Sui
SUI_PRIVATE_KEY= # Sui mnemonic seed phrase (`sui keytool generate ed25519`); also supports the `suiprivatekeyxxxx` format (`sui keytool export --key-identity 0x63`)
SUI_NETWORK= # must be one of mainnet, testnet, devnet, localnet
# Story
STORY_PRIVATE_KEY= # Story private key
STORY_API_BASE_URL= # Story API base URL
STORY_API_KEY= # Story API key
PINATA_JWT= # Pinata JWT for uploading files to IPFS
# Cosmos
COSMOS_RECOVERY_PHRASE= # 12-word recovery phrase (must be in quotes because of spaces)
COSMOS_AVAILABLE_CHAINS= # Comma-separated list of chains, e.g. mantrachaintestnet2,cosmos
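# Illustrative example (commented out, placeholder phrase): note the quotes around
# the space-separated recovery phrase.
# COSMOS_RECOVERY_PHRASE="word1 word2 word3 ... word12"
# COSMOS_AVAILABLE_CHAINS=mantrachaintestnet2,cosmos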
# Cronos zkEVM
CRONOSZKEVM_ADDRESS=
CRONOSZKEVM_PRIVATE_KEY=
# Fuel Ecosystem (FuelVM)
FUEL_WALLET_PRIVATE_KEY=
# Tokenizer Settings
TOKENIZER_MODEL= # Specify the tokenizer model to be used.
TOKENIZER_TYPE= # Options: tiktoken (for OpenAI models) or auto (AutoTokenizer from Hugging Face for non-OpenAI models). Default: tiktoken.
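# Illustrative example (commented out): explicit tiktoken configuration for an
# OpenAI model (model name is a placeholder).
# TOKENIZER_TYPE=tiktoken
# TOKENIZER_MODEL=gpt-4o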
# Spheron
SPHERON_PRIVATE_KEY=
SPHERON_PROVIDER_PROXY_URL=
SPHERON_WALLET_ADDRESS=
# Stargaze NFT marketplace from Cosmos (You can use https://graphql.mainnet.stargaze-apis.com/graphql)
STARGAZE_ENDPOINT=
# GenLayer
GENLAYER_PRIVATE_KEY= # Private key of the GenLayer account to use for the agent in this format (0x0000000000000000000000000000000000000000000000000000000000000000)
####################################
#### Misc Plugin Configurations ####
####################################
# Intiface Configuration
INTIFACE_WEBSOCKET_URL=ws://localhost:12345
# API key for giphy from https://developers.giphy.com/dashboard/
GIPHY_API_KEY=
# OpenWeather
OPEN_WEATHER_API_KEY= # OpenWeather API key
# EchoChambers Configuration
ECHOCHAMBERS_API_URL=http://127.0.0.1:3333
ECHOCHAMBERS_API_KEY=testingkey0011
ECHOCHAMBERS_USERNAME=eliza
ECHOCHAMBERS_DEFAULT_ROOM=general
ECHOCHAMBERS_POLL_INTERVAL=60
ECHOCHAMBERS_MAX_MESSAGES=10
# Allora
ALLORA_API_KEY= # Allora API key, format: UP-f8db7d6558ab432ca0d92716
ALLORA_CHAIN_SLUG= # Must be one of mainnet, testnet. Defaults to testnet if not specified
# Opacity zkTLS
OPACITY_TEAM_ID=f309ac8ae8a9a14a7e62cd1a521b1c5f
OPACITY_CLOUDFLARE_NAME=eigen-test
OPACITY_PROVER_URL=https://opacity-ai-zktls-demo.vercel.app
# AWS S3 Configuration Settings for File Upload
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
AWS_REGION=
AWS_S3_BUCKET=
AWS_S3_UPLOAD_PATH=
# Deepgram
DEEPGRAM_API_KEY=
# Web search API Configuration
TAVILY_API_KEY=
# Verifiable Inference Configuration
VERIFIABLE_INFERENCE_ENABLED=false # Set to true to enable verifiable inference
VERIFIABLE_INFERENCE_PROVIDER=opacity # Options: opacity
# Autonome Configuration
AUTONOME_JWT_TOKEN=
AUTONOME_RPC=https://wizard-bff-rpc.alt.technology/v1/bff/aaa/apps
#####################################
#### Akash Network Configuration ####
#####################################
AKASH_ENV=mainnet
AKASH_NET=https://raw.githubusercontent.com/ovrclk/net/master/mainnet
RPC_ENDPOINT=https://rpc.akashnet.net:443
AKASH_GAS_PRICES=0.025uakt
AKASH_GAS_ADJUSTMENT=1.5
AKASH_KEYRING_BACKEND=os
AKASH_FROM=default
AKASH_FEES=20000uakt
AKASH_DEPOSIT=500000uakt
AKASH_MNEMONIC=
AKASH_WALLET_ADDRESS=
# Akash Pricing API
AKASH_PRICING_API_URL=https://console-api.akash.network/v1/pricing
# Default resource values: 1 CPU = 1000, 1 GB memory = 1000000000, 1 GB storage = 1000000000
AKASH_DEFAULT_CPU=1000
AKASH_DEFAULT_MEMORY=1000000000
AKASH_DEFAULT_STORAGE=1000000000
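# Illustrative example (commented out): using the conversion above, a deployment
# sized at 2 CPU / 2 GB memory / 2 GB storage would be:
# AKASH_DEFAULT_CPU=2000
# AKASH_DEFAULT_MEMORY=2000000000
# AKASH_DEFAULT_STORAGE=2000000000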
AKASH_SDL=example.sdl.yml
# Close deployment
# Set AKASH_CLOSE_DEP=closeAll to close all deployments,
# or set AKASH_CLOSE_DEP=dseq and put the deployment sequence in AKASH_CLOSE_DSEQ to close a single deployment
AKASH_CLOSE_DEP=closeAll
AKASH_CLOSE_DSEQ=19729929
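# Illustrative example (commented out, placeholder dseq): closing a single
# deployment by dseq, as described above.
# AKASH_CLOSE_DEP=dseq
# AKASH_CLOSE_DSEQ=19729929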
# Provider info: a sample provider address is included for checking; you will have to pass this into the action
AKASH_PROVIDER_INFO=akash1ccktptfkvdc67msasmesuy5m7gpc76z75kukpz
# Deployment Status
# AKASH_DEP_STATUS can be "dseq" or "param_passed". When building, you will pass the dseq dynamically;
# for testing, you can pass the dseq via AKASH_DEP_DSEQ (19729929 is an example dseq used during build testing).
AKASH_DEP_STATUS=dseq
AKASH_DEP_DSEQ=19729929
# Gas Estimation Options: close, create, or update
# dseq is required when the operation is "close" (19729929 is an example dseq used during build testing)
AKASH_GAS_OPERATION=close
AKASH_GAS_DSEQ=19729929
# Manifest
# Values: "auto" | "manual" | "validate_only". Default: "auto"
AKASH_MANIFEST_MODE=auto
# Default: Will use the SDL directory
AKASH_MANIFEST_PATH=
# Values: "strict" | "lenient" | "none" - Default: "strict"
AKASH_MANIFEST_VALIDATION_LEVEL=strict
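# Illustrative example (commented out, placeholder path): supplying a manifest
# manually with lenient validation.
# AKASH_MANIFEST_MODE=manual
# AKASH_MANIFEST_PATH=./manifests/deployment.yaml
# AKASH_MANIFEST_VALIDATION_LEVEL=lenient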
# Quai Network Ecosystem
QUAI_PRIVATE_KEY=
QUAI_RPC_URL=https://rpc.quai.network