Skip to content

Commit

Permalink
fix clippy
Browse files Browse the repository at this point in the history
  • Loading branch information
ChuckHend committed Apr 19, 2024
1 parent d8b85d7 commit fb3ff56
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 32 deletions.
33 changes: 3 additions & 30 deletions core/src/transformers/ollama.rs
Original file line number Diff line number Diff line change
@@ -1,27 +1,21 @@
use anyhow::Result;
use ollama_rs::{
generation::{completion::request::GenerationRequest, options::GenerationOptions},
Ollama,
};
use ollama_rs::{generation::completion::request::GenerationRequest, Ollama};
use url::Url;

use crate::transformers::openai::trim_inputs;
use crate::transformers::types::{EmbeddingPayload, EmbeddingRequest, Inputs};
use crate::types;

/// Handle pairing an Ollama API client with the model it should run.
pub struct OllamaInstance {
    // Name of the model passed to generation requests (e.g. "llama2") —
    // see ollama_embedding_dim for known model names; TODO confirm full set.
    pub model_name: String,
    // Connected Ollama client, built from a host URL in LLMFunctions::new.
    pub instance: Ollama,
}

/// Minimal LLM client interface: construction plus single-prompt generation.
pub trait LLMFunctions {
    /// Builds an instance for `model_name` served at `url`.
    /// NOTE(review): the OllamaInstance impl panics on an unparseable `url`
    /// rather than returning an error — confirm that is intended at call sites.
    fn new(model_name: String, url: String) -> Self;
    /// Sends `prompt_text` to the model and returns the generated text,
    /// or a String describing the failure.
    /// NOTE(review): "reponse" is a typo for "response"; renaming would break
    /// existing callers, so it is left as-is — consider a deprecation cycle.
    #[allow(async_fn_in_trait)]
    async fn generate_reponse(&self, prompt_text: String) -> Result<String, String>;
}

impl LLMFunctions for OllamaInstance {
fn new(model_name: String, url: String) -> Self {
let parsed_url = Url::parse(&url).expect(format!("invalid url: {}", url).as_str());
let parsed_url = Url::parse(&url).unwrap_or_else(|_| panic!("invalid url: {}", url));
let instance = Ollama::new(
format!(
"{}://{}",
Expand All @@ -46,27 +40,6 @@ impl LLMFunctions for OllamaInstance {
}
}

/// Assembles an [`EmbeddingRequest`] for an Ollama server.
///
/// Trims the raw `inputs`, names the model from `vect_meta.transformer`,
/// and points the request at `model_url`. No API key is attached, since
/// Ollama does not require one.
///
/// # Errors
/// Returns an error when `vect_meta.params` cannot be deserialized into
/// [`types::JobParams`].
pub fn prepare_ollama_embedding_request(
    vect_meta: types::VectorizeMeta,
    inputs: &[Inputs],
    model_url: String,
) -> Result<EmbeddingRequest> {
    let model = vect_meta.transformer.to_string();
    let payload = EmbeddingPayload {
        input: trim_inputs(inputs),
        model,
    };

    // Deserialize the job params purely as validation; the parsed values
    // are not consumed yet (TBD).
    let _job_params: types::JobParams = serde_json::from_value(vect_meta.params)?;

    Ok(EmbeddingRequest {
        payload,
        url: model_url,
        api_key: None,
    })
}

pub fn ollama_embedding_dim(model_name: &str) -> i32 {
match model_name {
"llama2" => 5192,
Expand Down
2 changes: 1 addition & 1 deletion core/src/worker/base.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use crate::transformers::{generic, http_handler, ollama, openai};
use crate::transformers::{generic, http_handler, openai};
use crate::types::{JobMessage, JobParams, ModelSource};
use crate::worker::ops;
use anyhow::Result;
Expand Down
2 changes: 1 addition & 1 deletion extension/src/chat/ops.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ pub fn call_chat(
let job_params = serde_json::from_value::<JobParams>(project_meta.params.clone())
.unwrap_or_else(|e| error!("failed to deserialize job params: {}", e));

// for various token count estimations
// for various token count estimations
let bpe = match chat_model.source {
ModelSource::Ollama => {
// Using gpt-3.5-turbo tokenizer for Ollama since the library does not support llama2
Expand Down

0 comments on commit fb3ff56

Please sign in to comment.