//! ys1r/ollama.rs — helpers for sending prompts to a local Ollama instance.

use ollama_rs::Ollama;
use ollama_rs::error::OllamaError;
use ollama_rs::generation::completion::GenerationResponse;
use ollama_rs::generation::completion::request::GenerationRequest;
5
6/// Sends a request to the Ollama API and returns the generated response.
7///
8/// # Arguments
9///
10/// * `prompt` - The prompt to send to the Ollama API.
11/// * `model` - The model to use for the request. Defaults to "rnj-1:latest" if not provided.
12///
13/// # Returns
14///
15/// A `Result` containing the `GenerationResponse` if successful, or an `OllamaError` if an error occurred.
16async fn request_ollama_raw(
17    prompt: &str,
18    model: Option<&str>,
19) -> Result<GenerationResponse, OllamaError> {
20    let ollama = Ollama::default();
21    let model = model.unwrap_or("rnj-1:latest");
22    ollama
23        .generate(GenerationRequest::new(model.to_string(), prompt))
24        .await
25}
26
27/// Sends a request to the Ollama API and returns the generated response as a string.
28///
29/// # Arguments
30///
31/// * `prompt` - The prompt to send to the Ollama API.
32/// * `model` - The model to use for the request. Defaults to "rnj-1:latest" if not provided.
33/// * `debug` - If true, returns a debug message instead of sending a request.
34///
35/// # Returns
36///
37/// A `String` containing the generated response if successful, or an error message if an error occurred.
38pub async fn request_ollama(prompt: &str, model: Option<&str>, debug: Option<bool>) -> String {
39    let debug = debug.unwrap_or(false);
40    if debug {
41        return "[DEBUG MODE]".to_string();
42    }
43
44    match request_ollama_raw(prompt, model).await {
45        Ok(res) => res.response,
46        Err(_) => "Failed to get response".to_string(),
47    }
48}
49
#[cfg(test)]
mod tests {
    use super::*;

    /// Debug mode must short-circuit and return the sentinel string without
    /// touching the network.
    #[tokio::test]
    async fn test_request_ollama() {
        let reply = request_ollama("", None, Some(true)).await;
        assert_eq!(reply, "[DEBUG MODE]");
    }
}
59}