1use ollama_rs::Ollama;
2use ollama_rs::error::OllamaError;
3use ollama_rs::generation::completion::GenerationResponse;
4use ollama_rs::generation::completion::request::GenerationRequest;
5
6async fn request_ollama_raw(
17 prompt: &str,
18 model: Option<&str>,
19) -> Result<GenerationResponse, OllamaError> {
20 let ollama = Ollama::default();
21 let model = model.unwrap_or("rnj-1:latest");
22 ollama
23 .generate(GenerationRequest::new(model.to_string(), prompt))
24 .await
25}
26
27pub async fn request_ollama(prompt: &str, model: Option<&str>, debug: Option<bool>) -> String {
39 let debug = debug.unwrap_or(false);
40 if debug {
41 return "[DEBUG MODE]".to_string();
42 }
43
44 match request_ollama_raw(prompt, model).await {
45 Ok(res) => res.response,
46 Err(_) => "Failed to get response".to_string(),
47 }
48}
49
#[cfg(test)]
mod tests {
    use super::*;

    /// Debug mode must short-circuit and return the sentinel without
    /// contacting any Ollama server.
    #[tokio::test]
    async fn test_request_ollama() {
        let out = request_ollama("", None, Some(true)).await;
        assert_eq!(out, "[DEBUG MODE]");
    }
}