mirror of https://github.com/elijah-potter/ofc.git
synced 2025-03-03 09:37:02 +03:00

feat: initial commit + functionality

.gitignore (vendored, normal file, +1)
@@ -0,0 +1 @@
/target

Cargo.lock (generated, normal file, +1703)
File diff suppressed because it is too large.

Cargo.toml (normal file, +11)
@@ -0,0 +1,11 @@
[package]
name = "ofc"
version = "0.1.0"
edition = "2024"

[dependencies]
anyhow = "1.0.96"
clap = { version = "4.5.31", features = ["derive"] }
ollama-rs = { version = "0.2.6", features = ["stream"] }
tokio = { version = "1.43.0", features = ["full"] }
tokio-stream = "0.1.17"

README.md (normal file, +7)
@@ -0,0 +1,7 @@
# Ollama Function Caller

The Ollama function caller, otherwise known as `ofc`, is a command-line tool for prompting Ollama models locally on your system.

To use `ofc`, you need Ollama installed on your system; you can get it from the [Ollama website](https://ollama.com/).

`ofc` was inspired by [`ooo`](https://github.com/Npahlfer/ooo).
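
The README stops short of showing an invocation, so here is a minimal usage sketch (not part of the commit). It assumes the `ofc` binary name from Cargo.toml and the clap-derived long flags from the `Opts` struct in src/main.rs below; `phi4` is the default model, and you can substitute any model already pulled into your local Ollama.

    # Build and install straight from the repository with a standard Rust toolchain.
    cargo install --git https://github.com/elijah-potter/ofc.git

    # Prompt the default model (phi4).
    ofc "Translate 'good morning' into French"

    # Pick a different model and a lower sampling temperature.
    ofc --model llama3.2 --temperature 0.2 "Give the ISO 8601 form of March 3rd, 2025"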

src/main.rs (normal file, +59)
@@ -0,0 +1,59 @@
use clap::Parser;
use ollama_rs::{
    Ollama,
    generation::{completion::request::GenerationRequest, options::GenerationOptions},
};
use tokio::io::{self, AsyncWriteExt};
use tokio_stream::StreamExt;

#[derive(Parser)]
#[command(version, about, long_about = None)]
struct Opts {
    /// Define the model to run on Ollama.
    #[arg(short, long, default_value = "phi4")]
    model: String,
    /// Set the context length the model will digest.
    /// Higher values create better output, but require beefier computers.
    #[arg(short, long, default_value_t = 4096)]
    context: u64,
    #[arg(short, long, default_value_t = 0.6)]
    temperature: f32,
    #[arg(
        short,
        long,
        default_value = "You are a command-line program that takes an input and provides an output ONLY. Give me only the output, without any additional labels (e.g., 'Output' or 'Result'). The output should be usable as input in another program that is not an LLM. Avoid unnecessary chat. No preamble, get straight to the point. Generate a text response suitable for downstream processing by another program. Do not change the content of the input unless specifically asked to. Do not repeat back the input."
    )]
    system_prompt: String,
    /// The actual prompt for the LLM.
    prompt: String,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let args = Opts::parse();

    let ollama = Ollama::default();

    let mut stream = ollama
        .generate_stream(
            GenerationRequest::new(args.model, args.prompt)
                .system(args.system_prompt)
                .options(
                    GenerationOptions::default()
                        .num_ctx(args.context)
                        .temperature(args.temperature),
                ),
        )
        .await?;

    let mut stdout = io::stdout();
    while let Some(res) = stream.next().await {
        let responses = res?;
        for resp in responses {
            stdout.write_all(resp.response.as_bytes()).await?;
            stdout.flush().await?;
        }
    }

    Ok(())
}
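
The default system prompt above steers the model toward bare, machine-readable output, so the streamed text can be fed directly into other programs. A hypothetical pipeline, assuming the same installed `ofc` binary as in the sketch after the README:

    # Post-process the model's output with ordinary shell tools.
    ofc "List three common HTTP methods, one per line" | sort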