Generate a chat completion with message history
Arguments
- model
A character string of the model name such as "llama3".
- messages
A list of messages for the model, where each message is a list with role and content fields (see examples below).
- tools
Tools for the model to use, if supported. Requires stream = FALSE. Default is an empty list. A tool-calling sketch appears at the end of the examples below.
- stream
Enable response streaming. Default is FALSE.
- keep_alive
The duration to keep the model loaded in memory after the request. Default is "5m".
- output
The output format. Default is "resp" (httr2_response object). Other options are "jsonlist", "raw", "df", "text", and "req" (httr2_request object; see the deferred request sketch in the examples below).
- endpoint
The endpoint to chat with the model. Default is "/api/chat".
- host
The base URL to use. Default is NULL, which uses Ollama's default base URL (see the connection options sketch in the examples below).
- ...
Additional options to pass to the model, such as temperature (see examples below).
Examples
if (FALSE) { # test_connection()$status_code == 200
# one message
messages <- list(
  list(role = "user", content = "How are you doing?")
)
chat("llama3", messages) # returns response by default
chat("llama3", messages, output = "text") # returns text/vector
chat("llama3", messages, temperature = 2.8) # additional options
chat("llama3", messages, stream = TRUE) # stream response
chat("llama3", messages, output = "df", stream = TRUE) # stream and return dataframe
# multiple messages
messages <- list(
  list(role = "user", content = "Hello!"),
  list(role = "assistant", content = "Hi! How are you?"),
  list(role = "user", content = "Who is the prime minister of the UK?"),
  list(role = "assistant", content = "Rishi Sunak"),
  list(role = "user", content = "List all the previous messages.")
)
chat("llama3", messages, stream = TRUE)
# image
image_path <- file.path(system.file("extdata", package = "ollamar"), "image1.png")
messages <- list(
  list(role = "user", content = "What is in the image?", images = image_path)
)
chat("benzie/llava-phi-3", messages, output = 'text')
}