# OpenAI API client library for Rust (unofficial)

## Installation

Cargo.toml

```toml
[dependencies]
openai-api-rs = "0.1.3"
```

## Example

```bash
export OPENAI_API_KEY={YOUR_API}
```
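If you want to fail fast when the key is missing, you can check the variable before constructing a client. This is a minimal sketch using only the standard library; the variable name matches the export above:

```rust
use std::env;

fn main() {
    // Look up the key exported above and report clearly if it is absent.
    match env::var("OPENAI_API_KEY") {
        Ok(key) => println!("OPENAI_API_KEY is set ({} characters)", key.len()),
        Err(_) => eprintln!("OPENAI_API_KEY is not set; export it before running the examples"),
    }
}
```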

### Chat

```rust
use openai_api_rs::v1::api::Client;
use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest};
use std::env;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(env::var("OPENAI_API_KEY").unwrap().to_string());
    let req = ChatCompletionRequest {
        model: chat_completion::GPT3_5_TURBO.to_string(),
        messages: vec![chat_completion::ChatCompletionMessage {
            role: chat_completion::MessageRole::user,
            content: String::from("What is an NFT?"),
        }],
    };
    let result = client.chat_completion(req).await?;
    println!("{:?}", result.choices[0].message.content);

    Ok(())
}
```
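`ChatCompletionRequest.messages` is a plain `Vec`, so a multi-turn conversation is just more entries in that vector. The sketch below builds such a history with the same `ChatCompletionMessage` shape used above; the `assistant` role variant and the conversation text are assumptions for illustration, not taken from the example:

```rust
use openai_api_rs::v1::chat_completion::{self, ChatCompletionMessage};

// Build a small multi-turn history; each turn is one ChatCompletionMessage
// with the same fields (role + content) as in the example above.
fn example_history() -> Vec<ChatCompletionMessage> {
    vec![
        ChatCompletionMessage {
            role: chat_completion::MessageRole::user,
            content: String::from("What is an NFT?"),
        },
        ChatCompletionMessage {
            // Assumed variant: the example above only shows MessageRole::user.
            role: chat_completion::MessageRole::assistant,
            content: String::from("An NFT is a non-fungible token recorded on a blockchain."),
        },
        ChatCompletionMessage {
            role: chat_completion::MessageRole::user,
            content: String::from("How is it different from a cryptocurrency?"),
        },
    ]
}
```

Pass the returned vector as the `messages` field of `ChatCompletionRequest` exactly as in the example.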

### Completion

```rust
use openai_api_rs::v1::completion::{self, CompletionRequest};
use openai_api_rs::v1::api::Client;
use std::env;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(env::var("OPENAI_API_KEY").unwrap().to_string());
    let req = CompletionRequest {
        model: completion::GPT3_TEXT_DAVINCI_003.to_string(),
        prompt: Some(String::from("What is an NFT?")),
        suffix: None,
        max_tokens: Some(3000),
        temperature: Some(0.9),
        top_p: Some(1.0),
        n: None,
        stream: None,
        logprobs: None,
        echo: None,
        stop: None,
        presence_penalty: Some(0.6),
        frequency_penalty: Some(0.0),
        best_of: None,
        logit_bias: None,
        user: None,
    };
    let result = client.completion(req).await?;
    println!("{:?}", result.choices[0].text);

    Ok(())
}
```
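Because `CompletionRequest` spells out every sampling parameter, it can be convenient to hide the boilerplate behind a small helper and vary only the prompt. `davinci_request` below is a hypothetical helper built from the exact fields shown above, not an API provided by the crate:

```rust
use openai_api_rs::v1::completion::{self, CompletionRequest};

// Hypothetical convenience wrapper: set the fields we care about and leave the rest as None.
// Field names and values mirror the struct literal in the example above.
fn davinci_request(prompt: String) -> CompletionRequest {
    CompletionRequest {
        model: completion::GPT3_TEXT_DAVINCI_003.to_string(),
        prompt: Some(prompt),
        suffix: None,
        max_tokens: Some(3000),
        temperature: Some(0.9),
        top_p: Some(1.0),
        n: None,
        stream: None,
        logprobs: None,
        echo: None,
        stop: None,
        presence_penalty: Some(0.6),
        frequency_penalty: Some(0.0),
        best_of: None,
        logit_bias: None,
        user: None,
    }
}
```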