Swift examples
Follow these examples if you're using the SwiftOpenAI package.
Initialization
When using the SwiftOpenAI package, first make sure you initialize the service with:
#if DEBUG && targetEnvironment(simulator)
let service = OpenAIServiceFactory.service(
    aiproxyPartialKey: "hardcode_partial_key_here",
    aiproxyDeviceCheckBypass: "hardcode_device_check_bypass_here"
)
#else
let service = OpenAIServiceFactory.service(
    aiproxyPartialKey: "hardcode_partial_key_here"
)
#endif
The aiproxyPartialKey and aiproxyDeviceCheckBypass values are provided to you on the AIProxy dashboard.
It's important that you do not let the aiproxyDeviceCheckBypass token leak into a distribution build of your app (including TestFlight distributions). Please retain the conditional compilation checks that are present in the sample code above.
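Because the initialization differs between simulator and device builds, you may want to keep the conditional compilation check in one place rather than repeating it in every file. Below is a minimal sketch of a shared factory helper using the same OpenAIServiceFactory calls shown above; the ServiceProvider name is our own, not part of the SwiftOpenAI package:
import SwiftOpenAI

enum ServiceProvider {
    /// Builds the OpenAIService, keeping the conditional compilation
    /// checks in a single location.
    static func makeService() -> OpenAIService {
        #if DEBUG && targetEnvironment(simulator)
        return OpenAIServiceFactory.service(
            aiproxyPartialKey: "hardcode_partial_key_here",
            aiproxyDeviceCheckBypass: "hardcode_device_check_bypass_here"
        )
        #else
        return OpenAIServiceFactory.service(
            aiproxyPartialKey: "hardcode_partial_key_here"
        )
        #endif
    }
}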
Chat Completion Example
import SwiftUI
import SwiftOpenAI

#if DEBUG && targetEnvironment(simulator)
private let service: OpenAIService = OpenAIServiceFactory.service(
    aiproxyPartialKey: "hardcode_partial_key_here",
    aiproxyDeviceCheckBypass: "hardcode_device_check_bypass_here"
)
#else
private let service: OpenAIService = OpenAIServiceFactory.service(
    aiproxyPartialKey: "hardcode_partial_key_here"
)
#endif

struct ChatCompletionView: View {
    @State var jokeText: String = ""

    var body: some View {
        VStack {
            Text(jokeText)
            Button("Tell a joke") { tellJoke() }
        }
    }

    func tellJoke() {
        Task {
            jokeText = ""
            let prompt = "Tell me a joke"
            let parameters = ChatCompletionParameters(
                messages: [.init(role: .user, content: .text(prompt))],
                model: .gpt4o
            )
            do {
                let stream = try await service.startStreamedChat(parameters: parameters)
                for try await result in stream {
                    guard let choice = result.choices.first,
                          let content = choice.delta.content else {
                        return
                    }
                    jokeText += content
                }
            } catch {
                print("Could not tell a joke: \(error.localizedDescription)")
            }
        }
    }
}

#Preview {
    ChatCompletionView()
}
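If you need to stop the stream early (for example, when the view disappears), keep a reference to the Task and cancel it. A minimal sketch, assuming the same service, jokeText, and parameters as in the view above; the streamTask property name is our own:
@State private var streamTask: Task<Void, Never>? = nil

func tellJoke() {
    streamTask = Task {
        do {
            let parameters = ChatCompletionParameters(
                messages: [.init(role: .user, content: .text("Tell me a joke"))],
                model: .gpt4o
            )
            let stream = try await service.startStreamedChat(parameters: parameters)
            for try await result in stream {
                if let content = result.choices.first?.delta.content {
                    jokeText += content
                }
            }
        } catch {
            // Cancellation surfaces here as a thrown error
            print("Stream ended: \(error.localizedDescription)")
        }
    }
}

// Cancel later, for example from .onDisappear:
// streamTask?.cancel()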
Translation Example
import SwiftUI
import SwiftOpenAI

#if DEBUG && targetEnvironment(simulator)
private let service: OpenAIService = OpenAIServiceFactory.service(
    aiproxyPartialKey: "hardcode_partial_key_here",
    aiproxyDeviceCheckBypass: "hardcode_device_check_bypass_here"
)
#else
private let service: OpenAIService = OpenAIServiceFactory.service(
    aiproxyPartialKey: "hardcode_partial_key_here"
)
#endif

struct TranslationView: View {
    private let prompt = "The response is an exact translation from English to Spanish. You don't respond with any English."
    @State var translatedText = ""

    var body: some View {
        VStack {
            Text(translatedText)
            Button("Translate") {
                Task {
                    await translate()
                }
            }
        }
    }

    func translate() async {
        let parameters = ChatCompletionParameters(
            messages: [
                .init(role: .system, content: .text(prompt)),
                .init(role: .user, content: .text("what time is dinner?")),
            ],
            model: .gpt4o
        )
        do {
            let choices = try await service.startChat(parameters: parameters).choices
            let contents = choices.compactMap(\.message.content)
            translatedText = contents.first ?? ""
        } catch {
            print("Could not translate: \(error.localizedDescription)")
        }
    }
}

#Preview {
    TranslationView()
}
For more information about the SwiftOpenAI package, visit the GitHub documentation.
AIProxy.swift examples
Follow these examples if you're using the AIProxy.swift file to integrate instead of the SwiftOpenAI package.
Add AIProxy to your project:
- Drop the AIProxy.swift file into your Xcode project
- Replace the constants below with the values you received at dashboard.aiproxy.pro
- Read the integration examples below:
private let aiproxyPartialKey = "partial-key-goes-here"
#if DEBUG && targetEnvironment(simulator)
private let aiproxyDeviceCheckBypass = "device-check-bypass-goes-here"
#endif
There are four options for integrating when using the AIProxy.swift file:
- Non-streaming chat using async/await
- Non-streaming chat using callbacks
- Streaming chat using async/await
- Streaming chat using callbacks
If you choose to use the callback-based interface, callbacks are guaranteed to be invoked on the main thread. All internal work is done using the modern async/await APIs for URLSession.
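Because the callbacks land on the main thread, you can update UI state directly without hopping queues. A minimal sketch, using the chatCompletionRequest callback interface shown in the examples below; chatViewModel is a hypothetical observable object, not part of AIProxy.swift:
// `requestBody` is built as in the examples below.
AIProxy.chatCompletionRequest(chatRequestBody: requestBody) { result in
    if case .success(let response) = result {
        // Safe to touch UI state here: the callback is invoked on the
        // main thread, so no DispatchQueue.main.async wrapper is needed.
        chatViewModel.replyText = response.choices.first?.message.content ?? ""
    }
}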
Example integration of non-streaming chat using async/await
let requestBody = AIProxy.ChatRequestBody(
    model: "gpt-4-0125-preview",
    messages: [
        AIProxy.Message(role: "user", content: "hello world")
    ]
)

let task = Task {
    do {
        let response = try await AIProxy.chatCompletionRequest(
            chatRequestBody: requestBody
        )
        // Do something with `response`. For example:
        print(response.choices.first?.message.content ?? "")
    } catch {
        // Handle error. For example:
        print(error.localizedDescription)
    }
}

// Uncomment this to cancel the request:
// task.cancel()
Example integration of non-streaming chat using callbacks
let requestBody = AIProxy.ChatRequestBody(
    model: "gpt-4-0125-preview",
    messages: [
        AIProxy.Message(role: "user", content: "hello world")
    ]
)

let task = AIProxy.chatCompletionRequest(chatRequestBody: requestBody) { result in
    switch result {
    case .success(let response):
        // Do something with `response`. For example:
        print(response.choices.first?.message.content ?? "")
    case .failure(let error):
        // Handle error. For example:
        print(error.localizedDescription)
    }
}

// Uncomment this to cancel the request:
// task.cancel()
Example integration of streaming chat using async/await
let requestBody = AIProxy.ChatRequestBody(
    model: "gpt-4-0125-preview",
    messages: [
        AIProxy.Message(role: "user", content: "hello world")
    ],
    stream: true
)

let task = Task {
    do {
        let stream = try await AIProxy.streamingChatCompletionRequest(chatRequestBody: requestBody)
        for try await chunk in stream {
            // Do something with `chunk`. For example:
            print(chunk.choices.first?.delta.content ?? "")
        }
    } catch {
        // Handle error. For example:
        print(error.localizedDescription)
    }
}

// Uncomment this to cancel the request or stop the streaming response:
// task.cancel()
Example integration of streaming chat using callbacks
// Craft your request body per the 'Request body' documentation here:
// https://platform.openai.com/docs/api-reference/chat/create
let requestBody = AIProxy.ChatRequestBody(
    model: "gpt-4-0125-preview",
    messages: [
        AIProxy.Message(role: "user", content: "hello world")
    ],
    stream: true
)

let task = AIProxy.streamingChatCompletionRequest(chatRequestBody: requestBody) { chunk in
    // Do something with `chunk`. For example:
    print(chunk.choices.first?.delta.content ?? "")
} completion: { error in
    if let error = error {
        // Handle error. For example:
        print(error.localizedDescription)
    }
}

// Uncomment this to cancel the request or stop the streaming response:
// task.cancel()