## Installation

Add InsForge to your Swift Package Manager dependencies:

```swift
dependencies: [
    .package(url: "https://github.com/insforge/insforge-swift.git", from: "0.0.7")
]
```
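If you edit `Package.swift` by hand, also add the library product to your target's dependencies. The product name below is an assumption based on the `import InsForge` statement; check the package manifest if it differs:

```swift
.target(
    name: "MyApp", // your target
    dependencies: [
        // Assumed product name "InsForge", matching the module you import.
        .product(name: "InsForge", package: "insforge-swift")
    ]
)
```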
Then import the SDK and create a client:

```swift
import Foundation
import InsForge

let insforge = InsForgeClient(
    baseURL: URL(string: "https://your-app.insforge.app")!,
    anonKey: "your-anon-key"
)
```
### Enable Logging (Optional)

For debugging, you can configure the SDK log level and destination:

```swift
let options = InsForgeClientOptions(
    global: .init(
        logLevel: .debug,
        logDestination: .osLog,
        logSubsystem: "com.example.MyApp"
    )
)

let insforge = InsForgeClient(
    baseURL: URL(string: "https://your-app.insforge.app")!,
    anonKey: "your-anon-key",
    options: options
)
```
Log Levels:

| Level | Description |
|---|---|
| `.trace` | Most verbose; includes all internal details |
| `.debug` | Detailed information for debugging |
| `.info` | General operational information (default) |
| `.warning` | Warnings that don't prevent operation |
| `.error` | Errors that affect functionality |
| `.critical` | Critical failures |
Log Destinations:

| Destination | Description |
|---|---|
| `.console` | Standard output (`print`) |
| `.osLog` | Apple's unified logging system (recommended for iOS/macOS) |
| `.none` | Disable logging |
| `.custom` | Provide your own `LogHandler` factory |
Use `.info` or `.error` in production to avoid exposing sensitive data in logs.
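One way to follow that advice is to pick the level per build configuration. A minimal sketch using the same options API shown above:

```swift
// Verbose logging in Debug builds, errors only in Release.
#if DEBUG
let options = InsForgeClientOptions(
    global: .init(logLevel: .debug, logDestination: .osLog, logSubsystem: "com.example.MyApp")
)
#else
let options = InsForgeClientOptions(
    global: .init(logLevel: .error, logDestination: .osLog, logSubsystem: "com.example.MyApp")
)
#endif
```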
## chatCompletion()

Generate AI chat completions.

### Parameters

- `model` (`String`) - AI model (e.g., `"anthropic/claude-3.5-haiku"`, `"openai/gpt-4"`)
- `messages` (`[ChatMessage]`) - Array of chat messages
- `stream` (`Bool`, optional) - Enable streaming mode (default: `false`)
- `temperature` (`Double`, optional) - Sampling temperature, 0-2
- `maxTokens` (`Int`, optional) - Maximum number of tokens to generate
- `topP` (`Double`, optional) - Top-p (nucleus) sampling parameter
- `systemPrompt` (`String`, optional) - System prompt for the conversation
- `webSearch` (`WebSearchPlugin`, optional) - Enable web search capabilities
- `fileParser` (`FileParserPlugin`, optional) - Enable file/PDF parsing
- `thinking` (`Bool`, optional) - Enable extended reasoning mode (Anthropic models only)

### Returns

`ChatCompletionResponse` - the generated text plus optional annotations and metadata (see the Models Reference below).

### Example
```swift
let response = try await insforge.ai.chatCompletion(
    model: "anthropic/claude-3.5-haiku",
    messages: [
        ChatMessage(role: .user, content: "What is the capital of France?")
    ]
)

print(response.text)
// "The capital of France is Paris."

// Access metadata
if let metadata = response.metadata {
    print("Model: \(metadata.model)")
    if let usage = metadata.usage {
        print("Tokens used: \(usage.totalTokens)")
    }
}

// Access annotations if available (from web search)
if let annotations = response.annotations {
    for annotation in annotations {
        print("Citation: \(annotation.urlCitation.url)")
        if let title = annotation.urlCitation.title {
            print("Title: \(title)")
        }
    }
}
```
### Example with System Prompt

```swift
let response = try await insforge.ai.chatCompletion(
    model: "anthropic/claude-3.5-haiku",
    messages: [
        ChatMessage(role: .user, content: "Write a haiku about coding")
    ],
    temperature: 0.7,
    maxTokens: 100,
    systemPrompt: "You are a creative poet who writes in haiku format."
)

print(response.text)
```
### Example with Conversation History

```swift
let response = try await insforge.ai.chatCompletion(
    model: "openai/gpt-4",
    messages: [
        ChatMessage(role: .system, content: "You are a helpful assistant."),
        ChatMessage(role: .user, content: "What is 2 + 2?"),
        ChatMessage(role: .assistant, content: "2 + 2 equals 4."),
        ChatMessage(role: .user, content: "What about 3 + 3?")
    ]
)

print(response.text)
```
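The same pattern extends to a running conversation: append each reply to the history before the next call. A minimal sketch using only the API shown above:

```swift
var history: [ChatMessage] = [
    ChatMessage(role: .system, content: "You are a helpful assistant.")
]

// Send a question, record both sides of the exchange, and return the reply.
func ask(_ question: String) async throws -> String {
    history.append(ChatMessage(role: .user, content: question))
    let response = try await insforge.ai.chatCompletion(
        model: "openai/gpt-4",
        messages: history
    )
    history.append(ChatMessage(role: .assistant, content: response.text))
    return response.text
}

let first = try await ask("What is 2 + 2?")
let second = try await ask("What about 3 + 3?")
```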
### Example with Web Search

```swift
let response = try await insforge.ai.chatCompletion(
    model: "openai/gpt-4",
    messages: [
        ChatMessage(role: .user, content: "What's the latest news about AI?")
    ],
    webSearch: WebSearchPlugin(
        enabled: true,
        engine: .native,
        maxResults: 5
    )
)

print(response.text)

// Access URL citations from search results
if let annotations = response.annotations {
    for annotation in annotations {
        print("Source: \(annotation.urlCitation.title ?? "Unknown") - \(annotation.urlCitation.url)")
    }
}
```
### Example with PDF Parsing

```swift
let response = try await insforge.ai.chatCompletion(
    model: "openai/gpt-4",
    messages: [
        ChatMessage(role: .user, content: "Summarize this PDF document...")
    ],
    fileParser: FileParserPlugin(
        enabled: true,
        pdf: PdfParserConfig(engine: .mistralOcr)
    )
)

print(response.text)
```
### Example with Extended Reasoning (Thinking)

```swift
let response = try await insforge.ai.chatCompletion(
    model: "anthropic/claude-3-opus",
    messages: [
        ChatMessage(role: .user, content: "Solve this complex problem step by step...")
    ],
    thinking: true
)

print(response.text)
```
### Example with Combined Features

```swift
// `messages` is a previously built [ChatMessage] conversation.
let response = try await insforge.ai.chatCompletion(
    model: "openai/gpt-4",
    messages: messages,
    webSearch: WebSearchPlugin(enabled: true, maxResults: 3),
    fileParser: FileParserPlugin(enabled: true),
    thinking: true
)

print(response.text)
```
## generateEmbeddings()

Generate vector embeddings for text input using AI models.

### Parameters

- `model` (`String`) - Embedding model identifier (e.g., `"google/gemini-embedding-001"`)
- `input` (`EmbeddingInput`) - Text input to embed (single or multiple)
- `encodingFormat` (`EmbeddingEncodingFormat`, optional) - Output format (`.float` or `.base64`)
- `dimensions` (`Int`, optional) - Number of dimensions for the output embeddings

### Returns

`EmbeddingsResponse` - the embedding vectors plus optional metadata (see the Models Reference below).

### Example (Single Text)
```swift
let response = try await insforge.ai.generateEmbeddings(
    model: "google/gemini-embedding-001",
    input: .single("Hello world")
)

print("Generated \(response.data.count) embedding(s)")
print("Dimensions: \(response.data.first?.embedding.count ?? 0)")

if let metadata = response.metadata {
    print("Model: \(metadata.model)")
}
```
### Example (Multiple Texts)

```swift
let response = try await insforge.ai.generateEmbeddings(
    model: "google/gemini-embedding-001",
    input: .multiple(["Hello", "World", "How are you?"])
)

for embedding in response.data {
    print("Index \(embedding.index): \(embedding.embedding.count) dimensions")
}
```
### Example (With Optional Parameters)

```swift
let response = try await insforge.ai.generateEmbeddings(
    model: "google/gemini-embedding-001",
    input: .single("Hello world"),
    encodingFormat: .float,
    dimensions: 768
)

print("Embedding dimensions: \(response.data.first?.embedding.count ?? 0)") // 768
```
### Example (Semantic Search)

```swift
// Generate embeddings for documents
let documents = [
    "Swift is a powerful programming language",
    "iOS development with SwiftUI",
    "Machine learning with Core ML"
]

let docEmbeddings = try await insforge.ai.generateEmbeddings(
    model: "google/gemini-embedding-001",
    input: .multiple(documents)
)

// Generate embedding for query
let queryEmbedding = try await insforge.ai.generateEmbeddings(
    model: "google/gemini-embedding-001",
    input: .single("mobile app development")
)

// Calculate cosine similarity
func cosineSimilarity(_ a: [Double], _ b: [Double]) -> Double {
    let dotProduct = zip(a, b).map(*).reduce(0, +)
    let normA = sqrt(a.map { $0 * $0 }.reduce(0, +))
    let normB = sqrt(b.map { $0 * $0 }.reduce(0, +))
    return dotProduct / (normA * normB)
}

// Find most similar document
if let queryVector = queryEmbedding.data.first?.embedding {
    let similarities = docEmbeddings.data.enumerated().map { index, embedding in
        (index: index, similarity: cosineSimilarity(queryVector, embedding.embedding))
    }
    if let mostSimilar = similarities.max(by: { $0.similarity < $1.similarity }) {
        print("Most similar: \(documents[mostSimilar.index])")
    }
}
```
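Recomputing document embeddings on every query gets expensive, so you'd normally store the vectors once and search against the stored copies. A sketch that reuses the database insert pattern shown later on this page; the `documents` table and its schema are assumptions:

```swift
struct DocumentRow: Codable {
    let content: String
    let embedding: [Double]
}

// Store each document alongside its vector (hypothetical `documents` table).
for (doc, item) in zip(documents, docEmbeddings.data) {
    let _: [DocumentRow] = try await insforge.database
        .from("documents")
        .insert(DocumentRow(content: doc, embedding: item.embedding))
}
```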
## generateImage()

Generate images using AI models.

### Parameters

- `model` (`String`) - Image generation model
- `prompt` (`String`) - Text description of the image to generate

### Returns

`ImageGenerationResponse` - the generated images plus optional metadata (see the Models Reference below).

### Example
```swift
let response = try await insforge.ai.generateImage(
    model: "google/gemini-2.5-flash-image-preview",
    prompt: "A serene mountain landscape at sunset"
)

// Access generated images
for image in response.images {
    print("Image URL: \(image.imageUrl)")
    print("Type: \(image.type)")
}

// Get image count
print("Generated \(response.imageCount) images")

// Access metadata
if let metadata = response.metadata {
    print("Model: \(metadata.model)")
    if let revisedPrompt = metadata.revisedPrompt {
        print("Revised prompt: \(revisedPrompt)")
    }
}
```
### Example with Storage Upload

```swift
let response = try await insforge.ai.generateImage(
    model: "google/gemini-2.5-flash-image-preview",
    prompt: "A futuristic city skyline"
)

// Download and upload to storage
if let firstImage = response.images.first {
    // Fetch image data from URL
    let imageURL = URL(string: firstImage.imageUrl)!
    let (imageData, _) = try await URLSession.shared.data(from: imageURL)

    // Upload to storage
    let file = try await insforge.storage
        .from("ai-images")
        .upload(
            data: imageData,
            fileName: "generated-image.png",
            options: FileOptions(contentType: "image/png")
        )

    // Save to database
    struct GeneratedImage: Codable {
        let prompt: String
        let imageUrl: String
        let model: String?
    }

    let _: [GeneratedImage] = try await insforge.database
        .from("generated_images")
        .insert(GeneratedImage(
            prompt: "A futuristic city skyline",
            imageUrl: file.url,
            model: response.model
        ))
}
```
## listModels()

List available AI models.

### Returns

`ListModelsResponse` - text and image model providers (see the Models Reference below).

### Example
```swift
let models = try await insforge.ai.listModels()

// List text generation models
print("Text Models:")
for provider in models.text {
    print("  Provider: \(provider.provider) (configured: \(provider.configured))")
    for model in provider.models {
        print("  - \(model.id)")
        print("    Input: \(model.inputModality)")
        print("    Output: \(model.outputModality)")
        print("    Price level: \(model.priceLevel)")
    }
}

// List image generation models
print("\nImage Models:")
for provider in models.image {
    print("  Provider: \(provider.provider)")
    for model in provider.models {
        print("  - \(model.id)")
    }
}
```
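As a practical follow-on, here is a small filter built on the `ListModelsResponse` shape documented below. It collects the IDs of text models from configured providers that accept image input:

```swift
let models = try await insforge.ai.listModels()

// IDs of vision-capable text models from configured providers.
let visionModelIDs = models.text
    .filter(\.configured)
    .flatMap(\.models)
    .filter { $0.inputModality.contains("image") }
    .map(\.id)

print("Vision-capable models: \(visionModelIDs)")
```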
## SwiftUI Integration

### Chat View

```swift
import SwiftUI
import InsForge

// `insforge` is the shared InsForgeClient created during setup.
struct ChatView: View {
    @State private var messages: [ChatMessage] = []
    @State private var inputText = ""
    @State private var isLoading = false

    var body: some View {
        VStack {
            ScrollView {
                LazyVStack(alignment: .leading, spacing: 12) {
                    ForEach(Array(messages.enumerated()), id: \.offset) { _, message in
                        ChatBubble(message: message)
                    }
                }
                .padding()
            }

            HStack {
                TextField("Message", text: $inputText)
                    .textFieldStyle(.roundedBorder)
                Button(action: sendMessage) {
                    Image(systemName: "paperplane.fill")
                }
                .disabled(inputText.isEmpty || isLoading)
            }
            .padding()
        }
    }

    func sendMessage() {
        let userMessage = ChatMessage(role: .user, content: inputText)
        messages.append(userMessage)
        inputText = ""
        isLoading = true

        Task {
            do {
                let response = try await insforge.ai.chatCompletion(
                    model: "anthropic/claude-3.5-haiku",
                    messages: messages
                )
                let assistantMessage = ChatMessage(role: .assistant, content: response.text)
                messages.append(assistantMessage)
            } catch {
                print("Error: \(error)")
            }
            isLoading = false
        }
    }
}

struct ChatBubble: View {
    let message: ChatMessage

    var body: some View {
        HStack {
            // Push user messages to the trailing edge, assistant messages to the leading edge.
            if message.role == .user {
                Spacer()
            }

            Text(message.content)
                .padding()
                .background(message.role == .user ? Color.blue : Color.gray.opacity(0.2))
                .foregroundColor(message.role == .user ? .white : .primary)
                .cornerRadius(12)

            if message.role != .user {
                Spacer()
            }
        }
    }
}
```
### Image Generation View

```swift
struct ImageGenerationView: View {
    @State private var prompt = ""
    @State private var generatedImageURL: String?
    @State private var isGenerating = false

    var body: some View {
        VStack(spacing: 20) {
            TextField("Describe your image...", text: $prompt)
                .textFieldStyle(.roundedBorder)

            Button("Generate Image") {
                Task {
                    await generateImage()
                }
            }
            .disabled(prompt.isEmpty || isGenerating)

            if isGenerating {
                ProgressView("Generating...")
            }

            if let imageURL = generatedImageURL,
               let url = URL(string: imageURL) {
                AsyncImage(url: url) { image in
                    image
                        .resizable()
                        .scaledToFit()
                } placeholder: {
                    ProgressView()
                }
                .frame(maxHeight: 300)
            }
        }
        .padding()
    }

    func generateImage() async {
        isGenerating = true
        do {
            let response = try await insforge.ai.generateImage(
                model: "google/gemini-2.5-flash-image-preview",
                prompt: prompt
            )
            if let firstImage = response.images.first {
                await MainActor.run {
                    generatedImageURL = firstImage.imageUrl
                }
            }
        } catch {
            print("Generation failed: \(error)")
        }
        isGenerating = false
    }
}
```
## Models Reference

### Enums

```swift
// Search engine options for web search
public enum WebSearchEngine: String, Codable {
    case native
    case exa
}

// PDF processing engine options
public enum PdfEngine: String, Codable {
    case pdfText = "pdf-text"
    case mistralOcr = "mistral-ocr"
    case native
}
```
### Plugin Configuration

```swift
// Web search plugin configuration
public struct WebSearchPlugin: Codable, Sendable {
    let enabled: Bool
    let engine: WebSearchEngine?
    let maxResults: Int?       // 1-10, default: 5
    let searchPrompt: String?  // Custom prompt for search results

    init(
        enabled: Bool = true,
        engine: WebSearchEngine? = nil,
        maxResults: Int? = nil,
        searchPrompt: String? = nil
    )
}

// PDF parser configuration
public struct PdfParserConfig: Codable, Sendable {
    let engine: PdfEngine?

    init(engine: PdfEngine? = nil)
}

// File parser plugin configuration
public struct FileParserPlugin: Codable, Sendable {
    let enabled: Bool
    let pdf: PdfParserConfig?

    init(enabled: Bool = true, pdf: PdfParserConfig? = nil)
}
```
### ChatMessage

```swift
public struct ChatMessage: Codable, Sendable {
    let role: Role
    let content: String

    enum Role: String, Codable {
        case user
        case assistant
        case system
    }
}
```
### ChatCompletionResponse

```swift
public struct ChatCompletionResponse: Codable, Sendable {
    let text: String                           // The generated response text
    let annotations: [UrlCitationAnnotation]?  // URL citations from web search
    let metadata: Metadata?

    // Computed properties
    var content: String { text }  // Alias for text
    var success: Bool             // True if text is not empty

    struct Metadata: Codable {
        let model: String
        let usage: TokenUsage?
    }
}
```
### Annotations

```swift
// URL citation information from web search results
public struct UrlCitation: Codable, Sendable {
    let url: String
    let title: String?
    let content: String?
    let startIndex: Int?
    let endIndex: Int?
}

// Annotation containing a URL citation
public struct UrlCitationAnnotation: Codable, Sendable {
    let type: String  // "url_citation"
    let urlCitation: UrlCitation
}
```
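One common way to surface citations in a UI is to append a numbered sources list to the reply. A minimal sketch that only reads the fields above:

```swift
// Append "[n] title: url" lines for each citation, if any.
func appendSources(to text: String, annotations: [UrlCitationAnnotation]?) -> String {
    guard let annotations, !annotations.isEmpty else { return text }
    let sources = annotations.enumerated().map { index, annotation in
        "[\(index + 1)] \(annotation.urlCitation.title ?? annotation.urlCitation.url): \(annotation.urlCitation.url)"
    }
    return text + "\n\nSources:\n" + sources.joined(separator: "\n")
}
```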
### TokenUsage

```swift
public struct TokenUsage: Codable, Sendable {
    let promptTokens: Int
    let completionTokens: Int
    let totalTokens: Int
}
```
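If you want to track consumption across calls, the usage fields accumulate naturally. A sketch; the SDK does not provide a built-in counter:

```swift
var totalTokensUsed = 0

let response = try await insforge.ai.chatCompletion(
    model: "anthropic/claude-3.5-haiku",
    messages: [ChatMessage(role: .user, content: "Hello!")]
)

// usage is optional, so only count it when the server reports it.
if let usage = response.metadata?.usage {
    totalTokensUsed += usage.totalTokens
}
print("Total tokens used so far: \(totalTokensUsed)")
```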
### EmbeddingsResponse

```swift
// Encoding format options for embeddings
public enum EmbeddingEncodingFormat: String, Codable, Sendable {
    case float
    case base64
}

// Input type for embeddings (single or multiple texts)
public enum EmbeddingInput: Sendable {
    case single(String)
    case multiple([String])
}

public struct EmbeddingsResponse: Codable, Sendable {
    let object: String  // "list"
    let data: [EmbeddingObject]
    let metadata: Metadata?

    struct Metadata: Codable {
        let model: String
        let usage: EmbeddingsUsage?
    }
}

public struct EmbeddingObject: Codable, Sendable {
    let object: String       // "embedding"
    let embedding: [Double]  // or String for base64 format
    let index: Int
}

public struct EmbeddingsUsage: Codable, Sendable {
    let promptTokens: Int?
    let totalTokens: Int?
}
```
### ImageGenerationResponse

```swift
public struct ImageGenerationResponse: Codable, Sendable {
    let model: String?
    let images: [ImageMessage]
    let text: String?
    let count: Int?
    let metadata: Metadata?

    // Computed property
    var imageCount: Int  // count ?? images.count

    struct Metadata: Codable {
        let model: String
        let revisedPrompt: String?
        let usage: TokenUsage?
    }
}
```
### ImageMessage

```swift
public struct ImageMessage: Codable, Sendable {
    let type: String
    let imageUrl: String

    // Computed property
    var url: String { imageUrl }  // Alias for imageUrl
}
```
### AIModel

```swift
public struct AIModel: Codable, Sendable {
    let id: String
    let modelId: String
    let provider: String
    let inputModality: [String]   // e.g., ["text"], ["text", "image"]
    let outputModality: [String]  // e.g., ["text"], ["image"]
    let priceLevel: Int

    // Computed property
    var name: String { id }
}
```
### ListModelsResponse

```swift
public struct ListModelsResponse: Codable, Sendable {
    let text: [ModelProvider]   // Text generation models
    let image: [ModelProvider]  // Image generation models

    struct ModelProvider: Codable {
        let provider: String
        let configured: Bool
        let models: [AIModel]
    }
}
```