Release 1.1.8
sgusakovsky committed Mar 5, 2023
1 parent 63118c2 commit 3b9889b
Showing 8 changed files with 176 additions and 6 deletions.
2 changes: 1 addition & 1 deletion OpenAIService.podspec
@@ -1,6 +1,6 @@
Pod::Spec.new do |spec|
spec.name = 'OpenAIService'
spec.version = '1.1.7'
spec.version = '1.1.8'
spec.homepage = 'https://github.com/sgusakovsky/OpenAIService'
spec.license = {
:type => 'MIT',
Expand Down
17 changes: 17 additions & 0 deletions README.md
@@ -147,6 +147,23 @@ service?.sendImageVariation(with: body, completionHandler: { result in
```
The API will return an `OpenAIImageResponse` object containing the corresponding image url items.

Create a call to the audio transcription API.

```swift
let urlPath = Bundle.main.url(forResource: "audio", withExtension: "mp3")!
let data = try! Data(contentsOf: urlPath)
let body = OpenAIAudioTranscriptionBody(file: data, fileFormat: .mp3)
service?.sendAudioTranscription(with: body, completionHandler: { result in
switch result {
case .success(let response):
print(response.text)
case .failure(let error):
print(error.localizedDescription)
}
})
```
The API will return an `OpenAIAudioResponse` object containing the transcribed text.

For a full list of the supported models see [OpenAICompletionModelType.swift](https://github.com/sgusakovsky/OpenAIService/blob/main/Sources/OpenAIService/Models/Completion/OpenAICompletionModelType.swift), [OpenAIChatModelType.swift](https://github.com/sgusakovsky/OpenAIService/blob/main/Sources/OpenAIService/Models/Chat/OpenAIChatModelType.swift), [OpenAIEditsModelType.swift](https://github.com/sgusakovsky/OpenAIService/blob/main/Sources/OpenAIService/Models/Edits/OpenAIEditsModelType.swift). For more information on the models see the [OpenAI API Documentation](https://platform.openai.com/docs/models).

OpenAIService also supports Swift concurrency so you can use Swift’s async/await syntax to fetch completions.
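
For example, the new audio transcription request can be awaited directly. A minimal sketch using the async variant added in this release (the bundled `audio.mp3` resource is illustrative):

```swift
let urlPath = Bundle.main.url(forResource: "audio", withExtension: "mp3")!
let data = try! Data(contentsOf: urlPath)
let body = OpenAIAudioTranscriptionBody(file: data, fileFormat: .mp3)

// Awaiting the call returns an OpenAIAudioResponse instead of invoking a completion handler.
let response = try await service?.sendAudioTranscription(with: body)
print(response?.text ?? "")
```
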
25 changes: 25 additions & 0 deletions Sources/OpenAIService/Models/Audio/AudioFileFormat.swift
@@ -0,0 +1,25 @@
//
// AudioFileFormat.swift
// OpenAIDemo
//
// Created by Gusakovsky, Sergey on 5.03.23.
//

import Foundation

public enum AudioFileFormat: String {
case mp3, mp4, mpeg, mpga, m4a, wav, webm

public var mimeType: String {
switch self {
case .mp3, .mpeg, .mpga:
return "audio/mpeg"
case .mp4, .m4a:
return "audio/mp4"
case .wav:
return "audio/x-wav"
case .webm:
return "audio/webm"
}
}
}
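
A quick illustration (not part of the commit): each case exposes the MIME type used when the audio file is attached as multipart form data.

```swift
let format = AudioFileFormat.m4a
print(format.rawValue)   // "m4a" (used as the uploaded file's extension)
print(format.mimeType)   // "audio/mp4"
```
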
12 changes: 12 additions & 0 deletions Sources/OpenAIService/Models/Audio/OpenAIAudioResponse.swift
@@ -0,0 +1,12 @@
//
// OpenAIAudioResponse.swift
// OpenAIDemo
//
// Created by Gusakovsky, Sergey on 5.03.23.
//

import Foundation

public struct OpenAIAudioResponse: Codable {
public let text: String
}
14 changes: 14 additions & 0 deletions Sources/OpenAIService/Models/Audio/OpenAIAudioModelType.swift
@@ -0,0 +1,14 @@
//
// OpenAIAudioModelType.swift
// OpenAIDemo
//
// Created by Gusakovsky, Sergey on 5.03.23.
//

import Foundation

public enum OpenAIAudioModelType: String, Encodable {

/// > Model Name: whisper
case whisper = "whisper-1"
}
50 changes: 50 additions & 0 deletions Sources/OpenAIService/Models/Audio/OpenAIAudioTranscriptionBody.swift
@@ -0,0 +1,50 @@
//
// OpenAIAudioTranscriptionBody.swift
// OpenAIDemo
//
// Created by Gusakovsky, Sergey on 5.03.23.
//

import Foundation

/// https://platform.openai.com/docs/api-reference/audio/create
public struct OpenAIAudioTranscriptionBody {
public let file: FormData
public let model: OpenAIAudioModelType
public let prompt: String?
public let temperature: Double
public let language: String?

public init(
file: Data,
fileFormat: AudioFileFormat,
model: OpenAIAudioModelType = .whisper,
prompt: String? = nil,
temperature: Double = 0,
language: String? = nil
) {
self.file = FormData(data: file, mimeType: fileFormat.mimeType, fileName: "audio.\(fileFormat.rawValue)")
self.model = model
self.prompt = prompt
self.temperature = temperature
self.language = language
}

public var body: [String: Any] {
var result: [String: Any] = [
"file": self.file,
"model": self.model.rawValue,
"temperature": self.temperature
]

if let prompt = self.prompt {
result["prompt"] = prompt
}

if let language = self.language {
result["language"] = language
}

return result
}
}
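
A sketch of constructing this body with the optional fields populated; the resource name, prompt, and language value below are illustrative, not part of the commit:

```swift
import Foundation

let fileURL = Bundle.main.url(forResource: "meeting", withExtension: "m4a")!
let audioData = try! Data(contentsOf: fileURL)

let transcriptionBody = OpenAIAudioTranscriptionBody(
    file: audioData,
    fileFormat: .m4a,
    prompt: "Speaker names: Sergey, Anna",  // optional hint that can guide the transcription
    temperature: 0.2,
    language: "en"                          // ISO-639-1 code of the spoken language
)

// `body` flattens everything into the multipart form fields sent to the API, e.g.
// ["file": FormData, "model": "whisper-1", "temperature": 0.2, "prompt": "...", "language": "en"]
let formFields = transcriptionBody.body
```
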
7 changes: 5 additions & 2 deletions Sources/OpenAIService/Networking/OpenAIEndpoint.swift
@@ -14,6 +14,7 @@ enum OpenAIEndpoint
case imagesGenerations
case imageEdits
case imageVariation
case audioTranscriptions

var path: String {
switch self {
@@ -29,19 +30,21 @@
return "/v1/images/edits"
case .imageVariation:
return "/v1/images/variations"
case .audioTranscriptions:
return "/v1/audio/transcriptions"
}
}

var method: HTTPMethod {
switch self {
case .completions, .edits, .chatCompletions, .imagesGenerations, .imageEdits, .imageVariation:
case .completions, .edits, .chatCompletions, .imagesGenerations, .imageEdits, .imageVariation, .audioTranscriptions:
return .post
}
}

func baseURL() -> String {
switch self {
case .completions, .edits, .chatCompletions, .imagesGenerations, .imageEdits, .imageVariation:
case .completions, .edits, .chatCompletions, .imagesGenerations, .imageEdits, .imageVariation, .audioTranscriptions:
return "https://api.openai.com"
}
}
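
For reference, here is how the new case resolves into a full request URL. This is only an illustration; `OpenAIEndpoint` has internal access, so the snippet is meaningful only inside the package:

```swift
import Foundation

let endpoint = OpenAIEndpoint.audioTranscriptions
let url = URL(string: endpoint.baseURL() + endpoint.path)!
// url is https://api.openai.com/v1/audio/transcriptions, requested with endpoint.method == .post
```
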
55 changes: 52 additions & 3 deletions Sources/OpenAIService/OpenAIService.swift
@@ -163,6 +163,33 @@ public final class OpenAIService {
)
}

/// Send an audio transcription request to the OpenAI API
/// - Parameters:
/// - body: Body of audio transcription request
/// - completionHandler: Returns an OpenAIAudioResponse Data Model
public func sendAudioTranscription(
with body: OpenAIAudioTranscriptionBody,
networkQueue: DispatchQueue = .global(qos: .background),
responseQueue: DispatchQueue = .main,
completionHandler: @escaping (Result<OpenAIAudioResponse, OpenAIAPIError>) -> Void
) {
let endpoint = OpenAIEndpoint.audioTranscriptions
guard let request = apiClient.prepareMultipartFormDataRequest(endpoint, body: body.body, config: config) else {
completionHandler(.failure(.genericError(error: RequestError())))
return
}

apiClient.makeRequest(
request: request,
networkQueue: networkQueue,
responseQueue: responseQueue,
completionHandler: completionHandler
)
}

}

extension OpenAIService {
/// Send a Completion to the OpenAI API
/// - Parameters:
/// - body: Body of chat completion request
@@ -232,7 +259,7 @@ public final class OpenAIService {
/// Send an Image generation request to the OpenAI API
/// - Parameters:
/// - body: Body of image generation request
/// - Returns: Returns an OpenAIGenerationImageResponse Data Model
/// - Returns: Returns an OpenAIImageResponse Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public func sendImageGeneration(
@@ -254,7 +281,7 @@
/// Send an Image edits request to the OpenAI API
/// - Parameters:
/// - body: Body of image edits request
/// - Returns: Returns an OpenAIGenerationImageResponse Data Model
/// - Returns: Returns an OpenAIImageResponse Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public func sendImageEdits(
@@ -276,7 +303,7 @@
/// Send an Image variation request to the OpenAI API
/// - Parameters:
/// - body: Body of image variation request
/// - Returns: Returns an OpenAIGenerationImageResponse Data Model
/// - Returns: Returns an OpenAIImageResponse Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public func sendImageVariation(
@@ -294,4 +321,26 @@
}
}
}

/// Send an audio transcription request to the OpenAI API
/// - Parameters:
/// - body: Body of audio transcription request
/// - Returns: Returns an OpenAIAudioResponse Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public func sendAudioTranscription(
with body: OpenAIAudioTranscriptionBody,
networkQueue: DispatchQueue = .global(qos: .background),
responseQueue: DispatchQueue = .main
) async throws -> OpenAIAudioResponse {
return try await withCheckedThrowingContinuation { continuation in
sendAudioTranscription(
with: body,
networkQueue: networkQueue,
responseQueue: responseQueue
) { result in
continuation.resume(with: result)
}
}
}
}
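
A brief call-site sketch for the async variant with error handling; the wrapping function and its parameters are assumptions for illustration, not part of the commit:

```swift
import Foundation

// Assumes `service` is an already-configured OpenAIService instance.
func transcribe(_ audioData: Data, using service: OpenAIService) async {
    let body = OpenAIAudioTranscriptionBody(file: audioData, fileFormat: .wav)
    do {
        let response = try await service.sendAudioTranscription(with: body)
        print(response.text)
    } catch {
        // Failures surface as OpenAIAPIError via the checked continuation.
        print(error.localizedDescription)
    }
}
```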
