Open ElicaInc opened 1 year ago
big +1, language settings support would be incredible. for reference, here's the link to the Vision framework documentation on Language Settings: https://developer.apple.com/documentation/vision/recognizing_text_in_images#3601256
Here is a sample code in TextRecognition.swift
/// Runs Vision OCR on the image at `imgPath` and settles the React Native promise.
///
/// - Parameters:
///   - imgPath: Path to the image, optionally prefixed with `file://`.
///   - options: Optional keys: `visionIgnoreThreshold` (number),
///     `automaticallyDetectsLanguage` (bool), `recognitionLanguages` ([String]),
///     `customWords` ([String]), `usesLanguageCorrection` (bool),
///     `recognitionLevel` ("fast" | anything else = accurate).
///   - resolve: Promise resolver, invoked by `recognizeTextHandler` on success.
///   - reject: Promise rejecter, invoked on every failure path so the JS
///     promise can never be left pending.
@objc(recognize:withOptions:withResolver:withRejecter:)
func recognize(imgPath: String, options: [String: Any], resolve: @escaping RCTPromiseResolveBlock, reject: @escaping RCTPromiseRejectBlock) {
    guard !imgPath.isEmpty else {
        reject("ERR", "You must include the image path", nil)
        return
    }
    let formattedImgPath = imgPath.stripPrefix("file://")

    // Defaults; each may be overridden through `options`.
    var threshold: Float = 0.0
    var languages: [String] = ["en-US"]
    var autoDetectsLanguage = true // fixed typo: was `autoDetetsLanguage`
    var customWords: [String] = []
    var usesLanguageCorrection = false
    var recognitionLevel: VNRequestTextRecognitionLevel = .accurate

    // Numbers crossing the React Native bridge arrive boxed as NSNumber
    // (usually Double-backed); casting straight to Float can fail, so read
    // the value through NSNumber instead.
    if let ignoreThreshold = (options["visionIgnoreThreshold"] as? NSNumber)?.floatValue,
       !ignoreThreshold.isZero {
        threshold = ignoreThreshold
    }
    if let automaticallyDetectsLanguage = options["automaticallyDetectsLanguage"] as? Bool {
        autoDetectsLanguage = automaticallyDetectsLanguage
    }
    if let recognitionLanguages = options["recognitionLanguages"] as? [String] {
        languages = recognitionLanguages
    }
    if let words = options["customWords"] as? [String] {
        customWords = words
    }
    if let usesCorrection = options["usesLanguageCorrection"] as? Bool {
        usesLanguageCorrection = usesCorrection
    }
    if let level = options["recognitionLevel"] as? String, level == "fast" {
        recognitionLevel = .fast
    }

    do {
        let imgData = try Data(contentsOf: URL(fileURLWithPath: formattedImgPath))
        // Reject instead of silently returning: a bare `return` here would
        // leave the JS promise pending forever.
        guard let cgImage = UIImage(data: imgData)?.cgImage else {
            reject("ERR", "Could not decode an image from the given path", nil)
            return
        }
        let requestHandler = VNImageRequestHandler(cgImage: cgImage)
        let ocrRequest = VNRecognizeTextRequest { (request: VNRequest, error: Error?) in
            self.recognizeTextHandler(request: request, threshold: threshold, error: error, resolve: resolve, reject: reject)
        }

        /* Supported `recognitionLanguages` by request revision:
           Revision 3, .accurate, iOS 16+:
             ["en-US", "fr-FR", "it-IT", "de-DE", "es-ES", "pt-BR", "zh-Hans", "zh-Hant", "yue-Hans", "yue-Hant", "ko-KR", "ja-JP", "ru-RU", "uk-UA"]
           Revision 3, .fast, iOS 16+:
             ["en-US", "fr-FR", "it-IT", "de-DE", "es-ES", "pt-BR"]
           Revision 2, .accurate, iOS 14+:
             ["en-US", "fr-FR", "it-IT", "de-DE", "es-ES", "pt-BR", "zh-Hans", "zh-Hant"]
           Revision 2, .fast, iOS 14+:
             ["en-US", "fr-FR", "it-IT", "de-DE", "es-ES", "pt-BR"]
        */
        // `automaticallyDetectsLanguage` only exists on iOS 16+. On older
        // systems — or when auto-detection is disabled — fall back to the
        // explicit language list.
        if autoDetectsLanguage, #available(iOS 16.0, *) {
            ocrRequest.automaticallyDetectsLanguage = true
        } else {
            ocrRequest.recognitionLanguages = languages
        }
        ocrRequest.customWords = customWords
        ocrRequest.usesLanguageCorrection = usesLanguageCorrection
        ocrRequest.recognitionLevel = recognitionLevel

        try requestHandler.perform([ocrRequest])
    } catch {
        reject("ERR", error.localizedDescription, nil)
    }
}
Call 'recognitionLanguages' in JS
// Example: disable automatic language detection and restrict Vision OCR
// to US English by passing an explicit `recognitionLanguages` list.
const text = await TextRecognition.recognize(img, {
automaticallyDetectsLanguage: false,
recognitionLanguages: ["en-US"]
});
Hope this works!
@ElicaInc yeah this feels right. I'm thinking a union type of the supported languages for `recognitionLanguages`
would be nice:
// react-native-text-recognition/src/index.tsx
import { NativeModules } from 'react-native';

// Languages accepted by VNRecognizeTextRequest revision 3 (.accurate, iOS 16+).
// Fixed typo: was `SupportedLanuages`.
type SupportedLanguages = "en-US" | "fr-FR" | "it-IT" | "de-DE" | "es-ES" | "pt-BR" | "zh-Hans" | "zh-Hant" | "yue-Hans" | "yue-Hant" | "ko-KR" | "ja-JP" | "ru-RU" | "uk-UA";

export type TextRecognitionOptions = {
  visionIgnoreThreshold?: number;
  // Optional (`?`) so existing callers that omit the new fields keep compiling;
  // the native side falls back to its own defaults when they are absent.
  automaticallyDetectsLanguage?: boolean;
  recognitionLanguages?: SupportedLanguages[];
};
...
As for the swift code — I will admit that while swift is not my forte, I will happily test any PRs! 😄
@JoeyEamigh do you have any thoughts here?
Thank you for sharing this great module! If we could set `automaticallyDetectsLanguage`, `recognitionLanguages`, `customWords`, `usesLanguageCorrection`, and `recognitionLevel` from JS, that would be great.