mirror of https://github.com/status-im/react-native-camera.git
synced 2025-02-24 01:38:18 +00:00

fix master after accidental pull-request - sorry

This commit is contained in:
parent 71480c609b
commit 1b06297c3a
@@ -43,6 +43,6 @@ dependencies {
     compile "com.drewnoakes:metadata-extractor:2.9.1"
     compile 'com.google.android.gms:play-services-vision:+'
     compile "com.android.support:exifinterface:+"
     compile project(':openCVLibrary340')
-    compile 'com.github.react-native-community:cameraview:d529251d24c0a367a28eea28f0eac2269d12f772'
+    compile 'com.github.react-native-community:cameraview:cc47bb28ed2fc54a8c56a4ce9ce53edd1f0af3a5'
 }
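The substantive change in this hunk is the cameraview pin: the `com.github.react-native-community:cameraview:<commit-hash>` coordinate is a JitPack-style reference to a specific Git commit, so swapping the hash back is what re-points the build at the camera view the repository used before the accidental pull request.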
@@ -32,183 +32,174 @@ import java.io.ByteArrayOutputStream;

import javax.annotation.Nullable;

public class CameraModule extends ReactContextBaseJavaModule {
  private static final String TAG = "CameraModule";

  private ScopedContext mScopedContext;
  static final int VIDEO_2160P = 0;
  static final int VIDEO_1080P = 1;
  static final int VIDEO_720P = 2;
  static final int VIDEO_480P = 3;
  static final int VIDEO_4x3 = 4;

  public static final Map<String, Object> VALID_BARCODE_TYPES =
      Collections.unmodifiableMap(new HashMap<String, Object>() {
        {
          put("aztec", BarcodeFormat.AZTEC.toString());
          put("ean13", BarcodeFormat.EAN_13.toString());
          put("ean8", BarcodeFormat.EAN_8.toString());
          put("qr", BarcodeFormat.QR_CODE.toString());
          put("pdf417", BarcodeFormat.PDF_417.toString());
          put("upc_e", BarcodeFormat.UPC_E.toString());
          put("datamatrix", BarcodeFormat.DATA_MATRIX.toString());
          put("code39", BarcodeFormat.CODE_39.toString());
          put("code93", BarcodeFormat.CODE_93.toString());
          put("interleaved2of5", BarcodeFormat.ITF.toString());
          put("codabar", BarcodeFormat.CODABAR.toString());
          put("code128", BarcodeFormat.CODE_128.toString());
          put("maxicode", BarcodeFormat.MAXICODE.toString());
          put("rss14", BarcodeFormat.RSS_14.toString());
          put("rssexpanded", BarcodeFormat.RSS_EXPANDED.toString());
          put("upc_a", BarcodeFormat.UPC_A.toString());
          put("upc_ean", BarcodeFormat.UPC_EAN_EXTENSION.toString());
        }
      });

  public CameraModule(ReactApplicationContext reactContext) {
    super(reactContext);
    mScopedContext = new ScopedContext(reactContext);
  }

  public ScopedContext getScopedContext() {
    return mScopedContext;
  }

  @Override
  public String getName() {
    return "RNCameraModule";
  }

  @Nullable
  @Override
  public Map<String, Object> getConstants() {
    return Collections.unmodifiableMap(new HashMap<String, Object>() {
      {
        put("Type", getTypeConstants());
        put("FlashMode", getFlashModeConstants());
        put("AutoFocus", getAutoFocusConstants());
        put("WhiteBalance", getWhiteBalanceConstants());
        put("VideoQuality", getVideoQualityConstants());
        put("BarCodeType", getBarCodeConstants());
        put("FaceDetection", Collections.unmodifiableMap(new HashMap<String, Object>() {
          {
            put("Mode", getFaceDetectionModeConstants());
            put("Landmarks", getFaceDetectionLandmarksConstants());
            put("Classifications", getFaceDetectionClassificationsConstants());
          }
        }));
      }

      private Map<String, Object> getFaceDetectionModeConstants() {
        return Collections.unmodifiableMap(new HashMap<String, Object>() {
          {
            put("fast", RNFaceDetector.FAST_MODE);
            put("accurate", RNFaceDetector.ACCURATE_MODE);
          }
        });
      }

      private Map<String, Object> getFaceDetectionClassificationsConstants() {
        return Collections.unmodifiableMap(new HashMap<String, Object>() {
          {
            put("all", RNFaceDetector.ALL_CLASSIFICATIONS);
            put("none", RNFaceDetector.NO_CLASSIFICATIONS);
          }
        });
      }

      private Map<String, Object> getFaceDetectionLandmarksConstants() {
        return Collections.unmodifiableMap(new HashMap<String, Object>() {
          {
            put("all", RNFaceDetector.ALL_LANDMARKS);
            put("none", RNFaceDetector.NO_LANDMARKS);
          }
        });
      }

      private Map<String, Object> getTypeConstants() {
        return Collections.unmodifiableMap(new HashMap<String, Object>() {
          {
            put("front", Constants.FACING_FRONT);
            put("back", Constants.FACING_BACK);
          }
        });
      }

      private Map<String, Object> getFlashModeConstants() {
        return Collections.unmodifiableMap(new HashMap<String, Object>() {
          {
            put("off", Constants.FLASH_OFF);
            put("on", Constants.FLASH_ON);
            put("auto", Constants.FLASH_AUTO);
            put("torch", Constants.FLASH_TORCH);
          }
        });
      }

      private Map<String, Object> getAutoFocusConstants() {
        return Collections.unmodifiableMap(new HashMap<String, Object>() {
          {
            put("on", true);
            put("off", false);
          }
        });
      }

      private Map<String, Object> getWhiteBalanceConstants() {
        return Collections.unmodifiableMap(new HashMap<String, Object>() {
          {
            put("auto", Constants.WB_AUTO);
            put("cloudy", Constants.WB_CLOUDY);
            put("sunny", Constants.WB_SUNNY);
            put("shadow", Constants.WB_SHADOW);
            put("fluorescent", Constants.WB_FLUORESCENT);
            put("incandescent", Constants.WB_INCANDESCENT);
          }
        });
      }

      private Map<String, Object> getVideoQualityConstants() {
        return Collections.unmodifiableMap(new HashMap<String, Object>() {
          {
            put("2160p", VIDEO_2160P);
            put("1080p", VIDEO_1080P);
            put("720p", VIDEO_720P);
            put("480p", VIDEO_480P);
            put("4:3", VIDEO_4x3);
          }
        });
      }

      private Map<String, Object> getBarCodeConstants() {
        return VALID_BARCODE_TYPES;
      }
    });
  }

  @ReactMethod
  public void takePicture(final ReadableMap options, final int viewTag, final Promise promise) {
    final ReactApplicationContext context = getReactApplicationContext();
    final File cacheDirectory = mScopedContext.getCacheDirectory();
    UIManagerModule uiManager = context.getNativeModule(UIManagerModule.class);
    uiManager.addUIBlock(new UIBlock() {
      @Override
      public void execute(NativeViewHierarchyManager nativeViewHierarchyManager) {
        RNCameraView cameraView = (RNCameraView) nativeViewHierarchyManager.resolveView(viewTag);
        try {
          if (!Build.FINGERPRINT.contains("generic")) {
            if (cameraView.isCameraOpened()) {
              cameraView.takePicture(options, promise, cacheDirectory);
            } else {
              promise.reject("E_CAMERA_UNAVAILABLE", "Camera is not running");
            }
          } else {
            Bitmap image = RNCameraViewHelper.generateSimulatorPhoto(cameraView.getWidth(), cameraView.getHeight());
            ByteBuffer byteBuffer = ByteBuffer.allocate(image.getRowBytes() * image.getHeight());
            image.copyPixelsToBuffer(byteBuffer);
            new ResolveTakenPictureAsyncTask(byteBuffer.array(), promise, options).execute();
          }
        } catch (Exception e) {
          promise.reject("E_CAMERA_BAD_VIEWTAG", "takePictureAsync: Expected a Camera component");
        }
@@ -226,76 +217,76 @@ public class CameraModule extends ReactContextBaseJavaModule {
    });
  }

  @ReactMethod
  public void record(final ReadableMap options, final int viewTag, final Promise promise) {
    final ReactApplicationContext context = getReactApplicationContext();
    final File cacheDirectory = mScopedContext.getCacheDirectory();
    UIManagerModule uiManager = context.getNativeModule(UIManagerModule.class);

    uiManager.addUIBlock(new UIBlock() {
      @Override
      public void execute(NativeViewHierarchyManager nativeViewHierarchyManager) {
        final RNCameraView cameraView;

        try {
          cameraView = (RNCameraView) nativeViewHierarchyManager.resolveView(viewTag);
          if (cameraView.isCameraOpened()) {
            cameraView.record(options, promise, cacheDirectory);
          } else {
            promise.reject("E_CAMERA_UNAVAILABLE", "Camera is not running");
          }
        } catch (Exception e) {
          promise.reject("E_CAMERA_BAD_VIEWTAG", "recordAsync: Expected a Camera component");
        }
      }
    });
  }

  @ReactMethod
  public void stopRecording(final int viewTag) {
    final ReactApplicationContext context = getReactApplicationContext();
    UIManagerModule uiManager = context.getNativeModule(UIManagerModule.class);
    uiManager.addUIBlock(new UIBlock() {
      @Override
      public void execute(NativeViewHierarchyManager nativeViewHierarchyManager) {
        final RNCameraView cameraView;

        try {
          cameraView = (RNCameraView) nativeViewHierarchyManager.resolveView(viewTag);
          if (cameraView.isCameraOpened()) {
            cameraView.stopRecording();
          }
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    });
  }

  @ReactMethod
  public void getSupportedRatios(final int viewTag, final Promise promise) {
    final ReactApplicationContext context = getReactApplicationContext();
    UIManagerModule uiManager = context.getNativeModule(UIManagerModule.class);
    uiManager.addUIBlock(new UIBlock() {
      @Override
      public void execute(NativeViewHierarchyManager nativeViewHierarchyManager) {
        final RNCameraView cameraView;
        try {
          cameraView = (RNCameraView) nativeViewHierarchyManager.resolveView(viewTag);
          WritableArray result = Arguments.createArray();
          if (cameraView.isCameraOpened()) {
            Set<AspectRatio> ratios = cameraView.getSupportedAspectRatios();
            for (AspectRatio ratio : ratios) {
              result.pushString(ratio.toString());
            }
            promise.resolve(result);
          } else {
            promise.reject("E_CAMERA_UNAVAILABLE", "Camera is not running");
          }
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    });
  }
}
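For context on how VALID_BARCODE_TYPES is consumed: the map translates the JS-facing type names ("qr", "ean13", ...) into ZXing BarcodeFormat names, which a scanner can turn into decode hints. A minimal sketch, assuming a caller in the same package and a hypothetical barCodeTypes input list (this helper is illustrative, not part of the commit):

    import com.google.zxing.BarcodeFormat;
    import com.google.zxing.DecodeHintType;
    import com.google.zxing.MultiFormatReader;

    import java.util.ArrayList;
    import java.util.EnumMap;
    import java.util.List;

    class BarcodeHintBuilder {
      // Builds a reader restricted to the formats named in barCodeTypes,
      // e.g. ["qr", "ean13"], using CameraModule.VALID_BARCODE_TYPES above.
      static MultiFormatReader buildReader(List<String> barCodeTypes) {
        List<BarcodeFormat> formats = new ArrayList<>();
        for (String type : barCodeTypes) {
          Object name = CameraModule.VALID_BARCODE_TYPES.get(type);
          if (name != null) {
            // Values in the map are BarcodeFormat enum names stored via toString().
            formats.add(BarcodeFormat.valueOf((String) name));
          }
        }
        EnumMap<DecodeHintType, Object> hints = new EnumMap<>(DecodeHintType.class);
        hints.put(DecodeHintType.POSSIBLE_FORMATS, formats);
        MultiFormatReader reader = new MultiFormatReader();
        reader.setHints(hints);
        return reader;
      }
    }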
@@ -114,22 +114,11 @@ public class CameraViewManager extends ViewGroupManager<RNCameraView> {
    view.setShouldScanBarCodes(barCodeScannerEnabled);
  }

  @ReactProp(name = "useCamera2Api")
  public void setUseCamera2Api(RNCameraView view, boolean useCamera2Api) {
    view.setUsingCamera2Api(useCamera2Api);
  }

-  @ReactProp(name = "faceDetectionExpectedOrientation")
-  public void setFaceDetectionExpectedOrientation(RNCameraView view, int faceDetectionExpectedOrientation) {
-    view.setFaceDetectionExpectedOrientation(faceDetectionExpectedOrientation);
-  }
-
-  @ReactProp(name = "objectsToDetect")
-  public void updateObjectsToDetect(RNCameraView view, int objectsToDetect) {
-    view.updateObjectsToDetect(objectsToDetect);
-  }
-
  @ReactProp(name = "faceDetectorEnabled")
  public void setFaceDetecting(RNCameraView view, boolean faceDetectorEnabled) {
    view.setShouldDetectFaces(faceDetectorEnabled);
@@ -5,11 +5,10 @@ import android.content.pm.PackageManager;
import android.graphics.Color;
import android.media.CamcorderProfile;
import android.os.Build;
import android.os.Build.VERSION;
import android.support.v4.content.ContextCompat;
import android.support.v4.view.ViewCompat;
import android.util.SparseArray;
import android.view.View;

import com.facebook.react.bridge.Arguments;
import com.facebook.react.bridge.LifecycleEventListener;
import com.facebook.react.bridge.Promise;
@@ -17,11 +16,21 @@ import com.facebook.react.bridge.ReadableMap;
import com.facebook.react.bridge.WritableMap;
import com.facebook.react.uimanager.ThemedReactContext;
import com.google.android.cameraview.CameraView;
import com.google.android.cameraview.CameraView.Callback;
+import com.google.android.gms.vision.face.Face;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.Result;

+import org.reactnative.camera.tasks.BarCodeScannerAsyncTask;
+import org.reactnative.camera.tasks.BarCodeScannerAsyncTaskDelegate;
+import org.reactnative.camera.tasks.FaceDetectorAsyncTask;
+import org.reactnative.camera.tasks.FaceDetectorAsyncTaskDelegate;
+import org.reactnative.camera.tasks.ResolveTakenPictureAsyncTask;
+import org.reactnative.camera.utils.ImageDimensions;
+import org.reactnative.camera.utils.RNFileUtils;
+import org.reactnative.facedetector.RNFaceDetector;
+
import java.io.File;
import java.io.IOException;
import java.util.EnumMap;
@@ -31,17 +40,8 @@ import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
-import org.reactnative.camera.tasks.BarCodeScannerAsyncTask;
-import org.reactnative.camera.tasks.BarCodeScannerAsyncTaskDelegate;
-import org.reactnative.camera.tasks.OpenCVProcessorAsyncTask;
-import org.reactnative.camera.tasks.OpenCVProcessorAsyncTaskDelegate;
-import org.reactnative.camera.tasks.ResolveTakenPictureAsyncTask;
-import org.reactnative.camera.utils.ImageDimensions;
-import org.reactnative.camera.utils.RNFileUtils;
-import org.reactnative.facedetector.RNFaceDetector;
-import org.reactnative.opencv.OpenCVProcessor;

-public class RNCameraView extends CameraView implements LifecycleEventListener, BarCodeScannerAsyncTaskDelegate, OpenCVProcessorAsyncTaskDelegate {
+public class RNCameraView extends CameraView implements LifecycleEventListener, BarCodeScannerAsyncTaskDelegate, FaceDetectorAsyncTaskDelegate {
  private ThemedReactContext mThemedReactContext;
  private Queue<Promise> mPictureTakenPromises = new ConcurrentLinkedQueue<>();
  private Map<Promise, ReadableMap> mPictureTakenOptions = new ConcurrentHashMap<>();
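The interface swap in the class declaration is the core of the revert: the view stops consuming the OpenCV processor's SparseArray<Map<String, Float>> results and goes back to the Play Services-based FaceDetectorAsyncTaskDelegate path, whose SparseArray<Face> payloads reappear in the onFacesDetected hunk below.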
@@ -58,22 +58,18 @@ public class RNCameraView extends CameraView implements LifecycleEventListener,

  // Scanning-related properties
  private final MultiFormatReader mMultiFormatReader = new MultiFormatReader();
-  // private final RNFaceDetector mFaceDetector;
+  private final RNFaceDetector mFaceDetector;
  private boolean mShouldDetectFaces = false;
  private boolean mShouldScanBarCodes = false;
-  private int mFaceDetectionExpectedOrientation = -1;
-  private int mObjectsToDetect = 0;
  private int mFaceDetectorMode = RNFaceDetector.FAST_MODE;
  private int mFaceDetectionLandmarks = RNFaceDetector.NO_LANDMARKS;
  private int mFaceDetectionClassifications = RNFaceDetector.NO_CLASSIFICATIONS;
-  private final OpenCVProcessor openCVProcessor;

  public RNCameraView(ThemedReactContext themedReactContext) {
    super(themedReactContext, true);
    initBarcodeReader();
    mThemedReactContext = themedReactContext;
-    // mFaceDetector = new RNFaceDetector(themedReactContext);
-    this.openCVProcessor = new OpenCVProcessor(themedReactContext);
+    mFaceDetector = new RNFaceDetector(themedReactContext);
    setupFaceDetector();
    themedReactContext.addLifecycleEventListener(this);
@@ -120,15 +116,11 @@ public class RNCameraView extends CameraView implements LifecycleEventListener,
          new BarCodeScannerAsyncTask(delegate, mMultiFormatReader, data, width, height).execute();
        }

-        // if (mShouldDetectFaces && !faceDetectorTaskLock && cameraView instanceof FaceDetectorAsyncTaskDelegate) {
-        // faceDetectorTaskLock = true;
-        // FaceDetectorAsyncTaskDelegate delegate = (FaceDetectorAsyncTaskDelegate) cameraView;
-        // new FaceDetectorAsyncTask(delegate, mFaceDetector, data, width, height, correctRotation).execute();
-        // }
-
-        OpenCVProcessorAsyncTaskDelegate delegate = (OpenCVProcessorAsyncTaskDelegate) cameraView;
-        new OpenCVProcessorAsyncTask(delegate, openCVProcessor, data, width, height, correctRotation).execute();
+        if (mShouldDetectFaces && !faceDetectorTaskLock && cameraView instanceof FaceDetectorAsyncTaskDelegate) {
+          faceDetectorTaskLock = true;
+          FaceDetectorAsyncTaskDelegate delegate = (FaceDetectorAsyncTaskDelegate) cameraView;
+          new FaceDetectorAsyncTask(delegate, mFaceDetector, data, width, height, correctRotation).execute();
+        }
      }
    });
  }
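The preview path above gates detection behind faceDetectorTaskLock so at most one FaceDetectorAsyncTask is in flight at a time; the delegate is expected to clear the lock when the task finishes. A minimal sketch of that handshake, assuming onFaceDetectingTaskCompleted() is the completion callback (the field and callback names follow the diff; the rest is illustrative):

    private volatile boolean faceDetectorTaskLock = false;

    void onFramePreview(byte[] data, int width, int height, int correctRotation) {
      if (mShouldDetectFaces && !faceDetectorTaskLock) {
        faceDetectorTaskLock = true; // claim the single task slot before starting
        new FaceDetectorAsyncTask(this, mFaceDetector, data, width, height, correctRotation).execute();
      } // otherwise drop this frame; a detector task is still running
    }

    @Override
    public void onFaceDetectingTaskCompleted() {
      faceDetectorTaskLock = false; // release the slot so the next frame can scan
    }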
@@ -239,45 +231,31 @@ public class RNCameraView extends CameraView implements LifecycleEventListener,
   * Initial setup of the face detector
   */
  private void setupFaceDetector() {
-    // mFaceDetector.setMode(mFaceDetectorMode);
-    // mFaceDetector.setLandmarkType(mFaceDetectionLandmarks);
-    // mFaceDetector.setClassificationType(mFaceDetectionClassifications);
-    // mFaceDetector.setTracking(true);
-  }
-
-  public void setFaceDetectionExpectedOrientation(int expectedFaceOrientation) {
-    mFaceDetectionExpectedOrientation = expectedFaceOrientation;
-    if (openCVProcessor != null) {
-      openCVProcessor.setFaceDetectionExpectedOrientation(expectedFaceOrientation);
-    }
-  }
-
-  public void updateObjectsToDetect(int objectsToDetect){
-    mObjectsToDetect = objectsToDetect;
-    if(openCVProcessor != null){
-      openCVProcessor.updateObjectsToDetect(objectsToDetect);
-    }
+    mFaceDetector.setMode(mFaceDetectorMode);
+    mFaceDetector.setLandmarkType(mFaceDetectionLandmarks);
+    mFaceDetector.setClassificationType(mFaceDetectionClassifications);
+    mFaceDetector.setTracking(true);
  }

  public void setFaceDetectionLandmarks(int landmarks) {
    mFaceDetectionLandmarks = landmarks;
-    // if (mFaceDetector != null) {
-    //   mFaceDetector.setLandmarkType(landmarks);
-    // }
+    if (mFaceDetector != null) {
+      mFaceDetector.setLandmarkType(landmarks);
+    }
  }

  public void setFaceDetectionClassifications(int classifications) {
    mFaceDetectionClassifications = classifications;
-    // if (mFaceDetector != null) {
-    //   mFaceDetector.setClassificationType(classifications);
-    // }
+    if (mFaceDetector != null) {
+      mFaceDetector.setClassificationType(classifications);
+    }
  }

  public void setFaceDetectionMode(int mode) {
    mFaceDetectorMode = mode;
-    // if (mFaceDetector != null) {
-    //   mFaceDetector.setMode(mode);
-    // }
+    if (mFaceDetector != null) {
+      mFaceDetector.setMode(mode);
+    }
  }

  public void setShouldDetectFaces(boolean shouldDetectFaces) {
@@ -285,13 +263,23 @@ public class RNCameraView extends CameraView implements LifecycleEventListener,
    setScanning(mShouldDetectFaces || mShouldScanBarCodes);
  }

-  public void onFacesDetected(SparseArray<Map<String, Float>> facesReported, int sourceWidth, int sourceHeight, int sourceRotation) {
-    if (facesReported != null) {
-      RNCameraViewHelper.emitFacesDetectedEvent(this, facesReported, new ImageDimensions(sourceWidth, sourceHeight, sourceRotation, getFacing()));
-    }
+  public void onFacesDetected(SparseArray<Face> facesReported, int sourceWidth, int sourceHeight, int sourceRotation) {
+    if (!mShouldDetectFaces) {
+      return;
+    }
+
+    SparseArray<Face> facesDetected = facesReported == null ? new SparseArray<Face>() : facesReported;
+
+    ImageDimensions dimensions = new ImageDimensions(sourceWidth, sourceHeight, sourceRotation, getFacing());
+    RNCameraViewHelper.emitFacesDetectedEvent(this, facesDetected, dimensions);
  }

-  public void onFaceDetectionError(OpenCVProcessor faceDetector) {
+  public void onFaceDetectionError(RNFaceDetector faceDetector) {
    if (!mShouldDetectFaces) {
      return;
    }

    RNCameraViewHelper.emitFaceDetectionErrorEvent(this, faceDetector);
  }

  @Override
@@ -326,7 +314,7 @@ public class RNCameraView extends CameraView implements LifecycleEventListener,

  @Override
  public void onHostDestroy() {
-    // mFaceDetector.release();
+    mFaceDetector.release();
    stop();
  }

@@ -23,16 +23,15 @@ import org.reactnative.camera.events.CameraMountErrorEvent;
import org.reactnative.camera.events.CameraReadyEvent;
import org.reactnative.camera.events.FaceDetectionErrorEvent;
import org.reactnative.camera.events.FacesDetectedEvent;
-import org.reactnative.camera.events.OpenCVProcessorFacesDetectedEvent;
import org.reactnative.camera.utils.ImageDimensions;
import org.reactnative.facedetector.RNFaceDetector;

import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Locale;
import java.util.Map;

public class RNCameraViewHelper {

  public static final String[][] exifTags = new String[][]{
      {"string", ExifInterface.TAG_ARTIST},
      {"int", ExifInterface.TAG_BITS_PER_SAMPLE},
@@ -182,24 +181,22 @@ public class RNCameraViewHelper {

  // Face detection events

  public static void emitFacesDetectedEvent(
      ViewGroup view,
-      SparseArray<Map<String, Float>> faces,
+      SparseArray<Face> faces,
      ImageDimensions dimensions
  ) {
    float density = view.getResources().getDisplayMetrics().density;

    double scaleX = (double) view.getWidth() / (dimensions.getWidth() * density);
    double scaleY = (double) view.getHeight() / (dimensions.getHeight() * density);

-    OpenCVProcessorFacesDetectedEvent event = OpenCVProcessorFacesDetectedEvent.obtain(
+    FacesDetectedEvent event = FacesDetectedEvent.obtain(
        view.getId(),
        faces,
        dimensions,
        scaleX,
        scaleY
    );

    ReactContext reactContext = (ReactContext) view.getContext();
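The scale factors convert detected-face coordinates from preview-frame pixels into the view's coordinate space. As a worked example with illustrative numbers: a 1080-px-wide view, a 480-wide preview image, and a display density of 3.0 give scaleX = 1080 / (480 × 3.0) = 0.75, and each face's x and width are multiplied by that factor before the event is dispatched.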
@@ -303,4 +300,4 @@ public class RNCameraViewHelper {

    return fakePhoto;
  }
}
@@ -1,103 +0,0 @@
package org.reactnative.camera.events;

import android.support.v4.util.Pools;
import android.support.v4.util.Pools.SynchronizedPool;
import android.util.SparseArray;
import com.facebook.react.bridge.Arguments;
import com.facebook.react.bridge.WritableArray;
import com.facebook.react.bridge.WritableMap;
import com.facebook.react.uimanager.events.Event;
import com.facebook.react.uimanager.events.RCTEventEmitter;
import com.google.android.cameraview.CameraView;
import com.google.android.gms.vision.face.Face;

import java.util.Map;

import org.reactnative.camera.CameraViewManager;
import org.reactnative.camera.CameraViewManager.Events;
import org.reactnative.camera.utils.ImageDimensions;
import org.reactnative.facedetector.FaceDetectorUtils;

public class OpenCVProcessorFacesDetectedEvent extends Event<OpenCVProcessorFacesDetectedEvent> {
  private static final Pools.SynchronizedPool<OpenCVProcessorFacesDetectedEvent> EVENTS_POOL =
      new Pools.SynchronizedPool<>(3);

  private SparseArray<Map<String, Float>> mFaces;
  private double mScaleX;
  private double mScaleY;
  private ImageDimensions mImageDimensions;

  private OpenCVProcessorFacesDetectedEvent() {}

  public static OpenCVProcessorFacesDetectedEvent obtain(
      int viewTag,
      SparseArray<Map<String, Float>> faces,
      ImageDimensions dimensions,
      double scaleX,
      double scaleY
  ) {
    OpenCVProcessorFacesDetectedEvent event = EVENTS_POOL.acquire();
    if (event == null) {
      event = new OpenCVProcessorFacesDetectedEvent();
    }
    event.init(viewTag, faces, dimensions, scaleX, scaleY);
    return event;
  }

  private void init(
      int viewTag,
      SparseArray<Map<String, Float>> faces,
      ImageDimensions dimensions,
      double scaleX,
      double scaleY
  ) {
    super.init(viewTag);
    mFaces = faces;
    mImageDimensions = dimensions;
    mScaleX = scaleX;
    mScaleY = scaleY;
  }

  /**
   * note(@sjchmiela)
   * Should the events about detected faces coalesce, the best strategy will be
   * to ensure that events with different faces count are always being transmitted.
   */
  @Override
  public short getCoalescingKey() {
    if (mFaces.size() > Short.MAX_VALUE) {
      return Short.MAX_VALUE;
    }

    return (short) mFaces.size();
  }

  @Override
  public String getEventName() {
    return CameraViewManager.Events.EVENT_ON_FACES_DETECTED.toString();
  }

  @Override
  public void dispatch(RCTEventEmitter rctEventEmitter) {
    rctEventEmitter.receiveEvent(getViewTag(), getEventName(), serializeEventData());
  }

  private WritableMap serializeEventData() {
    WritableArray facesList = Arguments.createArray();
    for (int i = 0; i < this.mFaces.size(); i++) {
      Map<String, Float> face = (Map) this.mFaces.valueAt(i);
      WritableMap serializedFace = Arguments.createMap();
      serializedFace.putDouble("x", face.get("x"));
      serializedFace.putDouble("y", face.get("y"));
      serializedFace.putDouble("width", face.get("width"));
      serializedFace.putDouble("height", face.get("height"));
      serializedFace.putDouble("orientation", face.get("orientation"));
      facesList.pushMap(serializedFace);
    }
    WritableMap event = Arguments.createMap();
    event.putString("type", "face");
    event.putArray("faces", facesList);
    event.putInt("target", getViewTag());
    return event;
  }
}
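The obtain()/init() pair above is the usual React Native event-pooling pattern: Event instances are recycled through a small Pools.SynchronizedPool instead of being allocated for every frame. The generic shape, as a sketch (the class name is hypothetical):

    import android.support.v4.util.Pools;

    class PooledThing {
      private static final Pools.SynchronizedPool<PooledThing> POOL =
          new Pools.SynchronizedPool<>(3); // small pool: events are short-lived

      private PooledThing() {}

      static PooledThing obtain() {
        PooledThing instance = POOL.acquire(); // reuse an instance if one is free
        return instance != null ? instance : new PooledThing();
      }

      void recycle() {
        POOL.release(this); // hand the instance back for reuse
      }
    }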
@@ -1,41 +0,0 @@
package org.reactnative.camera.tasks;

import android.os.AsyncTask;
import android.util.SparseArray;
import java.util.Map;
import org.reactnative.opencv.OpenCVProcessor;

public class OpenCVProcessorAsyncTask extends AsyncTask<Void, Void, SparseArray<Map<String, Float>>> {
  private OpenCVProcessorAsyncTaskDelegate mDelegate;
  private int mHeight;
  private byte[] mImageData;
  private OpenCVProcessor mOpenCVProcessor;
  private int mRotation;
  private int mWidth;

  public OpenCVProcessorAsyncTask(OpenCVProcessorAsyncTaskDelegate delegate, OpenCVProcessor openCVProcessor, byte[] imageData, int width, int height, int rotation) {
    this.mImageData = imageData;
    this.mWidth = width;
    this.mHeight = height;
    this.mRotation = rotation;
    this.mDelegate = delegate;
    this.mOpenCVProcessor = openCVProcessor;
  }

  protected SparseArray<Map<String, Float>> doInBackground(Void... ignored) {
    if (isCancelled() || this.mDelegate == null || this.mOpenCVProcessor == null) {
      return null;
    }
    return this.mOpenCVProcessor.detect(this.mImageData, this.mWidth, this.mHeight, this.mRotation);
  }

  protected void onPostExecute(SparseArray<Map<String, Float>> faces) {
    super.onPostExecute(faces);
    if (faces == null) {
      this.mDelegate.onFaceDetectionError(this.mOpenCVProcessor);
      return;
    }
    this.mDelegate.onFacesDetected(faces, this.mWidth, this.mHeight, this.mRotation);
    this.mDelegate.onFaceDetectingTaskCompleted();
  }
}
@@ -1,13 +0,0 @@
package org.reactnative.camera.tasks;

import android.util.SparseArray;
import java.util.Map;
import org.reactnative.opencv.OpenCVProcessor;

public interface OpenCVProcessorAsyncTaskDelegate {
  void onFaceDetectingTaskCompleted();

  void onFaceDetectionError(OpenCVProcessor openCVProcessor);

  void onFacesDetected(SparseArray<Map<String, Float>> sparseArray, int sourceWidth, int sourceHeight, int sourceRotation);
}
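This deleted interface appears to mirror the shape of FaceDetectorAsyncTaskDelegate (completion, error, and result callbacks), with OpenCV's SparseArray<Map<String, Float>> payload in place of SparseArray<Face>; that is why the RNCameraView hunks above change the onFacesDetected and onFaceDetectionError signatures as part of this revert.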
@@ -1,321 +0,0 @@
package org.reactnative.opencv;

import android.content.Context;
import android.util.Log;
import android.util.SparseArray;
import android.view.Surface;
import android.view.WindowManager;

import com.facebook.react.common.ReactConstants;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfInt4;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.MatOfRect;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.RotatedRect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.objdetect.CascadeClassifier;
import org.reactnative.camera.R;

import static org.opencv.core.CvType.CV_32F;
import static org.opencv.imgproc.Imgproc.morphologyEx;

public class OpenCVProcessor {
  private CascadeClassifier faceDetector;
  private int frame = 0;
  private Context reactContext;
  private int faceDetectionExpectedOrientation = -1;
  private int objectsToDetect = 0;
  private boolean saveDemoFrame = false;

  public OpenCVProcessor(Context context) {
    this.reactContext = context;
    try {
      InputStream is = this.reactContext.getResources().openRawResource(R.raw.lbpcascade_frontalface_improved);
      File mCascadeFile = new File(this.reactContext.getDir("cascade", 0), "lbpcascade_frontalface_improved.xml");
      FileOutputStream os = new FileOutputStream(mCascadeFile);
      byte[] buffer = new byte[4096];
      while (true) {
        int bytesRead = is.read(buffer);
        if (bytesRead == -1) {
          break;
        }
        os.write(buffer, 0, bytesRead);
      }
      is.close();
      os.close();
      this.faceDetector = new CascadeClassifier(mCascadeFile.getAbsolutePath());
      if (this.faceDetector.empty()) {
        Log.e(ReactConstants.TAG, "Failed to load cascade classifier");
        this.faceDetector = null;
      } else {
        Log.i(ReactConstants.TAG, "Loaded cascade classifier from " + mCascadeFile.getAbsolutePath());
      }
    } catch (IOException e) {
      e.printStackTrace();
      Log.e(ReactConstants.TAG, "Failed to load cascade. Exception thrown: " + e);
    }
    Log.d(ReactConstants.TAG, "---OpenCV Constructor---");
  }

  private void saveMatToDisk(Mat mat) {
    Imgcodecs.imwrite("/sdcard/nect/" + String.valueOf(System.currentTimeMillis()) + ".jpg", mat);
  }

  private int rotateImage(Mat image, int rotation) {
    int imageRotation = 1;
    switch (rotation) {
      case 90:
        imageRotation = 2;
        break;
      case 180:
        imageRotation = 3;
        break;
      case 270:
        imageRotation = 0;
        break;
    }

    int expectedFaceOrientation = 3;

    if (faceDetectionExpectedOrientation != -1) {
      expectedFaceOrientation = faceDetectionExpectedOrientation;
    } else {
      // rotate image according to device-orientation
      WindowManager wManager = (WindowManager) reactContext.getSystemService(reactContext.WINDOW_SERVICE);
      int deviceRotation = wManager.getDefaultDisplay().getRotation();

      switch (deviceRotation) {
        case Surface.ROTATION_0:
          expectedFaceOrientation = 0;
          break;
        case Surface.ROTATION_90:
          expectedFaceOrientation = 1;
          break;
        case Surface.ROTATION_180:
          expectedFaceOrientation = 2;
          break;
      }
    }

    int rotationToBeApplied = expectedFaceOrientation + imageRotation % 4;

    switch (rotationToBeApplied) {
      case 2:
        Core.transpose(image, image);
        Core.flip(image, image, 1);
        break;
      case 3:
        Core.flip(image, image, -1);
        break;
      case 0:
        Core.transpose(image, image);
        Core.flip(image, image, 0);
        break;
    }

    return expectedFaceOrientation;
  }

  private float resizeImage(Mat image, float width) {
    float scale = width / image.cols();

    Imgproc.resize(image, image, new Size(), scale, scale, 2);

    return scale;
  }

  public SparseArray<Map<String, Float>> detect(byte[] imageData, int width, int height, int rotation) {
    if (this.frame % 15 == 0) {
      SparseArray<Map<String, Float>> objects = new SparseArray();
      Mat mat = new Mat((height / 2) + height, width, CvType.CV_8UC1);
      mat.put(0, 0, imageData);

      Mat grayMat = new Mat();
      Imgproc.cvtColor(mat, grayMat, Imgproc.COLOR_YUV2GRAY_420);

      switch (objectsToDetect) {
        case 0:
          objects = detectFaces(grayMat, rotation);
          break;
        case 1:
          objects = detectTextBlocks(grayMat, rotation);
          break;
      }

      return objects;
    }
    this.frame++;
    return null;
  }

  private SparseArray<Map<String, Float>> detectFaces(Mat image, int rotation) {
    SparseArray<Map<String, Float>> faces = new SparseArray();
    int expectedFaceOrientation = rotateImage(image, rotation);

    float imageWidth = 480f;
    float scale = resizeImage(image, imageWidth);

    float imageHeight = image.rows();

    // Save Demo Frame
    if (saveDemoFrame && this.frame == 30) {
      Log.d(ReactConstants.TAG, "---SAVE IMAGE!!--- ");
      saveMatToDisk(image);
    }

    MatOfRect rec = new MatOfRect();
    this.faceDetector.detectMultiScale(image, rec, 1.3, 3, 0, new Size(50, 50), new Size());

    Rect[] detectedObjects = rec.toArray();
    if (detectedObjects.length > 0) {
      Log.d(ReactConstants.TAG, "---FOUND FACE!!--- ");

      for (int i = 0; i < detectedObjects.length; i++) {
        Map<String, Float> face = new HashMap();
        face.put("x", detectedObjects[i].x / imageWidth);
        face.put("y", detectedObjects[i].y / imageHeight);
        face.put("width", detectedObjects[i].width / imageWidth);
        face.put("height", detectedObjects[i].height / imageHeight);
        face.put("orientation", (float) expectedFaceOrientation);
        faces.append(i, face);
      }
    }

    return faces;
  }

  private SparseArray<Map<String, Float>> detectTextBlocks(Mat image, int rotation) {
    SparseArray<Map<String, Float>> objects = new SparseArray();

    int orientation = rotateImage(image, rotation);

    float algorithmWidth = 1080f;
    float imageWidth = 480f;
    float algorithmScale = imageWidth / algorithmWidth;
    float scale = resizeImage(image, imageWidth);

    float imageHeight = image.rows();

    float rectKernX = 17f * algorithmScale;
    float rectKernY = 6f * algorithmScale;
    float sqKernXY = 40f * algorithmScale;
    float minSize = 3000f * algorithmScale;
    float maxSize = 100000f * algorithmScale;

    Mat processedImage = image.clone();

    // initialize a rectangular and square structuring kernel
    //float factor = (float)min(image.rows, image.cols) / 600.;
    Mat rectKernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(rectKernX, rectKernY));
    Mat rectKernel2 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(sqKernXY, (int) (0.666666 * sqKernXY)));

    // Smooth the image using a 3x3 Gaussian, then apply the blackhat morphological
    // operator to find dark regions on a light background
    Imgproc.GaussianBlur(processedImage, processedImage, new Size(3, 3), 0);
    morphologyEx(processedImage, processedImage, Imgproc.MORPH_BLACKHAT, rectKernel);

    // Compute the Scharr gradient of the blackhat image
    Mat imageGrad = new Mat();

    //Sobel(processedImage, imageGrad, CV_32F, 1, 0, CV_SCHARR);
    Imgproc.Sobel(processedImage, imageGrad, CV_32F, 1, 0);
    // Core.convertScaleAbs(imageGrad/8, processedImage);
    Core.convertScaleAbs(imageGrad, processedImage);

    // Apply a closing operation using the rectangular kernel to close gaps in between
    // letters, then apply Otsu's thresholding method
    morphologyEx(processedImage, processedImage, Imgproc.MORPH_CLOSE, rectKernel);
    Imgproc.threshold(processedImage, processedImage, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
    // , 1, 1
    Imgproc.erode(processedImage, processedImage, new Mat(), new Point(-1, -1), 2);

    // Perform another closing operation, this time using the square kernel to close gaps
    // between lines of TextBlocks
    morphologyEx(processedImage, processedImage, Imgproc.MORPH_CLOSE, rectKernel2);

    // Find contours in the thresholded image and sort them by size
    float minContourArea = minSize;
    float maxContourArea = maxSize;

    // https://github.com/codertimo/Vision/issues/7
    // http://answers.opencv.org/question/6206/opencv4android-conversion-from-matofkeypoint-to-matofpoint2f/
    List<MatOfPoint> contours = new ArrayList<>();
    MatOfInt4 hierarchy = new MatOfInt4();
    Imgproc.findContours(processedImage, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    // Create a result vector
    List<RotatedRect> minRects = new ArrayList<>();
    for (int i = 0, I = contours.size(); i < I; ++i) {
      // Filter by provided area limits
      if (Imgproc.contourArea(contours.get(i)) > minContourArea && Imgproc.contourArea(contours.get(i)) < maxContourArea)
        minRects.add(Imgproc.minAreaRect(new MatOfPoint2f(contours.get(i).toArray())));
    }

    if (saveDemoFrame) {
      Mat debugDrawing = image.clone();
      for (int i = 0, I = minRects.size(); i < I; ++i) {
        Point[] rect_points = new Point[4];
        minRects.get(i).points(rect_points);
        for (int j = 0; j < 4; ++j)
          Imgproc.line(debugDrawing, rect_points[j], rect_points[(j + 1) % 4], new Scalar(255, 0, 0), 1, 8, 0);
      }

      saveMatToDisk(debugDrawing);
    }

    if (minRects.size() > 0) {

      for (int i = 0; i < minRects.size(); i++) {
        Point[] rect_points = new Point[4];
        minRects.get(i).points(rect_points);

        float xRel = (float) rect_points[1].x / imageWidth;
        float yRel = (float) rect_points[1].y / imageHeight;
        float widthRel = (float) Math.abs(rect_points[3].x - rect_points[1].x) / imageWidth;
        float heightRel = (float) Math.abs(rect_points[3].y - rect_points[1].y) / imageHeight;
        float sizeRel = Math.abs(widthRel * heightRel);
        float ratio = (float) Math.abs(rect_points[3].x - rect_points[1].x) / (float) Math.abs(rect_points[3].y - rect_points[1].y);

        // if object large enough
        if (sizeRel >= 0.025 & ratio >= 5.5 & ratio <= 8.5) {
          Map<String, Float> object = new HashMap();
          object.put("x", xRel);
          object.put("y", yRel);
          object.put("width", widthRel);
          object.put("height", heightRel);
          object.put("orientation", (float) orientation);
          objects.append(i, object);
        }
      }
    }

    return objects;
  }

  public void setFaceDetectionExpectedOrientation(int expectedOrientation) {
    faceDetectionExpectedOrientation = expectedOrientation;
  }

  public void updateObjectsToDetect(int objToDetect) {
    objectsToDetect = objToDetect;
  }
}
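One detail worth noting in the deleted detect(): the guard is meant to run OpenCV work only on every 15th frame, but this.frame++ sits on the skip path, so with frame starting at 0 (a multiple of 15) the counter never advances and every frame is processed. A minimal sketch of a throttle that does advance, illustrative only and not part of the repository:

    class FrameThrottle {
      private int frame = 0;
      private final int interval;

      FrameThrottle(int interval) {
        this.interval = interval;
      }

      // Returns true when this frame should be processed.
      boolean shouldProcess() {
        boolean process = (frame % interval) == 0;
        frame++; // advance unconditionally so the throttle keeps cycling
        return process;
      }
    }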
File diff suppressed because it is too large
@@ -1,30 +0,0 @@
#ifdef __cplusplus
#import <opencv2/opencv.hpp>
#import <opencv2/objdetect.hpp>
#import <opencv2/videoio/cap_ios.h>
using namespace cv;
#endif

#import <AVFoundation/AVFoundation.h>

@protocol OpenCVProcessorFaceDetectorDelegate
- (void)onFacesDetected:(NSArray<NSDictionary *> *)faces;
@end

@class OpenCVProcessor;

@interface OpenCVProcessor : NSObject <AVCaptureVideoDataOutputSampleBufferDelegate>
{
#ifdef __cplusplus
    std::vector<cv::Rect> objects;
    cv::CascadeClassifier cascade;
#endif
    id delegate;
}

- (id) init;
- (id) initWithDelegate:(id <OpenCVProcessorFaceDetectorDelegate>)delegateObj;
- (void)setExpectedFaceOrientation:(NSInteger)expectedFaceOrientation;
- (void)updateObjectsToDetect:(NSInteger)objectsToDetect;
@end
@ -1,331 +0,0 @@
|
||||
#import "OpenCVProcessor.hpp"
|
||||
#import <opencv2/opencv.hpp>
|
||||
#import <opencv2/objdetect.hpp>
|
||||
|
||||
@implementation OpenCVProcessor{
|
||||
BOOL saveDemoFrame;
|
||||
int processedFrames;
|
||||
NSInteger expectedFaceOrientation;
|
||||
NSInteger objectsToDetect;
|
||||
}
|
||||
|
||||
- (id) init {
|
||||
|
||||
saveDemoFrame = false;
|
||||
processedFrames = 0;
|
||||
expectedFaceOrientation = -1;
|
||||
objectsToDetect = 0; // face
|
||||
|
||||
NSString *path = [[NSBundle mainBundle] pathForResource:@"lbpcascade_frontalface_improved.xml"
|
||||
ofType:nil];
|
||||
|
||||
std::string cascade_path = (char *)[path UTF8String];
|
||||
if (!cascade.load(cascade_path)) {
|
||||
NSLog(@"Couldn't load haar cascade file.");
|
||||
}
|
||||
|
||||
if (self = [super init]) {
|
||||
// Initialize self
|
||||
}
|
||||
return self;
|
||||
}
|
||||
|
||||
- (id) initWithDelegate:(id)delegateObj {
|
||||
delegate = delegateObj;
|
||||
return self;
|
||||
}
|
||||
|
||||
- (void)setExpectedFaceOrientation:(NSInteger)expectedOrientation
|
||||
{
|
||||
expectedFaceOrientation = expectedOrientation;
|
||||
}
|
||||
|
||||
- (void)updateObjectsToDetect:(NSInteger)givenObjectsToDetect
|
||||
{
|
||||
objectsToDetect = givenObjectsToDetect;
|
||||
}
|
||||
|
||||
# pragma mark - OpenCV-Processing
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
||||
- (void)saveImageToDisk:(Mat&)image;
|
||||
{
|
||||
NSLog(@"----------------SAVE IMAGE-----------------");
|
||||
saveDemoFrame = false;
|
||||
|
||||
NSData *data = [NSData dataWithBytes:image.data length:image.elemSize()*image.total()];
|
||||
CGColorSpaceRef colorSpace;
|
||||
|
||||
if (image.elemSize() == 1) {
|
||||
colorSpace = CGColorSpaceCreateDeviceGray();
|
||||
} else {
|
||||
colorSpace = CGColorSpaceCreateDeviceRGB();
|
||||
}
|
||||
|
||||
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
|
||||
|
||||
// Creating CGImage from cv::Mat
|
||||
CGImageRef imageRef = CGImageCreate(image.cols, //width
|
||||
image.rows, //height
|
||||
8, //bits per component
|
||||
8 * image.elemSize(), //bits per pixel
|
||||
image.step[0], //bytesPerRow
|
||||
colorSpace, //colorspace
|
||||
kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info
|
||||
provider, //CGDataProviderRef
|
||||
NULL, //decode
|
||||
false, //should interpolate
|
||||
kCGRenderingIntentDefault //intent
|
||||
);
|
||||
|
||||
|
||||
// Getting UIImage from CGImage
|
||||
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
|
||||
CGImageRelease(imageRef);
|
||||
CGDataProviderRelease(provider);
|
||||
CGColorSpaceRelease(colorSpace);
|
||||
|
||||
UIImageWriteToSavedPhotosAlbum(finalImage, nil, nil, nil);
|
||||
}
|
||||
|
||||
- (int)rotateImage:(Mat&)image;
|
||||
{
|
||||
int orientation = 3;
|
||||
//cv::equalizeHist(image, image);
|
||||
|
||||
if(expectedFaceOrientation != -1){
|
||||
orientation = expectedFaceOrientation;
|
||||
} else {
|
||||
// rotate image according to device-orientation
|
||||
UIDeviceOrientation interfaceOrientation = [[UIDevice currentDevice] orientation];
|
||||
if (interfaceOrientation == UIDeviceOrientationPortrait) {
|
||||
orientation = 0;
|
||||
} else if (interfaceOrientation == UIDeviceOrientationPortraitUpsideDown) {
|
||||
orientation = 2;
|
||||
} else if (interfaceOrientation == UIDeviceOrientationLandscapeLeft) {
|
||||
orientation = 1;
|
||||
}
|
||||
}
|
||||
|
||||
switch(orientation){
|
||||
case 0:
|
||||
transpose(image, image);
|
||||
flip(image, image,1);
|
||||
break;
|
||||
case 1:
|
||||
flip(image, image,-1);
|
||||
break;
|
||||
case 2:
|
||||
transpose(image, image);
|
||||
flip(image, image,0);
|
||||
break;
|
||||
}
|
||||
|
||||
return orientation;
|
||||
}
|
||||
|
||||
- (float)resizeImage:(Mat&)image width:(float)width;
|
||||
{
|
||||
float scale = width / (float)image.cols;
|
||||
|
||||
cv::resize(image, image, cv::Size(0,0), scale, scale, cv::INTER_CUBIC);
|
||||
|
||||
return scale;
|
||||
}
|
||||
|
||||
- (void)processImageFaces:(Mat&)image;
|
||||
{
|
||||
int orientation = [self rotateImage:image];
|
||||
|
||||
float imageWidth = 480.;
|
||||
int scale = [self resizeImage:image width:imageWidth];
|
||||
float imageHeight = (float)image.rows;
|
||||
|
||||
if(saveDemoFrame){
|
||||
[self saveImageToDisk:image];
|
||||
}
|
||||
|
||||
objects.clear();
|
||||
cascade.detectMultiScale(image,
|
||||
objects,
|
||||
1.2,
|
||||
3,
|
||||
0,
|
||||
cv::Size(10, 10));
|
||||
|
||||
NSMutableArray *faces = [[NSMutableArray alloc] initWithCapacity:objects.size()];
|
||||
if(objects.size() > 0){
|
||||
for( int i = 0; i < objects.size(); i++ )
|
||||
{
|
||||
cv::Rect face = objects[i];
|
||||
|
||||
NSDictionary *faceDescriptor = @{
|
||||
@"x" : [NSNumber numberWithFloat:face.x / imageWidth],
|
||||
@"y" : [NSNumber numberWithFloat:face.y / imageHeight],
|
||||
@"width": [NSNumber numberWithFloat:face.width / imageWidth],
|
||||
@"height": [NSNumber numberWithFloat:face.height / imageHeight],
|
||||
@"orientation": @(orientation)
|
||||
};
|
||||
|
||||
[faces addObject:faceDescriptor];
|
||||
}
|
||||
}
|
||||
[delegate onFacesDetected:faces];
|
||||
}
|
||||
|
||||
- (BOOL) compareContourAreasReverse: (std::vector<cv::Point>) contour1 contour2:(std::vector<cv::Point>) contour2 {
|
||||
double i = fabs( contourArea(cv::Mat(contour1)) );
|
||||
double j = fabs( contourArea(cv::Mat(contour2)) );
|
||||
return ( i > j );
|
||||
}
|
||||

- (void)processImageTextBlocks:(Mat&)image;
{
    int orientation = [self rotateImage:image];

    float algorithmWidth = 1080.;
    float imageWidth = 480.;
    float algorithmScale = imageWidth / algorithmWidth;
    float scale = [self resizeImage:image width:imageWidth];

    float imageHeight = image.rows;

    // kernel and size constants were tuned at 1080px width, so scale them down
    float rectKernX = 17. * algorithmScale;
    float rectKernY = 6. * algorithmScale;
    float sqKernXY = 40. * algorithmScale;
    float minSize = 3000. * algorithmScale;
    float maxSize = 100000. * algorithmScale;

    cv::Mat processedImage = image.clone();

    // initialize a rectangular and square structuring kernel
    //float factor = (float)min(image.rows, image.cols) / 600.;
    Mat rectKernel = getStructuringElement(MORPH_RECT, cv::Size(rectKernX, rectKernY));
    Mat rectKernel2 = getStructuringElement(MORPH_RECT, cv::Size(sqKernXY, (int)(0.666666 * sqKernXY)));

    // Smooth the image using a 3x3 Gaussian, then apply the blackhat morphological
    // operator to find dark regions on a light background
    GaussianBlur(processedImage, processedImage, cv::Size(3, 3), 0);
    morphologyEx(processedImage, processedImage, MORPH_BLACKHAT, rectKernel);

    // Compute the Scharr gradient of the blackhat image
    Mat imageGrad;
    Sobel(processedImage, imageGrad, CV_32F, 1, 0, CV_SCHARR);
    convertScaleAbs(imageGrad / 8, processedImage);

    // Apply a closing operation using the rectangular kernel to close gaps in between
    // letters, then apply Otsu's thresholding method
    morphologyEx(processedImage, processedImage, MORPH_CLOSE, rectKernel);
    threshold(processedImage, processedImage, 0, 255, THRESH_BINARY | THRESH_OTSU);
    erode(processedImage, processedImage, Mat(), cv::Point(-1, -1), 2, 1, 1);

    // Perform another closing operation, this time using the square kernel to close gaps
    // between lines of TextBlocks
    morphologyEx(processedImage, processedImage, MORPH_CLOSE, rectKernel2);

    // Find contours in the thresholded image and sort them by size
    float minContourArea = minSize;
    float maxContourArea = maxSize;
    std::vector< std::vector<cv::Point> > contours;
    std::vector<Vec4i> hierarchy;
    findContours(processedImage, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

    // Create a result vector
    std::vector<RotatedRect> minRects;
    for (size_t i = 0, I = contours.size(); i < I; ++i) {
        // Filter by provided area limits
        if (contourArea(contours[i]) > minContourArea && contourArea(contours[i]) < maxContourArea)
            minRects.push_back(minAreaRect(Mat(contours[i])));
    }

    if (saveDemoFrame) {
        cv::Mat debugDrawing = image.clone();
        for (size_t i = 0, I = minRects.size(); i < I; ++i) {
            Point2f rect_points[4];
            minRects[i].points(rect_points);
            for (int j = 0; j < 4; ++j)
                line(debugDrawing, rect_points[j], rect_points[(j + 1) % 4], Scalar(255, 0, 0), 1, 8);
        }

        [self saveImageToDisk:debugDrawing];
    }

    NSMutableArray *detectedObjects = [[NSMutableArray alloc] init];
    if (minRects.size() > 0) {
        for (size_t i = 0, I = minRects.size(); i < I; ++i) {
            Point2f rect_points[4];
            minRects[i].points(rect_points);

            float xRel = rect_points[1].x / imageWidth;
            float yRel = rect_points[1].y / imageHeight;
            float widthRel = fabsf(rect_points[3].x - rect_points[1].x) / imageWidth;
            float heightRel = fabsf(rect_points[3].y - rect_points[1].y) / imageHeight;
            float sizeRel = fabsf(widthRel * heightRel);
            float ratio = fabsf(rect_points[3].x - rect_points[1].x) / fabsf(rect_points[3].y - rect_points[1].y);

            // keep only boxes that are large enough and have a wide, text-band-like aspect ratio
            if (sizeRel >= 0.025 && ratio >= 5.5 && ratio <= 8.5) {
                NSDictionary *objectDescriptor = @{
                    @"x" : [NSNumber numberWithFloat:xRel],
                    @"y" : [NSNumber numberWithFloat:yRel],
                    @"width": [NSNumber numberWithFloat:widthRel],
                    @"height": [NSNumber numberWithFloat:heightRel],
                    @"orientation": @(orientation)
                };

                [detectedObjects addObject:objectDescriptor];
            }
        }
    }
    [delegate onFacesDetected:detectedObjects];
}
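The closing acceptance test combines a relative-area floor with an aspect-ratio window tuned for wide text bands. A standalone sketch of the predicate, with hypothetical numbers for a 480x640 processed frame (the helper name and the example box are assumptions, not measurements from the module):

#include <cmath>

// Same acceptance test as above, isolated for illustration.
static bool looksLikeTextBand(float sizeRel, float ratio) {
    return sizeRel >= 0.025f && ratio >= 5.5f && ratio <= 8.5f;
}

// Hypothetical candidate: a 300x45 px box in a 480x640 frame.
//   widthRel  = 300 / 480 = 0.625
//   heightRel =  45 / 640 ~ 0.070
//   sizeRel   = 0.625 * 0.070 ~ 0.044  -> passes the 0.025 floor
//   ratio     = 300 / 45 ~ 6.67        -> inside [5.5, 8.5]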

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    // https://github.com/opencv/opencv/blob/master/modules/videoio/src/cap_ios_video_camera.mm
    // throttle: only every 10th frame is processed
    if (processedFrames % 10 == 0) {
        (void)captureOutput;
        (void)connection;

        // convert from Core Media to Core Video
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        CVPixelBufferLockBaseAddress(imageBuffer, 0);

        void* bufferAddress;
        size_t width;
        size_t height;
        size_t bytesPerRow;

        int format_opencv = CV_8UC1;

        // plane 0 of the 420YpCbCr8BiPlanarFullRange buffer is the 8-bit luma
        // plane, so it can be read directly as a grayscale image
        bufferAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
        width = CVPixelBufferGetWidthOfPlane(imageBuffer, 0);
        height = CVPixelBufferGetHeightOfPlane(imageBuffer, 0);
        bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);

        // wrap the plane in a Mat (no copy) and hand it to the selected processor
        cv::Mat image((int)height, (int)width, format_opencv, bufferAddress, bytesPerRow);

        switch (objectsToDetect) {
            case 0:
                [self processImageFaces:image];
                break;
            case 1:
                [self processImageTextBlocks:image];
                break;
        }

        // cleanup
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    }
    processedFrames++;
}
#endif

@end
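Both processing paths report through the same delegate callback, onFacesDetected:, passing an array of the normalized descriptors built above. A minimal sketch of a conforming consumer, assuming (consistent with the calls above and the RNCamera.h diff below) that the OpenCVProcessorFaceDetectorDelegate protocol declares onFacesDetected: as its callback; the DetectionLogger class is illustrative:

@interface DetectionLogger : NSObject <OpenCVProcessorFaceDetectorDelegate>
@end

@implementation DetectionLogger
// Receives normalized descriptors: x, y, width, height in [0, 1], plus orientation.
- (void)onFacesDetected:(NSArray *)faces
{
    for (NSDictionary *face in faces) {
        NSLog(@"detected at (%@, %@), size %@ x %@, orientation %@",
              face[@"x"], face[@"y"], face[@"width"], face[@"height"], face[@"orientation"]);
    }
}
@end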
@ -1,4 +1,3 @@
#import "OpenCVProcessor.hpp"
#import <AVFoundation/AVFoundation.h>
#import <React/RCTBridge.h>
#import <React/RCTBridgeModule.h>
@ -12,7 +11,7 @@

@class RNCamera;

@interface RNCamera : UIView <AVCaptureMetadataOutputObjectsDelegate, AVCaptureFileOutputRecordingDelegate, RNFaceDetectorDelegate, OpenCVProcessorFaceDetectorDelegate>
@interface RNCamera : UIView <AVCaptureMetadataOutputObjectsDelegate, AVCaptureFileOutputRecordingDelegate, RNFaceDetectorDelegate>

@property(nonatomic, strong) dispatch_queue_t sessionQueue;
@property(nonatomic, strong) AVCaptureSession *session;
@ -41,8 +40,6 @@
- (void)updateZoom;
- (void)updateWhiteBalance;
- (void)updateFaceDetecting:(id)isDetectingFaces;
- (void)updateFaceDetectionExpectedOrientation:(NSInteger)faceDetectionExpectedOrientation;
- (void)updateObjectsToDetect:(NSInteger)objectsToDetect;
- (void)updateFaceDetectionMode:(id)requestedMode;
- (void)updateFaceDetectionLandmarks:(id)requestedLandmarks;
- (void)updateFaceDetectionClassifications:(id)requestedClassifications;
@ -7,7 +7,6 @@
#import <React/RCTUtils.h>
#import <React/UIView+React.h>


@interface RNCamera ()

@property (nonatomic, weak) RCTBridge *bridge;
@ -17,7 +16,6 @@
@property (nonatomic, strong) RCTPromiseResolveBlock videoRecordedResolve;
@property (nonatomic, strong) RCTPromiseRejectBlock videoRecordedReject;
@property (nonatomic, strong) id faceDetectorManager;
@property (nonatomic, strong) OpenCVProcessor *openCVProcessor;

@property (nonatomic, copy) RCTDirectEventBlock onCameraReady;
@property (nonatomic, copy) RCTDirectEventBlock onMountError;
@ -36,8 +34,7 @@ static NSDictionary *defaultFaceDetectorOptions = nil;
    self.bridge = bridge;
    self.session = [AVCaptureSession new];
    self.sessionQueue = dispatch_queue_create("cameraQueue", DISPATCH_QUEUE_SERIAL);
    // self.faceDetectorManager = [self createFaceDetectorManager];
    self.openCVProcessor = [[OpenCVProcessor alloc] initWithDelegate:self];
    self.faceDetectorManager = [self createFaceDetectorManager];
#if !(TARGET_IPHONE_SIMULATOR)
    self.previewLayer =
        [AVCaptureVideoPreviewLayer layerWithSession:self.session];
@ -287,16 +284,6 @@ static NSDictionary *defaultFaceDetectorOptions = nil;
    [device unlockForConfiguration];
}

- (void)updateFaceDetectionExpectedOrientation:(NSInteger)expectedFaceOrientation
{
    [_openCVProcessor setExpectedFaceOrientation:expectedFaceOrientation];
}

- (void)updateObjectsToDetect:(NSInteger)objectsToDetect
{
    [_openCVProcessor updateObjectsToDetect:objectsToDetect];
}

- (void)updateFaceDetecting:(id)faceDetecting
{
    [_faceDetectorManager setIsEnabled:faceDetecting];
@ -395,7 +382,7 @@ static NSDictionary *defaultFaceDetectorOptions = nil;
    // At the time of writing AVCaptureMovieFileOutput and AVCaptureVideoDataOutput (> GMVDataOutput)
    // cannot coexist on the same AVSession (see: https://stackoverflow.com/a/4986032/1123156).
    // We stop face detection here and restart it when AVCaptureMovieFileOutput finishes recording.
    // [_faceDetectorManager stopFaceDetection];
    [_faceDetectorManager stopFaceDetection];
    [self setupMovieFileCapture];
}

@ -470,31 +457,7 @@ static NSDictionary *defaultFaceDetectorOptions = nil;
    self.stillImageOutput = stillImageOutput;
}

    // create VideoOutput for processing
    AVCaptureVideoDataOutput *videoDataOutput = [AVCaptureVideoDataOutput new];
    NSDictionary *newSettings = @{ (NSString *)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) };
    videoDataOutput.videoSettings = newSettings;

    // discard late frames if the data output queue is blocked (as we process the still image)
    [videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];

    // create a serial dispatch queue used for the sample buffer delegate as well as when a still image is captured
    // a serial dispatch queue must be used to guarantee that video frames will be delivered in order
    // see the header doc for setSampleBufferDelegate:queue: for more information
    dispatch_queue_t videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
    [videoDataOutput setSampleBufferDelegate:self.openCVProcessor queue:videoDataOutputQueue];
    // [videoDataOutput setSampleBufferDelegate:self]

    if ([self.session canAddOutput:videoDataOutput]) {
        [self.session addOutput:videoDataOutput];
    }
    else {
        NSLog(@"Error: [captureSession addOutput:videoDataOutput];");
        // Handle the failure.
    }


    // [_faceDetectorManager maybeStartFaceDetectionOnSession:_session withPreviewLayer:_previewLayer];
    [_faceDetectorManager maybeStartFaceDetectionOnSession:_session withPreviewLayer:_previewLayer];
    [self setupOrDisableBarcodeScanner];

    __weak RNCamera *weakSelf = self;
@ -520,7 +483,7 @@ static NSDictionary *defaultFaceDetectorOptions = nil;
        return;
#endif
    dispatch_async(self.sessionQueue, ^{
        // [_faceDetectorManager stopFaceDetection];
        [_faceDetectorManager stopFaceDetection];
        [self.previewLayer removeFromSuperlayer];
        [self.session commitConfiguration];
        [self.session stopRunning];
@ -781,7 +744,7 @@ static NSDictionary *defaultFaceDetectorOptions = nil;
    [self cleanupMovieFileCapture];
    // If face detection has been running prior to recording to file
    // we reenable it here (see comment in -record).
    // [_faceDetectorManager maybeStartFaceDetectionOnSession:_session withPreviewLayer:_previewLayer];
    [_faceDetectorManager maybeStartFaceDetectionOnSession:_session withPreviewLayer:_previewLayer];

    if (self.session.sessionPreset != AVCaptureSessionPresetHigh) {
        [self updateSessionPreset:AVCaptureSessionPresetHigh];
@ -150,17 +150,6 @@ RCT_CUSTOM_VIEW_PROPERTY(whiteBalance, NSInteger, RNCamera)
    [view updateWhiteBalance];
}

RCT_CUSTOM_VIEW_PROPERTY(faceDetectionExpectedOrientation, NSInteger, RNCamera)
{
    [view updateFaceDetectionExpectedOrientation:[RCTConvert NSInteger:json]];
}

RCT_CUSTOM_VIEW_PROPERTY(objectsToDetect, NSInteger, RNCamera)
{
    [view updateObjectsToDetect:[RCTConvert NSInteger:json]];
}


RCT_CUSTOM_VIEW_PROPERTY(faceDetectorEnabled, BOOL, RNCamera)
{
    [view updateFaceDetecting:json];
File diff suppressed because it is too large
@ -7,9 +7,6 @@
    objects = {

/* Begin PBXBuildFile section */
        001F67882027265A001A21D8 /* opencv2.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 001F67872027265A001A21D8 /* opencv2.framework */; };
        001F681120274350001A21D8 /* OpenCVProcessor.mm in Sources */ = {isa = PBXBuildFile; fileRef = 001F681020274350001A21D8 /* OpenCVProcessor.mm */; };
        0036F41F202C958F002EF644 /* lbpcascade_frontalface_improved.xml in CopyFiles */ = {isa = PBXBuildFile; fileRef = 0036F41E202C9563002EF644 /* lbpcascade_frontalface_improved.xml */; };
        0314E39D1B661A460092D183 /* CameraFocusSquare.m in Sources */ = {isa = PBXBuildFile; fileRef = 0314E39C1B661A460092D183 /* CameraFocusSquare.m */; };
        4107014D1ACB732B00C6AA39 /* RCTCamera.m in Sources */ = {isa = PBXBuildFile; fileRef = 410701481ACB732B00C6AA39 /* RCTCamera.m */; };
        4107014E1ACB732B00C6AA39 /* RCTCameraManager.m in Sources */ = {isa = PBXBuildFile; fileRef = 4107014A1ACB732B00C6AA39 /* RCTCameraManager.m */; };
@ -35,17 +32,12 @@
        dstPath = "include/$(PRODUCT_NAME)";
        dstSubfolderSpec = 16;
        files = (
            0036F41F202C958F002EF644 /* lbpcascade_frontalface_improved.xml in CopyFiles */,
        );
        runOnlyForDeploymentPostprocessing = 0;
    };
/* End PBXCopyFilesBuildPhase section */

/* Begin PBXFileReference section */
        001F67872027265A001A21D8 /* opencv2.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = opencv2.framework; path = ../../../../../Downloads/opencv2.framework; sourceTree = "<group>"; };
        001F680F2027431C001A21D8 /* OpenCVProcessor.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = OpenCVProcessor.hpp; sourceTree = "<group>"; };
        001F681020274350001A21D8 /* OpenCVProcessor.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = OpenCVProcessor.mm; sourceTree = "<group>"; };
        0036F41E202C9563002EF644 /* lbpcascade_frontalface_improved.xml */ = {isa = PBXFileReference; lastKnownFileType = text.xml; path = lbpcascade_frontalface_improved.xml; sourceTree = "<group>"; };
        0314E39B1B661A0C0092D183 /* CameraFocusSquare.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = CameraFocusSquare.h; sourceTree = "<group>"; };
        0314E39C1B661A460092D183 /* CameraFocusSquare.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CameraFocusSquare.m; sourceTree = "<group>"; };
        4107012F1ACB723B00C6AA39 /* libRNCamera.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libRNCamera.a; sourceTree = BUILT_PRODUCTS_DIR; };
@ -85,25 +77,15 @@
        isa = PBXFrameworksBuildPhase;
        buildActionMask = 2147483647;
        files = (
            001F67882027265A001A21D8 /* opencv2.framework in Frameworks */,
        );
        runOnlyForDeploymentPostprocessing = 0;
    };
/* End PBXFrameworksBuildPhase section */

/* Begin PBXGroup section */
        001F678620272624001A21D8 /* Frameworks */ = {
            isa = PBXGroup;
            children = (
                001F67872027265A001A21D8 /* opencv2.framework */,
            );
            path = Frameworks;
            sourceTree = "<group>";
        };
        410701241ACB719800C6AA39 = {
            isa = PBXGroup;
            children = (
                001F678620272624001A21D8 /* Frameworks */,
                7147DBB02015317E003C59C3 /* FaceDetector */,
                714166162013E1B600EE9FCC /* RN */,
                714166152013E19D00EE9FCC /* RCT */,
@ -138,7 +120,6 @@
        714166162013E1B600EE9FCC /* RN */ = {
            isa = PBXGroup;
            children = (
                0036F41E202C9563002EF644 /* lbpcascade_frontalface_improved.xml */,
                71C7FFD42013C824006EB75A /* RNFileSystem.h */,
                71C7FFD52013C824006EB75A /* RNFileSystem.m */,
                71C7FFD12013C817006EB75A /* RNImageUtils.h */,
@ -151,8 +132,6 @@
                71C7FFCC2013C7BF006EB75A /* RNCamera.m */,
                7103647920195C53009691D1 /* RNFaceDetectorManagerStub.h */,
                7103647A20195C53009691D1 /* RNFaceDetectorManagerStub.m */,
                001F680F2027431C001A21D8 /* OpenCVProcessor.hpp */,
                001F681020274350001A21D8 /* OpenCVProcessor.mm */,
            );
            path = RN;
            sourceTree = "<group>";
@ -233,7 +212,6 @@
            71D5C728201B8A2A0030A15E /* RNFaceEncoder.m in Sources */,
            454EBCF41B5082DC00AD0F86 /* NSMutableDictionary+ImageMetadata.m in Sources */,
            71C7FFD62013C824006EB75A /* RNFileSystem.m in Sources */,
            001F681120274350001A21D8 /* OpenCVProcessor.mm in Sources */,
            71D5C729201B8A2D0030A15E /* RNFaceDetectorUtils.m in Sources */,
            4107014E1ACB732B00C6AA39 /* RCTCameraManager.m in Sources */,
            71D5C72C201B8A360030A15E /* RNFaceDetectorManager.m in Sources */,
@ -286,7 +264,6 @@
            FRAMEWORK_SEARCH_PATHS = (
                "$(SRCROOT)/../../../ios/**",
                "${BUILT_PRODUCTS_DIR}/**",
                "$(PROJECT_DIR)/../../../ios/**",
            );
            GCC_C_LANGUAGE_STANDARD = gnu99;
            GCC_DYNAMIC_NO_PIC = NO;
@ -341,7 +318,6 @@
            FRAMEWORK_SEARCH_PATHS = (
                "$(SRCROOT)/../../../ios/**",
                "${BUILT_PRODUCTS_DIR}/**",
                "$(PROJECT_DIR)/../../../ios/**",
            );
            GCC_C_LANGUAGE_STANDARD = gnu99;
            GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
@ -51,8 +51,6 @@ type PropsType = ViewPropTypes & {
  onCameraReady?: Function,
  onBarCodeRead?: Function,
  faceDetectionMode?: number,
  faceDetectionExpectedOrientation?: number,
  objectsToDetect?: number,
  flashMode?: number | string,
  barCodeTypes?: Array<string>,
  whiteBalance?: number | string,
@ -125,19 +123,13 @@ export default class Camera extends React.Component<PropsType> {
    onBarCodeRead: PropTypes.func,
    onFacesDetected: PropTypes.func,
    faceDetectionMode: PropTypes.number,
    faceDetectionExpectedOrientation: PropTypes.number,
    objectsToDetect: PropTypes.number,
    faceDetectionLandmarks: PropTypes.number,
    faceDetectionClassifications: PropTypes.number,
    barCodeTypes: PropTypes.arrayOf(PropTypes.string),
    type: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),
    flashMode: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),
    whiteBalance: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),
    autoFocus: PropTypes.oneOfType([
      PropTypes.string,
      PropTypes.number,
      PropTypes.bool,
    ]),
    autoFocus: PropTypes.oneOfType([PropTypes.string, PropTypes.number, PropTypes.bool]),
    permissionDialogTitle: PropTypes.string,
    permissionDialogMessage: PropTypes.string,
    notAuthorizedView: PropTypes.element,
@ -154,13 +146,10 @@ export default class Camera extends React.Component<PropsType> {
    autoFocus: CameraManager.AutoFocus.on,
    flashMode: CameraManager.FlashMode.off,
    whiteBalance: CameraManager.WhiteBalance.auto,
    faceDetectionExpectedOrientation: -1,
    objectsToDetect: -1,
    faceDetectionMode: CameraManager.FaceDetection.fast,
    barCodeTypes: Object.values(CameraManager.BarCodeType),
    faceDetectionLandmarks: CameraManager.FaceDetection.Landmarks.none,
    faceDetectionClassifications:
      CameraManager.FaceDetection.Classifications.none,
    faceDetectionClassifications: CameraManager.FaceDetection.Classifications.none,
    permissionDialogTitle: '',
    permissionDialogMessage: '',
    notAuthorizedView: (
@ -169,12 +158,14 @@ export default class Camera extends React.Component<PropsType> {
          flex: 1,
          alignItems: 'center',
          justifyContent: 'center',
        }}>
        }}
      >
        <Text
          style={{
            textAlign: 'center',
            fontSize: 16,
          }}>
          }}
        >
          Camera not authorized
        </Text>
      </View>
@ -185,7 +176,8 @@ export default class Camera extends React.Component<PropsType> {
          flex: 1,
          alignItems: 'center',
          justifyContent: 'center',
        }}>
        }}
      >
        <ActivityIndicator size="small" />
      </View>
    ),
@ -251,9 +243,7 @@ export default class Camera extends React.Component<PropsType> {
    }
  };

  _onObjectDetected = (callback: ?Function) => ({
    nativeEvent,
  }: EventCallbackArgumentsType) => {
  _onObjectDetected = (callback: ?Function) => ({ nativeEvent }: EventCallbackArgumentsType) => {
    const { type } = nativeEvent;

    if (