@@ -26,17 +26,6 @@
 #include "../SDL_camera_c.h"
 #include "../../thread/SDL_systhread.h"

-#if defined(HAVE_COREMEDIA) && defined(SDL_PLATFORM_MACOS) && (__MAC_OS_X_VERSION_MAX_ALLOWED < 101500)
-// AVCaptureDeviceTypeBuiltInWideAngleCamera requires macOS SDK 10.15
-#undef HAVE_COREMEDIA
-#endif
-
-#ifdef SDL_PLATFORM_TVOS
-#undef HAVE_COREMEDIA
-#endif
-
-#ifdef HAVE_COREMEDIA
-
 #import <AVFoundation/AVFoundation.h>
 #import <CoreMedia/CoreMedia.h>

@@ -50,537 +39,434 @@
  * MACOSX:
  * Add to the Code Sign Entitlement file:
  * <key>com.apple.security.device.camera</key> <true/>
- *
- *
- * IOS:
- *
- * - Need to link with: CoreMedia CoreVideo
- * - Add #define SDL_CAMERA 1
- *   to SDL_build_config_ios.h
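+ *
+ * For reference, a minimal sketch of the entitlements plist that key lives in
+ * (standard plist boilerplate assumed; only the camera key comes from this file):
+ *
+ *   <?xml version="1.0" encoding="UTF-8"?>
+ *   <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+ *   <plist version="1.0">
+ *   <dict>
+ *       <key>com.apple.security.device.camera</key>
+ *       <true/>
+ *   </dict>
+ *   </plist>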
  */

-@class MySampleBufferDelegate;
-
-struct SDL_PrivateCameraData
-{
-    dispatch_queue_t queue;
-    MySampleBufferDelegate *delegate;
-    AVCaptureSession *session;
-    CMSimpleQueueRef frame_queue;
-};
-
-static NSString *fourcc_to_nstring(Uint32 code)
+static Uint32 CoreMediaFormatToSDL(FourCharCode fmt)
 {
-    Uint8 buf[4];
-    *(Uint32 *)buf = code;
-    return [NSString stringWithFormat:@"%c%c%c%c", buf[3], buf[2], buf[1], buf[0]];
+    switch (fmt) {
+        #define CASE(x, y) case x: return y
+        // the 16LE ones should use 16BE if we're on a Bigendian system like PowerPC,
+        // but at current time there is no bigendian Apple platform that has CoreMedia.
+        CASE(kCMPixelFormat_16LE555, SDL_PIXELFORMAT_RGB555);
+        CASE(kCMPixelFormat_16LE5551, SDL_PIXELFORMAT_RGBA5551);
+        CASE(kCMPixelFormat_16LE565, SDL_PIXELFORMAT_RGB565);
+        CASE(kCMPixelFormat_24RGB, SDL_PIXELFORMAT_RGB24);
+        CASE(kCMPixelFormat_32ARGB, SDL_PIXELFORMAT_ARGB32);
+        CASE(kCMPixelFormat_32BGRA, SDL_PIXELFORMAT_BGRA32);
+        CASE(kCMPixelFormat_422YpCbCr8, SDL_PIXELFORMAT_UYVY);
+        CASE(kCMPixelFormat_422YpCbCr8_yuvs, SDL_PIXELFORMAT_YUY2);
+        #undef CASE
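+        // (Mapping note: Apple defines kCMPixelFormat_422YpCbCr8 ('2vuy') as
+        // Cb Y'0 Cr Y'1 ordering, i.e. SDL's UYVY, and
+        // kCMPixelFormat_422YpCbCr8_yuvs ('yuvs') as Y'0 Cb Y'1 Cr, i.e. SDL's YUY2.)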
+        default:
+            #if DEBUG_CAMERA
+            SDL_Log("CAMERA: Unknown format FourCharCode '%d'", (int) fmt);
+            #endif
+            break;
+    }
+    return SDL_PIXELFORMAT_UNKNOWN;
 }

-static NSArray<AVCaptureDevice *> *DiscoverCameraDevices()
-{
-    NSArray *deviceType = @[AVCaptureDeviceTypeBuiltInWideAngleCamera];
+@class SDLCaptureVideoDataOutputSampleBufferDelegate;

-    AVCaptureDeviceDiscoverySession *discoverySession = [AVCaptureDeviceDiscoverySession
-        discoverySessionWithDeviceTypes:deviceType
-        mediaType:AVMediaTypeVideo
-        position:AVCaptureDevicePositionUnspecified];
+// just a simple wrapper to help ARC manage memory...
+@interface SDLPrivateCameraData : NSObject
+@property(nonatomic, retain) AVCaptureSession *session;
+@property(nonatomic, retain) SDLCaptureVideoDataOutputSampleBufferDelegate *delegate;
+@property(nonatomic, assign) CMSampleBufferRef current_sample;
+@end

-    NSArray<AVCaptureDevice *> *devices = discoverySession.devices;
+@implementation SDLPrivateCameraData
+@end
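+// (Ownership sketch, inferred from the calls below: device->hidden holds the +1
+// reference produced by CFBridgingRetain() in COREMEDIA_OpenDevice, and
+// COREMEDIA_CloseDevice hands it back to ARC with CFBridgingRelease(); everything
+// in between peeks at it without transferring ownership, e.g.
+//     SDLPrivateCameraData *hidden = (__bridge SDLPrivateCameraData *) device->hidden;
+// )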

-    if ([devices count] > 0) {
-        return devices;
-    } else {
-        AVCaptureDevice *captureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
-        if (captureDevice == nil) {
-            return devices;
+
+static SDL_bool CheckCameraPermissions(SDL_CameraDevice *device)
+{
+    if (device->permission == 0) { // still expecting a permission result.
+        if (@available(macOS 10.14, *)) {
+            const AVAuthorizationStatus status = [AVCaptureDevice authorizationStatusForMediaType:AVMediaTypeVideo];
+            if (status != AVAuthorizationStatusNotDetermined) { // NotDetermined == still waiting for an answer from the user.
+                SDL_CameraDevicePermissionOutcome(device, (status == AVAuthorizationStatusAuthorized) ? SDL_TRUE : SDL_FALSE);
+            }
         } else {
-            NSArray<AVCaptureDevice *> *default_device = @[ captureDevice ];
-            return default_device;
+            SDL_CameraDevicePermissionOutcome(device, SDL_TRUE); // always allowed (or just unqueryable...?) on older macOS.
         }
     }

-    return devices;
+    return (device->permission > 0);
 }
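+// (device->permission is SDL's tri-state here: 0 while the user's answer is
+// pending, > 0 once granted, < 0 once denied; SDL_CameraDevicePermissionOutcome()
+// is what moves it out of the pending state. Semantics assumed from how the
+// camera core uses the field.)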

-static AVCaptureDevice *GetCameraDeviceByName(const char *dev_name)
-{
-    NSArray<AVCaptureDevice *> *devices = DiscoverCameraDevices();
+// this delegate just receives new video frames on a Grand Central Dispatch queue, and fires off the
+// main device thread iterate function directly to consume it.
+@interface SDLCaptureVideoDataOutputSampleBufferDelegate : NSObject<AVCaptureVideoDataOutputSampleBufferDelegate>
+    @property SDL_CameraDevice *device;
+    -(id) init:(SDL_CameraDevice *) dev;
+    -(void) captureOutput:(AVCaptureOutput *)output didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection;
+@end

-    for (AVCaptureDevice *device in devices) {
-        char buf[1024];
-        NSString *cameraID = [device localizedName];
-        const char *str = [cameraID UTF8String];
-        SDL_snprintf(buf, sizeof (buf) - 1, "%s", str);
-        if (SDL_strcmp(buf, dev_name) == 0) {
-            return device;
+@implementation SDLCaptureVideoDataOutputSampleBufferDelegate
+
+    -(id) init:(SDL_CameraDevice *) dev {
+        if ( self = [super init] ) {
+            _device = dev;
         }
+        return self;
     }
-    return nil;
-}

-static Uint32 nsfourcc_to_sdlformat(NSString *nsfourcc)
-{
-    const char *str = [nsfourcc UTF8String];
-
-    /* FIXME
-     * on IOS this mode gives 2 planes, and it's NV12
-     * on macos, 1 plane/ YVYU
-     */
-    #ifdef SDL_PLATFORM_MACOS
-    if (SDL_strcmp("420v", str) == 0) return SDL_PIXELFORMAT_YVYU;
-    #else
-    if (SDL_strcmp("420v", str) == 0) return SDL_PIXELFORMAT_NV12;
-    #endif
+    - (void) captureOutput:(AVCaptureOutput *)output didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
+    {
+        SDL_CameraDevice *device = self.device;
+        if (!device || !device->hidden) {
+            return; // oh well.
+        }

- if (SDL_strcmp("yuvs", str) == 0) return SDL_PIXELFORMAT_UYVY;
|
|
|
- if (SDL_strcmp("420f", str) == 0) return SDL_PIXELFORMAT_UNKNOWN;
|
|
|
+ if (!CheckCameraPermissions(device)) {
|
|
|
+ return; // nothing to do right now, dump what is probably a completely black frame.
|
|
|
+ }
|
|
|
|
|
|
- #if DEBUG_CAMERA
|
|
|
- SDL_Log("CAMERA: Unknown format '%s'", str);
|
|
|
- #endif
|
|
|
+ SDLPrivateCameraData *hidden = (__bridge SDLPrivateCameraData *) device->hidden;
|
|
|
+ hidden.current_sample = sampleBuffer;
|
|
|
+ SDL_CameraThreadIterate(device);
|
|
|
+ hidden.current_sample = NULL;
|
|
|
+ }
|
|
|
|
|
|
-    return SDL_PIXELFORMAT_UNKNOWN;
-}
+    - (void)captureOutput:(AVCaptureOutput *)output didDropSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
+    {
+        #if DEBUG_CAMERA
+        SDL_Log("CAMERA: Drop frame.");
+        #endif
+    }
+@end
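+// (Flow recap: the GCD queue invokes captureOutput:didOutputSampleBuffer:...,
+// which parks the sample in hidden.current_sample and synchronously runs one
+// iteration of SDL's camera thread; COREMEDIA_AcquireFrame below copies the
+// data out before the callback returns and the slot is cleared.)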

-static NSString *sdlformat_to_nsfourcc(Uint32 fmt)
+static int COREMEDIA_WaitDevice(SDL_CameraDevice *device)
 {
-    const char *str = "";
-    NSString *result;
-
-#ifdef SDL_PLATFORM_MACOS
-    if (fmt == SDL_PIXELFORMAT_YVYU) str = "420v";
-#else
-    if (fmt == SDL_PIXELFORMAT_NV12) str = "420v";
-#endif
-    if (fmt == SDL_PIXELFORMAT_UYVY) str = "yuvs";
-
-    return [[NSString alloc] initWithUTF8String: str];
+    return 0; // this isn't used atm, since we run our own thread out of Grand Central Dispatch.
 }

+static int COREMEDIA_AcquireFrame(SDL_CameraDevice *device, SDL_Surface *frame, Uint64 *timestampNS)
+{
+    int retval = 1;
+    SDLPrivateCameraData *hidden = (__bridge SDLPrivateCameraData *) device->hidden;
+    CMSampleBufferRef sample_buffer = hidden.current_sample;
+    hidden.current_sample = NULL;
+    SDL_assert(sample_buffer != NULL); // should only have been called from our delegate with a new frame.
+
+    CMSampleTimingInfo timinginfo;
+    if (CMSampleBufferGetSampleTimingInfo(sample_buffer, 0, &timinginfo) == noErr) {
+        *timestampNS = (Uint64) (CMTimeGetSeconds(timinginfo.presentationTimeStamp) * ((Float64) SDL_NS_PER_SECOND));
+    } else {
+        SDL_assert(!"this shouldn't happen, I think.");
+        *timestampNS = 0;
+    }

-@interface MySampleBufferDelegate : NSObject<AVCaptureVideoDataOutputSampleBufferDelegate>
-    @property struct SDL_PrivateCameraData *hidden;
-    - (void) set: (struct SDL_PrivateCameraData *) val;
-@end
+    CVImageBufferRef image = CMSampleBufferGetImageBuffer(sample_buffer); // does not retain `image` (and we don't want it to).
+    const int numPlanes = (int) CVPixelBufferGetPlaneCount(image);
+    const int planar = (int) CVPixelBufferIsPlanar(image);

-@implementation MySampleBufferDelegate
+    #if DEBUG_CAMERA
+    const int w = (int) CVPixelBufferGetWidth(image);
+    const int h = (int) CVPixelBufferGetHeight(image);
+    const int sz = (int) CVPixelBufferGetDataSize(image);
+    const int pitch = (int) CVPixelBufferGetBytesPerRow(image);
+    SDL_Log("CAMERA: buffer planar=%d numPlanes=%d %d x %d sz=%d pitch=%d", planar, numPlanes, w, h, sz, pitch);
+    #endif

-    - (void) set: (struct SDL_PrivateCameraData *) val {
-        _hidden = val;
-    }
+    // !!! FIXME: this currently copies the data to the surface (see the FIXME below about non-contiguous planar surfaces), but in theory we could just keep this locked until ReleaseFrame...
+    CVPixelBufferLockBaseAddress(image, 0);

-    - (void) captureOutput:(AVCaptureOutput *)output
-            didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
-            fromConnection:(AVCaptureConnection *) connection {
-        CFRetain(sampleBuffer);
-        CMSimpleQueueEnqueue(_hidden->frame_queue, sampleBuffer);
+    if ((planar == 0) && (numPlanes == 0)) {
+        const int pitch = (int) CVPixelBufferGetBytesPerRow(image);
+        const size_t buflen = pitch * frame->h;
+        frame->pixels = SDL_aligned_alloc(SDL_SIMDGetAlignment(), buflen);
+        if (frame->pixels == NULL) {
+            retval = -1;
+        } else {
+            frame->pitch = pitch;
+            SDL_memcpy(frame->pixels, CVPixelBufferGetBaseAddress(image), buflen);
         }
-
-    - (void)captureOutput:(AVCaptureOutput *)output
-            didDropSampleBuffer:(CMSampleBufferRef)sampleBuffer
-            fromConnection:(AVCaptureConnection *)connection {
-        #if DEBUG_CAMERA
-        SDL_Log("CAMERA: Drop frame..");
-        #endif
+    } else {
+        // !!! FIXME: we have an open issue in SDL3 to allow SDL_Surface to support non-contiguous planar data, but we don't have it yet.
+        size_t buflen = 0;
+        for (int i = 0; (i < numPlanes) && (i < 3); i++) {
+            buflen += CVPixelBufferGetBytesPerRowOfPlane(image, i);
         }
-@end
+        buflen *= frame->h;

-static int COREMEDIA_OpenDevice(SDL_CameraDevice *_this)
-{
-    _this->hidden = (struct SDL_PrivateCameraData *) SDL_calloc(1, sizeof (struct SDL_PrivateCameraData));
-    if (_this->hidden == NULL) {
-        return -1;
+        frame->pixels = SDL_aligned_alloc(SDL_SIMDGetAlignment(), buflen);
+        if (frame->pixels == NULL) {
+            retval = -1;
+        } else {
+            Uint8 *dst = frame->pixels;
+            frame->pitch = (int) CVPixelBufferGetBytesPerRowOfPlane(image, 0); // this is what SDL3 currently expects, probably incorrectly.
+            for (int i = 0; (i < numPlanes) && (i < 3); i++) {
+                const void *src = CVPixelBufferGetBaseAddressOfPlane(image, i);
+                const size_t pitch = CVPixelBufferGetBytesPerRowOfPlane(image, i);
+                SDL_memcpy(dst, src, pitch * frame->h);
+                dst += pitch * frame->h;
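+                // (Note: chroma planes of subsampled formats have fewer than
+                // frame->h rows, so this copies more than each plane strictly
+                // holds; it matches the contiguous layout SDL3 currently
+                // assumes, per the FIXME above.)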
+            }
+        }
     }
-    return 0;
+
+    CVPixelBufferUnlockBaseAddress(image, 0);
+
+    return retval;
 }

-static void COREMEDIA_CloseDevice(SDL_CameraDevice *_this)
+static void COREMEDIA_ReleaseFrame(SDL_CameraDevice *device, SDL_Surface *frame)
 {
-    if (!_this) {
-        return;
-    }
+    // !!! FIXME: if AcquireFrame stops copying and keeps the pixel buffer locked instead, this is where the unlock would go; for now, just free the copy.
+    SDL_aligned_free(frame->pixels);
+}

- if (_this->hidden) {
|
|
|
- AVCaptureSession *session = _this->hidden->session;
|
|
|
+static void COREMEDIA_CloseDevice(SDL_CameraDevice *device)
|
|
|
+{
|
|
|
+ if (device && device->hidden) {
|
|
|
+ SDLPrivateCameraData *hidden = (SDLPrivateCameraData *) CFBridgingRelease(device->hidden);
|
|
|
+ device->hidden = NULL;
|
|
|
|
|
|
+ AVCaptureSession *session = hidden.session;
|
|
|
if (session) {
|
|
|
- AVCaptureInput *input;
|
|
|
- AVCaptureVideoDataOutput *output;
|
|
|
- input = [session.inputs objectAtIndex:0];
|
|
|
- [session removeInput:input];
|
|
|
- output = (AVCaptureVideoDataOutput*)[session.outputs objectAtIndex:0];
|
|
|
- [session removeOutput:output];
|
|
|
- // TODO more cleanup ?
|
|
|
+ hidden.session = nil;
|
|
|
+ [session stopRunning];
|
|
|
+ [session removeInput:[session.inputs objectAtIndex:0]];
|
|
|
+ [session removeOutput:(AVCaptureVideoDataOutput*)[session.outputs objectAtIndex:0]];
|
|
|
+ session = nil;
|
|
|
}
|
|
|
|
|
|
- if (_this->hidden->frame_queue) {
|
|
|
- CFRelease(_this->hidden->frame_queue);
|
|
|
- }
|
|
|
-
|
|
|
- SDL_free(_this->hidden);
|
|
|
- _this->hidden = NULL;
|
|
|
+ hidden.delegate = NULL;
|
|
|
+ hidden.current_sample = NULL;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-static int COREMEDIA_InitDevice(SDL_CameraDevice *_this)
+static int COREMEDIA_OpenDevice(SDL_CameraDevice *device, const SDL_CameraSpec *spec)
 {
-    // !!! FIXME: autorelease pool?
-    NSString *fmt = sdlformat_to_nsfourcc(_this->spec.format);
-    int w = _this->spec.width;
-    int h = _this->spec.height;
-
-    NSError *error = nil;
-    AVCaptureDevice *device = nil;
-    AVCaptureDeviceInput *input = nil;
-    AVCaptureVideoDataOutput *output = nil;
-
-    AVCaptureDeviceFormat *spec_format = nil;
-
-#ifdef SDL_PLATFORM_MACOS
-    if (@available(macOS 10.15, *)) {
-        // good.
-    } else {
-        return -1;
-    }
-#endif
-
-    device = GetCameraDeviceByName(_this->dev_name);
-    if (!device) {
-        goto error;
-    }
-
-    _this->hidden->session = [[AVCaptureSession alloc] init];
-    if (_this->hidden->session == nil) {
-        goto error;
-    }
-
-    [_this->hidden->session setSessionPreset:AVCaptureSessionPresetHigh];
+    AVCaptureDevice *avdevice = (__bridge AVCaptureDevice *) device->handle;

     // Pick format that matches the spec
-    NSArray<AVCaptureDeviceFormat *> *formats = [device formats];
+    const Uint32 sdlfmt = spec->format;
+    const int w = spec->width;
+    const int h = spec->height;
+    const int rate = spec->interval_denominator;
+    AVCaptureDeviceFormat *spec_format = nil;
+    NSArray<AVCaptureDeviceFormat *> *formats = [avdevice formats];
     for (AVCaptureDeviceFormat *format in formats) {
         CMFormatDescriptionRef formatDescription = [format formatDescription];
-        FourCharCode mediaSubType = CMFormatDescriptionGetMediaSubType(formatDescription);
-        NSString *str = fourcc_to_nstring(mediaSubType);
-        if ([str isEqualToString:fmt]) {
-            CMVideoDimensions dim = CMVideoFormatDescriptionGetDimensions(formatDescription);
-            if (dim.width == w && dim.height == h) {
-                spec_format = format;
-                break;
-            }
+        if (CoreMediaFormatToSDL(CMFormatDescriptionGetMediaSubType(formatDescription)) != sdlfmt) {
+            continue;
+        }
+
+        const CMVideoDimensions dim = CMVideoFormatDescriptionGetDimensions(formatDescription);
+        if ( ((int) dim.width != w) || (((int) dim.height) != h) ) {
+            continue;
+        }
+
+        for (AVFrameRateRange *framerate in format.videoSupportedFrameRateRanges) {
+            if ((rate == (int) SDL_ceil((double) framerate.minFrameRate)) || (rate == (int) SDL_floor((double) framerate.maxFrameRate))) {
+                spec_format = format;
+                break;
             }
         }
+
+        if (spec_format != nil) {
+            break;
+        }
     }

     if (spec_format == nil) {
-        return SDL_SetError("format not found");
+        return SDL_SetError("camera spec format not available");
+    } else if (![avdevice lockForConfiguration:NULL]) {
+        return SDL_SetError("Cannot lockForConfiguration");
     }

-    // Set format
-    if ([device lockForConfiguration:NULL] == YES) {
-        device.activeFormat = spec_format;
-        [device unlockForConfiguration];
-    } else {
-        return SDL_SetError("Cannot lockForConfiguration");
+    avdevice.activeFormat = spec_format;
+    [avdevice unlockForConfiguration];
+
+    AVCaptureSession *session = [[AVCaptureSession alloc] init];
+    if (session == nil) {
+        return SDL_SetError("Failed to allocate/init AVCaptureSession");
     }

-    // Input
-    input = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
+    session.sessionPreset = AVCaptureSessionPresetHigh;
+
+    NSError *error = nil;
+    AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:avdevice error:&error];
     if (!input) {
         return SDL_SetError("Cannot create AVCaptureDeviceInput");
     }

-    // Output
-    output = [[AVCaptureVideoDataOutput alloc] init];
-
-#ifdef SDL_PLATFORM_MACOS
-    // FIXME this now fail on ios ... but not using anything works...
-
-    // Specify the pixel format
-    output.videoSettings =
-        [NSDictionary dictionaryWithObject:
-            [NSNumber numberWithInt:kCVPixelFormatType_422YpCbCr8]
-            forKey:(id)kCVPixelBufferPixelFormatTypeKey];
-#endif
-
-    _this->hidden->delegate = [[MySampleBufferDelegate alloc] init];
-    [_this->hidden->delegate set:_this->hidden];
-
+    AVCaptureVideoDataOutput *output = [[AVCaptureVideoDataOutput alloc] init];
+    if (!output) {
+        return SDL_SetError("Cannot create AVCaptureVideoDataOutput");
+    }

-    CMSimpleQueueCreate(kCFAllocatorDefault, 30 /* buffers */, &_this->hidden->frame_queue);
-    if (_this->hidden->frame_queue == nil) {
-        return SDL_SetError("CMSimpleQueueCreate() failed");
+    char threadname[64];
+    SDL_GetCameraThreadName(device, threadname, sizeof (threadname));
+    dispatch_queue_t queue = dispatch_queue_create(threadname, NULL);
+    //dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
+    if (!queue) {
+        return SDL_SetError("dispatch_queue_create() failed");
     }
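+    // (dispatch_queue_create() with NULL attributes yields a serial queue, so
+    // captureOutput: callbacks never overlap; the delegate's single
+    // current_sample slot relies on that ordering.)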

-    _this->hidden->queue = dispatch_queue_create("my_queue", NULL);
-    [output setSampleBufferDelegate:_this->hidden->delegate queue:_this->hidden->queue];
+    SDLCaptureVideoDataOutputSampleBufferDelegate *delegate = [[SDLCaptureVideoDataOutputSampleBufferDelegate alloc] init:device];
+    if (delegate == nil) {
+        return SDL_SetError("Cannot create SDLCaptureVideoDataOutputSampleBufferDelegate");
+    }
+    [output setSampleBufferDelegate:delegate queue:queue];

- if ([_this->hidden->session canAddInput:input] ){
|
|
|
- [_this->hidden->session addInput:input];
|
|
|
- } else {
|
|
|
+ if (![session canAddInput:input]) {
|
|
|
return SDL_SetError("Cannot add AVCaptureDeviceInput");
|
|
|
}
|
|
|
+ [session addInput:input];
|
|
|
|
|
|
- if ([_this->hidden->session canAddOutput:output] ){
|
|
|
- [_this->hidden->session addOutput:output];
|
|
|
- } else {
|
|
|
+ if (![session canAddOutput:output]) {
|
|
|
return SDL_SetError("Cannot add AVCaptureVideoDataOutput");
|
|
|
}
|
|
|
+ [session addOutput:output];
|
|
|
|
|
|
- [_this->hidden->session commitConfiguration];
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
+ [session commitConfiguration];
|
|
|
|
|
|
-static int COREMEDIA_GetDeviceSpec(SDL_CameraDevice *_this, SDL_CameraSpec *spec)
|
|
|
-{
|
|
|
- // !!! FIXME: make sure higher level checks spec != NULL
|
|
|
- if (spec) {
|
|
|
- SDL_copyp(spec, &_this->spec);
|
|
|
- return 0;
|
|
|
+ SDLPrivateCameraData *hidden = [[SDLPrivateCameraData alloc] init];
|
|
|
+ if (hidden == nil) {
|
|
|
+ return SDL_SetError("Cannot create SDLPrivateCameraData");
|
|
|
}
|
|
|
- return -1;
|
|
|
-}
|
|
|
|
|
|
-static int COREMEDIA_StartCamera(SDL_CameraDevice *_this)
-{
-    [_this->hidden->session startRunning];
-    return 0;
-}
+    hidden.session = session;
+    hidden.delegate = delegate;
+    hidden.current_sample = NULL;
+    device->hidden = (struct SDL_PrivateCameraData *)CFBridgingRetain(hidden);

-static int COREMEDIA_StopCamera(SDL_CameraDevice *_this)
-{
-    [_this->hidden->session stopRunning];
-    return 0;
-}
+    [session startRunning]; // !!! FIXME: docs say this can block while camera warms up and shouldn't be done on main thread. Maybe push through `queue`?

-static int COREMEDIA_AcquireFrame(SDL_CameraDevice *_this, SDL_CameraFrame *frame)
-{
-    if (CMSimpleQueueGetCount(_this->hidden->frame_queue) > 0) {
-        CMSampleBufferRef sampleBuffer = (CMSampleBufferRef)CMSimpleQueueDequeue(_this->hidden->frame_queue);
-        frame->internal = (void *) sampleBuffer;
-        frame->timestampNS = SDL_GetTicksNS();
-
-        CVImageBufferRef image = CMSampleBufferGetImageBuffer(sampleBuffer);
-        const int numPlanes = CVPixelBufferGetPlaneCount(image);
-        const int planar = CVPixelBufferIsPlanar(image);
-
-        #if DEBUG_CAMERA
-        const int w = CVPixelBufferGetWidth(image);
-        const int h = CVPixelBufferGetHeight(image);
-        const int sz = CVPixelBufferGetDataSize(image);
-        const int pitch = CVPixelBufferGetBytesPerRow(image);
-        SDL_Log("CAMERA: buffer planar=%d count:%d %d x %d sz=%d pitch=%d", planar, numPlanes, w, h, sz, pitch);
-        #endif
+    CheckCameraPermissions(device); // check right away, in case the process is already granted permission.

-        CVPixelBufferLockBaseAddress(image, 0);
-
-        if ((planar == 0) && (numPlanes == 0)) {
-            frame->pitch[0] = CVPixelBufferGetBytesPerRow(image);
-            frame->data[0] = CVPixelBufferGetBaseAddress(image);
-            frame->num_planes = 1;
-        } else {
-            for (int i = 0; (i < numPlanes) && (i < 3); i++) {
-                frame->num_planes += 1;
-                frame->data[i] = CVPixelBufferGetBaseAddressOfPlane(image, i);
-                frame->pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(image, i);
-            }
-        }
-
-        // Unlocked when frame is released
-    } else {
-        // no frame
-        SDL_Delay(20); // TODO fix some delay
-    }
     return 0;
 }

-static int COREMEDIA_ReleaseFrame(SDL_CameraDevice *_this, SDL_CameraFrame *frame)
+static void COREMEDIA_FreeDeviceHandle(SDL_CameraDevice *device)
 {
-    if (frame->internal) {
-        CMSampleBufferRef sampleBuffer = (CMSampleBufferRef) frame->internal;
-        CVImageBufferRef image = CMSampleBufferGetImageBuffer(sampleBuffer);
-        CVPixelBufferUnlockBaseAddress(image, 0);
-        CFRelease(sampleBuffer);
+    if (device && device->handle) {
+        CFBridgingRelease(device->handle);
     }
-
-    return 0;
 }

-static int COREMEDIA_GetNumFormats(SDL_CameraDevice *_this)
+static void GatherCameraSpecs(AVCaptureDevice *device, CameraFormatAddData *add_data)
 {
-    AVCaptureDevice *device = GetCameraDeviceByName(_this->dev_name);
-    if (device) {
-        // LIST FORMATS
-        NSMutableOrderedSet<NSString *> *array_formats = [NSMutableOrderedSet new];
-        NSArray<AVCaptureDeviceFormat *> *formats = [device formats];
-        for (AVCaptureDeviceFormat *format in formats) {
-            // NSLog(@"%@", formats);
-            CMFormatDescriptionRef formatDescription = [format formatDescription];
-            //NSLog(@"%@", formatDescription);
-            FourCharCode mediaSubType = CMFormatDescriptionGetMediaSubType(formatDescription);
-            NSString *str = fourcc_to_nstring(mediaSubType);
-            [array_formats addObject:str];
-        }
-        return [array_formats count];
-    }
-    return 0;
-}
+    SDL_zerop(add_data);

-static int COREMEDIA_GetFormat(SDL_CameraDevice *_this, int index, Uint32 *format)
-{
-    AVCaptureDevice *device = GetCameraDeviceByName(_this->dev_name);
-    if (device) {
-        // LIST FORMATS
-        NSMutableOrderedSet<NSString *> *array_formats = [NSMutableOrderedSet new];
-        NSArray<AVCaptureDeviceFormat *> *formats = [device formats];
-        NSString *str;
-
-        for (AVCaptureDeviceFormat *f in formats) {
-            FourCharCode mediaSubType;
-            CMFormatDescriptionRef formatDescription;
-
-            formatDescription = [f formatDescription];
-            mediaSubType = CMFormatDescriptionGetMediaSubType(formatDescription);
-            str = fourcc_to_nstring(mediaSubType);
-            [array_formats addObject:str];
+    for (AVCaptureDeviceFormat *fmt in device.formats) {
+        if (CMFormatDescriptionGetMediaType(fmt.formatDescription) != kCMMediaType_Video) {
+            continue;
         }

-        str = array_formats[index];
-        *format = nsfourcc_to_sdlformat(str);
+        const Uint32 sdlfmt = CoreMediaFormatToSDL(CMFormatDescriptionGetMediaSubType(fmt.formatDescription));
+        if (sdlfmt == SDL_PIXELFORMAT_UNKNOWN) {
+            continue;
+        }

-        return 0;
-    }
-    return -1;
-}
+        const CMVideoDimensions dims = CMVideoFormatDescriptionGetDimensions(fmt.formatDescription);
+        const int w = (int) dims.width;
+        const int h = (int) dims.height;
+        for (AVFrameRateRange *framerate in fmt.videoSupportedFrameRateRanges) {
+            int rate;
+
-static int COREMEDIA_GetNumFrameSizes(SDL_CameraDevice *_this, Uint32 format)
-{
-    AVCaptureDevice *device = GetCameraDeviceByName(_this->dev_name);
-    if (device) {
-        NSString *fmt = sdlformat_to_nsfourcc(format);
-        int count = 0;
-
-        NSArray<AVCaptureDeviceFormat *> *formats = [device formats];
-        for (AVCaptureDeviceFormat *f in formats) {
-            CMFormatDescriptionRef formatDescription = [f formatDescription];
-            FourCharCode mediaSubType = CMFormatDescriptionGetMediaSubType(formatDescription);
-            NSString *str = fourcc_to_nstring(mediaSubType);
-
-            if ([str isEqualToString:fmt]) {
-                count++;
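+            // (Why ceil/floor: AVFrameRateRange endpoints are floating point,
+            // e.g. 29.97, while SDL camera specs carry integer frame rates, so
+            // both rounded endpoints of each range get advertised.)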
+            rate = (int) SDL_ceil((double) framerate.minFrameRate);
+            if (rate) {
+                SDL_AddCameraFormat(add_data, sdlfmt, w, h, 1, rate);
+            }
+            rate = (int) SDL_floor((double) framerate.maxFrameRate);
+            if (rate) {
+                SDL_AddCameraFormat(add_data, sdlfmt, w, h, 1, rate);
             }
         }
-        return count;
     }
-    return 0;
 }

-static int COREMEDIA_GetFrameSize(SDL_CameraDevice *_this, Uint32 format, int index, int *width, int *height)
+static SDL_bool FindCoreMediaCameraDeviceByUniqueID(SDL_CameraDevice *device, void *userdata)
 {
-    AVCaptureDevice *device = GetCameraDeviceByName(_this->dev_name);
-    if (device) {
-        NSString *fmt = sdlformat_to_nsfourcc(format);
-        int count = 0;
-
-        NSArray<AVCaptureDeviceFormat *> *formats = [device formats];
-        for (AVCaptureDeviceFormat *f in formats) {
-            CMFormatDescriptionRef formatDescription = [f formatDescription];
-            FourCharCode mediaSubType = CMFormatDescriptionGetMediaSubType(formatDescription);
-            NSString *str = fourcc_to_nstring(mediaSubType);
-
-            if ([str isEqualToString:fmt]) {
-                if (index == count) {
-                    CMVideoDimensions dim = CMVideoFormatDescriptionGetDimensions(formatDescription);
-                    *width = dim.width;
-                    *height = dim.height;
-                    return 0;
-                }
-                count++;
-            }
-        }
-    }
-    return -1;
+    NSString *uniqueid = (__bridge NSString *) userdata;
+    AVCaptureDevice *avdev = (__bridge AVCaptureDevice *) device->handle;
+    return ([uniqueid isEqualToString:avdev.uniqueID]) ? SDL_TRUE : SDL_FALSE;
 }

-static int COREMEDIA_GetDeviceName(SDL_CameraDeviceID instance_id, char *buf, int size)
+static void MaybeAddDevice(AVCaptureDevice *device)
 {
-    int index = instance_id - 1;
-    NSArray<AVCaptureDevice *> *devices = DiscoverCameraDevices();
-    if (index < [devices count]) {
-        AVCaptureDevice *device = devices[index];
-        NSString *cameraID = [device localizedName];
-        const char *str = [cameraID UTF8String];
-        SDL_snprintf(buf, size, "%s", str);
-        return 0;
+    if (!device.connected) {
+        return; // not connected.
+    } else if (![device hasMediaType:AVMediaTypeVideo]) {
+        return; // not a camera.
+    } else if (SDL_FindPhysicalCameraDeviceByCallback(FindCoreMediaCameraDeviceByUniqueID, (__bridge void *) device.uniqueID)) {
+        return; // already have this one.
     }
-    return -1;
-}

-static int GetNumCameraDevices(void)
-{
-    NSArray<AVCaptureDevice *> *devices = DiscoverCameraDevices();
-    return [devices count];
+    CameraFormatAddData add_data;
+    GatherCameraSpecs(device, &add_data);
+    if (add_data.num_specs > 0) {
+        SDL_AddCameraDevice(device.localizedName.UTF8String, add_data.num_specs, add_data.specs, (void *) CFBridgingRetain(device));
+    }
+    SDL_free(add_data.specs);
 }

-static SDL_CameraDeviceID *COREMEDIA_GetDevices(int *count)
+static void COREMEDIA_DetectDevices(void)
 {
-    // hard-coded list of ID
-    const int num = GetNumCameraDevices();
-    SDL_CameraDeviceID *retval = (SDL_CameraDeviceID *)SDL_calloc((num + 1), sizeof(*retval));
+    NSArray<AVCaptureDevice *> *devices = nil;
+
+    if (@available(macOS 10.15, iOS 13, *)) {
+        // kind of annoying that there isn't a "give me anything that looks like a camera" option,
+        // so this list will need to be updated when Apple decides to add
+        // AVCaptureDeviceTypeBuiltInQuadrupleCamera some day.
+        NSArray *device_types = @[
+            #ifdef SDL_PLATFORM_IOS
+            AVCaptureDeviceTypeBuiltInTelephotoCamera,
+            AVCaptureDeviceTypeBuiltInDualCamera,
+            AVCaptureDeviceTypeBuiltInDualWideCamera,
+            AVCaptureDeviceTypeBuiltInTripleCamera,
+            AVCaptureDeviceTypeBuiltInUltraWideCamera,
+            #else
+            AVCaptureDeviceTypeExternalUnknown,
+            #endif
+            AVCaptureDeviceTypeBuiltInWideAngleCamera
+        ];

-    if (retval == NULL) {
-        *count = 0;
-        return NULL;
-    }
+        AVCaptureDeviceDiscoverySession *discoverySession = [AVCaptureDeviceDiscoverySession
+            discoverySessionWithDeviceTypes:device_types
+            mediaType:AVMediaTypeVideo
+            position:AVCaptureDevicePositionUnspecified];

-    for (int i = 0; i < num; i++) {
-        retval[i] = i + 1;
+        devices = discoverySession.devices;
+        // !!! FIXME: this can use Key Value Observation to get hotplug events.
+    } else {
+        // this is deprecated but works back to macOS 10.7; 10.15 added AVCaptureDeviceDiscoverySession as a replacement.
+        devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
+        // !!! FIXME: this can use AVCaptureDeviceWasConnectedNotification and AVCaptureDeviceWasDisconnectedNotification with NSNotificationCenter to get hotplug events.
     }
-    retval[num] = 0;
-    *count = num;
-    return retval;
-}
-static void COREMEDIA_DetectDevices(void)
-{
+    for (AVCaptureDevice *device in devices) {
+        MaybeAddDevice(device);
+    }
 }

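+// (A rough sketch of the hotplug FIXMEs above; not wired up in this patch,
+// and device removal would still need SDL-side plumbing:
+//     [[NSNotificationCenter defaultCenter]
+//         addObserverForName:AVCaptureDeviceWasConnectedNotification
+//                     object:nil
+//                      queue:nil
+//                 usingBlock:^(NSNotification *note) {
+//                     MaybeAddDevice((AVCaptureDevice *) note.object);
+//                 }];
+// )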
 static void COREMEDIA_Deinitialize(void)
 {
+    // !!! FIXME: disable hotplug.
 }

 static SDL_bool COREMEDIA_Init(SDL_CameraDriverImpl *impl)
 {
-#ifndef HAVE_COREMEDIA
-    return SDL_FALSE;
-#else
     impl->DetectDevices = COREMEDIA_DetectDevices;
     impl->OpenDevice = COREMEDIA_OpenDevice;
     impl->CloseDevice = COREMEDIA_CloseDevice;
-    impl->InitDevice = COREMEDIA_InitDevice;
-    impl->GetDeviceSpec = COREMEDIA_GetDeviceSpec;
-    impl->StartCamera = COREMEDIA_StartCamera;
-    impl->StopCamera = COREMEDIA_StopCamera;
+    impl->WaitDevice = COREMEDIA_WaitDevice;
     impl->AcquireFrame = COREMEDIA_AcquireFrame;
     impl->ReleaseFrame = COREMEDIA_ReleaseFrame;
-    impl->GetNumFormats = COREMEDIA_GetNumFormats;
-    impl->GetFormat = COREMEDIA_GetFormat;
-    impl->GetNumFrameSizes = COREMEDIA_GetNumFrameSizes;
-    impl->GetFrameSize = COREMEDIA_GetFrameSize;
-    impl->GetDeviceName = COREMEDIA_GetDeviceName;
-    impl->GetDevices = COREMEDIA_GetDevices;
+    impl->FreeDeviceHandle = COREMEDIA_FreeDeviceHandle;
     impl->Deinitialize = COREMEDIA_Deinitialize;

+    impl->ProvidesOwnCallbackThread = SDL_TRUE;
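+    // (ProvidesOwnCallbackThread tells the camera core not to spawn its own
+    // capture thread; the GCD queue created in OpenDevice drives
+    // SDL_CameraThreadIterate via the sample-buffer delegate instead.)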
+
     return SDL_TRUE;
-#endif
 }

 CameraBootStrap COREMEDIA_bootstrap = {
     "coremedia", "SDL Apple CoreMedia camera driver", COREMEDIA_Init, SDL_FALSE
 };

-#endif // HAVE_COREMEDIA
-
-#endif // SDL_CAMERA_COREMEDIA
+#endif // SDL_CAMERA_DRIVER_COREMEDIA