mirror of https://github.com/status-im/qzxing.git
Take captureRect into account when converting YUV420P
This allows us to copy only the actually required data from the source frame into the greyscale target image. Without this patch, my desktop machine with a camera providing YUV420 frames takes about 25ms to decode a single 1920x1080 frame using a 25% capture rect. With the patch applied, the performance goes up drastically, and we now only take about 17ms to decode a single frame. The same should be done for NV12 frames, but I didn't have a device at hand to test the code there yet.
This commit is contained in:
parent
6d945cce4f
commit
e77dcccd5f
|
@ -117,27 +117,49 @@ static bool isRectValid(const QRect& rect)
|
||||||
return rect.x() >= 0 && rect.y() >= 0 && rect.isValid();
|
return rect.x() >= 0 && rect.y() >= 0 && rect.isValid();
|
||||||
}
|
}
|
||||||
|
|
||||||
static QImage rgbDataToGrayscale(const uchar* data, const int width, const int height,
|
struct CaptureRect
|
||||||
|
{
|
||||||
|
CaptureRect(const QRect& captureRect, int sourceWidth, int sourceHeight)
|
||||||
|
: isValid(isRectValid(captureRect))
|
||||||
|
, sourceWidth(sourceWidth)
|
||||||
|
, sourceHeight(sourceHeight)
|
||||||
|
, startX(isValid ? captureRect.x() : 0)
|
||||||
|
, targetWidth(isValid ? captureRect.width() : sourceWidth)
|
||||||
|
, endX(startX + targetWidth)
|
||||||
|
, startY(isValid ? captureRect.y() : 0)
|
||||||
|
, targetHeight(isValid ? captureRect.height() : sourceHeight)
|
||||||
|
, endY(startY + targetHeight)
|
||||||
|
{}
|
||||||
|
|
||||||
|
bool isValid;
|
||||||
|
int sourceWidth;
|
||||||
|
int sourceHeight;
|
||||||
|
|
||||||
|
int startX;
|
||||||
|
int targetWidth;
|
||||||
|
int endX;
|
||||||
|
|
||||||
|
int startY;
|
||||||
|
int targetHeight;
|
||||||
|
int endY;
|
||||||
|
};
|
||||||
|
|
||||||
|
static QImage rgbDataToGrayscale(const uchar* data, const CaptureRect& captureRect,
|
||||||
const int alpha, const int red,
|
const int alpha, const int red,
|
||||||
const int green, const int blue,
|
const int green, const int blue,
|
||||||
const QRect& captureRect,
|
|
||||||
const bool isPremultiplied = false)
|
const bool isPremultiplied = false)
|
||||||
{
|
{
|
||||||
const int stride = (alpha < 0) ? 3 : 4;
|
const int stride = (alpha < 0) ? 3 : 4;
|
||||||
|
|
||||||
const int startX = isRectValid(captureRect) ? captureRect.x() : 0;
|
const int endX = captureRect.sourceWidth - captureRect.startX - captureRect.targetWidth;
|
||||||
const int startY = isRectValid(captureRect) ? captureRect.y() : 0;
|
const int skipX = (endX + captureRect.startX) * stride;
|
||||||
const int targetWidth = isRectValid(captureRect) ? captureRect.width() : width;
|
|
||||||
const int targetHeight = isRectValid(captureRect) ? captureRect.height() : height;
|
|
||||||
const int endX = width - startX - targetWidth;
|
|
||||||
const int skipX = (endX + startX ) * stride;
|
|
||||||
|
|
||||||
QImage image(targetWidth, targetHeight, QImage::Format_Grayscale8);
|
QImage image(captureRect.targetWidth, captureRect.targetHeight, QImage::Format_Grayscale8);
|
||||||
uchar* pixelInit = image.bits();
|
uchar* pixelInit = image.bits();
|
||||||
data += (startY * width + startX) * stride;
|
data += (captureRect.startY * captureRect.sourceWidth + captureRect.startX) * stride;
|
||||||
for (int y = 1; y <= targetHeight; ++y) {
|
for (int y = 1; y <= captureRect.targetHeight; ++y) {
|
||||||
uchar* pixel = pixelInit + (targetHeight - y) * targetWidth;
|
uchar* pixel = pixelInit + (captureRect.targetHeight - y) * captureRect.targetWidth;
|
||||||
for (int x = 0; x < targetWidth; ++x) {
|
for (int x = 0; x < captureRect.targetWidth; ++x) {
|
||||||
uchar r = data[red];
|
uchar r = data[red];
|
||||||
uchar g = data[green];
|
uchar g = data[green];
|
||||||
uchar b = data[blue];
|
uchar b = data[blue];
|
||||||
|
@ -167,32 +189,33 @@ static QImage rgbDataToGrayscale(const uchar* data, const int width, const int h
|
||||||
return image;
|
return image;
|
||||||
}
|
}
|
||||||
|
|
||||||
void QZXingFilterRunnable::processVideoFrameProbed(SimpleVideoFrame & videoFrame, const QRect& captureRect)
|
void QZXingFilterRunnable::processVideoFrameProbed(SimpleVideoFrame & videoFrame, const QRect& _captureRect)
|
||||||
{
|
{
|
||||||
static unsigned int i = 0; i++;
|
static unsigned int i = 0; i++;
|
||||||
// qDebug() << "Future: Going to process frame: " << i;
|
// qDebug() << "Future: Going to process frame: " << i;
|
||||||
|
|
||||||
const int width = videoFrame.size.width();
|
const int width = videoFrame.size.width();
|
||||||
const int height = videoFrame.size.height();
|
const int height = videoFrame.size.height();
|
||||||
|
const CaptureRect captureRect(_captureRect, width, height);
|
||||||
const uchar* data = (uchar*) videoFrame.data.constData();
|
const uchar* data = (uchar*) videoFrame.data.constData();
|
||||||
/// Create QImage from QVideoFrame.
|
/// Create QImage from QVideoFrame.
|
||||||
QImage image;
|
QImage image;
|
||||||
|
|
||||||
/// Let's try to convert it from RGB formats
|
/// Let's try to convert it from RGB formats
|
||||||
if (videoFrame.pixelFormat == QVideoFrame::Format_RGB32)
|
if (videoFrame.pixelFormat == QVideoFrame::Format_RGB32)
|
||||||
image = rgbDataToGrayscale(data, width, height, 0, 1, 2, 3, captureRect);
|
image = rgbDataToGrayscale(data, captureRect, 0, 1, 2, 3);
|
||||||
else if (videoFrame.pixelFormat == QVideoFrame::Format_ARGB32)
|
else if (videoFrame.pixelFormat == QVideoFrame::Format_ARGB32)
|
||||||
image = rgbDataToGrayscale(data, width, height, 0, 1, 2, 3, captureRect);
|
image = rgbDataToGrayscale(data, captureRect, 0, 1, 2, 3);
|
||||||
else if (videoFrame.pixelFormat == QVideoFrame::Format_ARGB32_Premultiplied)
|
else if (videoFrame.pixelFormat == QVideoFrame::Format_ARGB32_Premultiplied)
|
||||||
image = rgbDataToGrayscale(data, width, height, 0, 1, 2, 3, captureRect, true);
|
image = rgbDataToGrayscale(data, captureRect, 0, 1, 2, 3, true);
|
||||||
else if (videoFrame.pixelFormat == QVideoFrame::Format_BGRA32)
|
else if (videoFrame.pixelFormat == QVideoFrame::Format_BGRA32)
|
||||||
image = rgbDataToGrayscale(data, width, height, 3, 2, 1, 0, captureRect);
|
image = rgbDataToGrayscale(data, captureRect, 3, 2, 1, 0);
|
||||||
else if (videoFrame.pixelFormat == QVideoFrame::Format_BGRA32_Premultiplied)
|
else if (videoFrame.pixelFormat == QVideoFrame::Format_BGRA32_Premultiplied)
|
||||||
image = rgbDataToGrayscale(data, width, height, 3, 2, 1, 0, captureRect, true);
|
image = rgbDataToGrayscale(data, captureRect, 3, 2, 1, 0, true);
|
||||||
else if (videoFrame.pixelFormat == QVideoFrame::Format_BGR32)
|
else if (videoFrame.pixelFormat == QVideoFrame::Format_BGR32)
|
||||||
image = rgbDataToGrayscale(data, width, height, 3, 2, 1, 0, captureRect);
|
image = rgbDataToGrayscale(data, captureRect, 3, 2, 1, 0);
|
||||||
else if (videoFrame.pixelFormat == QVideoFrame::Format_BGR24)
|
else if (videoFrame.pixelFormat == QVideoFrame::Format_BGR24)
|
||||||
image = rgbDataToGrayscale(data, width, height, -1, 2, 1, 0, captureRect);
|
image = rgbDataToGrayscale(data, captureRect, -1, 2, 1, 0);
|
||||||
|
|
||||||
/// This is a forced "conversion", colors end up swapped.
|
/// This is a forced "conversion", colors end up swapped.
|
||||||
if(image.isNull() && videoFrame.pixelFormat == QVideoFrame::Format_BGR555)
|
if(image.isNull() && videoFrame.pixelFormat == QVideoFrame::Format_BGR555)
|
||||||
|
@ -204,17 +227,18 @@ void QZXingFilterRunnable::processVideoFrameProbed(SimpleVideoFrame & videoFrame
|
||||||
|
|
||||||
//fix for issues #4 and #9
|
//fix for issues #4 and #9
|
||||||
if(image.isNull() && videoFrame.pixelFormat == QVideoFrame::Format_YUV420P) {
|
if(image.isNull() && videoFrame.pixelFormat == QVideoFrame::Format_YUV420P) {
|
||||||
image = QImage(videoFrame.size, QImage::Format_Grayscale8);
|
image = QImage(captureRect.targetWidth, captureRect.targetHeight, QImage::Format_Grayscale8);
|
||||||
uchar* pixel = image.bits();
|
uchar* pixel = image.bits();
|
||||||
const int wh = width * height;
|
const int wh = width * height;
|
||||||
const int w_2 = width / 2;
|
const int w_2 = width / 2;
|
||||||
const int wh_54 = wh * 5 / 4;
|
const int wh_54 = wh * 5 / 4;
|
||||||
for (int y = 0; y < height; y++) {
|
|
||||||
|
for (int y = captureRect.startY; y < captureRect.endY; y++) {
|
||||||
const int Y_offset = y * width;
|
const int Y_offset = y * width;
|
||||||
const int y_2 = y / 2;
|
const int y_2 = y / 2;
|
||||||
const int U_offset = y_2 * w_2 + wh;
|
const int U_offset = y_2 * w_2 + wh;
|
||||||
const int V_offset = y_2 * w_2 + wh_54;
|
const int V_offset = y_2 * w_2 + wh_54;
|
||||||
for (int x = 0; x < width; x++) {
|
for (int x = captureRect.startX; x < captureRect.endX; x++) {
|
||||||
const int x_2 = x / 2;
|
const int x_2 = x / 2;
|
||||||
const uchar Y = data[Y_offset + x];
|
const uchar Y = data[Y_offset + x];
|
||||||
const uchar U = data[U_offset + x_2];
|
const uchar U = data[U_offset + x_2];
|
||||||
|
@ -262,8 +286,8 @@ void QZXingFilterRunnable::processVideoFrameProbed(SimpleVideoFrame & videoFrame
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (isRectValid(captureRect) && image.size() != captureRect.size())
|
if (captureRect.isValid && image.size() != _captureRect.size())
|
||||||
image = image.copy(captureRect);
|
image = image.copy(_captureRect);
|
||||||
|
|
||||||
// qDebug() << "image.size()" << image.size();
|
// qDebug() << "image.size()" << image.size();
|
||||||
// qDebug() << "image.format()" << image.format();
|
// qDebug() << "image.format()" << image.format();
|
||||||
|
|
Loading…
Reference in New Issue