Create a Virtual Camera to Test Scanning using Static Images or Videos

Creating a virtual camera is useful to test camera-dependent applications. We can use static images or videos as the image source for the virtual camera to test things like barcode reading, text recognition, and document scanning.

Adopting pyvirtualcam

Creating a virtual camera involves complex driver development. Fortunately, there have already been some projects like OBS for Windows/macOS and v4l2loopback for Linux, which can add a virtual camera to the system.

pyvirtualcam is a Python library that sends frames to a virtual camera backed by OBS (on Windows/macOS) or v4l2loopback (on Linux).

In this article, we are going to use pyvirtualcam to control the virtual camera.

Here is a video demonstrating the whole process:

Environment Setup

  1. Install Python 3.
  2. Install Python packages: pip install pyvirtualcam opencv-python Pillow numpy av
  3. Install the virtual camera:

    For Windows and macOS, install OBS.

    For Linux, install v4l2loopback following its instructions.

Using Videos

PyAV is a Pythonic binding for the FFmpeg libraries. We are going to use it to extract frames of videos and send them to the virtual camera.

Here is the complete code:

import av
import pyvirtualcam
import sys

def main(path):
    """Stream the frames of the video at *path* to a virtual camera, looping forever."""
    # Probe the file once to size the virtual camera to match its frames.
    container = av.open(path)
    video_stream = container.streams.video[0]
    width = video_stream.codec_context.coded_width
    height = video_stream.codec_context.coded_height
    container.close()

    cam = pyvirtualcam.Camera(width=width, height=height, fps=20)

    while True:
        # Re-open the requested file (not a hard-coded one) on each pass so
        # the video loops; the context manager closes it, avoiding a leak of
        # one open container per iteration.
        with av.open(path) as container:
            stream = container.streams.video[0]
            for frame in container.decode(stream):
                # pyvirtualcam expects RGB frames by default, so decode to
                # rgb24 rather than bgr24.
                cam.send(frame.to_ndarray(format='rgb24'))
                cam.sleep_until_next_frame()

if __name__ == "__main__":
    path = "example.mp4"
    if len(sys.argv) == 2:
        path = sys.argv[1]
    print(path)
    main(path)

Using Images

We use OpenCV to read static images. Remember to convert the color space from BGR to RGB.

import pyvirtualcam
import cv2
import sys
import utils

def main(path):
    """Serve the image at *path* as a never-ending virtual camera stream."""
    # Load the image, convert OpenCV's BGR channel order to the RGB order
    # the virtual camera expects, then pad it to the target aspect ratio.
    image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    image = utils.add_padding(image)
    height, width = image.shape[:2]

    cam = pyvirtualcam.Camera(width=width, height=height, fps=20)
    # Keep feeding the same frame at the camera's frame rate.
    while True:
        cam.send(image)
        cam.sleep_until_next_frame()

if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) == 2 else "example.png"
    print(path)
    main(path)

Since the camera may have a fixed resolution like 1280x720, an image with a different aspect ratio would be stretched to fit. We can instead add padding so the image keeps its proportions, using the following code:

import cv2

def get_img_radio(img):
    """Return the image's aspect ratio as long side divided by short side (>= 1)."""
    height, width = img.shape[:2]
    return max(width, height) / min(width, height)

def add_padding(img, ratio=16/9):
    """Pad *img* with white borders so its aspect ratio becomes *ratio*.

    *ratio* is long side / short side (default 16:9, matching common camera
    resolutions such as 1280x720). The content stays centered: when the image
    is "longer" than the target ratio the short side is padded, otherwise the
    long side is. The far edge absorbs the rounding remainder so the result
    hits the target size exactly (the old left==right / top==bottom split
    could fall one pixel short).
    """
    width = img.shape[1]
    height = img.shape[0]

    top = bottom = left = right = 0
    if get_img_radio(img) > ratio:  # e.g. 17/9 > 16/9: pad the short side
        if width > height:
            # max() guards against float rounding producing negative padding.
            desired_height = max(height, round(width / ratio))
            top = (desired_height - height) // 2
            bottom = desired_height - height - top
        else:
            desired_width = max(width, round(height / ratio))
            left = (desired_width - width) // 2
            right = desired_width - width - left
    else:  # e.g. 4/3 < 16/9: pad the long side
        if width >= height:
            desired_width = max(width, round(height * ratio))
            left = (desired_width - width) // 2
            right = desired_width - width - left
        else:
            desired_height = max(height, round(width * ratio))
            top = (desired_height - height) // 2
            bottom = desired_height - height - top

    return cv2.copyMakeBorder(img, top, bottom, left, right,
                              cv2.BORDER_CONSTANT, value=[255, 255, 255])

Making the Image Draggable

We can make the image draggable to adjust its location in the camera stream.

  1. Use OpenCV’s imshow to show the image.

     def main():
         """Show the image in an OpenCV window while streaming it to the camera."""
         height, width = img.shape[:2]
         cam = pyvirtualcam.Camera(width=width, height=height, fps=20)
         cv2.namedWindow("image", cv2.WINDOW_NORMAL)
         while True:
             time.sleep(0.05)
             # The camera wants RGB; OpenCV keeps images in BGR.
             cam.send(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
             cam.sleep_until_next_frame()
             cv2.imshow('image', img)
             key = cv2.waitKey(1) & 0xFF
             if key == 27:  # Esc quits
                 break

     if __name__ == "__main__":
         path = sys.argv[1] if len(sys.argv) == 2 else "example.png"
         img = cv2.imread(path)
         img = utils.add_padding(img)
         # Keep an untouched copy; dragging re-renders from this original.
         vis = img.copy()
         main()
    
  2. Set up mouse callback to handle mouse events. If the left button is down, the image will be moved if the mouse moves.

     # Drag state: press position, the translation committed by previous
     # drags, and the translation of the drag currently in progress.
     ix, iy = -1, -1
     previous_tx = 0
     previous_ty = 0
     current_tx = 0
     current_ty = 0
     is_mouse_down = False

     def handle_mouse_events(event, x, y, flags, param):
         """Mouse callback: translate the image while the left button is held.

         NOTE: previous_tx/previous_ty must be declared global here; without
         the declaration the button-up assignments create locals and the
         drag offset is lost between drags.
         """
         global ix, iy, is_mouse_down, vis, img
         global previous_tx, previous_ty, current_tx, current_ty
         if event == cv2.EVENT_LBUTTONDOWN:
             ix, iy = x, y
             is_mouse_down = True

         elif event == cv2.EVENT_MOUSEMOVE:
             if is_mouse_down:
                 # Shift relative to the press position: dragging right
                 # moves the image right.
                 img = move_image(vis, x - ix, y - iy)

         elif event == cv2.EVENT_LBUTTONUP:
             is_mouse_down = False
             # Commit this drag so the next one starts from here.
             previous_tx = current_tx
             previous_ty = current_ty
            
     def main():
         # Register the drag handler on the "image" window; presumably this
         # line is added to the main() shown earlier after cv2.namedWindow.
         cv2.setMouseCallback('image', handle_mouse_events)
    

    warpAffine is used to shift the image.

     def move_image(image, tx, ty):
         """Return *image* translated by the accumulated drag offset."""
         global current_tx, current_ty
         # Add the offset committed by earlier drags, and remember the
         # running total so it can be committed on button-up.
         current_tx = previous_tx + tx
         current_ty = previous_ty + ty
         rows, cols = image.shape[:2]
         translation = np.float64([[1, 0, current_tx],
                                   [0, 1, current_ty]])
         return cv2.warpAffine(image, translation, (cols, rows))
    

Testing QR Code Scanning using the Virtual Camera

Let’s test the virtual camera using the JavaScript version of Dynamsoft Barcode Reader (DBR).

Here is the HTML file:

<!DOCTYPE html>
<html>

<body>
  <script src="https://cdn.jsdelivr.net/npm/dynamsoft-javascript-barcode@9.0.0/dist/dbr.js"></script>
  <script>
    (async () => {
      // Create a barcode scanner instance backed by the (virtual) camera.
      const scanner = await Dynamsoft.DBR.BarcodeScanner.createInstance();

      // Restrict decoding to the central 50% of the frame: the region is
      // measured in percent and shrunk by 25% on every side.
      const settings = await scanner.getRuntimeSettings();
      settings.region.regionMeasuredByPercentage = 1;
      settings.region.regionLeft = 25;
      settings.region.regionTop = 25;
      settings.region.regionRight = 75;
      settings.region.regionBottom = 75;
      await scanner.updateRuntimeSettings(settings);

      // Log every frame's results; pop up each newly seen barcode text.
      scanner.onFrameRead = results => {
        if (results.length > 0) console.log(results);
      };
      scanner.onUnduplicatedRead = (txt, result) => {
        alert(txt);
      };
      await scanner.show();
    })();
  </script>
</body>

</html>

A scan region is set by updating DBR’s runtime settings.

Congratulations! You have now successfully created a virtual camera that can be used to test scanning using static images or videos.

Source Code

You can find the complete code here:

https://github.com/xulihang/Virtual-Camera