Python ball pits

From ShawnReevesWiki
Revision as of 21:05, 28 February 2026 by Shawn (talk | contribs) (New page with code)
(diff) ← Older revision | Latest revision (diff) | Newer revision → (diff)
Jump to navigation · Jump to search

I've enjoyed the interactive, camera-driven particle displays I've seen at the Museum of Science and other places, and I've wanted to program one for years. So, as I started to learn Python, I used the Devstral LLM to jumpstart a program and then added features myself. Here's the code:

from cv2_enumerate_cameras import enumerate_cameras
import cv2
import numpy as np
import random
from collections import deque
import time
"""Todo
Display on frame notice if no cameras found.
Button for quitting.
Button for going back to camera selection.
Convert from global variables to parameters passed to functions.
Variable particle colors, size, mass
Sliders/dials for parameters
Smoothly add/reduce particles
KNOW THAT VARIABLES INSIDE A FUNCTION CAN BE ACCESSED BY ENCLOSED FUNCTIONS

"""
# Global configuration for the particle simulation and the UI.
particle_count = 80  # Initial number of particles
min_area = 500       # Minimum contour area (pixels) to consider as motion
history = 10         # Number of frames for background subtraction
particles = deque(maxlen=particle_count)  # each entry is [x, y, dx, dy]
particle_size = 10   # drawn particle radius in pixels
particle_color = (250, 200, 50)  # particle color tuple (OpenCV BGR order)
motion_objects = []  # List to store motion objects (center, direction)
damping = 0.9995     # per-frame velocity damping factor (< 1 slows particles)
gravity = 0.09       # constant downward acceleration added each frame
MAX_MOTION_OBJECTS = 10  # cap on motion contours tracked per frame
STATES = ("Camera selection", "Activity")  # app states; currently informational only
state = 0            # index into STATES
window_size = {"width":1024,"height":768}  # camera-selection window size in pixels

def capture_still(camera):
    """Capture and return a single still frame from *camera*.

    camera: a CameraInfo-like object with .index and .backend attributes.

    Returns the frame as a BGR numpy array, or None if the camera could
    not be opened or the read failed. The capture device is always
    released, even if the read raises.
    """
    cap = cv2.VideoCapture(camera.index, camera.backend)
    try:
        if not cap.isOpened():
            return None
        # Wait for the camera to warm up before grabbing a snapshot;
        # some cameras error out when read immediately after opening.
        time.sleep(0.6)
        ret, frame = cap.read()
        return frame if ret else None
    finally:
        cap.release()
            
def display_cameras_and_get_selection(cameras):
    """Display stills from all cameras in a grid and let the user pick one.

    cameras: list of CameraInfo objects (uses .index and .backend via
    capture_still).

    Returns the selected camera index, or None if the user pressed ESC
    or no camera produced a usable still.
    """
    camera_ID = None

    def on_mouse_event_camera_selection(event, x, y, flags, param):
        """Click handler: map a left-click to the thumbnail it landed on."""
        nonlocal camera_ID
        if event == cv2.EVENT_LBUTTONDOWN:
            camera_indices, positions = param
            for i, (cx, cy, cw, ch) in enumerate(positions):
                if cx <= x <= cx + cw and cy <= y <= cy + ch:
                    camera_ID = camera_indices[i]
                    return
        return

    # Capture stills, skipping cameras that fail to deliver a frame so
    # the thumbnail grid and the click map stay aligned with each other.
    camera_indices = []
    stills = []
    for camera in cameras:
        still = capture_still(camera)
        if still is not None:
            camera_indices.append(camera.index)
            stills.append(still)
    if not stills:
        return None

    # Create a window to display all stills
    window_name = "Select Camera"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(window_name, window_size["width"], window_size["height"])

    # Grid layout: fixed number of columns, as many rows as needed.
    cols = 4

    # Prepare to store camera positions for click detection
    camera_positions = []

    # Thumbnail size must account for the (cols - 1) gaps BETWEEN columns;
    # the previous "- 2*spacing" made the last column overflow the canvas
    # width with 4+ cameras, raising a numpy shape-mismatch error.
    spacing = 10
    img_width = (window_size["width"] - (cols - 1) * spacing) // cols
    img_height = img_width * window_size["height"] // window_size["width"]

    # Blank canvas the size of the selection window.
    canvas = np.ones((window_size["height"], window_size["width"], 3), dtype=np.uint8)

    # Paint all stills in a grid on the canvas.
    for i, still in enumerate(stills):
        row = i // cols
        col = i % cols

        resized = cv2.resize(still, (img_width, img_height))

        # Top-left corner of this thumbnail in the grid.
        x = col * (img_width + spacing)
        y = row * (img_height + spacing)

        # Remember the rectangle for click detection.
        camera_positions.append((x, y, img_width, img_height))

        canvas[y:y+img_height, x:x+img_width] = resized

        # Label the thumbnail with its camera index.
        cv2.putText(canvas, f"Camera {camera_indices[i]}", (x+10, y+20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

    cv2.imshow(window_name, canvas)
    cv2.setMouseCallback(window_name, on_mouse_event_camera_selection,
                         (camera_indices, camera_positions))

    # Poll until the user clicks a thumbnail or presses ESC.
    while True:
        key = cv2.waitKey(1) & 0xFF
        if key == 27:  # ESC key cancels selection
            cv2.destroyAllWindows()
            return None
        if camera_ID is not None:
            cv2.destroyAllWindows()
            return camera_ID

def activity(camera):
    """Run the motion-detection + particle activity for the chosen camera.

    Shows a mirrored live feed, detects motion with MOG2 background
    subtraction, and lets detected motion regions accelerate simulated
    particles. The window offers a particle-count trackbar plus RESET
    and QUIT buttons. Returns None when the user quits ('q' or QUIT),
    or immediately if the camera cannot be opened.
    """
    global particle_count
    global particles

    class button:
        """Clickable rectangular button drawn directly on the video frame."""
        def __init__(self, x, y, width, height, title):
            self.x = x
            self.y = y
            self.width = width
            self.height = height
            self.title = title
            self.clicked = False  # set by the mouse callback, consumed in the loop

    reset_button = button(10, 10, 100, 40, "RESET")
    quit_button = button(120, 10, 100, 40, "QUIT")
    buttons = [reset_button, quit_button]

    def on_mouse_click(event, x, y, flags, param):
        """Mark any button whose rectangle contains a left-click."""
        buttons = param
        if event == cv2.EVENT_LBUTTONDOWN:
            for button in buttons:
                # Bugfix: the vertical bound previously compared against
                # button.width, giving the hitbox the wrong height.
                if (button.x <= x <= button.x + button.width
                        and button.y <= y <= button.y + button.height):
                    button.clicked = True
        return None

    def on_particle_count_change(val):
        """Trackbar callback: record the requested particle count."""
        global particle_count
        particle_count = val

    def update_particles(motion_objects):
        """Advance particle physics; accelerate particles near motion objects."""
        global particles

        # Frame bounds for edge bouncing (invariant per call, so hoisted
        # out of the per-particle loop).
        frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

        for i in range(len(particles)):
            x, y, dx, dy = particles[i]
            # Damp motion, more strongly for faster particles.
            dx *= damping/(1+(np.abs(dx)/1000))
            dy *= damping/(1+(np.abs(dy)/1000))
            dy += gravity

            # Nearby motion objects add their direction vector to the
            # particle velocity, scaled down with distance.
            for obj in motion_objects:
                obj_x, obj_y, obj_dx, obj_dy = obj
                distance = np.sqrt((x - obj_x)**2 + (y - obj_y)**2)
                if distance < 100:  # proximity threshold in pixels
                    dx += obj_dx * 1/(distance+2)
                    dy += obj_dy * 1/(distance+2)

            # Update position.
            x += dx
            y += dy

            # Bounce off the frame edges.
            if x <= 0:
                dx = np.abs(dx)
                x = 0
            if x >= frame_width:
                dx = -1 * np.abs(dx)
                x = frame_width
            if y <= 0:
                dy = np.abs(dy)
                y = 0
            if y >= frame_height:
                dy = -1 * np.abs(dy)
                y = frame_height

            particles[i] = [x, y, dx, dy]

    def generate_particles(count=particle_count):
        """(Re)populate the particle deque with *count* random particles."""
        global particles
        nonlocal cap
        particles.clear()
        frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

        for _ in range(count):
            x = random.randint(0, int(frame_width))
            y = random.randint(0, int(frame_height))
            dx = random.uniform(-1, 1)  # small random initial velocity
            dy = random.uniform(-1, 1)
            particles.append([x, y, dx, dy])

    # Initialize video capture with the selected camera. Bail out early
    # instead of spinning forever in the read loop when the camera has
    # disappeared since selection.
    cap = cv2.VideoCapture(camera.index, camera.backend)
    if not cap.isOpened():
        cap.release()
        return None

    # Create window and particle-count trackbar.
    activityWindowName = 'Motion Detection with Particles'
    cv2.namedWindow(activityWindowName)
    cv2.createTrackbar('Particles', activityWindowName, particle_count, 100, on_particle_count_change)

    # Background subtractor: short history so motion decays quickly.
    backSub = cv2.createBackgroundSubtractorMOG2(history, 16, True)

    # Generate initial particles and hook up button clicks.
    generate_particles()
    cv2.setMouseCallback(activityWindowName, on_mouse_click, buttons)

    while True:
        ret, frame = cap.read()
        if not ret:
            continue

        # Mirror the image so motion feels natural to the user.
        frame = cv2.flip(frame, 1)

        # Resize the particle pool when the trackbar moved.
        current_particle_count = cv2.getTrackbarPos('Particles', activityWindowName)
        if current_particle_count != particle_count:
            particle_count = current_particle_count
            particles = deque(maxlen=particle_count)
            generate_particles(particle_count)

        # Handle button clicks flagged by the mouse callback.
        if reset_button.clicked:
            reset_button.clicked = False
            generate_particles(particle_count)
        if quit_button.clicked:
            break

        # Motion mask from background subtraction on the grayscale frame.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        fgMask = backSub.apply(gray)

        # Find contours in the motion mask.
        contours, _ = cv2.findContours(fgMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        motion_objects = []  # reset motion objects each frame

        for cnt in contours:
            if cv2.contourArea(cnt) > min_area and len(motion_objects) < MAX_MOTION_OBJECTS:
                x, y, w, h = cv2.boundingRect(cnt)

                # Center of the moving region.
                center_x = x + w // 2
                center_y = y + h // 2

                # Direction vector from the frame center to the motion,
                # used to push particles outward from the middle.
                frame_center_x = frame.shape[1] // 2
                frame_center_y = frame.shape[0] // 2
                dx = center_x - frame_center_x
                dy = center_y - frame_center_y

                motion_objects.append((center_x, center_y, dx, dy))

                # Draw a magenta disc with a green "star" at the center.
                cv2.circle(frame, (center_x, center_y), 20, (255, 0, 255), -1)
                starsize = 20
                cv2.line(frame, (center_x, center_y), (center_x+starsize, center_y+starsize), (0, 255, 0), 2)
                cv2.line(frame, (center_x, center_y), (center_x-starsize, center_y+starsize), (0, 255, 0), 2)
                cv2.line(frame, (center_x, center_y), (center_x+starsize, center_y-starsize), (0, 255, 0), 2)
                cv2.line(frame, (center_x, center_y), (center_x-starsize, center_y-starsize), (0, 255, 0), 2)

        # Physics step, then draw particles and buttons on top.
        update_particles(motion_objects)
        for particle in particles:
            x, y, _, _ = particle
            cv2.circle(frame, (int(x), int(y)), particle_size, particle_color, -1)
        for button in buttons:
            # Bugfix: cv2.rectangle takes two corner points; the original
            # passed (width, height) instead of the bottom-right corner.
            cv2.rectangle(frame, (button.x, button.y),
                          (button.x + button.width, button.y + button.height),
                          (255, 255, 255), -1)
            cv2.putText(frame, button.title, (button.x + 10, button.y + button.height - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)

        # Display the composited frame.
        cv2.imshow(activityWindowName, frame)

        # Exit on 'q' key.
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break

    # Release resources.
    cap.release()
    cv2.destroyAllWindows()
    return None

def main():
    """Entry point: enumerate cameras, let the user pick one, run the activity."""
    cameras = enumerate_cameras()
    if not cameras:
        # No cameras found; an empty list is falsy, so this single check
        # replaces the previous duplicated length test.
        return

    # Display stills and get the user's selection (an index, or None).
    selected_camera = display_cameras_and_get_selection(cameras)
    if selected_camera is None:
        # User cancelled (ESC) or no usable camera stills.
        return None

    # Map the selected index back to its CameraInfo object.
    camera = next((camera for camera in cameras if camera.index == selected_camera), None)
    if camera is not None:
        activity(camera)
    # If the lookup failed (camera vanished between enumeration and
    # selection) there is nothing to run; previously activity(None)
    # would have crashed on camera.index.

if __name__ == "__main__":
    main()