Audio and Video Processors

Processors extend the agent's capabilities by analyzing and transforming audio/video streams in real time. Examples of what processors can support:
  • API calls or state: Fetch or maintain additional state, such as the score or stats of a video game or sports match.
  • Video analysis: Pose detection, object recognition, and similar tasks. The annotated video is sent to the realtime LLM.
  • Video/image capture: Easily support AI-driven video or image capture.
  • Video/audio transforms: Video avatars, video effects, and more.
The examples below make this concrete.

Simple Examples

Simple Logging
import logging
from typing import Optional

from vision_agents.core.processors import AudioVideoProcessor, AudioProcessorMixin

class AudioLogger(AudioVideoProcessor, AudioProcessorMixin):
    def __init__(self, interval: int = 2):
        super().__init__(interval=interval, receive_audio=True, receive_video=False)
        self.audio_count = 0

    async def process_audio(
        self, audio_data: bytes, user_id: str, metadata: Optional[dict] = None
    ) -> None:
        """Log audio data information."""
        if self.should_process():
            self.audio_count += 1
            logging.info(
                f"🎵 Audio #{self.audio_count} from {user_id}: {len(audio_data)} bytes"
            )
Thumbnail Capture
import time
from pathlib import Path
from typing import Optional

from PIL import Image

from vision_agents.core.processors import AudioVideoProcessor, ImageProcessorMixin

class ImageCapture(AudioVideoProcessor, ImageProcessorMixin):
    """Captures video frames at regular intervals."""

    def __init__(self, output_dir: str = "captured_frames", interval: int = 3):
        super().__init__(interval=interval, receive_audio=False, receive_video=True)
        self.output_dir = Path(output_dir)
        self.frame_count = 0
        self.output_dir.mkdir(parents=True, exist_ok=True)

    async def process_image(
        self, image: Image.Image, user_id: str, metadata: Optional[dict] = None
    ):
        if not self.should_process():
            return None

        # Wall-clock timestamp keeps filenames unique across runs
        timestamp = int(time.time())
        filename = f"frame_{user_id}_{timestamp}_{self.frame_count:04d}.jpg"
        filepath = self.output_dir / filename

        # Save the frame as JPG
        image.save(filepath, "JPEG", quality=90)
        self.frame_count += 1
        
        return str(filepath)
Green Hue Effect
from typing import Optional

from PIL import Image, ImageEnhance
from vision_agents.core.processors import AudioVideoProcessor, ImageProcessorMixin

class GreenHueEffect(AudioVideoProcessor, ImageProcessorMixin):
    """Applies a green hue effect to video frames."""

    def __init__(self, intensity: float = 0.5):
        super().__init__(interval=0, receive_audio=False, receive_video=True)
        self.intensity = intensity

    async def process_image(
        self, image: Image.Image, user_id: str, metadata: Optional[dict] = None
    ):
        """Apply green hue to the image."""
        # Convert to RGB if needed
        if image.mode != 'RGB':
            image = image.convert('RGB')
        
        # Split into channels
        r, g, b = image.split()
        
        # Enhance green channel
        green_enhancer = ImageEnhance.Brightness(g)
        g = green_enhancer.enhance(1.0 + self.intensity)
        
        # Reduce red and blue
        red_enhancer = ImageEnhance.Brightness(r)
        r = red_enhancer.enhance(1.0 - self.intensity * 0.5)
        
        blue_enhancer = ImageEnhance.Brightness(b)
        b = blue_enhancer.enhance(1.0 - self.intensity * 0.5)
        
        # Merge channels back
        return Image.merge('RGB', (r, g, b))

Advanced Examples

YOLO Pose
This processor runs YOLO pose detection and annotates video frames with skeleton overlays.
from vision_agents.core import Agent, User
from vision_agents.plugins import getstream, gemini, ultralytics

# In your agent setup
agent = Agent(
    edge=getstream.Edge(),
    agent_user=User(name="AI golf coach"),
    instructions="Read @golf_coach.md",
    llm=gemini.Realtime(fps=10),
    processors=[
        ultralytics.YOLOPoseProcessor(
            model_path="yolo11n-pose.pt",
            conf_threshold=0.5,
            enable_hand_tracking=True
        )
    ],
)
The YOLOPoseProcessor detects human poses in incoming video frames and annotates them with skeleton overlays, which are then sent to the LLM for visual analysis (a sketch of a do-it-yourself annotation processor follows the list). This enables use cases like:
  • Golf swing analysis
  • Fitness form checking
  • Dance instruction
  • Physical therapy monitoring
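Building your own annotation processor follows the same pattern as the simple examples above. As a minimal sketch (the LabelOverlay class is illustrative, not part of the library; it assumes, as in the Green Hue example, that the image returned from process_image replaces the frame), a processor that stamps each frame with the sender's user id could look like this:
from typing import Optional

from PIL import Image, ImageDraw

from vision_agents.core.processors import AudioVideoProcessor, ImageProcessorMixin

class LabelOverlay(AudioVideoProcessor, ImageProcessorMixin):
    """Illustrative: stamps each frame with the sender's user id."""

    def __init__(self):
        # interval=0 annotates every frame
        super().__init__(interval=0, receive_audio=False, receive_video=True)

    async def process_image(
        self, image: Image.Image, user_id: str, metadata: Optional[dict] = None
    ):
        if image.mode != "RGB":
            image = image.convert("RGB")
        draw = ImageDraw.Draw(image)
        draw.text((10, 10), f"user: {user_id}", fill="lime")
        return image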

Processor API

Audio
# process incoming audio
async def process_audio(
    self, audio_data: bytes, user_id: str, metadata: Optional[dict] = None
) -> None:
    """Process audio data. Override this method to implement audio processing."""
    pass
    
# publish outgoing audio (optional)
def publish_audio_track(self):
    """Return an audio track to publish transformed audio."""
    return aiortc.AudioStreamTrack()
Images
async def process_image(
    self, image: Image.Image, user_id: str, metadata: Optional[dict] = None
):
    """Process image frames extracted from video."""
    pass
Video
# process incoming video
async def process_video(
    self,
    track: aiortc.mediastreams.MediaStreamTrack,
    user_id: str,
    shared_forwarder = None,
):
    """Process raw video track."""
    pass

# publish outgoing video (optional)
def publish_video_track(self):
    """Return a video track to publish transformed video."""
    return aiortc.VideoStreamTrack()
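
To publish transformed media, pair a publisher mixin with a custom aiortc track. The sketch below is illustrative: the SolidColorTrack and GreenFramePublisher names are made up here, and the VideoPublisherMixin import path is assumed to match the other processor classes. It publishes a solid green 640×480 stream:
import av
import numpy as np
from aiortc import VideoStreamTrack

from vision_agents.core.processors import AudioVideoProcessor, VideoPublisherMixin

class SolidColorTrack(VideoStreamTrack):
    """An aiortc track that yields solid green frames."""

    async def recv(self):
        # next_timestamp() paces the track at aiortc's default frame rate
        pts, time_base = await self.next_timestamp()
        img = np.zeros((480, 640, 3), dtype=np.uint8)
        img[:, :, 1] = 255  # fill the green channel
        frame = av.VideoFrame.from_ndarray(img, format="rgb24")
        frame.pts = pts
        frame.time_base = time_base
        return frame

class GreenFramePublisher(AudioVideoProcessor, VideoPublisherMixin):
    """Illustrative publisher: exposes the track above as outgoing video."""

    def __init__(self):
        super().__init__(interval=0, receive_audio=False, receive_video=False)

    def publish_video_track(self):
        return SolidColorTrack()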

Using Processors

Add processors to your agent:
from vision_agents.core import Agent, User
from vision_agents.plugins import getstream, openai

# AudioLogger and ImageCapture are the example processors defined above

agent = Agent(
    edge=getstream.Edge(),
    agent_user=User(name="AI Assistant"),
    instructions="You're a helpful assistant.",
    llm=openai.LLM(model="gpt-4o-mini"),
    processors=[
        ImageCapture(output_dir="./captures", interval=5),
        AudioLogger(interval=2),
    ],
)

Processor Base Classes

  • AudioVideoProcessor: Base class with interval-based processing support
  • AudioProcessorMixin: Implement process_audio() to process audio streams
  • ImageProcessorMixin: Implement process_image() to process video frames as PIL Images
  • VideoProcessorMixin: Implement process_video() to process raw video tracks
  • VideoPublisherMixin: Implement publish_video_track() to publish transformed video
  • AudioPublisherMixin: Implement publish_audio_track() to publish transformed audio
The should_process() method respects the interval parameter, allowing you to control processing frequency and reduce computational overhead.
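Conceptually, the gate behaves roughly like the sketch below (illustrative only, not the library's actual implementation): work runs at most once per interval seconds, and interval=0 disables throttling entirely.
import time

class IntervalGate:
    """Illustrative stand-in for interval-based throttling."""

    def __init__(self, interval: float):
        self.interval = interval
        self._last_run = 0.0

    def should_process(self) -> bool:
        # With interval=0 this always returns True (process every frame)
        now = time.monotonic()
        if now - self._last_run >= self.interval:
            self._last_run = now
            return True
        return False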