From 05f7aa4589ce4b5858de359072aab30c05f42b57 Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Mon, 1 Dec 2025 16:57:11 -0800 Subject: [PATCH 01/13] Splits video play into zin and sdl example --- build.zig | 48 +- .../{video_play.zig => video_play_sdl.zig} | 0 src/examples/video_play_zin.zig | 800 ++++++++++++++++++ 3 files changed, 844 insertions(+), 4 deletions(-) rename src/examples/{video_play.zig => video_play_sdl.zig} (100%) create mode 100644 src/examples/video_play_zin.zig diff --git a/build.zig b/build.zig index 3f722d9..955b483 100644 --- a/build.zig +++ b/build.zig @@ -302,12 +302,12 @@ pub fn build(b: *std.Build) void { }, }); - // Build the video play example. + // Build the video play SDL example. { const video_play = b.addExecutable(.{ - .name = "video-play", + .name = "video-play-sdl", .root_module = b.createModule(.{ - .root_source_file = b.path("src/examples/video_play.zig"), + .root_source_file = b.path("src/examples/video_play_sdl.zig"), .target = target, .optimize = optimize, }), @@ -330,7 +330,47 @@ pub fn build(b: *std.Build) void { video_play.linkLibrary(sdl.artifact("SDL3")); b.installArtifact(video_play); - const run_step = b.step("video-play", "Run the video-play example"); + const run_step = b.step("video-play-sdl", "Run the video-play example"); + + const run_cmd = b.addRunArtifact(video_play); + run_step.dependOn(&run_cmd.step); + + run_cmd.step.dependOn(b.getInstallStep()); + + if (b.args) |args| { + run_cmd.addArgs(args); + } + } + + // Build the video play ZIN example. + { + const video_play = b.addExecutable(.{ + .name = "video-play-zin", + .root_module = b.createModule(.{ + .root_source_file = b.path("src/examples/video_play_zin.zig"), + .target = target, + .optimize = optimize, + }), + }); + + const sdl = b.dependency("sdl", .{ + .optimize = optimize, + .target = target, + }); + + if (use_zig_module) { + video_play.root_module.addImport("pipewire", libpipewire_zig); + } else { + video_play.linkLibrary(libpipewire); + video_play.root_module.addImport("pipewire", c); + } + + video_play.root_module.addOptions("example_options", example_options); + + video_play.linkLibrary(sdl.artifact("SDL3")); + b.installArtifact(video_play); + + const run_step = b.step("video-play-zin", "Run the video-play example"); const run_cmd = b.addRunArtifact(video_play); run_step.dependOn(&run_cmd.step); diff --git a/src/examples/video_play.zig b/src/examples/video_play_sdl.zig similarity index 100% rename from src/examples/video_play.zig rename to src/examples/video_play_sdl.zig diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play_zin.zig new file mode 100644 index 0000000..bc5691c --- /dev/null +++ b/src/examples/video_play_zin.zig @@ -0,0 +1,800 @@ +// `pipewire/src/examples/video-play.c` translated to Zig. + +const std = @import("std"); +const log = std.log; +const example_options = @import("example_options"); + +// Normal code wouldn't do this, this +const pw = if (example_options.use_zig_module) + // Example of linking with the pipewire zig module + @import("pipewire") +else + // Example of linking with the pipewire static library. We're wrapping it like this just to + // match the Zig module so the rest of the code doesn't need conditionals. 
+ struct { + pub const c = @import("pipewire"); + }; + +const sdl = @cImport({ + @cDefine("WIDTH", std.fmt.comptimePrint("{}", .{width})); + @cDefine("HEIGHT", std.fmt.comptimePrint("{}", .{height})); + @cDefine("RATE", std.fmt.comptimePrint("{}", .{rate})); + @cInclude("SDL3/SDL.h"); +}); + +const width = 1920; +const height = 1080; +const rate = 30; +const max_buffers = 64; + +pub const std_options: std.Options = .{ + .log_level = .info, +}; + +pub fn main() !void { + // If we're linking with the Zig module, set up logging. + var logger = if (example_options.use_zig_module) pw.Logger.init() else {}; + if (example_options.use_zig_module) { + pw.c.pw_log_set(&logger); + pw.c.pw_log_set_level(pw.Logger.default_level); + } + + pw.c.pw_init(0, null); + defer pw.c.pw_deinit(); + + var data: Data = .{}; + + // Create a main loop + data.loop = pw.c.pw_main_loop_new(null).?; + defer pw.c.pw_main_loop_destroy(data.loop); + + _ = pw.c.pw_loop_add_signal(pw.c.pw_main_loop_get_loop(data.loop), pw.c.SIGINT, &doQuit, &data); + _ = pw.c.pw_loop_add_signal(pw.c.pw_main_loop_get_loop(data.loop), pw.c.SIGTERM, &doQuit, &data); + + // create a simple stream, the simple stream manages to core and remote objects for you if you + // don't need to deal with them + // + // If you plan to autoconnect your stream, you need to provide at least media, category and role + // properties + // + // Pass your events and a user_data pointer as the last arguments. This will inform you about + // the stream state. The most important event you need to listen to is the process event where + // you need to consume the data provided to you. + const props = pw.c.pw_properties_new( + pw.c.PW_KEY_MEDIA_TYPE, + "Video", + pw.c.PW_KEY_MEDIA_CATEGORY, + "Capture", + pw.c.PW_KEY_MEDIA_ROLE, + "Camera", + @as(?*anyopaque, null), + ).?; + + var args: std.process.ArgIterator = .init(); + _ = args.next(); + if (args.next()) |arg| { + _ = pw.c.pw_properties_set(props, pw.c.PW_KEY_TARGET_OBJECT, arg); + } + + data.stream = pw.c.pw_stream_new_simple( + pw.c.pw_main_loop_get_loop(data.loop), + "video-play", + props, + &.{ + .version = pw.c.PW_VERSION_STREAM_EVENTS, + .state_changed = &onStreamStateChanged, + .io_changed = &onStreamIoChanged, + .param_changed = &onStreamParamChanged, + .process = &onProcess, + }, + &data, + ).?; + defer pw.c.pw_stream_destroy(data.stream); + + if (!sdl.SDL_Init(sdl.SDL_INIT_VIDEO)) { + log.err("can't initialize SDL: {s}", .{sdl.SDL_GetError()}); + std.process.exit(1); + } + + if (!sdl.SDL_CreateWindowAndRenderer( + "Demo", + width, + height, + sdl.SDL_WINDOW_RESIZABLE, + &data.window, + &data.renderer, + )) { + log.err("can't create window: {s}", .{sdl.SDL_GetError()}); + std.process.exit(1); + } + defer { + if (data.texture) |texture| sdl.SDL_DestroyTexture(texture); + if (data.cursor) |cursor| sdl.SDL_DestroyTexture(cursor); + sdl.SDL_DestroyRenderer(data.renderer); + sdl.SDL_DestroyWindow(data.window); + } + + var buffer: [1024]u8 align(@alignOf(u32)) = undefined; + var b = std.mem.zeroInit(pw.c.spa_pod_builder, .{ + .data = &buffer, + .size = buffer.len, + }); + + // build the extra parameters to connect with. To connect, we can provide a list of supported + // formats. We use a builder that writes the param object to the stack. 
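For orientation, the builder pattern that comment describes, in isolation: a `spa_pod_builder` serializes nested pod objects into a caller-provided buffer, and each `push_object`/`pop` pair brackets one object. A minimal sketch under that assumption (`buildMediaType` is a hypothetical helper, not part of this patch):

```zig
// Builds a bare SPA_TYPE_OBJECT_Format pod into the builder's buffer and returns
// a pointer into that buffer, valid only for as long as the buffer is alive.
fn buildMediaType(b: *pw.c.spa_pod_builder) *const pw.c.spa_pod {
    var f: pw.c.spa_pod_frame = undefined;
    // Open the object; the frame remembers where it started.
    _ = pw.c.spa_pod_builder_push_object(b, &f, pw.c.SPA_TYPE_OBJECT_Format, pw.c.SPA_PARAM_EnumFormat);
    // One property: this object describes a video media type.
    _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_FORMAT_mediaType, 0);
    _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_MEDIA_TYPE_video);
    // Close the object and get the finished pod back.
    return @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(b, &f)));
}
```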
+ var params_buf: [3]?*const pw.c.spa_pod = undefined; + var params: std.ArrayList(?*const pw.c.spa_pod) = .initBuffer(¶ms_buf); + buildFormat(&data, &b, ¶ms); + + { + var f: pw.c.spa_pod_frame = undefined; + // send a tag, input tags travel upstream + pw.c.spa_tag_build_start(&b, &f, pw.c.SPA_PARAM_Tag, pw.c.SPA_DIRECTION_INPUT); + const items: [1]pw.c.spa_dict_item = .{ + pw.c.SPA_DICT_ITEM_INIT("my-tag-other-key", "my-special-other-tag-value"), + }; + pw.c.spa_tag_build_add_dict(&b, &pw.c.SPA_DICT_INIT(items, 1)); + params.appendBounded(pw.c.spa_tag_build_end(&b, &f)) catch @panic("OOB"); + } + + // now connect the stream, we need a direction (input/output), + // an optional target node to connect to, some flags and parameters + // + const res = pw.c.pw_stream_connect( + data.stream, + pw.c.PW_DIRECTION_INPUT, + pw.c.PW_ID_ANY, + pw.c.PW_STREAM_FLAG_AUTOCONNECT | // try to automatically connect this stream + pw.c.PW_STREAM_FLAG_INACTIVE | // we will activate ourselves + pw.c.PW_STREAM_FLAG_MAP_BUFFERS, // mmap the buffer data for us + // extra parameters, see above + params.items.ptr, + @intCast(params.items.len), + ); + if (res < 0) { + log.err("can't connect: {s}", .{pw.c.spa_strerror(res)}); + std.process.exit(1); + } + + // /do things until we quit the mainloop + _ = pw.c.pw_main_loop_run(data.loop); +} + +const Pixel = extern struct { + r: f32, + g: f32, + b: f32, + a: f32, +}; + +const Data = struct { + renderer: ?*sdl.SDL_Renderer = null, + window: ?*sdl.SDL_Window = null, + texture: ?*sdl.SDL_Texture = null, + cursor: ?*sdl.SDL_Texture = null, + + loop: ?*pw.c.pw_main_loop = null, + stream: ?*pw.c.pw_stream = null, + + position: ?*pw.c.spa_io_position = null, + + format: pw.c.spa_video_info = .{}, + stride: i32 = 0, + size: pw.c.spa_rectangle = .{}, + + rect: sdl.SDL_FRect = .{}, + cursor_rect: sdl.SDL_FRect = .{}, + is_yuv: bool = false, +}; + +fn doQuit(userdata: ?*anyopaque, signal_number: c_int) callconv(.c) void { + _ = signal_number; + const data: *Data = @ptrCast(@alignCast(userdata)); + _ = pw.c.pw_main_loop_quit(data.loop); +} + +// our data processing function is in general: +// ``` +// struct pw_buffer *b; +// b = pw_stream_dequeue_buffer(stream); +// +// .. do stuff with buffer ... 
+// +// pw_stream_queue_buffer(stream, b); +// ``` +fn onProcess(userdata: ?*anyopaque) callconv(.c) void { + const data: *Data = @ptrCast(@alignCast(userdata)); + const stream = data.stream; + + var render_cursor = false; + + var maybe_buffer: ?*pw.c.pw_buffer = null; + while (true) { + const t = pw.c.pw_stream_dequeue_buffer(stream) orelse break; + if (maybe_buffer) |b| _ = pw.c.pw_stream_queue_buffer(stream, b); + maybe_buffer = t; + } + const b = maybe_buffer orelse { + log.warn("out of buffers", .{}); + return; + }; + defer _ = pw.c.pw_stream_queue_buffer(stream, b); + + const buf: *pw.c.spa_buffer = b.buffer; + + log.debug("new buffer {*}", .{buf}); + + handleEvents(data); + + const sdata = buf.datas[0].data orelse return; + + const maybe_h: ?*pw.c.spa_meta_header = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_Header, @sizeOf(pw.c.spa_meta_header)))); + if (maybe_h) |h| { + const now = pw.c.pw_stream_get_nsec(stream); + log.debug("now:{} pts:{} diff:{}", .{ now, h.pts, now - @as(u64, @intCast(h.pts)) }); + } + + // get the videocrop metadata if any + const maybe_mc: ?*pw.c.spa_meta_region = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_VideoCrop, @sizeOf(pw.c.spa_meta_region)))); + if (maybe_mc) |mc| { + if (pw.c.spa_meta_region_is_valid(mc)) { + data.rect.x = @floatFromInt(mc.region.position.x); + data.rect.y = @floatFromInt(mc.region.position.y); + data.rect.w = @floatFromInt(mc.region.size.width); + data.rect.h = @floatFromInt(mc.region.size.height); + } + } + // get cursor metadata + const maybe_mcs: ?*pw.c.spa_meta_cursor = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_Cursor, @sizeOf(pw.c.spa_meta_cursor)))); + if (maybe_mcs) |mcs| { + if (pw.c.spa_meta_cursor_is_valid(mcs)) { + data.cursor_rect.x = @floatFromInt(mcs.position.x); + data.cursor_rect.y = @floatFromInt(mcs.position.y); + + const mb: *pw.c.spa_meta_bitmap = @ptrFromInt(@intFromPtr(mcs) + mcs.bitmap_offset); + data.cursor_rect.w = @floatFromInt(mb.size.width); + data.cursor_rect.h = @floatFromInt(mb.size.height); + + if (data.cursor == null) { + data.cursor = sdl.SDL_CreateTexture( + data.renderer, + idToSdlFormat(mb.format), + sdl.SDL_TEXTUREACCESS_STREAMING, + @intCast(mb.size.width), + @intCast(mb.size.height), + ); + _ = sdl.SDL_SetTextureBlendMode(data.cursor, sdl.SDL_BLENDMODE_BLEND); + } + + var cdata: [*c]u8 = undefined; + var cstride: c_int = undefined; + if (!sdl.SDL_LockTexture(data.cursor, null, &cdata, &cstride)) { + log.err("Couldn't lock cursor texture: {s}", .{sdl.SDL_GetError()}); + return; + } + defer sdl.SDL_UnlockTexture(data.cursor); + + // copy the cursor bitmap into the texture + var src: [*]u8 = @ptrFromInt(@intFromPtr(mb) + mb.offset); + var dst = cdata; + const ostride: usize = @intCast(@min(cstride, mb.stride)); + + for (0..mb.size.height) |_| { + @memcpy(dst[0..ostride], src[0..ostride]); + dst += @intCast(cstride); + src += @intCast(mb.stride); + } + + render_cursor = true; + } + } + + // copy video image in texture + if (data.is_yuv) { + var datas: [4]?[*]u8 = undefined; + const sstride = data.stride; + if (buf.n_datas == 1) { + _ = sdl.SDL_UpdateTexture(data.texture, null, sdata, sstride); + } else { + datas[0] = @ptrCast(sdata); + datas[1] = @ptrCast(buf.datas[1].data); + datas[2] = @ptrCast(buf.datas[2].data); + _ = sdl.SDL_UpdateYUVTexture( + data.texture, + null, + datas[0], + sstride, + datas[1], + @divExact(sstride, 2), + datas[2], + @divExact(sstride, 2), + ); + } + } else { + var dstride: c_int = 
undefined; + var ddata: ?*anyopaque = undefined; + if (!sdl.SDL_LockTexture(data.texture, null, &ddata, &dstride)) { + log.err("Couldn't lock texture: {s}", .{sdl.SDL_GetError()}); + } + defer sdl.SDL_UnlockTexture(data.texture); + + var sstride: u32 = @intCast(buf.datas[0].chunk.*.stride); + if (sstride == 0) sstride = buf.datas[0].chunk.*.size / data.size.height; + const ostride = @min(sstride, dstride); + + var src: [*]u8 = @ptrCast(sdata); + var dst: [*]u8 = @ptrCast(ddata); + + if (data.format.media_subtype == pw.c.SPA_MEDIA_SUBTYPE_dsp) { + for (0..data.size.height) |_| { + const pixel: [*]Pixel = @ptrCast(@alignCast(src)); + for (0..data.size.width) |j| { + dst[j * 4 + 0] = @intFromFloat(std.math.clamp(pixel[j].r * 255.0, 0, 255)); + dst[j * 4 + 1] = @intFromFloat(std.math.clamp(pixel[j].g * 255.0, 0, 255)); + dst[j * 4 + 2] = @intFromFloat(std.math.clamp(pixel[j].b * 255.0, 0, 255)); + dst[j * 4 + 3] = @intFromFloat(std.math.clamp(pixel[j].a * 255.0, 0, 255)); + } + src += sstride; + dst += @intCast(dstride); + } + } else { + for (0..data.size.height) |_| { + @memcpy(dst[0..@intCast(ostride)], src[0..@intCast(ostride)]); + src += sstride; + dst += @intCast(dstride); + } + } + } + + _ = sdl.SDL_RenderClear(data.renderer); + // now render the video and then the cursor if any + _ = sdl.SDL_RenderTexture(data.renderer, data.texture, &data.rect, null); + if (render_cursor) _ = sdl.SDL_RenderTexture( + data.renderer, + data.cursor, + null, + &data.cursor_rect, + ); + _ = sdl.SDL_RenderPresent(data.renderer); +} + +fn handleEvents(data: *Data) void { + var event: sdl.SDL_Event = undefined; + while (sdl.SDL_PollEvent(&event)) { + switch (event.type) { + sdl.SDL_EVENT_QUIT => { + _ = pw.c.pw_main_loop_quit(data.loop); + }, + else => {}, + } + } +} + +fn onStreamStateChanged( + userdata: ?*anyopaque, + old: pw.c.pw_stream_state, + state: pw.c.pw_stream_state, + err: [*c]const u8, +) callconv(.c) void { + _ = old; + _ = err; + const data: *Data = @ptrCast(@alignCast(userdata)); + log.info("stream state: \"{s}\"", .{pw.c.pw_stream_state_as_string(state)}); + switch (state) { + pw.c.PW_STREAM_STATE_UNCONNECTED => _ = pw.c.pw_main_loop_quit(data.loop), + // because we started inactive, activate ourselves now + pw.c.PW_STREAM_STATE_PAUSED => _ = pw.c.pw_stream_set_active(data.stream, true), + else => {}, + } +} + +fn onStreamIoChanged(userdata: ?*anyopaque, id: u32, area: ?*anyopaque, size: u32) callconv(.c) void { + _ = size; + const data: *Data = @ptrCast(@alignCast(userdata)); + if (id == pw.c.SPA_IO_Position) { + data.position = @ptrCast(@alignCast(area)); + } +} + +// Be notified when the stream param changes. We're only looking at the +// format changes. +// +// We are now supposed to call pw_stream_finish_format() with success or +// failure, depending on if we can support the format. Because we gave +// a list of supported formats, this should be ok. +// +// As part of pw_stream_finish_format() we can provide parameters that +// will control the buffer memory allocation. This includes the metadata +// that we would like on our buffer, the size, alignment, etp. 
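In isolation, the reply that comment describes: open a `SPA_TYPE_OBJECT_ParamBuffers` object with the acceptable buffer geometry, pop it, and hand it to `pw_stream_update_params`. A hedged sketch under that assumption (`replyBufferParams` and its fixed `size`/`stride` arguments are illustrative; the real handler below negotiates ranges and extra metadata via `spa_pod_builder_add`):

```zig
fn replyBufferParams(stream: ?*pw.c.pw_stream, b: *pw.c.spa_pod_builder, size: i32, stride: i32) void {
    var f: pw.c.spa_pod_frame = undefined;
    _ = pw.c.spa_pod_builder_push_object(b, &f, pw.c.SPA_TYPE_OBJECT_ParamBuffers, pw.c.SPA_PARAM_Buffers);
    // Fixed values for brevity; production code usually offers choice ranges.
    _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_PARAM_BUFFERS_size, 0);
    _ = pw.c.spa_pod_builder_int(b, size);
    _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_PARAM_BUFFERS_stride, 0);
    _ = pw.c.spa_pod_builder_int(b, stride);
    const pod: *const pw.c.spa_pod = @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(b, &f)));
    var params = [_]?*const pw.c.spa_pod{pod};
    _ = pw.c.pw_stream_update_params(stream, &params, params.len);
}
```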
+fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.spa_pod) callconv(.c) void { + const data: *Data = @ptrCast(@alignCast(userdata)); + const stream = data.stream; + var params_buffer: [1024]u8 align(@alignOf(u32)) = undefined; + var b: pw.c.spa_pod_builder = .{ + .data = ¶ms_buffer, + .size = params_buffer.len, + ._padding = 0, + .state = .{ .offset = 0, .flags = 0, .frame = null }, + .callbacks = .{ .funcs = null, .data = null }, + }; + + if (param != null and id == pw.c.SPA_PARAM_Tag) { + log.err("invalid pod", .{}); + return; + } + if (param != null and id == pw.c.SPA_PARAM_Latency) { + var info: pw.c.spa_latency_info = undefined; + if (pw.c.spa_latency_parse(param, &info) >= 0) { + log.info("got latency: {}", .{@divTrunc((info.min_ns + info.max_ns), 2)}); + } + return; + } + // NULL means to clear the format + if (param == null or id != pw.c.SPA_PARAM_Format) return; + + log.info("got format:", .{}); + _ = pw.c.spa_debug_format(2, null, param); + + if (pw.c.spa_format_parse(param, &data.format.media_type, &data.format.media_subtype) < 0) { + return; + } + + if (data.format.media_type != pw.c.SPA_MEDIA_TYPE_video) return; + + const sdl_format, const mult: i32 = switch (data.format.media_subtype) { + pw.c.SPA_MEDIA_SUBTYPE_raw => b: { + // call a helper function to parse the format for us. + _ = pw.c.spa_format_video_raw_parse(param, &data.format.info.raw); + data.size = pw.c.SPA_RECTANGLE(data.format.info.raw.size.width, data.format.info.raw.size.height); + break :b .{ idToSdlFormat(data.format.info.raw.format), 1 }; + }, + pw.c.SPA_MEDIA_SUBTYPE_dsp => b: { + _ = pw.c.spa_format_video_dsp_parse(param, &data.format.info.dsp); + if (data.format.info.dsp.format != pw.c.SPA_VIDEO_FORMAT_DSP_F32) return; + data.size = pw.c.SPA_RECTANGLE(data.position.?.video.size.width, data.position.?.video.size.height); + break :b .{ sdl.SDL_PIXELFORMAT_RGBA32, 4 }; + }, + else => .{ sdl.SDL_PIXELFORMAT_UNKNOWN, 0 }, + }; + + if (sdl_format == sdl.SDL_PIXELFORMAT_UNKNOWN) { + _ = pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "unknown pixel format"); + return; + } + if (data.size.width == 0 or data.size.height == 0) { + _ = pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "invalid size"); + return; + } + + data.texture = sdl.SDL_CreateTexture( + data.renderer, + sdl_format, + sdl.SDL_TEXTUREACCESS_STREAMING, + @intCast(data.size.width), + @intCast(data.size.height), + ); + var d: ?*anyopaque = null; + const size: i32, const blocks: i32 = switch (sdl_format) { + sdl.SDL_PIXELFORMAT_YV12, sdl.SDL_PIXELFORMAT_IYUV => b: { + data.stride = @intCast(data.size.width); + data.is_yuv = true; + break :b .{ + @divExact((data.stride * @as(i32, @intCast(data.size.height))) * 3, 2), + 3, + }; + }, + sdl.SDL_PIXELFORMAT_YUY2 => b: { + data.is_yuv = true; + data.stride = @intCast(data.size.width * 2); + break :b .{ + data.stride * @as(i32, @intCast(data.size.height)), + 1, + }; + }, + else => b: { + if (!sdl.SDL_LockTexture(data.texture, null, &d, &data.stride)) { + log.err("Couldn't lock texture: {s}", .{sdl.SDL_GetError()}); + data.stride = @intCast(data.size.width * 2); + } else { + sdl.SDL_UnlockTexture(data.texture); + } + break :b .{ + data.stride * @as(i32, @intCast(data.size.height)), + 1, + }; + }, + }; + + data.rect.x = 0; + data.rect.y = 0; + data.rect.w = @floatFromInt(data.size.width); + data.rect.h = @floatFromInt(data.size.height); + + // a SPA_TYPE_OBJECT_ParamBuffers object defines the acceptable size, + // number, stride etc of the buffers + var params_buf: [5]?*const 
pw.c.spa_pod = undefined; + var params: std.ArrayList(?*const pw.c.spa_pod) = .initBuffer(¶ms_buf); + var f: pw.c.spa_pod_frame = undefined; + + _ = pw.c.spa_pod_builder_push_object( + &b, + &f, + pw.c.SPA_TYPE_OBJECT_ParamBuffers, + pw.c.SPA_PARAM_Buffers, + ); + _ = pw.c.spa_pod_builder_add( + &b, + + pw.c.SPA_PARAM_BUFFERS_buffers, + "?ri", + @as(c_int, 3), + @as(c_int, 8), + @as(c_int, 2), + @as(c_int, max_buffers), + + pw.c.SPA_PARAM_BUFFERS_blocks, + "i", + blocks, + + pw.c.SPA_PARAM_BUFFERS_size, + "i", + size * mult, + + pw.c.SPA_PARAM_BUFFERS_stride, + "i", + data.stride * mult, + + pw.c.SPA_PARAM_BUFFERS_dataType, + "?fi", + @as(c_int, 1), + @as(c_int, 1 << pw.c.SPA_DATA_MemPtr), + + @as(c_int, 0), + ); + params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB"); + + // a header metadata with timing information + _ = pw.c.spa_pod_builder_push_object( + &b, + &f, + pw.c.SPA_TYPE_OBJECT_ParamMeta, + pw.c.SPA_PARAM_Meta, + ); + _ = pw.c.spa_pod_builder_add( + &b, + + pw.c.SPA_PARAM_META_type, + "I", + pw.c.SPA_META_Header, + + pw.c.SPA_PARAM_META_size, + "i", + @as(usize, @sizeOf(pw.c.spa_meta_header)), + + @as(c_int, 0), + ); + params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB"); + + // video cropping information + _ = pw.c.spa_pod_builder_push_object( + &b, + &f, + pw.c.SPA_TYPE_OBJECT_ParamMeta, + pw.c.SPA_PARAM_Meta, + ); + _ = pw.c.spa_pod_builder_add( + &b, + + pw.c.SPA_PARAM_META_type, + "I", + pw.c.SPA_META_VideoCrop, + + pw.c.SPA_PARAM_META_size, + "i", + @as(usize, @sizeOf(pw.c.spa_meta_region)), + + @as(c_int, 0), + ); + params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB"); + + // cursor information + _ = pw.c.spa_pod_builder_push_object( + &b, + &f, + pw.c.SPA_TYPE_OBJECT_ParamMeta, + pw.c.SPA_PARAM_Meta, + ); + _ = pw.c.spa_pod_builder_add( + &b, + + pw.c.SPA_PARAM_META_type, + "I", + pw.c.SPA_META_Cursor, + + pw.c.SPA_PARAM_META_size, + "?ri", + @as(c_int, 3), + cursorMetaSize(64, 64), + cursorMetaSize(1, 1), + cursorMetaSize(256, 256), + + @as(c_int, 0), + ); + params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB"); + + // we are done + _ = pw.c.pw_stream_update_params(stream, params.items.ptr, @intCast(params.items.len)); +} + +fn cursorMetaSize(w: usize, h: usize) usize { + return @sizeOf(pw.c.spa_meta_cursor) + @sizeOf(pw.c.spa_meta_bitmap) + w * h * 4; +} + +fn buildFormat(data: *Data, b: *pw.c.spa_pod_builder, params: *std.ArrayList(?*const pw.c.spa_pod)) void { + { + const format = sdlBuildFormats(data.renderer.?, b); + log.info("supported SDL formats:", .{}); + _ = pw.c.spa_debug_format(2, null, format); + params.appendBounded(format) catch @panic("OOB"); + } + + { + var f: pw.c.spa_pod_frame = undefined; + _ = pw.c.spa_pod_builder_push_object(b, &f, pw.c.SPA_TYPE_OBJECT_Format, pw.c.SPA_PARAM_EnumFormat); + _ = pw.c.spa_pod_builder_add( + b, + pw.c.SPA_FORMAT_mediaType, + "I", + pw.c.SPA_MEDIA_TYPE_video, + + pw.c.SPA_FORMAT_mediaSubtype, + "I", + pw.c.SPA_MEDIA_SUBTYPE_dsp, + + pw.c.SPA_FORMAT_VIDEO_format, + + "I", + pw.c.SPA_VIDEO_FORMAT_DSP_F32, + + @as(c_int, 0), + ); + const format: *const pw.c.spa_pod = @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(b, &f))); + _ = pw.c.spa_debug_format(2, null, format); + params.appendBounded(format) catch @panic("OOB"); + } +} + +const FormatPair = struct { + format: u32, + id: u32, +}; + +const sdl_video_formats = [_]FormatPair{ + .{ .format = 
sdl.SDL_PIXELFORMAT_UNKNOWN, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_INDEX1LSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_UNKNOWN, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_INDEX1LSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_INDEX1MSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_INDEX4LSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_INDEX4MSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_INDEX8, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_RGB332, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_XRGB4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_XRGB1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_XBGR1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_ARGB4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_RGBA4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_ABGR4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_BGRA4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_ARGB1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_RGBA5551, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_ABGR1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_BGRA5551, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_RGB565, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_BGR565, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_RGB24, .id = pw.c.SPA_VIDEO_FORMAT_BGR }, + .{ .format = sdl.SDL_PIXELFORMAT_XRGB8888, .id = pw.c.SPA_VIDEO_FORMAT_BGR }, + .{ .format = sdl.SDL_PIXELFORMAT_RGBX8888, .id = pw.c.SPA_VIDEO_FORMAT_xBGR }, + .{ .format = sdl.SDL_PIXELFORMAT_BGR24, .id = pw.c.SPA_VIDEO_FORMAT_RGB }, + .{ .format = sdl.SDL_PIXELFORMAT_XBGR8888, .id = pw.c.SPA_VIDEO_FORMAT_RGB }, + .{ .format = sdl.SDL_PIXELFORMAT_BGRX8888, .id = pw.c.SPA_VIDEO_FORMAT_xRGB }, + .{ .format = sdl.SDL_PIXELFORMAT_ARGB2101010, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, + .{ .format = sdl.SDL_PIXELFORMAT_RGBA8888, .id = pw.c.SPA_VIDEO_FORMAT_ABGR }, + .{ .format = sdl.SDL_PIXELFORMAT_ARGB8888, .id = pw.c.SPA_VIDEO_FORMAT_BGRA }, + .{ .format = sdl.SDL_PIXELFORMAT_BGRA8888, .id = pw.c.SPA_VIDEO_FORMAT_ARGB }, + .{ .format = sdl.SDL_PIXELFORMAT_ABGR8888, .id = pw.c.SPA_VIDEO_FORMAT_RGBA }, + .{ .format = sdl.SDL_PIXELFORMAT_YV12, .id = pw.c.SPA_VIDEO_FORMAT_YV12 }, + .{ .format = sdl.SDL_PIXELFORMAT_IYUV, .id = pw.c.SPA_VIDEO_FORMAT_I420 }, + .{ .format = sdl.SDL_PIXELFORMAT_YUY2, .id = pw.c.SPA_VIDEO_FORMAT_YUY2 }, + .{ .format = sdl.SDL_PIXELFORMAT_UYVY, .id = pw.c.SPA_VIDEO_FORMAT_UYVY }, + .{ .format = sdl.SDL_PIXELFORMAT_YVYU, .id = pw.c.SPA_VIDEO_FORMAT_YVYU }, + .{ .format = sdl.SDL_PIXELFORMAT_NV12, .id = pw.c.SPA_VIDEO_FORMAT_NV12 }, + .{ .format = sdl.SDL_PIXELFORMAT_NV21, .id = pw.c.SPA_VIDEO_FORMAT_NV21 }, +}; + +fn sdlFormatToId(format: u32) u32 { + for (sdl_video_formats) |f| { + if (f.format == format) { + return f.id; + } + } + return pw.c.SPA_VIDEO_FORMAT_UNKNOWN; +} + +fn idToSdlFormat(id: u32) u32 { + for (sdl_video_formats) |f| { + if (f.id == id) { + return f.format; + } + } + return 
sdl.SDL_PIXELFORMAT_UNKNOWN; +} + +fn sdlBuildFormats(renderer: *sdl.SDL_Renderer, b: *pw.c.spa_pod_builder) *pw.c.spa_pod { + var f: [2]pw.c.spa_pod_frame = undefined; + + // make an object of type SPA_TYPE_OBJECT_Format and id SPA_PARAM_EnumFormat. The object type is + // important because it defines the properties that are acceptable. The id gives more context + // about what the object is meant to contain. In this case we enumerate supported formats. + _ = pw.c.spa_pod_builder_push_object(b, &f[0], pw.c.SPA_TYPE_OBJECT_Format, pw.c.SPA_PARAM_EnumFormat); + // add media type and media subtype properties + _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_FORMAT_mediaType, 0); + _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_MEDIA_TYPE_video); + _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_FORMAT_mediaSubtype, 0); + _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_MEDIA_SUBTYPE_raw); + + // build an enumeration of formats + _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_FORMAT_VIDEO_format, 0); + _ = pw.c.spa_pod_builder_push_choice(b, &f[1], pw.c.SPA_CHOICE_Enum, 0); + + const props: sdl.SDL_PropertiesID = sdl.SDL_GetRendererProperties(renderer); + + const texture_formats: [*]sdl.SDL_PixelFormat = @ptrCast(@alignCast(sdl.SDL_GetPointerProperty( + props, + sdl.SDL_PROP_RENDERER_TEXTURE_FORMATS_POINTER, + null, + ))); + + // first the formats supported by the textures + var i: u32 = 0; + var ci: u32 = 0; + while (texture_formats[i] != sdl.SDL_PIXELFORMAT_UNKNOWN) : (i += 1) { + const id: u32 = sdlFormatToId(texture_formats[i]); + if (id == 0) continue; + if (ci == 0) _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_VIDEO_FORMAT_UNKNOWN); + ci += 1; + _ = pw.c.spa_pod_builder_id(b, id); + } + // then all the other ones SDL can convert from/to + for (sdl_video_formats) |format| { + const id: u32 = format.id; + if (id != pw.c.SPA_VIDEO_FORMAT_UNKNOWN) { + _ = pw.c.spa_pod_builder_id(b, id); + } + } + _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_VIDEO_FORMAT_RGBA_F32); + _ = pw.c.spa_pod_builder_pop(b, &f[1]); + // add size and framerate ranges + const max_texture_size: u32 = @intCast(sdl.SDL_GetNumberProperty( + props, + sdl.SDL_PROP_RENDERER_MAX_TEXTURE_SIZE_NUMBER, + 0, + )); + _ = pw.c.spa_pod_builder_add( + b, + pw.c.SPA_FORMAT_VIDEO_size, + pw.c.SPA_POD_CHOICE_RANGE_Rectangle( + &pw.c.SPA_RECTANGLE(width, height), + &pw.c.SPA_RECTANGLE(1, 1), + &pw.c.SPA_RECTANGLE(max_texture_size, max_texture_size), + ), + pw.c.SPA_FORMAT_VIDEO_framerate, + pw.c.SPA_POD_CHOICE_RANGE_Fraction( + &pw.c.SPA_FRACTION(rate, 1), + &pw.c.SPA_FRACTION(0, 1), + &pw.c.SPA_FRACTION(30, 1), + ), + @as(c_int, 0), + ); + return @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(b, &f[0]))); +} From 1881647fd521575b574457afe950a53da605c03f Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Wed, 3 Dec 2025 14:50:39 -0800 Subject: [PATCH 02/13] WIP, working but cleaning up --- README.md | 6 + build.zig | 27 +- build.zig.zon | 4 +- src/examples/video_play_sdl.zig | 5 +- src/examples/video_play_zin.zig | 1093 ++++++++++++++----------------- 5 files changed, 516 insertions(+), 619 deletions(-) diff --git a/README.md b/README.md index a28763e..c281fef 100644 --- a/README.md +++ b/README.md @@ -54,3 +54,9 @@ defer pw.pw_deinit(); ``` See [`src/examples`](`src/examples`) for more information. + +### Help, I'm getting undefined symbols! + +If you import the Pipewire zig module but don't reference it, the import won't get evaluated and the wrapper functions won't get exported. 
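A hypothetical minimal reproduction: the import below is declared but nothing in the program references it, so the module is never analyzed and its exported wrappers are never emitted.

```zig
// main.zig (sketch): links against the static library, triggers undefined symbols
const pw = @import("pipewire"); // imported, but never used

pub fn main() void {}
```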
+ +To resolve this, use something from the pipewire module, or declare `comptime { _ = @import("pipewire"); }` to force evaluation. diff --git a/build.zig b/build.zig index 955b483..9fb8350 100644 --- a/build.zig +++ b/build.zig @@ -134,11 +134,9 @@ pub fn build(b: *std.Build) void { .HAVE_GRP_H = {}, .HAVE_GSTREAMER_DEVICE_PROVIDER = {}, .HAVE_MALLOC_INFO = {}, - .HAVE_MALLOC_TRIM = {}, .HAVE_MEMFD_CREATE = {}, .HAVE_PIDFD_OPEN = {}, .HAVE_PWD_H = {}, - .HAVE_RANDOM_R = {}, .HAVE_REALLOCARRAY = {}, .HAVE_SIGABBREV_NP = {}, .HAVE_SPA_PLUGINS = {}, @@ -162,6 +160,12 @@ pub fn build(b: *std.Build) void { .RTPRIO_CLIENT = rtprio_client, .RTPRIO_SERVER = rtprio_server, }); + if (target.result.isGnuLibC()) { + config_h.addValues(.{ + .HAVE_MALLOC_TRIM = {}, + .HAVE_RANDOM_R = {}, + }); + } // Build the library plugins and modules { @@ -344,20 +348,23 @@ pub fn build(b: *std.Build) void { // Build the video play ZIN example. { + const zin = b.dependency("zin", .{ + .optimize = optimize, + .target = target, + }).module("zin"); + const video_play = b.addExecutable(.{ .name = "video-play-zin", .root_module = b.createModule(.{ .root_source_file = b.path("src/examples/video_play_zin.zig"), .target = target, .optimize = optimize, + .imports = &.{ + .{ .name = "zin", .module = zin }, + }, }), }); - const sdl = b.dependency("sdl", .{ - .optimize = optimize, - .target = target, - }); - if (use_zig_module) { video_play.root_module.addImport("pipewire", libpipewire_zig); } else { @@ -367,7 +374,6 @@ pub fn build(b: *std.Build) void { video_play.root_module.addOptions("example_options", example_options); - video_play.linkLibrary(sdl.artifact("SDL3")); b.installArtifact(video_play); const run_step = b.step("video-play-zin", "Run the video-play example"); @@ -422,11 +428,6 @@ const flags: []const []const u8 = &.{ // we just wrap the aliases as well. "-D__open_2=__wrap_open_2", "-D__open_alias=__wrap___open_alias", - - // Since `spa_autoclose` points to a function defined in a header, its close doesn't get - // wrapped. Wrap it manually. - "-Dspa_autoclose=__attribute__((__cleanup__(__wrap_close)))", - "-Dspa_autoclose=__attribute__((__cleanup__(__wrap_close)))", }; pub const PluginAndModuleCtx = struct { diff --git a/build.zig.zon b/build.zig.zon index 8a2f58e..b170231 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -21,8 +21,8 @@ .hash = "sdl-0.0.0-i4QD0btgqAABajEXrQnyZr1xVsk7LM48w2nBmuQ5gdfr", }, .zin = .{ - .url = "git+https://github.com/marler8997/zin#d3e230690f09debdb99dffe4586ad19f9dfb4716", - .hash = "zin-0.0.0-W7QDx6kKAwCVg-wWMO4FdpvZMH66NJpc9wFDdxp5c8E2", + .url = "git+https://github.com/marler8997/zin#f59fa3511ad6b666d3cb028605dd7b2983841b8d", + .hash = "zin-0.0.0-W7QDx4NXAwA67HYz0-tKcNsXV1LgVMNTEHxh6JuHG_c5", }, }, .paths = .{ diff --git a/src/examples/video_play_sdl.zig b/src/examples/video_play_sdl.zig index bc5691c..44e9fac 100644 --- a/src/examples/video_play_sdl.zig +++ b/src/examples/video_play_sdl.zig @@ -1,10 +1,11 @@ -// `pipewire/src/examples/video-play.c` translated to Zig. +//! `pipewire/src/examples/video-play.c` translated to Zig. const std = @import("std"); const log = std.log; const example_options = @import("example_options"); -// Normal code wouldn't do this, this +// Normal code wouldn't need this conditional, we're just demonstrating both the static library and +// the Zig module here. Prefer the Zig module when possible. 
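The build side of that toggle, condensed from the build.zig hunks above (`exe` stands in for the example executable; `use_zig_module`, `libpipewire_zig`, `libpipewire`, and `c` are values the build script in this patch already defines):

```zig
if (use_zig_module) {
    // Zig-module path: the wrapper module itself provides the pipewire API.
    exe.root_module.addImport("pipewire", libpipewire_zig);
} else {
    // Static-library path: link the C library and import its translated headers.
    exe.linkLibrary(libpipewire);
    exe.root_module.addImport("pipewire", c);
}
```

Either way the example imports the same `"pipewire"` name, which is what the conditional below relies on.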
const pw = if (example_options.use_zig_module) // Example of linking with the pipewire zig module @import("pipewire") diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play_zin.zig index bc5691c..058bc0b 100644 --- a/src/examples/video_play_zin.zig +++ b/src/examples/video_play_zin.zig @@ -1,10 +1,27 @@ -// `pipewire/src/examples/video-play.c` translated to Zig. +//! `pipewire/src/examples/video-play.c` translated to Zig and ported from SDL to Zin to demonstrate +//! video without a dynamic linker. This is not an efficient way to render, each pixel is rendered +//! as a rectangle, in a real application you'll want a better strategy for this. +const builtin = @import("builtin"); const std = @import("std"); +const zin = @import("zin"); +const win32 = zin.platform.win32; const log = std.log; const example_options = @import("example_options"); +const assert = std.debug.assert; -// Normal code wouldn't do this, this +const Allocator = std.mem.Allocator; + +pub const std_options: std.Options = .{ + .log_level = .info, + .log_scope_levels = &.{ + // XXX: these info logs should be changed to debug + .{ .scope = .x11, .level = .warn }, + }, +}; + +// Normal code wouldn't need this conditional, we're just demonstrating both the static library and +// the Zig module here. Prefer the Zig module when possible. const pw = if (example_options.use_zig_module) // Example of linking with the pipewire zig module @import("pipewire") @@ -15,164 +32,55 @@ else pub const c = @import("pipewire"); }; -const sdl = @cImport({ - @cDefine("WIDTH", std.fmt.comptimePrint("{}", .{width})); - @cDefine("HEIGHT", std.fmt.comptimePrint("{}", .{height})); - @cDefine("RATE", std.fmt.comptimePrint("{}", .{rate})); - @cInclude("SDL3/SDL.h"); -}); - -const width = 1920; -const height = 1080; -const rate = 30; -const max_buffers = 64; - -pub const std_options: std.Options = .{ - .log_level = .info, +pub const zin_config: zin.Config = .{ + .StaticWindowId = StaticWindowId, }; -pub fn main() !void { - // If we're linking with the Zig module, set up logging. - var logger = if (example_options.use_zig_module) pw.Logger.init() else {}; - if (example_options.use_zig_module) { - pw.c.pw_log_set(&logger); - pw.c.pw_log_set_level(pw.Logger.default_level); - } - - pw.c.pw_init(0, null); - defer pw.c.pw_deinit(); - - var data: Data = .{}; - - // Create a main loop - data.loop = pw.c.pw_main_loop_new(null).?; - defer pw.c.pw_main_loop_destroy(data.loop); - - _ = pw.c.pw_loop_add_signal(pw.c.pw_main_loop_get_loop(data.loop), pw.c.SIGINT, &doQuit, &data); - _ = pw.c.pw_loop_add_signal(pw.c.pw_main_loop_get_loop(data.loop), pw.c.SIGTERM, &doQuit, &data); - - // create a simple stream, the simple stream manages to core and remote objects for you if you - // don't need to deal with them - // - // If you plan to autoconnect your stream, you need to provide at least media, category and role - // properties - // - // Pass your events and a user_data pointer as the last arguments. This will inform you about - // the stream state. The most important event you need to listen to is the process event where - // you need to consume the data provided to you. 
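Condensed, the wiring that comment describes; a hypothetical excerpt mirroring the call the surrounding diff shows, with only the essential event filled in:

```zig
data.stream = pw.c.pw_stream_new_simple(
    pw.c.pw_main_loop_get_loop(data.loop),
    "video-play",
    props, // must carry media type/category/role for autoconnect
    &.{
        .version = pw.c.PW_VERSION_STREAM_EVENTS,
        .process = &onProcess, // consume the dequeued buffers here
    },
    &data, // user_data handed back to every callback
).?;
```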
- const props = pw.c.pw_properties_new( - pw.c.PW_KEY_MEDIA_TYPE, - "Video", - pw.c.PW_KEY_MEDIA_CATEGORY, - "Capture", - pw.c.PW_KEY_MEDIA_ROLE, - "Camera", - @as(?*anyopaque, null), - ).?; - - var args: std.process.ArgIterator = .init(); - _ = args.next(); - if (args.next()) |arg| { - _ = pw.c.pw_properties_set(props, pw.c.PW_KEY_TARGET_OBJECT, arg); - } - - data.stream = pw.c.pw_stream_new_simple( - pw.c.pw_main_loop_get_loop(data.loop), - "video-play", - props, - &.{ - .version = pw.c.PW_VERSION_STREAM_EVENTS, - .state_changed = &onStreamStateChanged, - .io_changed = &onStreamIoChanged, - .param_changed = &onStreamParamChanged, - .process = &onProcess, - }, - &data, - ).?; - defer pw.c.pw_stream_destroy(data.stream); - - if (!sdl.SDL_Init(sdl.SDL_INIT_VIDEO)) { - log.err("can't initialize SDL: {s}", .{sdl.SDL_GetError()}); - std.process.exit(1); - } - - if (!sdl.SDL_CreateWindowAndRenderer( - "Demo", - width, - height, - sdl.SDL_WINDOW_RESIZABLE, - &data.window, - &data.renderer, - )) { - log.err("can't create window: {s}", .{sdl.SDL_GetError()}); - std.process.exit(1); - } - defer { - if (data.texture) |texture| sdl.SDL_DestroyTexture(texture); - if (data.cursor) |cursor| sdl.SDL_DestroyTexture(cursor); - sdl.SDL_DestroyRenderer(data.renderer); - sdl.SDL_DestroyWindow(data.window); - } - - var buffer: [1024]u8 align(@alignOf(u32)) = undefined; - var b = std.mem.zeroInit(pw.c.spa_pod_builder, .{ - .data = &buffer, - .size = buffer.len, - }); - - // build the extra parameters to connect with. To connect, we can provide a list of supported - // formats. We use a builder that writes the param object to the stack. - var params_buf: [3]?*const pw.c.spa_pod = undefined; - var params: std.ArrayList(?*const pw.c.spa_pod) = .initBuffer(¶ms_buf); - buildFormat(&data, &b, ¶ms); - - { - var f: pw.c.spa_pod_frame = undefined; - // send a tag, input tags travel upstream - pw.c.spa_tag_build_start(&b, &f, pw.c.SPA_PARAM_Tag, pw.c.SPA_DIRECTION_INPUT); - const items: [1]pw.c.spa_dict_item = .{ - pw.c.SPA_DICT_ITEM_INIT("my-tag-other-key", "my-special-other-tag-value"), +const StaticWindowId = enum { + main, + pub fn getConfig(self: StaticWindowId) zin.WindowConfigData { + return switch (self) { + .main => .{ + .window_size_events = true, + .key_events = true, + .mouse_events = true, + .timers = .one, + .background = .{ .r = 49, .g = 49, .b = 49 }, + .dynamic_background = false, + .win32 = .{ .render = .{ .gdi = .{} } }, + .x11 = .{ .render_kind = .double_buffered }, + }, }; - pw.c.spa_tag_build_add_dict(&b, &pw.c.SPA_DICT_INIT(items, 1)); - params.appendBounded(pw.c.spa_tag_build_end(&b, &f)) catch @panic("OOB"); } +}; - // now connect the stream, we need a direction (input/output), - // an optional target node to connect to, some flags and parameters - // - const res = pw.c.pw_stream_connect( - data.stream, - pw.c.PW_DIRECTION_INPUT, - pw.c.PW_ID_ANY, - pw.c.PW_STREAM_FLAG_AUTOCONNECT | // try to automatically connect this stream - pw.c.PW_STREAM_FLAG_INACTIVE | // we will activate ourselves - pw.c.PW_STREAM_FLAG_MAP_BUFFERS, // mmap the buffer data for us - // extra parameters, see above - params.items.ptr, - @intCast(params.items.len), - ); - if (res < 0) { - log.err("can't connect: {s}", .{pw.c.spa_strerror(res)}); - std.process.exit(1); - } +pub const panic = zin.panic(.{ .title = "Hello Panic!" }); + +// XXX: ... 
+const global = struct { + var class_extra: ?zin.WindowClass = null; + var last_animation: ?std.time.Instant = null; + var text_position: f32 = 0; + var mouse_position: ?zin.XY = null; + var mouse_down: zin.MouseButtonsDown = .{ + .left = false, + .right = false, + .middle = false, + }; +}; - // /do things until we quit the mainloop - _ = pw.c.pw_main_loop_run(data.loop); -} +const timer_ms = 33; +const texel_width = 10; +const max_buffers = 64; -const Pixel = extern struct { - r: f32, - g: f32, - b: f32, - a: f32, +const FRect = struct { + x: f32 = 0, + y: f32 = 0, + w: f32 = 0, + h: f32 = 0, }; const Data = struct { - renderer: ?*sdl.SDL_Renderer = null, - window: ?*sdl.SDL_Window = null, - texture: ?*sdl.SDL_Texture = null, - cursor: ?*sdl.SDL_Texture = null, - loop: ?*pw.c.pw_main_loop = null, stream: ?*pw.c.pw_stream = null, @@ -182,191 +90,230 @@ const Data = struct { stride: i32 = 0, size: pw.c.spa_rectangle = .{}, - rect: sdl.SDL_FRect = .{}, - cursor_rect: sdl.SDL_FRect = .{}, + rect: FRect = .{}, is_yuv: bool = false, -}; - -fn doQuit(userdata: ?*anyopaque, signal_number: c_int) callconv(.c) void { - _ = signal_number; - const data: *Data = @ptrCast(@alignCast(userdata)); - _ = pw.c.pw_main_loop_quit(data.loop); -} -// our data processing function is in general: -// ``` -// struct pw_buffer *b; -// b = pw_stream_dequeue_buffer(stream); -// -// .. do stuff with buffer ... -// -// pw_stream_queue_buffer(stream, b); -// ``` -fn onProcess(userdata: ?*anyopaque) callconv(.c) void { - const data: *Data = @ptrCast(@alignCast(userdata)); - const stream = data.stream; + draw: ?*const zin.Draw(.{ .static = .main }) = null, +}; - var render_cursor = false; +var data: Data = .{}; - var maybe_buffer: ?*pw.c.pw_buffer = null; - while (true) { - const t = pw.c.pw_stream_dequeue_buffer(stream) orelse break; - if (maybe_buffer) |b| _ = pw.c.pw_stream_queue_buffer(stream, b); - maybe_buffer = t; +pub fn main() !void { + // If we're linking with the Zig module, set up logging. 
+ var logger = if (example_options.use_zig_module) pw.Logger.init() else {}; + if (example_options.use_zig_module) { + pw.c.pw_log_set(&logger); + pw.c.pw_log_set_level(pw.Logger.default_level); } - const b = maybe_buffer orelse { - log.warn("out of buffers", .{}); - return; - }; - defer _ = pw.c.pw_stream_queue_buffer(stream, b); - - const buf: *pw.c.spa_buffer = b.buffer; - log.debug("new buffer {*}", .{buf}); + // Initialize pipewire + pw.c.pw_init(0, null); + defer pw.c.pw_deinit(); - handleEvents(data); + // Create the pipewire loop + data.loop = pw.c.pw_main_loop_new(null).?; + defer pw.c.pw_main_loop_destroy(data.loop); - const sdata = buf.datas[0].data orelse return; + // Create the pipewire stream + { + const props = pw.c.pw_properties_new( + pw.c.PW_KEY_MEDIA_TYPE, + "Video", + pw.c.PW_KEY_MEDIA_CATEGORY, + "Capture", + pw.c.PW_KEY_MEDIA_ROLE, + "Camera", + @as(?*anyopaque, null), + ).?; + + var args: std.process.ArgIterator = .init(); + _ = args.next(); + if (args.next()) |arg| { + check(pw.c.pw_properties_set(props, pw.c.PW_KEY_TARGET_OBJECT, arg)); + } - const maybe_h: ?*pw.c.spa_meta_header = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_Header, @sizeOf(pw.c.spa_meta_header)))); - if (maybe_h) |h| { - const now = pw.c.pw_stream_get_nsec(stream); - log.debug("now:{} pts:{} diff:{}", .{ now, h.pts, now - @as(u64, @intCast(h.pts)) }); + data.stream = pw.c.pw_stream_new_simple( + pw.c.pw_main_loop_get_loop(data.loop), + "video-play", + props, + &.{ + .version = pw.c.PW_VERSION_STREAM_EVENTS, + .state_changed = &onStreamStateChanged, + .io_changed = &onStreamIoChanged, + .param_changed = &onStreamParamChanged, + .process = &onProcess, + }, + null, + ).?; } + defer pw.c.pw_stream_destroy(data.stream); - // get the videocrop metadata if any - const maybe_mc: ?*pw.c.spa_meta_region = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_VideoCrop, @sizeOf(pw.c.spa_meta_region)))); - if (maybe_mc) |mc| { - if (pw.c.spa_meta_region_is_valid(mc)) { - data.rect.x = @floatFromInt(mc.region.position.x); - data.rect.y = @floatFromInt(mc.region.position.y); - data.rect.w = @floatFromInt(mc.region.size.width); - data.rect.h = @floatFromInt(mc.region.size.height); - } - } - // get cursor metadata - const maybe_mcs: ?*pw.c.spa_meta_cursor = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_Cursor, @sizeOf(pw.c.spa_meta_cursor)))); - if (maybe_mcs) |mcs| { - if (pw.c.spa_meta_cursor_is_valid(mcs)) { - data.cursor_rect.x = @floatFromInt(mcs.position.x); - data.cursor_rect.y = @floatFromInt(mcs.position.y); - - const mb: *pw.c.spa_meta_bitmap = @ptrFromInt(@intFromPtr(mcs) + mcs.bitmap_offset); - data.cursor_rect.w = @floatFromInt(mb.size.width); - data.cursor_rect.h = @floatFromInt(mb.size.height); - - if (data.cursor == null) { - data.cursor = sdl.SDL_CreateTexture( - data.renderer, - idToSdlFormat(mb.format), - sdl.SDL_TEXTUREACCESS_STREAMING, - @intCast(mb.size.width), - @intCast(mb.size.height), - ); - _ = sdl.SDL_SetTextureBlendMode(data.cursor, sdl.SDL_BLENDMODE_BLEND); + // Connect to the stream + { + var builder_buf: [1024]u8 align(@alignOf(u32)) = undefined; + var b = std.mem.zeroInit(pw.c.spa_pod_builder, .{ + .data = &builder_buf, + .size = builder_buf.len, + }); + + var params_buf: [2]?*const pw.c.spa_pod = undefined; + var params: std.ArrayList(?*const pw.c.spa_pod) = .initBuffer(¶ms_buf); + + // Tell pipewire which formats we support + { + + // make an object of type SPA_TYPE_OBJECT_Format and id SPA_PARAM_EnumFormat. 
The object type is + // important because it defines the properties that are acceptable. The id gives more context + // about what the object is meant to contain. In this case we enumerate supported formats. + var format_frame: pw.c.spa_pod_frame = undefined; + check(pw.c.spa_pod_builder_push_object( + &b, + &format_frame, + pw.c.SPA_TYPE_OBJECT_Format, + pw.c.SPA_PARAM_EnumFormat, + )); + // add media type and media subtype properties + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_mediaType, 0)); + check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_MEDIA_TYPE_video)); + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_mediaSubtype, 0)); + check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_MEDIA_SUBTYPE_raw)); + + // build an enumeration of formats + { + var choice_frame: pw.c.spa_pod_frame = undefined; + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_format, 0)); + check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Enum, 0)); + // We only support one format + check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_UNKNOWN)); + check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_RGBx)); + // XXX: ... oh, we actually need to do all the conversions ourselves? that's really annoying. + // there's supposed to be a way to get it to convert for us i think? maybe we need more modules + // for video conversion or something. or is it doing it? idk + check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_YUY2)); + assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); } - var cdata: [*c]u8 = undefined; - var cstride: c_int = undefined; - if (!sdl.SDL_LockTexture(data.cursor, null, &cdata, &cstride)) { - log.err("Couldn't lock cursor texture: {s}", .{sdl.SDL_GetError()}); - return; - } - defer sdl.SDL_UnlockTexture(data.cursor); + // add size and framerate ranges - // copy the cursor bitmap into the texture - var src: [*]u8 = @ptrFromInt(@intFromPtr(mb) + mb.offset); - var dst = cdata; - const ostride: usize = @intCast(@min(cstride, mb.stride)); + { + var choice_frame: pw.c.spa_pod_frame = undefined; + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_size, 0)); + check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Range, 0)); + check(pw.c.spa_pod_builder_rectangle(&b, 1920 / texel_width, 1080 / texel_width)); + check(pw.c.spa_pod_builder_rectangle(&b, 1, 1)); + check(pw.c.spa_pod_builder_rectangle(&b, 1920 / texel_width, 1080 / texel_width)); + assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); + } - for (0..mb.size.height) |_| { - @memcpy(dst[0..ostride], src[0..ostride]); - dst += @intCast(cstride); - src += @intCast(mb.stride); + { + var choice_frame: pw.c.spa_pod_frame = undefined; + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_framerate, 0)); + check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Range, 0)); + check(pw.c.spa_pod_builder_fraction(&b, 1000, timer_ms)); + check(pw.c.spa_pod_builder_fraction(&b, 0, 1)); + check(pw.c.spa_pod_builder_fraction(&b, 30, 1)); + assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); } - render_cursor = true; - } - } + const format: *pw.c.spa_pod = @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &format_frame).?)); - // copy video image in texture - if (data.is_yuv) { - var datas: [4]?[*]u8 = undefined; - const sstride = data.stride; - if (buf.n_datas == 1) { - _ = sdl.SDL_UpdateTexture(data.texture, null, sdata, sstride); - } else { - datas[0] = @ptrCast(sdata); - datas[1] = @ptrCast(buf.datas[1].data); - datas[2] = 
@ptrCast(buf.datas[2].data); - _ = sdl.SDL_UpdateYUVTexture( - data.texture, - null, - datas[0], - sstride, - datas[1], - @divExact(sstride, 2), - datas[2], - @divExact(sstride, 2), - ); + log.info("supported formats:", .{}); + check(pw.c.spa_debug_format(2, null, format)); + params.appendBounded(format) catch @panic("OOB"); } - } else { - var dstride: c_int = undefined; - var ddata: ?*anyopaque = undefined; - if (!sdl.SDL_LockTexture(data.texture, null, &ddata, &dstride)) { - log.err("Couldn't lock texture: {s}", .{sdl.SDL_GetError()}); + + // Request the webcam feed + { + var format_frame: pw.c.spa_pod_frame = undefined; + check(pw.c.spa_pod_builder_push_object(&b, &format_frame, pw.c.SPA_TYPE_OBJECT_Format, pw.c.SPA_PARAM_EnumFormat)); + + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_mediaType, 0)); + check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_MEDIA_TYPE_video)); + + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_mediaSubtype, 0)); + check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_MEDIA_SUBTYPE_dsp)); + + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_format, 0)); + check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_DSP_F32)); + + const format: *const pw.c.spa_pod = @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &format_frame))); + check(pw.c.spa_debug_format(2, null, format)); + params.appendBounded(format) catch @panic("OOB"); } - defer sdl.SDL_UnlockTexture(data.texture); - - var sstride: u32 = @intCast(buf.datas[0].chunk.*.stride); - if (sstride == 0) sstride = buf.datas[0].chunk.*.size / data.size.height; - const ostride = @min(sstride, dstride); - - var src: [*]u8 = @ptrCast(sdata); - var dst: [*]u8 = @ptrCast(ddata); - - if (data.format.media_subtype == pw.c.SPA_MEDIA_SUBTYPE_dsp) { - for (0..data.size.height) |_| { - const pixel: [*]Pixel = @ptrCast(@alignCast(src)); - for (0..data.size.width) |j| { - dst[j * 4 + 0] = @intFromFloat(std.math.clamp(pixel[j].r * 255.0, 0, 255)); - dst[j * 4 + 1] = @intFromFloat(std.math.clamp(pixel[j].g * 255.0, 0, 255)); - dst[j * 4 + 2] = @intFromFloat(std.math.clamp(pixel[j].b * 255.0, 0, 255)); - dst[j * 4 + 3] = @intFromFloat(std.math.clamp(pixel[j].a * 255.0, 0, 255)); - } - src += sstride; - dst += @intCast(dstride); - } - } else { - for (0..data.size.height) |_| { - @memcpy(dst[0..@intCast(ostride)], src[0..@intCast(ostride)]); - src += sstride; - dst += @intCast(dstride); - } + + // now connect the stream, we need a direction (input/output), + // an optional target node to connect to, some flags and parameters + // + const res = pw.c.pw_stream_connect( + data.stream, + pw.c.PW_DIRECTION_INPUT, + pw.c.PW_ID_ANY, + pw.c.PW_STREAM_FLAG_AUTOCONNECT | // try to automatically connect this stream + pw.c.PW_STREAM_FLAG_INACTIVE | // we will activate ourselves + pw.c.PW_STREAM_FLAG_MAP_BUFFERS, // mmap the buffer data for us + // extra parameters, see above + params.items.ptr, + @intCast(params.items.len), + ); + if (res < 0) { + log.err("can't connect: {s}", .{pw.c.spa_strerror(res)}); + std.process.exit(1); } } - _ = sdl.SDL_RenderClear(data.renderer); - // now render the video and then the cursor if any - _ = sdl.SDL_RenderTexture(data.renderer, data.texture, &data.rect, null); - if (render_cursor) _ = sdl.SDL_RenderTexture( - data.renderer, - data.cursor, - null, - &data.cursor_rect, - ); - _ = sdl.SDL_RenderPresent(data.renderer); + // Setup Zin + try zin.processInit(.{}); + { + var err: zin.X11ConnectError = undefined; + zin.x11Connect(&err) catch std.debug.panic("X11 connect failed: {f}", .{err}); + } + defer 
zin.x11Disconnect(); + + zin.staticWindow(.main).registerClass(.{ + .callback = callback, + .win32_name = zin.L("VideoPlay"), + .macos_view = "VideoPlay", + }, .{ + .win32_icon_large = .none, + .win32_icon_small = .none, + }); + defer zin.staticWindow(.main).unregisterClass(); + + try zin.staticWindow(.main).create(.{ + .title = "Video Play", + .size = .{ .client_points = .{ .x = 1920, .y = 1080 } }, + .pos = null, + }); + defer zin.staticWindow(.main).destroy(); + zin.staticWindow(.main).show(); + zin.staticWindow(.main).startTimer({}, timer_ms); + + try zin.mainLoop(); } -fn handleEvents(data: *Data) void { - var event: sdl.SDL_Event = undefined; - while (sdl.SDL_PollEvent(&event)) { - switch (event.type) { - sdl.SDL_EVENT_QUIT => { - _ = pw.c.pw_main_loop_quit(data.loop); - }, - else => {}, - } +fn callback(cb: zin.Callback(.{ .static = .main })) void { + switch (cb) { + .close => zin.quitMainLoop(), + .window_size => {}, + .draw => |d| { + data.draw = &d; + defer data.draw = null; + + // Early out if we're redrawing too fast (e.g. during a resize) + { + const now = std.time.Instant.now() catch @panic("?"); + const elapsed_ns = if (global.last_animation) |l| now.since(l) else 0; + global.last_animation = now; + if (elapsed_ns / std.time.ns_per_ms < timer_ms / 2) return; + } + + // XXX: zin update on refresh rate? + // XXX: should be main loop? + // Try to render a frame with Pipewire. + if (pw.c.pw_loop_iterate(pw.c.pw_main_loop_get_loop(data.loop), 0) < 0) return; + }, + .timer => zin.staticWindow(.main).invalidate(), + else => {}, } } @@ -378,19 +325,19 @@ fn onStreamStateChanged( ) callconv(.c) void { _ = old; _ = err; - const data: *Data = @ptrCast(@alignCast(userdata)); + _ = userdata; log.info("stream state: \"{s}\"", .{pw.c.pw_stream_state_as_string(state)}); switch (state) { - pw.c.PW_STREAM_STATE_UNCONNECTED => _ = pw.c.pw_main_loop_quit(data.loop), + pw.c.PW_STREAM_STATE_UNCONNECTED => check(pw.c.pw_main_loop_quit(data.loop)), // because we started inactive, activate ourselves now - pw.c.PW_STREAM_STATE_PAUSED => _ = pw.c.pw_stream_set_active(data.stream, true), + pw.c.PW_STREAM_STATE_PAUSED => check(pw.c.pw_stream_set_active(data.stream, true)), else => {}, } } fn onStreamIoChanged(userdata: ?*anyopaque, id: u32, area: ?*anyopaque, size: u32) callconv(.c) void { _ = size; - const data: *Data = @ptrCast(@alignCast(userdata)); + _ = userdata; if (id == pw.c.SPA_IO_Position) { data.position = @ptrCast(@alignCast(area)); } @@ -407,7 +354,7 @@ fn onStreamIoChanged(userdata: ?*anyopaque, id: u32, area: ?*anyopaque, size: u3 // will control the buffer memory allocation. This includes the metadata // that we would like on our buffer, the size, alignment, etp. 
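The new code routes every libpipewire/SPA return value through a `check` helper whose definition lies outside this excerpt; a plausible shape, stated as an assumption rather than the patch's actual code (these calls return negative errno-style codes on failure):

```zig
fn check(res: c_int) void {
    if (res < 0) std.debug.panic("pipewire call failed: {s}", .{pw.c.spa_strerror(res)});
}
```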
fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.spa_pod) callconv(.c) void { - const data: *Data = @ptrCast(@alignCast(userdata)); + _ = userdata; const stream = data.stream; var params_buffer: [1024]u8 align(@alignOf(u32)) = undefined; var b: pw.c.spa_pod_builder = .{ @@ -425,7 +372,7 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp if (param != null and id == pw.c.SPA_PARAM_Latency) { var info: pw.c.spa_latency_info = undefined; if (pw.c.spa_latency_parse(param, &info) >= 0) { - log.info("got latency: {}", .{@divTrunc((info.min_ns + info.max_ns), 2)}); + log.info("got latency: {}ns", .{@divTrunc((info.min_ns + info.max_ns), 2)}); } return; } @@ -433,7 +380,7 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp if (param == null or id != pw.c.SPA_PARAM_Format) return; log.info("got format:", .{}); - _ = pw.c.spa_debug_format(2, null, param); + check(pw.c.spa_debug_format(2, null, param)); if (pw.c.spa_format_parse(param, &data.format.media_type, &data.format.media_subtype) < 0) { return; @@ -441,41 +388,38 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp if (data.format.media_type != pw.c.SPA_MEDIA_TYPE_video) return; - const sdl_format, const mult: i32 = switch (data.format.media_subtype) { + // XXX: do we even need to check the format? + const format, const mult: i32 = switch (data.format.media_subtype) { pw.c.SPA_MEDIA_SUBTYPE_raw => b: { // call a helper function to parse the format for us. _ = pw.c.spa_format_video_raw_parse(param, &data.format.info.raw); data.size = pw.c.SPA_RECTANGLE(data.format.info.raw.size.width, data.format.info.raw.size.height); - break :b .{ idToSdlFormat(data.format.info.raw.format), 1 }; + break :b .{ data.format.info.raw.format, 1 }; }, pw.c.SPA_MEDIA_SUBTYPE_dsp => b: { - _ = pw.c.spa_format_video_dsp_parse(param, &data.format.info.dsp); + check(pw.c.spa_format_video_dsp_parse(param, &data.format.info.dsp)); if (data.format.info.dsp.format != pw.c.SPA_VIDEO_FORMAT_DSP_F32) return; data.size = pw.c.SPA_RECTANGLE(data.position.?.video.size.width, data.position.?.video.size.height); - break :b .{ sdl.SDL_PIXELFORMAT_RGBA32, 4 }; + // XXX: is this correct? + break :b .{ pw.c.SPA_VIDEO_FORMAT_DSP_F32, 4 }; }, - else => .{ sdl.SDL_PIXELFORMAT_UNKNOWN, 0 }, + else => .{ pw.c.SPA_VIDEO_FORMAT_UNKNOWN, 0 }, }; - if (sdl_format == sdl.SDL_PIXELFORMAT_UNKNOWN) { - _ = pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "unknown pixel format"); + if (format == pw.c.SPA_VIDEO_FORMAT_UNKNOWN) { + check(pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "unknown pixel format")); return; } if (data.size.width == 0 or data.size.height == 0) { - _ = pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "invalid size"); + check(pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "invalid size")); return; } - data.texture = sdl.SDL_CreateTexture( - data.renderer, - sdl_format, - sdl.SDL_TEXTUREACCESS_STREAMING, - @intCast(data.size.width), - @intCast(data.size.height), - ); - var d: ?*anyopaque = null; - const size: i32, const blocks: i32 = switch (sdl_format) { - sdl.SDL_PIXELFORMAT_YV12, sdl.SDL_PIXELFORMAT_IYUV => b: { + // if (data.texture) |texture| data.gpa.free(texture); + // data.texture = data.gpa.alloc(u8, data.size.width * data.size.height * 3) catch @panic("OOM"); + // XXX: don't we always know the format? 
+ const size: i32, const blocks: i32 = switch (format) { + pw.c.SPA_VIDEO_FORMAT_YV12, pw.c.SPA_VIDEO_FORMAT_I420 => b: { data.stride = @intCast(data.size.width); data.is_yuv = true; break :b .{ @@ -483,7 +427,7 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp 3, }; }, - sdl.SDL_PIXELFORMAT_YUY2 => b: { + pw.c.SPA_VIDEO_FORMAT_YUY2 => b: { data.is_yuv = true; data.stride = @intCast(data.size.width * 2); break :b .{ @@ -492,12 +436,7 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp }; }, else => b: { - if (!sdl.SDL_LockTexture(data.texture, null, &d, &data.stride)) { - log.err("Couldn't lock texture: {s}", .{sdl.SDL_GetError()}); - data.stride = @intCast(data.size.width * 2); - } else { - sdl.SDL_UnlockTexture(data.texture); - } + data.stride = @intCast(data.size.width * 2); break :b .{ data.stride * @as(i32, @intCast(data.size.height)), 1, @@ -510,291 +449,241 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp data.rect.w = @floatFromInt(data.size.width); data.rect.h = @floatFromInt(data.size.height); + var params_buf: [3]?*const pw.c.spa_pod = undefined; + var params: std.ArrayList(?*const pw.c.spa_pod) = .initBuffer(¶ms_buf); + // a SPA_TYPE_OBJECT_ParamBuffers object defines the acceptable size, // number, stride etc of the buffers - var params_buf: [5]?*const pw.c.spa_pod = undefined; - var params: std.ArrayList(?*const pw.c.spa_pod) = .initBuffer(¶ms_buf); - var f: pw.c.spa_pod_frame = undefined; - - _ = pw.c.spa_pod_builder_push_object( - &b, - &f, - pw.c.SPA_TYPE_OBJECT_ParamBuffers, - pw.c.SPA_PARAM_Buffers, - ); - _ = pw.c.spa_pod_builder_add( - &b, - - pw.c.SPA_PARAM_BUFFERS_buffers, - "?ri", - @as(c_int, 3), - @as(c_int, 8), - @as(c_int, 2), - @as(c_int, max_buffers), - - pw.c.SPA_PARAM_BUFFERS_blocks, - "i", - blocks, - - pw.c.SPA_PARAM_BUFFERS_size, - "i", - size * mult, - - pw.c.SPA_PARAM_BUFFERS_stride, - "i", - data.stride * mult, - - pw.c.SPA_PARAM_BUFFERS_dataType, - "?fi", - @as(c_int, 1), - @as(c_int, 1 << pw.c.SPA_DATA_MemPtr), - - @as(c_int, 0), - ); - params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB"); + { + var param_buffers_frame: pw.c.spa_pod_frame = undefined; + check(pw.c.spa_pod_builder_push_object( + &b, + ¶m_buffers_frame, + pw.c.SPA_TYPE_OBJECT_ParamBuffers, + pw.c.SPA_PARAM_Buffers, + )); + + { + var choice_frame: pw.c.spa_pod_frame = undefined; + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_BUFFERS_buffers, 0)); + check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Range, 0)); + check(pw.c.spa_pod_builder_int(&b, 8)); + check(pw.c.spa_pod_builder_int(&b, 2)); + check(pw.c.spa_pod_builder_int(&b, max_buffers)); + assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); + } - // a header metadata with timing information - _ = pw.c.spa_pod_builder_push_object( - &b, - &f, - pw.c.SPA_TYPE_OBJECT_ParamMeta, - pw.c.SPA_PARAM_Meta, - ); - _ = pw.c.spa_pod_builder_add( - &b, - - pw.c.SPA_PARAM_META_type, - "I", - pw.c.SPA_META_Header, - - pw.c.SPA_PARAM_META_size, - "i", - @as(usize, @sizeOf(pw.c.spa_meta_header)), - - @as(c_int, 0), - ); - params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB"); + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_BUFFERS_blocks, 0)); + check(pw.c.spa_pod_builder_int(&b, blocks)); - // video cropping information - _ = pw.c.spa_pod_builder_push_object( - &b, - &f, - pw.c.SPA_TYPE_OBJECT_ParamMeta, - 
pw.c.SPA_PARAM_Meta,
-    );
-    _ = pw.c.spa_pod_builder_add(
-        &b,
-
-        pw.c.SPA_PARAM_META_type,
-        "I",
-        pw.c.SPA_META_VideoCrop,
-
-        pw.c.SPA_PARAM_META_size,
-        "i",
-        @as(usize, @sizeOf(pw.c.spa_meta_region)),
-
-        @as(c_int, 0),
-    );
-    params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB");
-
-    // cursor information
-    _ = pw.c.spa_pod_builder_push_object(
-        &b,
-        &f,
-        pw.c.SPA_TYPE_OBJECT_ParamMeta,
-        pw.c.SPA_PARAM_Meta,
-    );
-    _ = pw.c.spa_pod_builder_add(
-        &b,
-
-        pw.c.SPA_PARAM_META_type,
-        "I",
-        pw.c.SPA_META_Cursor,
-
-        pw.c.SPA_PARAM_META_size,
-        "?ri",
-        @as(c_int, 3),
-        cursorMetaSize(64, 64),
-        cursorMetaSize(1, 1),
-        cursorMetaSize(256, 256),
-
-        @as(c_int, 0),
-    );
-    params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB");
-
-    // we are done
-    _ = pw.c.pw_stream_update_params(stream, params.items.ptr, @intCast(params.items.len));
-}
-
-fn cursorMetaSize(w: usize, h: usize) usize {
-    return @sizeOf(pw.c.spa_meta_cursor) + @sizeOf(pw.c.spa_meta_bitmap) + w * h * 4;
-}
-
-fn buildFormat(data: *Data, b: *pw.c.spa_pod_builder, params: *std.ArrayList(?*const pw.c.spa_pod)) void {
-    {
-        const format = sdlBuildFormats(data.renderer.?, b);
-        log.info("supported SDL formats:", .{});
-        _ = pw.c.spa_debug_format(2, null, format);
-        params.appendBounded(format) catch @panic("OOB");
+        check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_BUFFERS_size, 0));
+        check(pw.c.spa_pod_builder_int(&b, size * mult));
+
+        check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_BUFFERS_stride, 0));
+        check(pw.c.spa_pod_builder_int(&b, data.stride * mult));
+
+        check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_BUFFERS_stride, 0));
+        check(pw.c.spa_pod_builder_int(&b, data.stride * mult));
+
+        {
+            var choice_frame: pw.c.spa_pod_frame = undefined;
+            check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_BUFFERS_dataType, 0));
+            check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Flags, 0));
+            check(pw.c.spa_pod_builder_int(&b, 1 << pw.c.SPA_DATA_MemPtr));
+            assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null);
+        }
+
+        // XXX: remove sdl example once done since it's pretty out of date at this point, remove from deps too
+        params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &param_buffers_frame)))) catch @panic("OOB");
     }
 
+    // a header metadata with timing information
     {
-        var f: pw.c.spa_pod_frame = undefined;
-        _ = pw.c.spa_pod_builder_push_object(b, &f, pw.c.SPA_TYPE_OBJECT_Format, pw.c.SPA_PARAM_EnumFormat);
-        _ = pw.c.spa_pod_builder_add(
-            b,
-            pw.c.SPA_FORMAT_mediaType,
-            "I",
-            pw.c.SPA_MEDIA_TYPE_video,
+        var timing_frame: pw.c.spa_pod_frame = undefined;
+        check(pw.c.spa_pod_builder_push_object(
+            &b,
+            &timing_frame,
+            pw.c.SPA_TYPE_OBJECT_ParamMeta,
+            pw.c.SPA_PARAM_Meta,
+        ));
+        check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_META_type, 0));
+        check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_META_Header));
+
+        check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_META_size, 0));
+        check(pw.c.spa_pod_builder_int(&b, @sizeOf(pw.c.spa_meta_header)));
+
+
params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &timing_frame)))) catch @panic("OOB"); + } - pw.c.SPA_FORMAT_mediaSubtype, - "I", - pw.c.SPA_MEDIA_SUBTYPE_dsp, + // video cropping information + { + var crop_frame: pw.c.spa_pod_frame = undefined; + check(pw.c.spa_pod_builder_push_object( + &b, + &crop_frame, + pw.c.SPA_TYPE_OBJECT_ParamMeta, + pw.c.SPA_PARAM_Meta, + )); - pw.c.SPA_FORMAT_VIDEO_format, + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_META_type, 0)); + check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_META_VideoCrop)); - "I", - pw.c.SPA_VIDEO_FORMAT_DSP_F32, + check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_META_size, 0)); + check(pw.c.spa_pod_builder_id(&b, @sizeOf(pw.c.spa_meta_region))); - @as(c_int, 0), - ); - const format: *const pw.c.spa_pod = @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(b, &f))); - _ = pw.c.spa_debug_format(2, null, format); - params.appendBounded(format) catch @panic("OOB"); + params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &crop_frame)))) catch @panic("OOB"); } + + // we are done + check(pw.c.pw_stream_update_params(stream, params.items.ptr, @intCast(params.items.len))); } -const FormatPair = struct { - format: u32, - id: u32, -}; +// our data processing function is in general: +// ``` +// struct pw_buffer *b; +// b = pw_stream_dequeue_buffer(stream); +// +// .. do stuff with buffer ... +// +// pw_stream_queue_buffer(stream, b); +// ``` +fn onProcess(userdata: ?*anyopaque) callconv(.c) void { + _ = userdata; + const stream = data.stream; -const sdl_video_formats = [_]FormatPair{ - .{ .format = sdl.SDL_PIXELFORMAT_UNKNOWN, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX1LSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_UNKNOWN, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX1LSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX1MSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX4LSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX4MSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX8, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGB332, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_XRGB4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_XRGB1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_XBGR1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_ARGB4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGBA4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_ABGR4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_BGRA4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_ARGB1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGBA5551, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_ABGR1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_BGRA5551, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGB565, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_BGR565, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGB24, .id = pw.c.SPA_VIDEO_FORMAT_BGR }, - .{ .format = sdl.SDL_PIXELFORMAT_XRGB8888, 
.id = pw.c.SPA_VIDEO_FORMAT_BGR }, - .{ .format = sdl.SDL_PIXELFORMAT_RGBX8888, .id = pw.c.SPA_VIDEO_FORMAT_xBGR }, - .{ .format = sdl.SDL_PIXELFORMAT_BGR24, .id = pw.c.SPA_VIDEO_FORMAT_RGB }, - .{ .format = sdl.SDL_PIXELFORMAT_XBGR8888, .id = pw.c.SPA_VIDEO_FORMAT_RGB }, - .{ .format = sdl.SDL_PIXELFORMAT_BGRX8888, .id = pw.c.SPA_VIDEO_FORMAT_xRGB }, - .{ .format = sdl.SDL_PIXELFORMAT_ARGB2101010, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGBA8888, .id = pw.c.SPA_VIDEO_FORMAT_ABGR }, - .{ .format = sdl.SDL_PIXELFORMAT_ARGB8888, .id = pw.c.SPA_VIDEO_FORMAT_BGRA }, - .{ .format = sdl.SDL_PIXELFORMAT_BGRA8888, .id = pw.c.SPA_VIDEO_FORMAT_ARGB }, - .{ .format = sdl.SDL_PIXELFORMAT_ABGR8888, .id = pw.c.SPA_VIDEO_FORMAT_RGBA }, - .{ .format = sdl.SDL_PIXELFORMAT_YV12, .id = pw.c.SPA_VIDEO_FORMAT_YV12 }, - .{ .format = sdl.SDL_PIXELFORMAT_IYUV, .id = pw.c.SPA_VIDEO_FORMAT_I420 }, - .{ .format = sdl.SDL_PIXELFORMAT_YUY2, .id = pw.c.SPA_VIDEO_FORMAT_YUY2 }, - .{ .format = sdl.SDL_PIXELFORMAT_UYVY, .id = pw.c.SPA_VIDEO_FORMAT_UYVY }, - .{ .format = sdl.SDL_PIXELFORMAT_YVYU, .id = pw.c.SPA_VIDEO_FORMAT_YVYU }, - .{ .format = sdl.SDL_PIXELFORMAT_NV12, .id = pw.c.SPA_VIDEO_FORMAT_NV12 }, - .{ .format = sdl.SDL_PIXELFORMAT_NV21, .id = pw.c.SPA_VIDEO_FORMAT_NV21 }, -}; + // var render_cursor = false; + + var maybe_buffer: ?*pw.c.pw_buffer = null; + while (true) { + const t = pw.c.pw_stream_dequeue_buffer(stream) orelse break; + if (maybe_buffer) |b| check(pw.c.pw_stream_queue_buffer(stream, b)); + maybe_buffer = t; + } + const b = maybe_buffer orelse { + log.warn("out of buffers", .{}); + return; + }; + defer check(pw.c.pw_stream_queue_buffer(stream, b)); + + const buf: *pw.c.spa_buffer = b.buffer; + + log.debug("new buffer {*}", .{buf}); -fn sdlFormatToId(format: u32) u32 { - for (sdl_video_formats) |f| { - if (f.format == format) { - return f.id; + const sdata = buf.datas[0].data orelse return; + + const maybe_h: ?*pw.c.spa_meta_header = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_Header, @sizeOf(pw.c.spa_meta_header)))); + if (maybe_h) |h| { + const now = pw.c.pw_stream_get_nsec(stream); + log.debug("now:{} pts:{} diff:{}", .{ now, h.pts, now - @as(u64, @intCast(h.pts)) }); + } + + // get the videocrop metadata if any + const maybe_mc: ?*pw.c.spa_meta_region = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_VideoCrop, @sizeOf(pw.c.spa_meta_region)))); + if (maybe_mc) |mc| { + if (pw.c.spa_meta_region_is_valid(mc)) { + data.rect.x = @floatFromInt(mc.region.position.x); + data.rect.y = @floatFromInt(mc.region.position.y); + data.rect.w = @floatFromInt(mc.region.size.width); + data.rect.h = @floatFromInt(mc.region.size.height); } } - return pw.c.SPA_VIDEO_FORMAT_UNKNOWN; -} -fn idToSdlFormat(id: u32) u32 { - for (sdl_video_formats) |f| { - if (f.id == id) { - return f.format; + // copy video image in texture + if (data.is_yuv) { + // var datas: [4]?[*]u8 = undefined; + const sstride = data.stride; + const udata: [*]u8 = @ptrCast(sdata); // XXX: ... 
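+        // n_datas == 1 means the whole frame arrived packed in one data block;
+        // planar formats may instead deliver one buf.datas[] entry per plane
+        // (that case is left unimplemented below).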
+ if (buf.n_datas == 1) { + const draw = data.draw.?; + draw.clear(); + const size = zin.staticWindow(.main).getClientSize(); + const rect_size = zin.scale(i32, texel_width, draw.getDpiScale().x); + var x: i32, var y: i32 = .{ 0, 0 }; + // XXX: /2 to avoid reading oob for now + while (y < @divTrunc(@min(size.y, data.size.height), 2)) : (y += rect_size) { + while (x < @divTrunc(@min(size.x, data.size.width), 2)) : (x += rect_size) { + const i: usize = @intCast((y * sstride + x) * 3); + var color: zin.Rgb8 = .{ + .r = udata[i], + .g = udata[i + 1], + .b = udata[i + 2], + }; + // XXX: workaround for zin bug where black renders as bright color? + if (std.meta.eql(color, .black)) { + color.r = 1; + color.g = 1; + color.b = 1; + } + draw.rect(.ltwh(x, y, rect_size, rect_size), color); + } + x = 0; + } + } else { + @panic("unimplemented"); + // datas[0] = @ptrCast(sdata); + // datas[1] = @ptrCast(buf.datas[1].data); + // datas[2] = @ptrCast(buf.datas[2].data); + // _ = sdl.SDL_UpdateYUVTexture( + // data.texture, + // null, + // datas[0], + // sstride, + // datas[1], + // @divExact(sstride, 2), + // datas[2], + // @divExact(sstride, 2), + // ); } + } else { + log.info("is not yuv", .{}); + // var dstride: c_int = undefined; + // var ddata: ?*anyopaque = undefined; + // if (!sdl.SDL_LockTexture(data.texture, null, &ddata, &dstride)) { + // log.err("Couldn't lock texture: {s}", .{sdl.SDL_GetError()}); + // } + // defer sdl.SDL_UnlockTexture(data.texture); + + // var sstride: u32 = @intCast(buf.datas[0].chunk.*.stride); + // if (sstride == 0) sstride = buf.datas[0].chunk.*.size / data.size.height; + // const ostride = @min(sstride, dstride); + + // var src: [*]u8 = @ptrCast(sdata); + // var dst: [*]u8 = @ptrCast(ddata); + + // if (data.format.media_subtype == pw.c.SPA_MEDIA_SUBTYPE_dsp) { + // for (0..data.size.height) |_| { + // const pixel: [*]Pixel = @ptrCast(@alignCast(src)); + // for (0..data.size.width) |j| { + // dst[j * 4 + 0] = @intFromFloat(std.math.clamp(pixel[j].r * 255.0, 0, 255)); + // dst[j * 4 + 1] = @intFromFloat(std.math.clamp(pixel[j].g * 255.0, 0, 255)); + // dst[j * 4 + 2] = @intFromFloat(std.math.clamp(pixel[j].b * 255.0, 0, 255)); + // dst[j * 4 + 3] = @intFromFloat(std.math.clamp(pixel[j].a * 255.0, 0, 255)); + // } + // src += sstride; + // dst += @intCast(dstride); + // } + // } else { + // for (0..data.size.height) |_| { + // @memcpy(dst[0..@intCast(ostride)], src[0..@intCast(ostride)]); + // src += sstride; + // dst += @intCast(dstride); + // } + // } } - return sdl.SDL_PIXELFORMAT_UNKNOWN; } -fn sdlBuildFormats(renderer: *sdl.SDL_Renderer, b: *pw.c.spa_pod_builder) *pw.c.spa_pod { - var f: [2]pw.c.spa_pod_frame = undefined; - - // make an object of type SPA_TYPE_OBJECT_Format and id SPA_PARAM_EnumFormat. The object type is - // important because it defines the properties that are acceptable. The id gives more context - // about what the object is meant to contain. In this case we enumerate supported formats. 
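 // For reference, the same EnumFormat object in the check()-style builder calls
 // used by this example would look roughly like this (a sketch only; in an Enum
 // choice the first value is the default and the rest are the alternatives):
 //
 //     var f: pw.c.spa_pod_frame = undefined;
 //     check(pw.c.spa_pod_builder_push_object(&b, &f, pw.c.SPA_TYPE_OBJECT_Format, pw.c.SPA_PARAM_EnumFormat));
 //     check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_mediaType, 0));
 //     check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_MEDIA_TYPE_video));
 //     check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_mediaSubtype, 0));
 //     check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_MEDIA_SUBTYPE_raw));
 //     var choice: pw.c.spa_pod_frame = undefined;
 //     check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_format, 0));
 //     check(pw.c.spa_pod_builder_push_choice(&b, &choice, pw.c.SPA_CHOICE_Enum, 0));
 //     check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_YUY2)); // default
 //     check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_RGBx)); // alternative(s)
 //     assert(pw.c.spa_pod_builder_pop(&b, &choice) != null);
 //     const format: *const pw.c.spa_pod = @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)));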
- _ = pw.c.spa_pod_builder_push_object(b, &f[0], pw.c.SPA_TYPE_OBJECT_Format, pw.c.SPA_PARAM_EnumFormat); - // add media type and media subtype properties - _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_FORMAT_mediaType, 0); - _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_MEDIA_TYPE_video); - _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_FORMAT_mediaSubtype, 0); - _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_MEDIA_SUBTYPE_raw); - - // build an enumeration of formats - _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_FORMAT_VIDEO_format, 0); - _ = pw.c.spa_pod_builder_push_choice(b, &f[1], pw.c.SPA_CHOICE_Enum, 0); - - const props: sdl.SDL_PropertiesID = sdl.SDL_GetRendererProperties(renderer); - - const texture_formats: [*]sdl.SDL_PixelFormat = @ptrCast(@alignCast(sdl.SDL_GetPointerProperty( - props, - sdl.SDL_PROP_RENDERER_TEXTURE_FORMATS_POINTER, - null, - ))); - - // first the formats supported by the textures - var i: u32 = 0; - var ci: u32 = 0; - while (texture_formats[i] != sdl.SDL_PIXELFORMAT_UNKNOWN) : (i += 1) { - const id: u32 = sdlFormatToId(texture_formats[i]); - if (id == 0) continue; - if (ci == 0) _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_VIDEO_FORMAT_UNKNOWN); - ci += 1; - _ = pw.c.spa_pod_builder_id(b, id); - } - // then all the other ones SDL can convert from/to - for (sdl_video_formats) |format| { - const id: u32 = format.id; - if (id != pw.c.SPA_VIDEO_FORMAT_UNKNOWN) { - _ = pw.c.spa_pod_builder_id(b, id); - } +fn check(res: c_int) void { + if (res != 0) { + std.debug.panic("pipewire call failed: {s}", .{pw.c.spa_strerror(res)}); } - _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_VIDEO_FORMAT_RGBA_F32); - _ = pw.c.spa_pod_builder_pop(b, &f[1]); - // add size and framerate ranges - const max_texture_size: u32 = @intCast(sdl.SDL_GetNumberProperty( - props, - sdl.SDL_PROP_RENDERER_MAX_TEXTURE_SIZE_NUMBER, - 0, - )); - _ = pw.c.spa_pod_builder_add( - b, - pw.c.SPA_FORMAT_VIDEO_size, - pw.c.SPA_POD_CHOICE_RANGE_Rectangle( - &pw.c.SPA_RECTANGLE(width, height), - &pw.c.SPA_RECTANGLE(1, 1), - &pw.c.SPA_RECTANGLE(max_texture_size, max_texture_size), - ), - pw.c.SPA_FORMAT_VIDEO_framerate, - pw.c.SPA_POD_CHOICE_RANGE_Fraction( - &pw.c.SPA_FRACTION(rate, 1), - &pw.c.SPA_FRACTION(0, 1), - &pw.c.SPA_FRACTION(30, 1), - ), - @as(c_int, 0), - ); - return @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(b, &f[0]))); } From 1860379f922821c0d3734d411ec8371c1bb601dd Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Thu, 4 Dec 2025 11:54:40 -0800 Subject: [PATCH 03/13] WIP adds more formats --- src/examples/video_play_zin.zig | 37 ++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play_zin.zig index 058bc0b..ce143f4 100644 --- a/src/examples/video_play_zin.zig +++ b/src/examples/video_play_zin.zig @@ -98,6 +98,29 @@ const Data = struct { var data: Data = .{}; +const formats: []const pw.c.spa_video_format = &.{ + // XXX: ... this vs xrgb?? + pw.c.SPA_VIDEO_FORMAT_RGBx, + // XXX: ... 
+ pw.c.SPA_VIDEO_FORMAT_BGR, + pw.c.SPA_VIDEO_FORMAT_BGR, + pw.c.SPA_VIDEO_FORMAT_xBGR, + pw.c.SPA_VIDEO_FORMAT_RGB, + pw.c.SPA_VIDEO_FORMAT_RGB, + pw.c.SPA_VIDEO_FORMAT_xRGB, + pw.c.SPA_VIDEO_FORMAT_ABGR, + pw.c.SPA_VIDEO_FORMAT_BGRA, + pw.c.SPA_VIDEO_FORMAT_ARGB, + pw.c.SPA_VIDEO_FORMAT_RGBA, + pw.c.SPA_VIDEO_FORMAT_YV12, + pw.c.SPA_VIDEO_FORMAT_I420, + pw.c.SPA_VIDEO_FORMAT_YUY2, + pw.c.SPA_VIDEO_FORMAT_UYVY, + pw.c.SPA_VIDEO_FORMAT_YVYU, + pw.c.SPA_VIDEO_FORMAT_NV12, + pw.c.SPA_VIDEO_FORMAT_NV21, +}; + pub fn main() !void { // If we're linking with the Zig module, set up logging. var logger = if (example_options.use_zig_module) pw.Logger.init() else {}; @@ -178,6 +201,9 @@ pub fn main() !void { check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_mediaSubtype, 0)); check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_MEDIA_SUBTYPE_raw)); + // XXX: ... oh, we actually need to do all the conversions ourselves? that's really annoying. + // there's supposed to be a way to get it to convert for us i think? maybe we need more modules + // for video conversion or something. or is it doing it? idk // build an enumeration of formats { var choice_frame: pw.c.spa_pod_frame = undefined; @@ -185,11 +211,12 @@ pub fn main() !void { check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Enum, 0)); // We only support one format check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_UNKNOWN)); - check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_RGBx)); - // XXX: ... oh, we actually need to do all the conversions ourselves? that's really annoying. - // there's supposed to be a way to get it to convert for us i think? maybe we need more modules - // for video conversion or something. or is it doing it? idk - check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_YUY2)); + for (formats) |format| { + check(pw.c.spa_pod_builder_id(&b, format)); + } + // check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_RGBx)); + // check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_YUY2)); + assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); } From 29955d02de886b68ddb8ea92899d781f95705dc8 Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Thu, 4 Dec 2025 11:55:33 -0800 Subject: [PATCH 04/13] Removes dups --- src/examples/video_play_zin.zig | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play_zin.zig index ce143f4..9edbe4c 100644 --- a/src/examples/video_play_zin.zig +++ b/src/examples/video_play_zin.zig @@ -103,10 +103,8 @@ const formats: []const pw.c.spa_video_format = &.{ pw.c.SPA_VIDEO_FORMAT_RGBx, // XXX: ... pw.c.SPA_VIDEO_FORMAT_BGR, - pw.c.SPA_VIDEO_FORMAT_BGR, pw.c.SPA_VIDEO_FORMAT_xBGR, pw.c.SPA_VIDEO_FORMAT_RGB, - pw.c.SPA_VIDEO_FORMAT_RGB, pw.c.SPA_VIDEO_FORMAT_xRGB, pw.c.SPA_VIDEO_FORMAT_ABGR, pw.c.SPA_VIDEO_FORMAT_BGRA, From a681aef491510766fc713c5524ebeae32b1b4c97 Mon Sep 17 00:00:00 2001 From: Jonathan Marler Date: Thu, 4 Dec 2025 13:34:20 -0700 Subject: [PATCH 05/13] video-play-zin: support more video formats --- src/examples/video_play_zin.zig | 104 +++++++++++++++++++++++++++----- 1 file changed, 88 insertions(+), 16 deletions(-) diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play_zin.zig index 9edbe4c..d10dc0f 100644 --- a/src/examples/video_play_zin.zig +++ b/src/examples/video_play_zin.zig @@ -99,24 +99,94 @@ const Data = struct { var data: Data = .{}; const formats: []const pw.c.spa_video_format = &.{ - // XXX: ... this vs xrgb?? 
+ pw.c.SPA_VIDEO_FORMAT_ENCODED, + pw.c.SPA_VIDEO_FORMAT_I420, + pw.c.SPA_VIDEO_FORMAT_YV12, + pw.c.SPA_VIDEO_FORMAT_YUY2, + pw.c.SPA_VIDEO_FORMAT_UYVY, + pw.c.SPA_VIDEO_FORMAT_AYUV, pw.c.SPA_VIDEO_FORMAT_RGBx, - // XXX: ... - pw.c.SPA_VIDEO_FORMAT_BGR, - pw.c.SPA_VIDEO_FORMAT_xBGR, - pw.c.SPA_VIDEO_FORMAT_RGB, + pw.c.SPA_VIDEO_FORMAT_BGRx, pw.c.SPA_VIDEO_FORMAT_xRGB, - pw.c.SPA_VIDEO_FORMAT_ABGR, + pw.c.SPA_VIDEO_FORMAT_xBGR, + pw.c.SPA_VIDEO_FORMAT_RGBA, pw.c.SPA_VIDEO_FORMAT_BGRA, pw.c.SPA_VIDEO_FORMAT_ARGB, - pw.c.SPA_VIDEO_FORMAT_RGBA, - pw.c.SPA_VIDEO_FORMAT_YV12, - pw.c.SPA_VIDEO_FORMAT_I420, - pw.c.SPA_VIDEO_FORMAT_YUY2, - pw.c.SPA_VIDEO_FORMAT_UYVY, + pw.c.SPA_VIDEO_FORMAT_ABGR, + pw.c.SPA_VIDEO_FORMAT_RGB, + pw.c.SPA_VIDEO_FORMAT_BGR, + pw.c.SPA_VIDEO_FORMAT_Y41B, + pw.c.SPA_VIDEO_FORMAT_Y42B, pw.c.SPA_VIDEO_FORMAT_YVYU, + pw.c.SPA_VIDEO_FORMAT_Y444, + pw.c.SPA_VIDEO_FORMAT_v210, + pw.c.SPA_VIDEO_FORMAT_v216, pw.c.SPA_VIDEO_FORMAT_NV12, pw.c.SPA_VIDEO_FORMAT_NV21, + pw.c.SPA_VIDEO_FORMAT_GRAY8, + pw.c.SPA_VIDEO_FORMAT_GRAY16_BE, + pw.c.SPA_VIDEO_FORMAT_GRAY16_LE, + pw.c.SPA_VIDEO_FORMAT_v308, + pw.c.SPA_VIDEO_FORMAT_RGB16, + pw.c.SPA_VIDEO_FORMAT_BGR16, + pw.c.SPA_VIDEO_FORMAT_RGB15, + pw.c.SPA_VIDEO_FORMAT_BGR15, + pw.c.SPA_VIDEO_FORMAT_UYVP, + pw.c.SPA_VIDEO_FORMAT_A420, + pw.c.SPA_VIDEO_FORMAT_RGB8P, + pw.c.SPA_VIDEO_FORMAT_YUV9, + pw.c.SPA_VIDEO_FORMAT_YVU9, + pw.c.SPA_VIDEO_FORMAT_IYU1, + pw.c.SPA_VIDEO_FORMAT_ARGB64, + pw.c.SPA_VIDEO_FORMAT_AYUV64, + pw.c.SPA_VIDEO_FORMAT_r210, + pw.c.SPA_VIDEO_FORMAT_I420_10BE, + pw.c.SPA_VIDEO_FORMAT_I420_10LE, + pw.c.SPA_VIDEO_FORMAT_I422_10BE, + pw.c.SPA_VIDEO_FORMAT_I422_10LE, + pw.c.SPA_VIDEO_FORMAT_Y444_10BE, + pw.c.SPA_VIDEO_FORMAT_Y444_10LE, + pw.c.SPA_VIDEO_FORMAT_GBR, + pw.c.SPA_VIDEO_FORMAT_GBR_10BE, + pw.c.SPA_VIDEO_FORMAT_GBR_10LE, + pw.c.SPA_VIDEO_FORMAT_NV16, + pw.c.SPA_VIDEO_FORMAT_NV24, + pw.c.SPA_VIDEO_FORMAT_NV12_64Z32, + pw.c.SPA_VIDEO_FORMAT_A420_10BE, + pw.c.SPA_VIDEO_FORMAT_A420_10LE, + pw.c.SPA_VIDEO_FORMAT_A422_10BE, + pw.c.SPA_VIDEO_FORMAT_A422_10LE, + pw.c.SPA_VIDEO_FORMAT_A444_10BE, + pw.c.SPA_VIDEO_FORMAT_A444_10LE, + pw.c.SPA_VIDEO_FORMAT_NV61, + pw.c.SPA_VIDEO_FORMAT_P010_10BE, + pw.c.SPA_VIDEO_FORMAT_P010_10LE, + pw.c.SPA_VIDEO_FORMAT_IYU2, + pw.c.SPA_VIDEO_FORMAT_VYUY, + pw.c.SPA_VIDEO_FORMAT_GBRA, + pw.c.SPA_VIDEO_FORMAT_GBRA_10BE, + pw.c.SPA_VIDEO_FORMAT_GBRA_10LE, + pw.c.SPA_VIDEO_FORMAT_GBR_12BE, + pw.c.SPA_VIDEO_FORMAT_GBR_12LE, + pw.c.SPA_VIDEO_FORMAT_GBRA_12BE, + pw.c.SPA_VIDEO_FORMAT_GBRA_12LE, + pw.c.SPA_VIDEO_FORMAT_I420_12BE, + pw.c.SPA_VIDEO_FORMAT_I420_12LE, + pw.c.SPA_VIDEO_FORMAT_I422_12BE, + pw.c.SPA_VIDEO_FORMAT_I422_12LE, + pw.c.SPA_VIDEO_FORMAT_Y444_12BE, + pw.c.SPA_VIDEO_FORMAT_Y444_12LE, + pw.c.SPA_VIDEO_FORMAT_RGBA_F16, + pw.c.SPA_VIDEO_FORMAT_RGBA_F32, + pw.c.SPA_VIDEO_FORMAT_xRGB_210LE, + pw.c.SPA_VIDEO_FORMAT_xBGR_210LE, + pw.c.SPA_VIDEO_FORMAT_RGBx_102LE, + pw.c.SPA_VIDEO_FORMAT_BGRx_102LE, + pw.c.SPA_VIDEO_FORMAT_ARGB_210LE, + pw.c.SPA_VIDEO_FORMAT_ABGR_210LE, + pw.c.SPA_VIDEO_FORMAT_RGBA_102LE, + pw.c.SPA_VIDEO_FORMAT_BGRA_102LE, + pw.c.SPA_VIDEO_FORMAT_DSP_F32, }; pub fn main() !void { @@ -207,7 +277,6 @@ pub fn main() !void { var choice_frame: pw.c.spa_pod_frame = undefined; check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_format, 0)); check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Enum, 0)); - // We only support one format check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_UNKNOWN)); for (formats) |format| { 
check(pw.c.spa_pod_builder_id(&b, format)); @@ -226,7 +295,7 @@ pub fn main() !void { check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Range, 0)); check(pw.c.spa_pod_builder_rectangle(&b, 1920 / texel_width, 1080 / texel_width)); check(pw.c.spa_pod_builder_rectangle(&b, 1, 1)); - check(pw.c.spa_pod_builder_rectangle(&b, 1920 / texel_width, 1080 / texel_width)); + check(pw.c.spa_pod_builder_rectangle(&b, 999999, 999999)); assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); } @@ -236,7 +305,7 @@ pub fn main() !void { check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Range, 0)); check(pw.c.spa_pod_builder_fraction(&b, 1000, timer_ms)); check(pw.c.spa_pod_builder_fraction(&b, 0, 1)); - check(pw.c.spa_pod_builder_fraction(&b, 30, 1)); + check(pw.c.spa_pod_builder_fraction(&b, 120, 1)); assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); } @@ -349,9 +418,12 @@ fn onStreamStateChanged( err: [*c]const u8, ) callconv(.c) void { _ = old; - _ = err; _ = userdata; - log.info("stream state: \"{s}\"", .{pw.c.pw_stream_state_as_string(state)}); + if (err != null) { + log.err("stream state: \"{s}\" (error={s})", .{ pw.c.pw_stream_state_as_string(state), err }); + } else { + log.info("stream state: \"{s}\"", .{pw.c.pw_stream_state_as_string(state)}); + } switch (state) { pw.c.PW_STREAM_STATE_UNCONNECTED => check(pw.c.pw_main_loop_quit(data.loop)), // because we started inactive, activate ourselves now From 1895128238f398d4de33d4c24a8fe15369456b32 Mon Sep 17 00:00:00 2001 From: Jonathan Marler Date: Thu, 4 Dec 2025 15:45:37 -0700 Subject: [PATCH 06/13] better zin/pipewire main loop --- build.zig.zon | 4 +- src/examples/video_play_zin.zig | 68 +++++++++++++++++++-------------- 2 files changed, 41 insertions(+), 31 deletions(-) diff --git a/build.zig.zon b/build.zig.zon index b170231..7f5698f 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -21,8 +21,8 @@ .hash = "sdl-0.0.0-i4QD0btgqAABajEXrQnyZr1xVsk7LM48w2nBmuQ5gdfr", }, .zin = .{ - .url = "git+https://github.com/marler8997/zin#f59fa3511ad6b666d3cb028605dd7b2983841b8d", - .hash = "zin-0.0.0-W7QDx4NXAwA67HYz0-tKcNsXV1LgVMNTEHxh6JuHG_c5", + .url = "git+https://github.com/marler8997/zin#62706713b7089b4220d8e1eb49f8d776138a9058", + .hash = "zin-0.0.0-W7QDx9BaAwC-H1uS9Cz68oMn9uh5fWuVis5b-eqhCeeq", }, }, .paths = .{ diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play_zin.zig index d10dc0f..17e034f 100644 --- a/src/examples/video_play_zin.zig +++ b/src/examples/video_play_zin.zig @@ -58,15 +58,7 @@ pub const panic = zin.panic(.{ .title = "Hello Panic!" }); // XXX: ... 
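 // Rather than blocking in pw_main_loop_run(), the zin timer callback drains
 // the PipeWire loop with non-blocking pw_loop_iterate() calls (see
 // pipewireFlush below), so a single thread services both event loops.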
 const global = struct {
-    var class_extra: ?zin.WindowClass = null;
     var last_animation: ?std.time.Instant = null;
-    var text_position: f32 = 0;
-    var mouse_position: ?zin.XY = null;
-    var mouse_down: zin.MouseButtonsDown = .{
-        .left = false,
-        .right = false,
-        .middle = false,
-    };
 };
 
 const timer_ms = 33;
@@ -93,7 +85,7 @@ const Data = struct {
     rect: FRect = .{},
     is_yuv: bool = false,
 
-    draw: ?*const zin.Draw(.{ .static = .main }) = null,
+    current_buffer: ?*pw.c.pw_buffer = null,
 };
 
 var data: Data = .{};
@@ -375,13 +367,15 @@ pub fn main() !void {
 
     try zin.staticWindow(.main).create(.{
         .title = "Video Play",
-        .size = .{ .client_points = .{ .x = 1920, .y = 1080 } },
+        .size = .{ .client_points = .{ .x = 300, .y = 300 } },
         .pos = null,
     });
     defer zin.staticWindow(.main).destroy();
     zin.staticWindow(.main).show();
 
-    zin.staticWindow(.main).startTimer({}, timer_ms);
+    // TODO: calculate the timer based on the framerate
+    zin.staticWindow(.main).startTimerNanos({}, std.time.ns_per_ms * 16);
+    callback(.{ .timer = {} });
     try zin.mainLoop();
 }
 
 fn callback(cb: zin.Callback(.{ .static = .main })) void {
     switch (cb) {
         .close => zin.quitMainLoop(),
         .window_size => {},
         .draw => |d| {
-            data.draw = &d;
-            defer data.draw = null;
-
             // Early out if we're redrawing too fast (e.g. during a resize)
             {
                 const now = std.time.Instant.now() catch @panic("?");
@@ -401,16 +392,28 @@ fn callback(cb: zin.Callback(.{ .static = .main })) void {
                 if (elapsed_ns / std.time.ns_per_ms < timer_ms / 2) return;
             }
 
-            // XXX: zin update on refresh rate?
-            // XXX: should be main loop?
-            // Try to render a frame with Pipewire.
-            if (pw.c.pw_loop_iterate(pw.c.pw_main_loop_get_loop(data.loop), 0) < 0) return;
+            render(d);
+        },
+        .timer => {
+            pipewireFlush();
+            zin.staticWindow(.main).invalidate();
         },
-        .timer => zin.staticWindow(.main).invalidate(),
         else => {},
     }
 }
 
+fn pipewireFlush() void {
+    while (true) {
+        const result = pw.c.pw_loop_iterate(pw.c.pw_main_loop_get_loop(data.loop), 0);
+        if (result == 0) break;
+        if (result < 0) {
+            std.log.err("pipewire error {}", .{result});
+            zin.quitMainLoop();
+            break;
+        }
+    }
+}
+
 fn onStreamStateChanged(
     userdata: ?*anyopaque,
     old: pw.c.pw_stream_state,
@@ -658,26 +661,35 @@ fn onProcess(userdata: ?*anyopaque) callconv(.c) void {
     _ = userdata;
     const stream = data.stream;
 
-    // var render_cursor = false;
-
     var maybe_buffer: ?*pw.c.pw_buffer = null;
     while (true) {
         const t = pw.c.pw_stream_dequeue_buffer(stream) orelse break;
         if (maybe_buffer) |b| check(pw.c.pw_stream_queue_buffer(stream, b));
         maybe_buffer = t;
     }
-    const b = maybe_buffer orelse {
-        log.warn("out of buffers", .{});
-        return;
-    };
-    defer check(pw.c.pw_stream_queue_buffer(stream, b));
+    if (maybe_buffer) |b| {
+        if (data.current_buffer) |current| {
+            check(pw.c.pw_stream_queue_buffer(stream, current));
+        }
+        data.current_buffer = b;
+    }
+}
 
-    const buf: *pw.c.spa_buffer = b.buffer;
+fn render(draw: zin.Draw(.{ .static = .main })) void {
+    draw.clear();
+
+    const client_size = zin.staticWindow(.main).getClientSize();
+
+    const buf: *pw.c.spa_buffer = (data.current_buffer orelse {
+        draw.text("waiting for first frame...", 0, @divTrunc(client_size.y, 2), .white);
+        return;
+    }).buffer;
 
     log.debug("new buffer {*}", .{buf});
 
     const sdata = buf.datas[0].data orelse return;
 
+    const stream = data.stream;
     const maybe_h: ?*pw.c.spa_meta_header = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_Header, @sizeOf(pw.c.spa_meta_header))));
     if (maybe_h) |h| {
         const now = pw.c.pw_stream_get_nsec(stream);
@@ -701,8 +713,6
@@ fn onProcess(userdata: ?*anyopaque) callconv(.c) void { const sstride = data.stride; const udata: [*]u8 = @ptrCast(sdata); // XXX: ... if (buf.n_datas == 1) { - const draw = data.draw.?; - draw.clear(); const size = zin.staticWindow(.main).getClientSize(); const rect_size = zin.scale(i32, texel_width, draw.getDpiScale().x); var x: i32, var y: i32 = .{ 0, 0 }; From 61de4b4b25ee8b80ea97b58f855a059971a06fc0 Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Mon, 8 Dec 2025 15:29:25 -0800 Subject: [PATCH 07/13] Fixes connecting/disconnecting --- src/examples/video_play_zin.zig | 69 ++++++++++++++++++++------------- 1 file changed, 43 insertions(+), 26 deletions(-) diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play_zin.zig index 17e034f..6d7f966 100644 --- a/src/examples/video_play_zin.zig +++ b/src/examples/video_play_zin.zig @@ -14,10 +14,6 @@ const Allocator = std.mem.Allocator; pub const std_options: std.Options = .{ .log_level = .info, - .log_scope_levels = &.{ - // XXX: these info logs should be changed to debug - .{ .scope = .x11, .level = .warn }, - }, }; // Normal code wouldn't need this conditional, we're just demonstrating both the static library and @@ -58,10 +54,12 @@ pub const panic = zin.panic(.{ .title = "Hello Panic!" }); // XXX: ... const global = struct { - var last_animation: ?std.time.Instant = null; + const default_timer_period_ns = 16 * std.time.ns_per_ms; + + var last_render: ?std.time.Instant = null; + var timer_period_ns: u64 = 0; }; -const timer_ms = 33; const texel_width = 10; const max_buffers = 64; @@ -295,7 +293,7 @@ pub fn main() !void { var choice_frame: pw.c.spa_pod_frame = undefined; check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_framerate, 0)); check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Range, 0)); - check(pw.c.spa_pod_builder_fraction(&b, 1000, timer_ms)); + check(pw.c.spa_pod_builder_fraction(&b, 60, 1)); check(pw.c.spa_pod_builder_fraction(&b, 0, 1)); check(pw.c.spa_pod_builder_fraction(&b, 120, 1)); assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); @@ -373,8 +371,7 @@ pub fn main() !void { defer zin.staticWindow(.main).destroy(); zin.staticWindow(.main).show(); - // TODO: calcualte the timer based on the framerate - zin.staticWindow(.main).startTimerNanos({}, std.time.ns_per_ms * 16); + startTimerNanos(global.default_timer_period_ns); callback(.{ .timer = {} }); try zin.mainLoop(); } @@ -383,17 +380,7 @@ fn callback(cb: zin.Callback(.{ .static = .main })) void { switch (cb) { .close => zin.quitMainLoop(), .window_size => {}, - .draw => |d| { - // Early out if we're redrawing too fast (e.g. 
during a resize) - { - const now = std.time.Instant.now() catch @panic("?"); - const elapsed_ns = if (global.last_animation) |l| now.since(l) else 0; - global.last_animation = now; - if (elapsed_ns / std.time.ns_per_ms < timer_ms / 2) return; - } - - render(d); - }, + .draw => |d| render(d), .timer => { pipewireFlush(); zin.staticWindow(.main).invalidate(); @@ -422,16 +409,21 @@ fn onStreamStateChanged( ) callconv(.c) void { _ = old; _ = userdata; + + data.current_buffer = null; + if (err != null) { log.err("stream state: \"{s}\" (error={s})", .{ pw.c.pw_stream_state_as_string(state), err }); } else { log.info("stream state: \"{s}\"", .{pw.c.pw_stream_state_as_string(state)}); } - switch (state) { - pw.c.PW_STREAM_STATE_UNCONNECTED => check(pw.c.pw_main_loop_quit(data.loop)), - // because we started inactive, activate ourselves now - pw.c.PW_STREAM_STATE_PAUSED => check(pw.c.pw_stream_set_active(data.stream, true)), - else => {}, + + if (state == pw.c.PW_STREAM_STATE_PAUSED) { + check(pw.c.pw_stream_set_active(data.stream, true)); + } + + if (state != pw.c.PW_STREAM_STATE_STREAMING) { + startTimerNanos(global.default_timer_period_ns); } } @@ -454,6 +446,7 @@ fn onStreamIoChanged(userdata: ?*anyopaque, id: u32, area: ?*anyopaque, size: u3 // will control the buffer memory allocation. This includes the metadata // that we would like on our buffer, the size, alignment, etp. fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.spa_pod) callconv(.c) void { + log.info("stream param changed", .{}); _ = userdata; const stream = data.stream; var params_buffer: [1024]u8 align(@alignOf(u32)) = undefined; @@ -482,6 +475,15 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp log.info("got format:", .{}); check(pw.c.spa_debug_format(2, null, param)); + var parsed: pw.c.spa_video_info_raw = undefined; + if (pw.c.spa_format_video_raw_parse(param, &parsed) < 0) { + std.debug.panic("failed to parse format", .{}); + } + const num: f32 = @floatFromInt(parsed.framerate.num); + const denom: f32 = @floatFromInt(parsed.framerate.denom); + const hz = denom / num; + startTimerNanos(@intFromFloat(hz * std.time.ns_per_s)); + if (pw.c.spa_format_parse(param, &data.format.media_type, &data.format.media_subtype) < 0) { return; } @@ -676,12 +678,22 @@ fn onProcess(userdata: ?*anyopaque) callconv(.c) void { } fn render(draw: zin.Draw(.{ .static = .main })) void { + // Early out if we're redrawing too fast (e.g. 
during a resize) + { + const now = std.time.Instant.now() catch |err| @panic(@errorName(err)); + if (global.last_render) |last_render| { + const elapsed_ns = now.since(last_render); + if (elapsed_ns < global.timer_period_ns / 2) return; + } + global.last_render = now; + } + draw.clear(); const client_size = zin.staticWindow(.main).getClientSize(); const buf: *pw.c.spa_buffer = (data.current_buffer orelse { - draw.text("waiting for first frame...", 0, @divTrunc(client_size.y, 2), .white); + draw.text("waiting for webcam...", @divTrunc(client_size.x, 2) - 50, @divTrunc(client_size.y, 2), .white); return; }).buffer; @@ -794,3 +806,8 @@ fn check(res: c_int) void { std.debug.panic("pipewire call failed: {s}", .{pw.c.spa_strerror(res)}); } } + +fn startTimerNanos(ns: u64) void { + global.timer_period_ns = ns; + zin.staticWindow(.main).startTimerNanos({}, ns); +} From deba80d2198e511a37ba9ee6289adb8798e73b20 Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Mon, 8 Dec 2025 15:54:15 -0800 Subject: [PATCH 08/13] Shows message if format unsupported --- src/examples/video_play_zin.zig | 240 ++++++++++++-------------------- 1 file changed, 89 insertions(+), 151 deletions(-) diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play_zin.zig index 6d7f966..bdcd539 100644 --- a/src/examples/video_play_zin.zig +++ b/src/examples/video_play_zin.zig @@ -52,12 +52,25 @@ const StaticWindowId = enum { pub const panic = zin.panic(.{ .title = "Hello Panic!" }); -// XXX: ... const global = struct { const default_timer_period_ns = 16 * std.time.ns_per_ms; var last_render: ?std.time.Instant = null; var timer_period_ns: u64 = 0; + + var loop: ?*pw.c.pw_main_loop = null; + var stream: ?*pw.c.pw_stream = null; + + var position: ?*pw.c.spa_io_position = null; + + var format: pw.c.spa_video_info = .{}; + var stride: i32 = 0; + var size: pw.c.spa_rectangle = .{}; + + var rect: FRect = .{}; + var is_yuv: bool = false; + + var current_buffer: ?*pw.c.pw_buffer = null; }; const texel_width = 10; @@ -70,24 +83,6 @@ const FRect = struct { h: f32 = 0, }; -const Data = struct { - loop: ?*pw.c.pw_main_loop = null, - stream: ?*pw.c.pw_stream = null, - - position: ?*pw.c.spa_io_position = null, - - format: pw.c.spa_video_info = .{}, - stride: i32 = 0, - size: pw.c.spa_rectangle = .{}, - - rect: FRect = .{}, - is_yuv: bool = false, - - current_buffer: ?*pw.c.pw_buffer = null, -}; - -var data: Data = .{}; - const formats: []const pw.c.spa_video_format = &.{ pw.c.SPA_VIDEO_FORMAT_ENCODED, pw.c.SPA_VIDEO_FORMAT_I420, @@ -192,8 +187,8 @@ pub fn main() !void { defer pw.c.pw_deinit(); // Create the pipewire loop - data.loop = pw.c.pw_main_loop_new(null).?; - defer pw.c.pw_main_loop_destroy(data.loop); + global.loop = pw.c.pw_main_loop_new(null).?; + defer pw.c.pw_main_loop_destroy(global.loop); // Create the pipewire stream { @@ -213,8 +208,8 @@ pub fn main() !void { check(pw.c.pw_properties_set(props, pw.c.PW_KEY_TARGET_OBJECT, arg)); } - data.stream = pw.c.pw_stream_new_simple( - pw.c.pw_main_loop_get_loop(data.loop), + global.stream = pw.c.pw_stream_new_simple( + pw.c.pw_main_loop_get_loop(global.loop), "video-play", props, &.{ @@ -227,7 +222,7 @@ pub fn main() !void { null, ).?; } - defer pw.c.pw_stream_destroy(data.stream); + defer pw.c.pw_stream_destroy(global.stream); // Connect to the stream { @@ -259,21 +254,15 @@ pub fn main() !void { check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_mediaSubtype, 0)); check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_MEDIA_SUBTYPE_raw)); - // XXX: ... 
oh, we actually need to do all the conversions ourselves? that's really annoying. - // there's supposed to be a way to get it to convert for us i think? maybe we need more modules - // for video conversion or something. or is it doing it? idk // build an enumeration of formats { var choice_frame: pw.c.spa_pod_frame = undefined; check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_format, 0)); check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Enum, 0)); - check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_UNKNOWN)); + check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_YUY2)); for (formats) |format| { check(pw.c.spa_pod_builder_id(&b, format)); } - // check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_RGBx)); - // check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_YUY2)); - assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); } @@ -329,7 +318,7 @@ pub fn main() !void { // an optional target node to connect to, some flags and parameters // const res = pw.c.pw_stream_connect( - data.stream, + global.stream, pw.c.PW_DIRECTION_INPUT, pw.c.PW_ID_ANY, pw.c.PW_STREAM_FLAG_AUTOCONNECT | // try to automatically connect this stream @@ -391,7 +380,7 @@ fn callback(cb: zin.Callback(.{ .static = .main })) void { fn pipewireFlush() void { while (true) { - const result = pw.c.pw_loop_iterate(pw.c.pw_main_loop_get_loop(data.loop), 0); + const result = pw.c.pw_loop_iterate(pw.c.pw_main_loop_get_loop(global.loop), 0); if (result == 0) break; if (result < 0) { std.log.err("pipewire error {}", .{result}); @@ -410,7 +399,7 @@ fn onStreamStateChanged( _ = old; _ = userdata; - data.current_buffer = null; + global.current_buffer = null; if (err != null) { log.err("stream state: \"{s}\" (error={s})", .{ pw.c.pw_stream_state_as_string(state), err }); @@ -419,7 +408,7 @@ fn onStreamStateChanged( } if (state == pw.c.PW_STREAM_STATE_PAUSED) { - check(pw.c.pw_stream_set_active(data.stream, true)); + check(pw.c.pw_stream_set_active(global.stream, true)); } if (state != pw.c.PW_STREAM_STATE_STREAMING) { @@ -431,7 +420,7 @@ fn onStreamIoChanged(userdata: ?*anyopaque, id: u32, area: ?*anyopaque, size: u3 _ = size; _ = userdata; if (id == pw.c.SPA_IO_Position) { - data.position = @ptrCast(@alignCast(area)); + global.position = @ptrCast(@alignCast(area)); } } @@ -448,7 +437,7 @@ fn onStreamIoChanged(userdata: ?*anyopaque, id: u32, area: ?*anyopaque, size: u3 fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.spa_pod) callconv(.c) void { log.info("stream param changed", .{}); _ = userdata; - const stream = data.stream; + const stream = global.stream; var params_buffer: [1024]u8 align(@alignOf(u32)) = undefined; var b: pw.c.spa_pod_builder = .{ .data = ¶ms_buffer, @@ -484,72 +473,67 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp const hz = denom / num; startTimerNanos(@intFromFloat(hz * std.time.ns_per_s)); - if (pw.c.spa_format_parse(param, &data.format.media_type, &data.format.media_subtype) < 0) { + if (pw.c.spa_format_parse(param, &global.format.media_type, &global.format.media_subtype) < 0) { return; } - if (data.format.media_type != pw.c.SPA_MEDIA_TYPE_video) return; + if (global.format.media_type != pw.c.SPA_MEDIA_TYPE_video) return; - // XXX: do we even need to check the format? 
- const format, const mult: i32 = switch (data.format.media_subtype) { + const format, const mult: i32 = switch (global.format.media_subtype) { pw.c.SPA_MEDIA_SUBTYPE_raw => b: { // call a helper function to parse the format for us. - _ = pw.c.spa_format_video_raw_parse(param, &data.format.info.raw); - data.size = pw.c.SPA_RECTANGLE(data.format.info.raw.size.width, data.format.info.raw.size.height); - break :b .{ data.format.info.raw.format, 1 }; + _ = pw.c.spa_format_video_raw_parse(param, &global.format.info.raw); + global.size = pw.c.SPA_RECTANGLE(global.format.info.raw.size.width, global.format.info.raw.size.height); + break :b .{ global.format.info.raw.format, 1 }; }, pw.c.SPA_MEDIA_SUBTYPE_dsp => b: { - check(pw.c.spa_format_video_dsp_parse(param, &data.format.info.dsp)); - if (data.format.info.dsp.format != pw.c.SPA_VIDEO_FORMAT_DSP_F32) return; - data.size = pw.c.SPA_RECTANGLE(data.position.?.video.size.width, data.position.?.video.size.height); - // XXX: is this correct? + check(pw.c.spa_format_video_dsp_parse(param, &global.format.info.dsp)); + if (global.format.info.dsp.format != pw.c.SPA_VIDEO_FORMAT_DSP_F32) return; + global.size = pw.c.SPA_RECTANGLE(global.position.?.video.size.width, global.position.?.video.size.height); break :b .{ pw.c.SPA_VIDEO_FORMAT_DSP_F32, 4 }; }, else => .{ pw.c.SPA_VIDEO_FORMAT_UNKNOWN, 0 }, }; if (format == pw.c.SPA_VIDEO_FORMAT_UNKNOWN) { - check(pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "unknown pixel format")); + _ = pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "unknown pixel format"); return; } - if (data.size.width == 0 or data.size.height == 0) { - check(pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "invalid size")); + if (global.size.width == 0 or global.size.height == 0) { + _ = pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "invalid size"); return; } - // if (data.texture) |texture| data.gpa.free(texture); - // data.texture = data.gpa.alloc(u8, data.size.width * data.size.height * 3) catch @panic("OOM"); - // XXX: don't we always know the format? 
const size: i32, const blocks: i32 = switch (format) { pw.c.SPA_VIDEO_FORMAT_YV12, pw.c.SPA_VIDEO_FORMAT_I420 => b: { - data.stride = @intCast(data.size.width); - data.is_yuv = true; + global.stride = @intCast(global.size.width); + global.is_yuv = true; break :b .{ - @divExact((data.stride * @as(i32, @intCast(data.size.height))) * 3, 2), + @divExact((global.stride * @as(i32, @intCast(global.size.height))) * 3, 2), 3, }; }, pw.c.SPA_VIDEO_FORMAT_YUY2 => b: { - data.is_yuv = true; - data.stride = @intCast(data.size.width * 2); + global.is_yuv = true; + global.stride = @intCast(global.size.width * 2); break :b .{ - data.stride * @as(i32, @intCast(data.size.height)), + global.stride * @as(i32, @intCast(global.size.height)), 1, }; }, else => b: { - data.stride = @intCast(data.size.width * 2); + global.stride = @intCast(global.size.width * 2); break :b .{ - data.stride * @as(i32, @intCast(data.size.height)), + global.stride * @as(i32, @intCast(global.size.height)), 1, }; }, }; - data.rect.x = 0; - data.rect.y = 0; - data.rect.w = @floatFromInt(data.size.width); - data.rect.h = @floatFromInt(data.size.height); + global.rect.x = 0; + global.rect.y = 0; + global.rect.w = @floatFromInt(global.size.width); + global.rect.h = @floatFromInt(global.size.height); var params_buf: [3]?*const pw.c.spa_pod = undefined; var params: std.ArrayList(?*const pw.c.spa_pod) = .initBuffer(¶ms_buf); @@ -582,10 +566,10 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp check(pw.c.spa_pod_builder_int(&b, size * mult)); check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_BUFFERS_stride, 0)); - check(pw.c.spa_pod_builder_int(&b, data.stride * mult)); + check(pw.c.spa_pod_builder_int(&b, global.stride * mult)); check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_PARAM_BUFFERS_stride, 0)); - check(pw.c.spa_pod_builder_int(&b, data.stride * mult)); + check(pw.c.spa_pod_builder_int(&b, global.stride * mult)); { var choice_frame: pw.c.spa_pod_frame = undefined; @@ -605,7 +589,6 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); } - // XXX: remove sdl example once done since it's pretty out of date at this point, remove from deps too params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, ¶m_buffers_frame)))) catch @panic("OOB"); } @@ -661,7 +644,7 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp // ``` fn onProcess(userdata: ?*anyopaque) callconv(.c) void { _ = userdata; - const stream = data.stream; + const stream = global.stream; var maybe_buffer: ?*pw.c.pw_buffer = null; while (true) { @@ -670,10 +653,10 @@ fn onProcess(userdata: ?*anyopaque) callconv(.c) void { maybe_buffer = t; } if (maybe_buffer) |b| { - if (data.current_buffer) |current| { + if (global.current_buffer) |current| { check(pw.c.pw_stream_queue_buffer(stream, current)); } - data.current_buffer = b; + global.current_buffer = b; } } @@ -692,7 +675,7 @@ fn render(draw: zin.Draw(.{ .static = .main })) void { const client_size = zin.staticWindow(.main).getClientSize(); - const buf: *pw.c.spa_buffer = (data.current_buffer orelse { + const buf: *pw.c.spa_buffer = (global.current_buffer orelse { draw.text("waiting for webcam...", @divTrunc(client_size.x, 2) - 50, @divTrunc(client_size.y, 2), .white); return; }).buffer; @@ -701,7 +684,7 @@ fn render(draw: zin.Draw(.{ .static = .main })) void { const sdata = buf.datas[0].data orelse return; - const stream = data.stream; + const stream = global.stream; 
const maybe_h: ?*pw.c.spa_meta_header = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_Header, @sizeOf(pw.c.spa_meta_header)))); if (maybe_h) |h| { const now = pw.c.pw_stream_get_nsec(stream); @@ -712,92 +695,47 @@ fn render(draw: zin.Draw(.{ .static = .main })) void { const maybe_mc: ?*pw.c.spa_meta_region = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_VideoCrop, @sizeOf(pw.c.spa_meta_region)))); if (maybe_mc) |mc| { if (pw.c.spa_meta_region_is_valid(mc)) { - data.rect.x = @floatFromInt(mc.region.position.x); - data.rect.y = @floatFromInt(mc.region.position.y); - data.rect.w = @floatFromInt(mc.region.size.width); - data.rect.h = @floatFromInt(mc.region.size.height); + global.rect.x = @floatFromInt(mc.region.position.x); + global.rect.y = @floatFromInt(mc.region.position.y); + global.rect.w = @floatFromInt(mc.region.size.width); + global.rect.h = @floatFromInt(mc.region.size.height); } } // copy video image in texture - if (data.is_yuv) { - // var datas: [4]?[*]u8 = undefined; - const sstride = data.stride; + if (global.is_yuv and buf.n_datas == 1) { + const sstride = global.stride; const udata: [*]u8 = @ptrCast(sdata); // XXX: ... - if (buf.n_datas == 1) { - const size = zin.staticWindow(.main).getClientSize(); - const rect_size = zin.scale(i32, texel_width, draw.getDpiScale().x); - var x: i32, var y: i32 = .{ 0, 0 }; - // XXX: /2 to avoid reading oob for now - while (y < @divTrunc(@min(size.y, data.size.height), 2)) : (y += rect_size) { - while (x < @divTrunc(@min(size.x, data.size.width), 2)) : (x += rect_size) { - const i: usize = @intCast((y * sstride + x) * 3); - var color: zin.Rgb8 = .{ - .r = udata[i], - .g = udata[i + 1], - .b = udata[i + 2], - }; - // XXX: workaround for zin bug where black renders as bright color? - if (std.meta.eql(color, .black)) { - color.r = 1; - color.g = 1; - color.b = 1; - } - draw.rect(.ltwh(x, y, rect_size, rect_size), color); + const size = zin.staticWindow(.main).getClientSize(); + const rect_size = zin.scale(i32, texel_width, draw.getDpiScale().x); + var x: i32, var y: i32 = .{ 0, 0 }; + // XXX: /2 to avoid reading oob for now + while (y < @divTrunc(@min(size.y, global.size.height), 2)) : (y += rect_size) { + while (x < @divTrunc(@min(size.x, global.size.width), 2)) : (x += rect_size) { + const i: usize = @intCast((y * sstride + x) * 3); + var color: zin.Rgb8 = .{ + .r = udata[i], + .g = udata[i + 1], + .b = udata[i + 2], + }; + // XXX: workaround for zin bug where black renders as bright color? 
+ if (std.meta.eql(color, .black)) { + color.r = 1; + color.g = 1; + color.b = 1; } - x = 0; + draw.rect(.ltwh(x, y, rect_size, rect_size), color); } - } else { - @panic("unimplemented"); - // datas[0] = @ptrCast(sdata); - // datas[1] = @ptrCast(buf.datas[1].data); - // datas[2] = @ptrCast(buf.datas[2].data); - // _ = sdl.SDL_UpdateYUVTexture( - // data.texture, - // null, - // datas[0], - // sstride, - // datas[1], - // @divExact(sstride, 2), - // datas[2], - // @divExact(sstride, 2), - // ); + x = 0; } } else { - log.info("is not yuv", .{}); - // var dstride: c_int = undefined; - // var ddata: ?*anyopaque = undefined; - // if (!sdl.SDL_LockTexture(data.texture, null, &ddata, &dstride)) { - // log.err("Couldn't lock texture: {s}", .{sdl.SDL_GetError()}); - // } - // defer sdl.SDL_UnlockTexture(data.texture); - - // var sstride: u32 = @intCast(buf.datas[0].chunk.*.stride); - // if (sstride == 0) sstride = buf.datas[0].chunk.*.size / data.size.height; - // const ostride = @min(sstride, dstride); - - // var src: [*]u8 = @ptrCast(sdata); - // var dst: [*]u8 = @ptrCast(ddata); - - // if (data.format.media_subtype == pw.c.SPA_MEDIA_SUBTYPE_dsp) { - // for (0..data.size.height) |_| { - // const pixel: [*]Pixel = @ptrCast(@alignCast(src)); - // for (0..data.size.width) |j| { - // dst[j * 4 + 0] = @intFromFloat(std.math.clamp(pixel[j].r * 255.0, 0, 255)); - // dst[j * 4 + 1] = @intFromFloat(std.math.clamp(pixel[j].g * 255.0, 0, 255)); - // dst[j * 4 + 2] = @intFromFloat(std.math.clamp(pixel[j].b * 255.0, 0, 255)); - // dst[j * 4 + 3] = @intFromFloat(std.math.clamp(pixel[j].a * 255.0, 0, 255)); - // } - // src += sstride; - // dst += @intCast(dstride); - // } - // } else { - // for (0..data.size.height) |_| { - // @memcpy(dst[0..@intCast(ostride)], src[0..@intCast(ostride)]); - // src += sstride; - // dst += @intCast(dstride); - // } - // } + draw.text( + "unsupported format...", + @divTrunc(client_size.x, 2) - 50, + @divTrunc(client_size.y, 2), + .white, + ); + return; } } From df638e1ca2caa3dd54ef820b9ea365835cf6bd4e Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Mon, 8 Dec 2025 16:18:12 -0800 Subject: [PATCH 09/13] Decodes luminance --- src/examples/video_play_zin.zig | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play_zin.zig index bdcd539..f9e4401 100644 --- a/src/examples/video_play_zin.zig +++ b/src/examples/video_play_zin.zig @@ -75,6 +75,8 @@ const global = struct { const texel_width = 10; const max_buffers = 64; +const default_video_width = 160; +const default_video_height = 90; const FRect = struct { x: f32 = 0, @@ -272,9 +274,9 @@ pub fn main() !void { var choice_frame: pw.c.spa_pod_frame = undefined; check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_size, 0)); check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Range, 0)); - check(pw.c.spa_pod_builder_rectangle(&b, 1920 / texel_width, 1080 / texel_width)); + check(pw.c.spa_pod_builder_rectangle(&b, default_video_width, default_video_height)); check(pw.c.spa_pod_builder_rectangle(&b, 1, 1)); - check(pw.c.spa_pod_builder_rectangle(&b, 999999, 999999)); + check(pw.c.spa_pod_builder_rectangle(&b, default_video_width, default_video_height)); assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); } @@ -354,7 +356,10 @@ pub fn main() !void { try zin.staticWindow(.main).create(.{ .title = "Video Play", - .size = .{ .client_points = .{ .x = 300, .y = 300 } }, + .size = .{ .client_points = .{ + .x = 
default_video_width * texel_width, + .y = default_video_height * texel_width, + } }, .pos = null, }); defer zin.staticWindow(.main).destroy(); @@ -705,18 +710,17 @@ fn render(draw: zin.Draw(.{ .static = .main })) void { // copy video image in texture if (global.is_yuv and buf.n_datas == 1) { const sstride = global.stride; - const udata: [*]u8 = @ptrCast(sdata); // XXX: ... + const udata: [*]u8 = @ptrCast(sdata); const size = zin.staticWindow(.main).getClientSize(); const rect_size = zin.scale(i32, texel_width, draw.getDpiScale().x); var x: i32, var y: i32 = .{ 0, 0 }; - // XXX: /2 to avoid reading oob for now - while (y < @divTrunc(@min(size.y, global.size.height), 2)) : (y += rect_size) { - while (x < @divTrunc(@min(size.x, global.size.width), 2)) : (x += rect_size) { - const i: usize = @intCast((y * sstride + x) * 3); + while (y < @min(size.y, global.size.height)) : (y += 1) { + while (x < @min(size.x, global.size.width)) : (x += 1) { + const i: usize = @intCast(y * sstride + x * 2); var color: zin.Rgb8 = .{ .r = udata[i], - .g = udata[i + 1], - .b = udata[i + 2], + .g = udata[i], + .b = udata[i], }; // XXX: workaround for zin bug where black renders as bright color? if (std.meta.eql(color, .black)) { @@ -724,7 +728,7 @@ fn render(draw: zin.Draw(.{ .static = .main })) void { color.g = 1; color.b = 1; } - draw.rect(.ltwh(x, y, rect_size, rect_size), color); + draw.rect(.ltwh(x * rect_size, y * rect_size, rect_size, rect_size), color); } x = 0; } From 13d78f0c190bd2bacf69a3e1393d3ce5161f7305 Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Mon, 8 Dec 2025 16:55:14 -0800 Subject: [PATCH 10/13] Converts yuyv to rgb --- src/examples/video_play_zin.zig | 63 ++++++++++++++++++++++++--------- 1 file changed, 46 insertions(+), 17 deletions(-) diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play_zin.zig index f9e4401..97e30d2 100644 --- a/src/examples/video_play_zin.zig +++ b/src/examples/video_play_zin.zig @@ -713,24 +713,30 @@ fn render(draw: zin.Draw(.{ .static = .main })) void { const udata: [*]u8 = @ptrCast(sdata); const size = zin.staticWindow(.main).getClientSize(); const rect_size = zin.scale(i32, texel_width, draw.getDpiScale().x); - var x: i32, var y: i32 = .{ 0, 0 }; - while (y < @min(size.y, global.size.height)) : (y += 1) { - while (x < @min(size.x, global.size.width)) : (x += 1) { - const i: usize = @intCast(y * sstride + x * 2); - var color: zin.Rgb8 = .{ - .r = udata[i], - .g = udata[i], - .b = udata[i], - }; - // XXX: workaround for zin bug where black renders as bright color? 
- if (std.meta.eql(color, .black)) { - color.r = 1; - color.g = 1; - color.b = 1; - } - draw.rect(.ltwh(x * rect_size, y * rect_size, rect_size, rect_size), color); + for (0..@intCast(@min(size.y, global.size.height))) |y| { + var x: usize = 0; + while (x < @min(size.x, global.size.width)) : (x += 2) { + const i: usize = @intCast(y * @as(usize, @intCast(sstride)) + x * 2); + const colors = yuyvToRgb(udata[i..][0..4].*); + draw.rect( + .ltwh( + @as(i32, @intCast(x)) * rect_size, + @as(i32, @intCast(y)) * rect_size, + rect_size, + rect_size, + ), + colors[0], + ); + draw.rect( + .ltwh( + (@as(i32, @intCast(x)) + 1) * rect_size, + @as(i32, @intCast(y)) * rect_size, + rect_size, + rect_size, + ), + colors[1], + ); } - x = 0; } } else { draw.text( @@ -743,6 +749,29 @@ fn render(draw: zin.Draw(.{ .static = .main })) void { } } +pub fn clampUnorm(val: anytype) u8 { + return @intCast(std.math.clamp(val, 0, 255)); +} + +fn yuyvToRgb(yuyv: [4]u8) [2]zin.Rgb8 { + const d = @as(i32, yuyv[1]) - 128; + const e = @as(i32, yuyv[3]) - 128; + const c0 = @as(i32, yuyv[0]) - 16; + const c1 = @as(i32, yuyv[2]) - 16; + return .{ + .{ + .r = clampUnorm(((298 * c0) + (409 * e) + 128) >> 8), + .g = clampUnorm(((298 * c0) - (100 * d) - (208 * e) + 128) >> 8), + .b = clampUnorm(((298 * c0) + (516 * d) + 128) >> 8), + }, + .{ + .r = clampUnorm(((298 * c1) + (409 * e) + 128) >> 8), + .g = clampUnorm(((298 * c1) - (100 * d) - (208 * e) + 128) >> 8), + .b = clampUnorm(((298 * c1) + (516 * d) + 128) >> 8), + }, + }; +} + fn check(res: c_int) void { if (res != 0) { std.debug.panic("pipewire call failed: {s}", .{pw.c.spa_strerror(res)}); From 9b5269e956079a72a4590f7240326c210aa7e843 Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Mon, 8 Dec 2025 17:09:31 -0800 Subject: [PATCH 11/13] Cleanup --- src/examples/video_play_zin.zig | 368 +++++++++++++++----------------- 1 file changed, 175 insertions(+), 193 deletions(-) diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play_zin.zig index 97e30d2..f1c1413 100644 --- a/src/examples/video_play_zin.zig +++ b/src/examples/video_play_zin.zig @@ -12,46 +12,49 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; +// Configure logging pub const std_options: std.Options = .{ .log_level = .info, }; // Normal code wouldn't need this conditional, we're just demonstrating both the static library and -// the Zig module here. Prefer the Zig module when possible. +// the Zig module here. Prefer the Zig module when possible. We wrap the C module in a struct just +// to make it look like the Zig module so that the rest of the file can use it as is. const pw = if (example_options.use_zig_module) - // Example of linking with the pipewire zig module @import("pipewire") else - // Example of linking with the pipewire static library. We're wrapping it like this just to - // match the Zig module so the rest of the code doesn't need conditionals. 
struct { pub const c = @import("pipewire"); }; +// Configure zin pub const zin_config: zin.Config = .{ - .StaticWindowId = StaticWindowId, -}; - -const StaticWindowId = enum { - main, - pub fn getConfig(self: StaticWindowId) zin.WindowConfigData { - return switch (self) { - .main => .{ - .window_size_events = true, - .key_events = true, - .mouse_events = true, - .timers = .one, - .background = .{ .r = 49, .g = 49, .b = 49 }, - .dynamic_background = false, - .win32 = .{ .render = .{ .gdi = .{} } }, - .x11 = .{ .render_kind = .double_buffered }, - }, - }; - } + .StaticWindowId = enum { + main, + pub fn getConfig(self: @This()) zin.WindowConfigData { + return switch (self) { + .main => .{ + .window_size_events = true, + .key_events = true, + .mouse_events = true, + .timers = .one, + .background = .{ .r = 49, .g = 49, .b = 49 }, + .dynamic_background = false, + .win32 = .{ .render = .{ .gdi = .{} } }, + .x11 = .{ .render_kind = .double_buffered }, + }, + }; + } + }, }; -pub const panic = zin.panic(.{ .title = "Hello Panic!" }); +// Video settings +const texel_width = 10; +const max_buffers = 64; +const default_video_width = 160; +const default_video_height = 90; +// Global state used by Zin and Pipewire const global = struct { const default_timer_period_ns = 16 * std.time.ns_per_ms; @@ -67,115 +70,12 @@ const global = struct { var stride: i32 = 0; var size: pw.c.spa_rectangle = .{}; - var rect: FRect = .{}; + var rect: Rect = .{}; var is_yuv: bool = false; var current_buffer: ?*pw.c.pw_buffer = null; }; -const texel_width = 10; -const max_buffers = 64; -const default_video_width = 160; -const default_video_height = 90; - -const FRect = struct { - x: f32 = 0, - y: f32 = 0, - w: f32 = 0, - h: f32 = 0, -}; - -const formats: []const pw.c.spa_video_format = &.{ - pw.c.SPA_VIDEO_FORMAT_ENCODED, - pw.c.SPA_VIDEO_FORMAT_I420, - pw.c.SPA_VIDEO_FORMAT_YV12, - pw.c.SPA_VIDEO_FORMAT_YUY2, - pw.c.SPA_VIDEO_FORMAT_UYVY, - pw.c.SPA_VIDEO_FORMAT_AYUV, - pw.c.SPA_VIDEO_FORMAT_RGBx, - pw.c.SPA_VIDEO_FORMAT_BGRx, - pw.c.SPA_VIDEO_FORMAT_xRGB, - pw.c.SPA_VIDEO_FORMAT_xBGR, - pw.c.SPA_VIDEO_FORMAT_RGBA, - pw.c.SPA_VIDEO_FORMAT_BGRA, - pw.c.SPA_VIDEO_FORMAT_ARGB, - pw.c.SPA_VIDEO_FORMAT_ABGR, - pw.c.SPA_VIDEO_FORMAT_RGB, - pw.c.SPA_VIDEO_FORMAT_BGR, - pw.c.SPA_VIDEO_FORMAT_Y41B, - pw.c.SPA_VIDEO_FORMAT_Y42B, - pw.c.SPA_VIDEO_FORMAT_YVYU, - pw.c.SPA_VIDEO_FORMAT_Y444, - pw.c.SPA_VIDEO_FORMAT_v210, - pw.c.SPA_VIDEO_FORMAT_v216, - pw.c.SPA_VIDEO_FORMAT_NV12, - pw.c.SPA_VIDEO_FORMAT_NV21, - pw.c.SPA_VIDEO_FORMAT_GRAY8, - pw.c.SPA_VIDEO_FORMAT_GRAY16_BE, - pw.c.SPA_VIDEO_FORMAT_GRAY16_LE, - pw.c.SPA_VIDEO_FORMAT_v308, - pw.c.SPA_VIDEO_FORMAT_RGB16, - pw.c.SPA_VIDEO_FORMAT_BGR16, - pw.c.SPA_VIDEO_FORMAT_RGB15, - pw.c.SPA_VIDEO_FORMAT_BGR15, - pw.c.SPA_VIDEO_FORMAT_UYVP, - pw.c.SPA_VIDEO_FORMAT_A420, - pw.c.SPA_VIDEO_FORMAT_RGB8P, - pw.c.SPA_VIDEO_FORMAT_YUV9, - pw.c.SPA_VIDEO_FORMAT_YVU9, - pw.c.SPA_VIDEO_FORMAT_IYU1, - pw.c.SPA_VIDEO_FORMAT_ARGB64, - pw.c.SPA_VIDEO_FORMAT_AYUV64, - pw.c.SPA_VIDEO_FORMAT_r210, - pw.c.SPA_VIDEO_FORMAT_I420_10BE, - pw.c.SPA_VIDEO_FORMAT_I420_10LE, - pw.c.SPA_VIDEO_FORMAT_I422_10BE, - pw.c.SPA_VIDEO_FORMAT_I422_10LE, - pw.c.SPA_VIDEO_FORMAT_Y444_10BE, - pw.c.SPA_VIDEO_FORMAT_Y444_10LE, - pw.c.SPA_VIDEO_FORMAT_GBR, - pw.c.SPA_VIDEO_FORMAT_GBR_10BE, - pw.c.SPA_VIDEO_FORMAT_GBR_10LE, - pw.c.SPA_VIDEO_FORMAT_NV16, - pw.c.SPA_VIDEO_FORMAT_NV24, - pw.c.SPA_VIDEO_FORMAT_NV12_64Z32, - pw.c.SPA_VIDEO_FORMAT_A420_10BE, - pw.c.SPA_VIDEO_FORMAT_A420_10LE, - pw.c.SPA_VIDEO_FORMAT_A422_10BE, - 
pw.c.SPA_VIDEO_FORMAT_A422_10LE,
-    pw.c.SPA_VIDEO_FORMAT_A444_10BE,
-    pw.c.SPA_VIDEO_FORMAT_A444_10LE,
-    pw.c.SPA_VIDEO_FORMAT_NV61,
-    pw.c.SPA_VIDEO_FORMAT_P010_10BE,
-    pw.c.SPA_VIDEO_FORMAT_P010_10LE,
-    pw.c.SPA_VIDEO_FORMAT_IYU2,
-    pw.c.SPA_VIDEO_FORMAT_VYUY,
-    pw.c.SPA_VIDEO_FORMAT_GBRA,
-    pw.c.SPA_VIDEO_FORMAT_GBRA_10BE,
-    pw.c.SPA_VIDEO_FORMAT_GBRA_10LE,
-    pw.c.SPA_VIDEO_FORMAT_GBR_12BE,
-    pw.c.SPA_VIDEO_FORMAT_GBR_12LE,
-    pw.c.SPA_VIDEO_FORMAT_GBRA_12BE,
-    pw.c.SPA_VIDEO_FORMAT_GBRA_12LE,
-    pw.c.SPA_VIDEO_FORMAT_I420_12BE,
-    pw.c.SPA_VIDEO_FORMAT_I420_12LE,
-    pw.c.SPA_VIDEO_FORMAT_I422_12BE,
-    pw.c.SPA_VIDEO_FORMAT_I422_12LE,
-    pw.c.SPA_VIDEO_FORMAT_Y444_12BE,
-    pw.c.SPA_VIDEO_FORMAT_Y444_12LE,
-    pw.c.SPA_VIDEO_FORMAT_RGBA_F16,
-    pw.c.SPA_VIDEO_FORMAT_RGBA_F32,
-    pw.c.SPA_VIDEO_FORMAT_xRGB_210LE,
-    pw.c.SPA_VIDEO_FORMAT_xBGR_210LE,
-    pw.c.SPA_VIDEO_FORMAT_RGBx_102LE,
-    pw.c.SPA_VIDEO_FORMAT_BGRx_102LE,
-    pw.c.SPA_VIDEO_FORMAT_ARGB_210LE,
-    pw.c.SPA_VIDEO_FORMAT_ABGR_210LE,
-    pw.c.SPA_VIDEO_FORMAT_RGBA_102LE,
-    pw.c.SPA_VIDEO_FORMAT_BGRA_102LE,
-    pw.c.SPA_VIDEO_FORMAT_DSP_F32,
-};
-
 pub fn main() !void {
     // If we're linking with the Zig module, set up logging.
     var logger = if (example_options.use_zig_module) pw.Logger.init() else {};
@@ -239,10 +139,6 @@ pub fn main() !void {
 
     // Tell pipewire which formats we support
     {
-
-        // make an object of type SPA_TYPE_OBJECT_Format and id SPA_PARAM_EnumFormat. The object type is
-        // important because it defines the properties that are acceptable. The id gives more context
-        // about what the object is meant to contain. In this case we enumerate supported formats.
        var format_frame: pw.c.spa_pod_frame = undefined;
        check(pw.c.spa_pod_builder_push_object(
            &b,
@@ -250,26 +146,27 @@ pub fn main() !void {
            pw.c.SPA_TYPE_OBJECT_Format,
            pw.c.SPA_PARAM_EnumFormat,
        ));
-        // add media type and media subtype properties
        check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_mediaType, 0));
        check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_MEDIA_TYPE_video));
        check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_mediaSubtype, 0));
        check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_MEDIA_SUBTYPE_raw));
 
-        // build an enumeration of formats
+        // Tell pipewire we prefer YUY2 since it's the only format we currently support, but
+        // list all the other formats as fallbacks so we still have a chance to respond if
+        // that's what the webcam gives us.
        {
            var choice_frame: pw.c.spa_pod_frame = undefined;
            check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_format, 0));
            check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Enum, 0));
            check(pw.c.spa_pod_builder_id(&b, pw.c.SPA_VIDEO_FORMAT_YUY2));
-            for (formats) |format| {
+            for (all_formats) |format| {
                check(pw.c.spa_pod_builder_id(&b, format));
            }
            assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null);
        }
 
-        // add size and framerate ranges
-
+        // Set the resolutions we support. We default pretty low since we're rendering a
+        // rectangle per pixel.
        {
            var choice_frame: pw.c.spa_pod_frame = undefined;
            check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_size, 0));
@@ -280,6 +177,7 @@ pub fn main() !void {
            assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null);
        }
 
+        // Tell pipewire what framerates we support.
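+        // We ask for a range: a preferred default rate first, then the min and max we accept.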
{
            var choice_frame: pw.c.spa_pod_frame = undefined;
            check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_framerate, 0));
@@ -290,10 +188,12 @@ pub fn main() !void {
            assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null);
        }
 
+        // Log the supported formats
        const format: *pw.c.spa_pod = @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &format_frame).?));
-        log.info("supported formats:", .{});
        check(pw.c.spa_debug_format(2, null, format));
+
+        // Add the supported formats to our params
        params.appendBounded(format) catch @panic("OOB");
    }
 
@@ -316,17 +216,14 @@ pub fn main() !void {
        params.appendBounded(format) catch @panic("OOB");
    }
 
-    // now connect the stream, we need a direction (input/output),
-    // an optional target node to connect to, some flags and parameters
-    //
+    // Connect to the stream; we start it inactive and activate it ourselves later
    const res = pw.c.pw_stream_connect(
        global.stream,
        pw.c.PW_DIRECTION_INPUT,
        pw.c.PW_ID_ANY,
-        pw.c.PW_STREAM_FLAG_AUTOCONNECT | // try to automatically connect this stream
-            pw.c.PW_STREAM_FLAG_INACTIVE | // we will activate ourselves
-            pw.c.PW_STREAM_FLAG_MAP_BUFFERS, // mmap the buffer data for us
-        // extra parameters, see above
+        pw.c.PW_STREAM_FLAG_AUTOCONNECT |
+            pw.c.PW_STREAM_FLAG_INACTIVE |
+            pw.c.PW_STREAM_FLAG_MAP_BUFFERS,
        params.items.ptr,
        @intCast(params.items.len),
    );
@@ -345,7 +242,7 @@ pub fn main() !void {
    defer zin.x11Disconnect();
 
    zin.staticWindow(.main).registerClass(.{
-        .callback = callback,
+        .callback = windowEvent,
        .win32_name = zin.L("VideoPlay"),
        .macos_view = "VideoPlay",
    }, .{
@@ -365,12 +262,17 @@ pub fn main() !void {
    defer zin.staticWindow(.main).destroy();
    zin.staticWindow(.main).show();
 
+    // Start a timer with the default timer period since we don't have a video feed yet, and call
+    // our callback once on startup so we don't have to wait for one timer period to elapse.
    startTimerNanos(global.default_timer_period_ns);
-    callback(.{ .timer = {} });
+    windowEvent(.{ .timer = {} });
+
+    // Start the main loop.
    try zin.mainLoop();
 }
 
-fn callback(cb: zin.Callback(.{ .static = .main })) void {
+/// Process a window event.
+fn windowEvent(cb: zin.Callback(.{ .static = .main })) void {
    switch (cb) {
        .close => zin.quitMainLoop(),
        .window_size => {},
@@ -383,6 +285,7 @@ pub fn main() !void {
    }
 }
 
+/// Flush all pending pipewire events.
 fn pipewireFlush() void {
    while (true) {
        const result = pw.c.pw_loop_iterate(pw.c.pw_main_loop_get_loop(global.loop), 0);
@@ -395,6 +298,7 @@ fn pipewireFlush() void {
    }
 }
 
+/// Handle the stream state changing.
 fn onStreamStateChanged(
    userdata: ?*anyopaque,
    old: pw.c.pw_stream_state,
@@ -421,6 +325,7 @@ fn onStreamStateChanged(
    }
 }
 
+/// Handle the stream IO state changing.
 fn onStreamIoChanged(userdata: ?*anyopaque, id: u32, area: ?*anyopaque, size: u32) callconv(.c) void {
    _ = size;
    _ = userdata;
@@ -429,19 +334,10 @@ fn onStreamIoChanged(userdata: ?*anyopaque, id: u32, area: ?*anyopaque, size: u3
    }
 }
 
-// Be notified when the stream param changes. We're only looking at the
-// format changes.
-//
-// We are now supposed to call pw_stream_finish_format() with success or
-// failure, depending on if we can support the format. Because we gave
-// a list of supported formats, this should be ok.
-//
-// As part of pw_stream_finish_format() we can provide parameters that
-// will control the buffer memory allocation. This includes the metadata
-// that we would like on our buffer, the size, alignment, etp.
+/// Handle the stream parameters changing.
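+/// On a format change we accept what pipewire negotiated and reply with the buffer,
+/// header-metadata, and crop parameters we'd like allocated.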
fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.spa_pod) callconv(.c) void {
-    log.info("stream param changed", .{});
    _ = userdata;
+
    const stream = global.stream;
    var params_buffer: [1024]u8 align(@alignOf(u32)) = undefined;
    var b: pw.c.spa_pod_builder = .{
@@ -452,10 +348,13 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp
        .callbacks = .{ .funcs = null, .data = null },
    };
 
+    // Fail if the pod is invalid
    if (param != null and id == pw.c.SPA_PARAM_Tag) {
        log.err("invalid pod", .{});
        return;
    }
+
+    // Handle latency changing
    if (param != null and id == pw.c.SPA_PARAM_Latency) {
        var info: pw.c.spa_latency_info = undefined;
        if (pw.c.spa_latency_parse(param, &info) >= 0) {
@@ -463,27 +362,29 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp
        }
        return;
    }
-    // NULL means to clear the format
+
+    // Clear the format if requested
    if (param == null or id != pw.c.SPA_PARAM_Format) return;
 
+    // Log the new format
    log.info("got format:", .{});
    check(pw.c.spa_debug_format(2, null, param));
 
+    // Parse the new format and reset our timer to the new frame interval
    var parsed: pw.c.spa_video_info_raw = undefined;
    if (pw.c.spa_format_video_raw_parse(param, &parsed) < 0) {
        std.debug.panic("failed to parse format", .{});
    }
-    const num: f32 = @floatFromInt(parsed.framerate.num);
-    const denom: f32 = @floatFromInt(parsed.framerate.denom);
-    const hz = denom / num;
-    startTimerNanos(@intFromFloat(hz * std.time.ns_per_s));
-
    if (pw.c.spa_format_parse(param, &global.format.media_type, &global.format.media_subtype) < 0) {
        return;
    }
-
    if (global.format.media_type != pw.c.SPA_MEDIA_TYPE_video) return;
 
+    const num: f32 = @floatFromInt(parsed.framerate.num);
+    const denom: f32 = @floatFromInt(parsed.framerate.denom);
+    const period_s = denom / num;
+    startTimerNanos(@intFromFloat(period_s * std.time.ns_per_s));
+
+    // Check what format we got
    const format, const mult: i32 = switch (global.format.media_subtype) {
        pw.c.SPA_MEDIA_SUBTYPE_raw => b: {
            // call a helper function to parse the format for us.
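(Editorial note: `spa_fraction` carries the negotiated framerate as `num/denom` frames per second, so `denom / num` above is the per-frame period in seconds, which is then scaled to nanoseconds for the render timer. Below is a minimal standalone sketch of that arithmetic, with illustrative names that are not part of the patch.)

```zig
const std = @import("std");

/// Convert a framerate fraction (`num / denom` frames per second, as in
/// `spa_fraction`) into a per-frame timer period in nanoseconds.
/// Assumes `num != 0`; fractional nanoseconds are truncated.
fn framePeriodNs(num: u32, denom: u32) u64 {
    const period_s = @as(f64, @floatFromInt(denom)) / @as(f64, @floatFromInt(num));
    return @intFromFloat(period_s * std.time.ns_per_s);
}

test "frame period from a framerate fraction" {
    // 30/1 fps -> 1/30 s -> ~33.3ms between frames.
    try std.testing.expectEqual(@as(u64, 33_333_333), framePeriodNs(30, 1));
    // 60/1 fps -> ~16.7ms between frames.
    try std.testing.expectEqual(@as(u64, 16_666_666), framePeriodNs(60, 1));
}
```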
@@ -499,7 +400,6 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp }, else => .{ pw.c.SPA_VIDEO_FORMAT_UNKNOWN, 0 }, }; - if (format == pw.c.SPA_VIDEO_FORMAT_UNKNOWN) { _ = pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "unknown pixel format"); return; @@ -509,6 +409,7 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp return; } + // Check what size we got const size: i32, const blocks: i32 = switch (format) { pw.c.SPA_VIDEO_FORMAT_YV12, pw.c.SPA_VIDEO_FORMAT_I420 => b: { global.stride = @intCast(global.size.width); @@ -535,16 +436,15 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp }, }; + // Update the global rect global.rect.x = 0; global.rect.y = 0; global.rect.w = @floatFromInt(global.size.width); global.rect.h = @floatFromInt(global.size.height); + // Specify our buffer options var params_buf: [3]?*const pw.c.spa_pod = undefined; var params: std.ArrayList(?*const pw.c.spa_pod) = .initBuffer(¶ms_buf); - - // a SPA_TYPE_OBJECT_ParamBuffers object defines the acceptable size, - // number, stride etc of the buffers { var param_buffers_frame: pw.c.spa_pod_frame = undefined; check(pw.c.spa_pod_builder_push_object( @@ -597,7 +497,7 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, ¶m_buffers_frame)))) catch @panic("OOB"); } - // a header metadata with timing information + // Specify the timing options { var timing_frame: pw.c.spa_pod_frame = undefined; check(pw.c.spa_pod_builder_push_object( @@ -615,7 +515,7 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &timing_frame)))) catch @panic("OOB"); } - // video cropping information + // Specify the cropping options { var crop_frame: pw.c.spa_pod_frame = undefined; check(pw.c.spa_pod_builder_push_object( @@ -634,19 +534,11 @@ fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.sp params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &crop_frame)))) catch @panic("OOB"); } - // we are done + // Success check(pw.c.pw_stream_update_params(stream, params.items.ptr, @intCast(params.items.len))); } -// our data processing function is in general: -// ``` -// struct pw_buffer *b; -// b = pw_stream_dequeue_buffer(stream); -// -// .. do stuff with buffer ... -// -// pw_stream_queue_buffer(stream, b); -// ``` +/// Process a new buffer. fn onProcess(userdata: ?*anyopaque) callconv(.c) void { _ = userdata; const stream = global.stream; @@ -665,6 +557,7 @@ fn onProcess(userdata: ?*anyopaque) callconv(.c) void { } } +/// Render the current buffer. fn render(draw: zin.Draw(.{ .static = .main })) void { // Early out if we're redrawing too fast (e.g. 
during a resize) { @@ -676,8 +569,10 @@ fn render(draw: zin.Draw(.{ .static = .main })) void { global.last_render = now; } + // Clear the screen draw.clear(); + // Render the current frame const client_size = zin.staticWindow(.main).getClientSize(); const buf: *pw.c.spa_buffer = (global.current_buffer orelse { @@ -685,18 +580,8 @@ fn render(draw: zin.Draw(.{ .static = .main })) void { return; }).buffer; - log.debug("new buffer {*}", .{buf}); - const sdata = buf.datas[0].data orelse return; - const stream = global.stream; - const maybe_h: ?*pw.c.spa_meta_header = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_Header, @sizeOf(pw.c.spa_meta_header)))); - if (maybe_h) |h| { - const now = pw.c.pw_stream_get_nsec(stream); - log.debug("now:{} pts:{} diff:{}", .{ now, h.pts, now - @as(u64, @intCast(h.pts)) }); - } - - // get the videocrop metadata if any const maybe_mc: ?*pw.c.spa_meta_region = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_VideoCrop, @sizeOf(pw.c.spa_meta_region)))); if (maybe_mc) |mc| { if (pw.c.spa_meta_region_is_valid(mc)) { @@ -707,7 +592,6 @@ fn render(draw: zin.Draw(.{ .static = .main })) void { } } - // copy video image in texture if (global.is_yuv and buf.n_datas == 1) { const sstride = global.stride; const udata: [*]u8 = @ptrCast(sdata); @@ -782,3 +666,101 @@ fn startTimerNanos(ns: u64) void { global.timer_period_ns = ns; zin.staticWindow(.main).startTimerNanos({}, ns); } + +const Rect = struct { + x: f32 = 0, + y: f32 = 0, + w: f32 = 0, + h: f32 = 0, +}; + +const all_formats: []const pw.c.spa_video_format = &.{ + pw.c.SPA_VIDEO_FORMAT_ENCODED, + pw.c.SPA_VIDEO_FORMAT_I420, + pw.c.SPA_VIDEO_FORMAT_YV12, + pw.c.SPA_VIDEO_FORMAT_YUY2, + pw.c.SPA_VIDEO_FORMAT_UYVY, + pw.c.SPA_VIDEO_FORMAT_AYUV, + pw.c.SPA_VIDEO_FORMAT_RGBx, + pw.c.SPA_VIDEO_FORMAT_BGRx, + pw.c.SPA_VIDEO_FORMAT_xRGB, + pw.c.SPA_VIDEO_FORMAT_xBGR, + pw.c.SPA_VIDEO_FORMAT_RGBA, + pw.c.SPA_VIDEO_FORMAT_BGRA, + pw.c.SPA_VIDEO_FORMAT_ARGB, + pw.c.SPA_VIDEO_FORMAT_ABGR, + pw.c.SPA_VIDEO_FORMAT_RGB, + pw.c.SPA_VIDEO_FORMAT_BGR, + pw.c.SPA_VIDEO_FORMAT_Y41B, + pw.c.SPA_VIDEO_FORMAT_Y42B, + pw.c.SPA_VIDEO_FORMAT_YVYU, + pw.c.SPA_VIDEO_FORMAT_Y444, + pw.c.SPA_VIDEO_FORMAT_v210, + pw.c.SPA_VIDEO_FORMAT_v216, + pw.c.SPA_VIDEO_FORMAT_NV12, + pw.c.SPA_VIDEO_FORMAT_NV21, + pw.c.SPA_VIDEO_FORMAT_GRAY8, + pw.c.SPA_VIDEO_FORMAT_GRAY16_BE, + pw.c.SPA_VIDEO_FORMAT_GRAY16_LE, + pw.c.SPA_VIDEO_FORMAT_v308, + pw.c.SPA_VIDEO_FORMAT_RGB16, + pw.c.SPA_VIDEO_FORMAT_BGR16, + pw.c.SPA_VIDEO_FORMAT_RGB15, + pw.c.SPA_VIDEO_FORMAT_BGR15, + pw.c.SPA_VIDEO_FORMAT_UYVP, + pw.c.SPA_VIDEO_FORMAT_A420, + pw.c.SPA_VIDEO_FORMAT_RGB8P, + pw.c.SPA_VIDEO_FORMAT_YUV9, + pw.c.SPA_VIDEO_FORMAT_YVU9, + pw.c.SPA_VIDEO_FORMAT_IYU1, + pw.c.SPA_VIDEO_FORMAT_ARGB64, + pw.c.SPA_VIDEO_FORMAT_AYUV64, + pw.c.SPA_VIDEO_FORMAT_r210, + pw.c.SPA_VIDEO_FORMAT_I420_10BE, + pw.c.SPA_VIDEO_FORMAT_I420_10LE, + pw.c.SPA_VIDEO_FORMAT_I422_10BE, + pw.c.SPA_VIDEO_FORMAT_I422_10LE, + pw.c.SPA_VIDEO_FORMAT_Y444_10BE, + pw.c.SPA_VIDEO_FORMAT_Y444_10LE, + pw.c.SPA_VIDEO_FORMAT_GBR, + pw.c.SPA_VIDEO_FORMAT_GBR_10BE, + pw.c.SPA_VIDEO_FORMAT_GBR_10LE, + pw.c.SPA_VIDEO_FORMAT_NV16, + pw.c.SPA_VIDEO_FORMAT_NV24, + pw.c.SPA_VIDEO_FORMAT_NV12_64Z32, + pw.c.SPA_VIDEO_FORMAT_A420_10BE, + pw.c.SPA_VIDEO_FORMAT_A420_10LE, + pw.c.SPA_VIDEO_FORMAT_A422_10BE, + pw.c.SPA_VIDEO_FORMAT_A422_10LE, + pw.c.SPA_VIDEO_FORMAT_A444_10BE, + pw.c.SPA_VIDEO_FORMAT_A444_10LE, + pw.c.SPA_VIDEO_FORMAT_NV61, + pw.c.SPA_VIDEO_FORMAT_P010_10BE, + 
pw.c.SPA_VIDEO_FORMAT_P010_10LE, + pw.c.SPA_VIDEO_FORMAT_IYU2, + pw.c.SPA_VIDEO_FORMAT_VYUY, + pw.c.SPA_VIDEO_FORMAT_GBRA, + pw.c.SPA_VIDEO_FORMAT_GBRA_10BE, + pw.c.SPA_VIDEO_FORMAT_GBRA_10LE, + pw.c.SPA_VIDEO_FORMAT_GBR_12BE, + pw.c.SPA_VIDEO_FORMAT_GBR_12LE, + pw.c.SPA_VIDEO_FORMAT_GBRA_12BE, + pw.c.SPA_VIDEO_FORMAT_GBRA_12LE, + pw.c.SPA_VIDEO_FORMAT_I420_12BE, + pw.c.SPA_VIDEO_FORMAT_I420_12LE, + pw.c.SPA_VIDEO_FORMAT_I422_12BE, + pw.c.SPA_VIDEO_FORMAT_I422_12LE, + pw.c.SPA_VIDEO_FORMAT_Y444_12BE, + pw.c.SPA_VIDEO_FORMAT_Y444_12LE, + pw.c.SPA_VIDEO_FORMAT_RGBA_F16, + pw.c.SPA_VIDEO_FORMAT_RGBA_F32, + pw.c.SPA_VIDEO_FORMAT_xRGB_210LE, + pw.c.SPA_VIDEO_FORMAT_xBGR_210LE, + pw.c.SPA_VIDEO_FORMAT_RGBx_102LE, + pw.c.SPA_VIDEO_FORMAT_BGRx_102LE, + pw.c.SPA_VIDEO_FORMAT_ARGB_210LE, + pw.c.SPA_VIDEO_FORMAT_ABGR_210LE, + pw.c.SPA_VIDEO_FORMAT_RGBA_102LE, + pw.c.SPA_VIDEO_FORMAT_BGRA_102LE, + pw.c.SPA_VIDEO_FORMAT_DSP_F32, +}; From 02f72a5c6157a690f0a13f635570067cba11a116 Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Mon, 8 Dec 2025 17:41:55 -0800 Subject: [PATCH 12/13] Links to issue upstreaming the `enumFromInt` fix --- src/wrap/format.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/wrap/format.zig b/src/wrap/format.zig index 58ccfd6..686457c 100644 --- a/src/wrap/format.zig +++ b/src/wrap/format.zig @@ -52,10 +52,10 @@ pub fn FmtFlags(T: type) type { }; } -// Forked from std to fix bug, will upstream. +// https://codeberg.org/ziglang/zig/pulls/30156 pub fn enumFromInt(comptime E: type, integer: anytype) ?E { const enum_info = @typeInfo(E).@"enum"; - if (comptime !enum_info.is_exhaustive) { + if (!enum_info.is_exhaustive) { if (std.math.cast(enum_info.tag_type, integer)) |tag| { return @enumFromInt(tag); } From 6696643ef8efa5462b82bf5a02729726858a0d9d Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Tue, 9 Dec 2025 15:29:58 -0800 Subject: [PATCH 13/13] Removes SDl example, cleans up README --- README.md | 7 +- build.zig | 48 +- build.zig.zon | 4 - .../{video_play_zin.zig => video_play.zig} | 6 +- src/examples/video_play_sdl.zig | 801 ------------------ 5 files changed, 11 insertions(+), 855 deletions(-) rename src/examples/{video_play_zin.zig => video_play.zig} (99%) delete mode 100644 src/examples/video_play_sdl.zig diff --git a/README.md b/README.md index c281fef..b83141b 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ Pipewire client library, statically linked, ported to the Zig build system. ## Motivation -I want a static executable that can play audio and turn screen contents into a video feed. The pipewire library makes heavy use of `dlopen` internally, so this is nontrivial. +I want a static executable that can play audio and turn screen contents into a video feed. The pipewire library makes heavy use of `dlopen` internally, so this is nontrivial. ## Strategy @@ -17,10 +17,9 @@ This project follows the pristine tarball approach. No modifications are require ## Status -You can run the `video-play` example with `zig build video-play` to see the current webcam feed. This currently works without pipewire accessing the dynamic linker, but the example executable isn't fully static since it relies on SDL. I plan to port the example away from SDL so that this can be changed. - -Only the pipewire plugins/modules required for this example are currently built. To use other parts of the pipewire API, you may need to add more symbols to [src/wrap/dlfcn.zig](src/wrap/dlfcn.zig) and regenerate `c.zig` if additional pipewire headers are required. 
+Only the pipewire plugins/modules required for the provided example are currently built. To use other parts of the pipewire API, you may need to add more symbols to [src/wrap/dlfcn.zig](src/wrap/dlfcn.zig) and regenerate `c.zig` if additional pipewire headers are required.
+
+You can run the `video-play` example with `zig build video-play` to see the current webcam feed. Use something like `-Dtarget=x86_64-linux-musl` if you want full static linking. Note that the video will be fairly low resolution: the example doesn't have a real graphics stack, so it renders its pixels one rectangle at a time. The example only supports the YUY2 video format.
 
 ## Usage
 
diff --git a/build.zig b/build.zig
index 9fb8350..331fe4e 100644
--- a/build.zig
+++ b/build.zig
@@ -306,47 +306,7 @@ pub fn build(b: *std.Build) void {
        },
    });
 
-    // Build the video play SDL example.
-    {
-        const video_play = b.addExecutable(.{
-            .name = "video-play-sdl",
-            .root_module = b.createModule(.{
-                .root_source_file = b.path("src/examples/video_play_sdl.zig"),
-                .target = target,
-                .optimize = optimize,
-            }),
-        });
-
-        const sdl = b.dependency("sdl", .{
-            .optimize = optimize,
-            .target = target,
-        });
-
-        if (use_zig_module) {
-            video_play.root_module.addImport("pipewire", libpipewire_zig);
-        } else {
-            video_play.linkLibrary(libpipewire);
-            video_play.root_module.addImport("pipewire", c);
-        }
-
-        video_play.root_module.addOptions("example_options", example_options);
-
-        video_play.linkLibrary(sdl.artifact("SDL3"));
-        b.installArtifact(video_play);
-
-        const run_step = b.step("video-play-sdl", "Run the video-play example");
-
-        const run_cmd = b.addRunArtifact(video_play);
-        run_step.dependOn(&run_cmd.step);
-
-        run_cmd.step.dependOn(b.getInstallStep());
-
-        if (b.args) |args| {
-            run_cmd.addArgs(args);
-        }
-    }
-
-    // Build the video play ZIN example.
+    // Build the video play example.
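+    // (With the SDL variant gone this is the only example, so the build and run steps go
+    // back to plain `video-play`.)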
{ const zin = b.dependency("zin", .{ .optimize = optimize, @@ -354,9 +314,9 @@ pub fn build(b: *std.Build) void { }).module("zin"); const video_play = b.addExecutable(.{ - .name = "video-play-zin", + .name = "video-play", .root_module = b.createModule(.{ - .root_source_file = b.path("src/examples/video_play_zin.zig"), + .root_source_file = b.path("src/examples/video_play.zig"), .target = target, .optimize = optimize, .imports = &.{ @@ -376,7 +336,7 @@ pub fn build(b: *std.Build) void { b.installArtifact(video_play); - const run_step = b.step("video-play-zin", "Run the video-play example"); + const run_step = b.step("video-play", "Run the video-play example"); const run_cmd = b.addRunArtifact(video_play); run_step.dependOn(&run_cmd.step); diff --git a/build.zig.zon b/build.zig.zon index 7f5698f..ed4c427 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -16,10 +16,6 @@ .hash = "N-V-__8AAKYw2AD301ZQsWszbYSWZQF5y-q4WXJif0UGRvFh", }, // Used by the examples - .sdl = .{ - .url = "git+https://github.com/allyourcodebase/SDL3#af9c32ce4824e0ad4337447434c40f2b672faf94", - .hash = "sdl-0.0.0-i4QD0btgqAABajEXrQnyZr1xVsk7LM48w2nBmuQ5gdfr", - }, .zin = .{ .url = "git+https://github.com/marler8997/zin#62706713b7089b4220d8e1eb49f8d776138a9058", .hash = "zin-0.0.0-W7QDx9BaAwC-H1uS9Cz68oMn9uh5fWuVis5b-eqhCeeq", diff --git a/src/examples/video_play_zin.zig b/src/examples/video_play.zig similarity index 99% rename from src/examples/video_play_zin.zig rename to src/examples/video_play.zig index f1c1413..d837d59 100644 --- a/src/examples/video_play_zin.zig +++ b/src/examples/video_play.zig @@ -53,6 +53,8 @@ const texel_width = 10; const max_buffers = 64; const default_video_width = 160; const default_video_height = 90; +const default_frame_rate = 60; +const max_frame_rate = 120; // Global state used by Zin and Pipewire const global = struct { @@ -182,9 +184,9 @@ pub fn main() !void { var choice_frame: pw.c.spa_pod_frame = undefined; check(pw.c.spa_pod_builder_prop(&b, pw.c.SPA_FORMAT_VIDEO_framerate, 0)); check(pw.c.spa_pod_builder_push_choice(&b, &choice_frame, pw.c.SPA_CHOICE_Range, 0)); - check(pw.c.spa_pod_builder_fraction(&b, 60, 1)); + check(pw.c.spa_pod_builder_fraction(&b, default_frame_rate, 1)); check(pw.c.spa_pod_builder_fraction(&b, 0, 1)); - check(pw.c.spa_pod_builder_fraction(&b, 120, 1)); + check(pw.c.spa_pod_builder_fraction(&b, max_frame_rate, 1)); assert(pw.c.spa_pod_builder_pop(&b, &choice_frame) != null); } diff --git a/src/examples/video_play_sdl.zig b/src/examples/video_play_sdl.zig deleted file mode 100644 index 44e9fac..0000000 --- a/src/examples/video_play_sdl.zig +++ /dev/null @@ -1,801 +0,0 @@ -//! `pipewire/src/examples/video-play.c` translated to Zig. - -const std = @import("std"); -const log = std.log; -const example_options = @import("example_options"); - -// Normal code wouldn't need this conditional, we're just demonstrating both the static library and -// the Zig module here. Prefer the Zig module when possible. -const pw = if (example_options.use_zig_module) - // Example of linking with the pipewire zig module - @import("pipewire") -else - // Example of linking with the pipewire static library. We're wrapping it like this just to - // match the Zig module so the rest of the code doesn't need conditionals. 
- struct { - pub const c = @import("pipewire"); - }; - -const sdl = @cImport({ - @cDefine("WIDTH", std.fmt.comptimePrint("{}", .{width})); - @cDefine("HEIGHT", std.fmt.comptimePrint("{}", .{height})); - @cDefine("RATE", std.fmt.comptimePrint("{}", .{rate})); - @cInclude("SDL3/SDL.h"); -}); - -const width = 1920; -const height = 1080; -const rate = 30; -const max_buffers = 64; - -pub const std_options: std.Options = .{ - .log_level = .info, -}; - -pub fn main() !void { - // If we're linking with the Zig module, set up logging. - var logger = if (example_options.use_zig_module) pw.Logger.init() else {}; - if (example_options.use_zig_module) { - pw.c.pw_log_set(&logger); - pw.c.pw_log_set_level(pw.Logger.default_level); - } - - pw.c.pw_init(0, null); - defer pw.c.pw_deinit(); - - var data: Data = .{}; - - // Create a main loop - data.loop = pw.c.pw_main_loop_new(null).?; - defer pw.c.pw_main_loop_destroy(data.loop); - - _ = pw.c.pw_loop_add_signal(pw.c.pw_main_loop_get_loop(data.loop), pw.c.SIGINT, &doQuit, &data); - _ = pw.c.pw_loop_add_signal(pw.c.pw_main_loop_get_loop(data.loop), pw.c.SIGTERM, &doQuit, &data); - - // create a simple stream, the simple stream manages to core and remote objects for you if you - // don't need to deal with them - // - // If you plan to autoconnect your stream, you need to provide at least media, category and role - // properties - // - // Pass your events and a user_data pointer as the last arguments. This will inform you about - // the stream state. The most important event you need to listen to is the process event where - // you need to consume the data provided to you. - const props = pw.c.pw_properties_new( - pw.c.PW_KEY_MEDIA_TYPE, - "Video", - pw.c.PW_KEY_MEDIA_CATEGORY, - "Capture", - pw.c.PW_KEY_MEDIA_ROLE, - "Camera", - @as(?*anyopaque, null), - ).?; - - var args: std.process.ArgIterator = .init(); - _ = args.next(); - if (args.next()) |arg| { - _ = pw.c.pw_properties_set(props, pw.c.PW_KEY_TARGET_OBJECT, arg); - } - - data.stream = pw.c.pw_stream_new_simple( - pw.c.pw_main_loop_get_loop(data.loop), - "video-play", - props, - &.{ - .version = pw.c.PW_VERSION_STREAM_EVENTS, - .state_changed = &onStreamStateChanged, - .io_changed = &onStreamIoChanged, - .param_changed = &onStreamParamChanged, - .process = &onProcess, - }, - &data, - ).?; - defer pw.c.pw_stream_destroy(data.stream); - - if (!sdl.SDL_Init(sdl.SDL_INIT_VIDEO)) { - log.err("can't initialize SDL: {s}", .{sdl.SDL_GetError()}); - std.process.exit(1); - } - - if (!sdl.SDL_CreateWindowAndRenderer( - "Demo", - width, - height, - sdl.SDL_WINDOW_RESIZABLE, - &data.window, - &data.renderer, - )) { - log.err("can't create window: {s}", .{sdl.SDL_GetError()}); - std.process.exit(1); - } - defer { - if (data.texture) |texture| sdl.SDL_DestroyTexture(texture); - if (data.cursor) |cursor| sdl.SDL_DestroyTexture(cursor); - sdl.SDL_DestroyRenderer(data.renderer); - sdl.SDL_DestroyWindow(data.window); - } - - var buffer: [1024]u8 align(@alignOf(u32)) = undefined; - var b = std.mem.zeroInit(pw.c.spa_pod_builder, .{ - .data = &buffer, - .size = buffer.len, - }); - - // build the extra parameters to connect with. To connect, we can provide a list of supported - // formats. We use a builder that writes the param object to the stack. 
- var params_buf: [3]?*const pw.c.spa_pod = undefined; - var params: std.ArrayList(?*const pw.c.spa_pod) = .initBuffer(¶ms_buf); - buildFormat(&data, &b, ¶ms); - - { - var f: pw.c.spa_pod_frame = undefined; - // send a tag, input tags travel upstream - pw.c.spa_tag_build_start(&b, &f, pw.c.SPA_PARAM_Tag, pw.c.SPA_DIRECTION_INPUT); - const items: [1]pw.c.spa_dict_item = .{ - pw.c.SPA_DICT_ITEM_INIT("my-tag-other-key", "my-special-other-tag-value"), - }; - pw.c.spa_tag_build_add_dict(&b, &pw.c.SPA_DICT_INIT(items, 1)); - params.appendBounded(pw.c.spa_tag_build_end(&b, &f)) catch @panic("OOB"); - } - - // now connect the stream, we need a direction (input/output), - // an optional target node to connect to, some flags and parameters - // - const res = pw.c.pw_stream_connect( - data.stream, - pw.c.PW_DIRECTION_INPUT, - pw.c.PW_ID_ANY, - pw.c.PW_STREAM_FLAG_AUTOCONNECT | // try to automatically connect this stream - pw.c.PW_STREAM_FLAG_INACTIVE | // we will activate ourselves - pw.c.PW_STREAM_FLAG_MAP_BUFFERS, // mmap the buffer data for us - // extra parameters, see above - params.items.ptr, - @intCast(params.items.len), - ); - if (res < 0) { - log.err("can't connect: {s}", .{pw.c.spa_strerror(res)}); - std.process.exit(1); - } - - // /do things until we quit the mainloop - _ = pw.c.pw_main_loop_run(data.loop); -} - -const Pixel = extern struct { - r: f32, - g: f32, - b: f32, - a: f32, -}; - -const Data = struct { - renderer: ?*sdl.SDL_Renderer = null, - window: ?*sdl.SDL_Window = null, - texture: ?*sdl.SDL_Texture = null, - cursor: ?*sdl.SDL_Texture = null, - - loop: ?*pw.c.pw_main_loop = null, - stream: ?*pw.c.pw_stream = null, - - position: ?*pw.c.spa_io_position = null, - - format: pw.c.spa_video_info = .{}, - stride: i32 = 0, - size: pw.c.spa_rectangle = .{}, - - rect: sdl.SDL_FRect = .{}, - cursor_rect: sdl.SDL_FRect = .{}, - is_yuv: bool = false, -}; - -fn doQuit(userdata: ?*anyopaque, signal_number: c_int) callconv(.c) void { - _ = signal_number; - const data: *Data = @ptrCast(@alignCast(userdata)); - _ = pw.c.pw_main_loop_quit(data.loop); -} - -// our data processing function is in general: -// ``` -// struct pw_buffer *b; -// b = pw_stream_dequeue_buffer(stream); -// -// .. do stuff with buffer ... 
-// -// pw_stream_queue_buffer(stream, b); -// ``` -fn onProcess(userdata: ?*anyopaque) callconv(.c) void { - const data: *Data = @ptrCast(@alignCast(userdata)); - const stream = data.stream; - - var render_cursor = false; - - var maybe_buffer: ?*pw.c.pw_buffer = null; - while (true) { - const t = pw.c.pw_stream_dequeue_buffer(stream) orelse break; - if (maybe_buffer) |b| _ = pw.c.pw_stream_queue_buffer(stream, b); - maybe_buffer = t; - } - const b = maybe_buffer orelse { - log.warn("out of buffers", .{}); - return; - }; - defer _ = pw.c.pw_stream_queue_buffer(stream, b); - - const buf: *pw.c.spa_buffer = b.buffer; - - log.debug("new buffer {*}", .{buf}); - - handleEvents(data); - - const sdata = buf.datas[0].data orelse return; - - const maybe_h: ?*pw.c.spa_meta_header = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_Header, @sizeOf(pw.c.spa_meta_header)))); - if (maybe_h) |h| { - const now = pw.c.pw_stream_get_nsec(stream); - log.debug("now:{} pts:{} diff:{}", .{ now, h.pts, now - @as(u64, @intCast(h.pts)) }); - } - - // get the videocrop metadata if any - const maybe_mc: ?*pw.c.spa_meta_region = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_VideoCrop, @sizeOf(pw.c.spa_meta_region)))); - if (maybe_mc) |mc| { - if (pw.c.spa_meta_region_is_valid(mc)) { - data.rect.x = @floatFromInt(mc.region.position.x); - data.rect.y = @floatFromInt(mc.region.position.y); - data.rect.w = @floatFromInt(mc.region.size.width); - data.rect.h = @floatFromInt(mc.region.size.height); - } - } - // get cursor metadata - const maybe_mcs: ?*pw.c.spa_meta_cursor = @ptrCast(@alignCast(pw.c.spa_buffer_find_meta_data(buf, pw.c.SPA_META_Cursor, @sizeOf(pw.c.spa_meta_cursor)))); - if (maybe_mcs) |mcs| { - if (pw.c.spa_meta_cursor_is_valid(mcs)) { - data.cursor_rect.x = @floatFromInt(mcs.position.x); - data.cursor_rect.y = @floatFromInt(mcs.position.y); - - const mb: *pw.c.spa_meta_bitmap = @ptrFromInt(@intFromPtr(mcs) + mcs.bitmap_offset); - data.cursor_rect.w = @floatFromInt(mb.size.width); - data.cursor_rect.h = @floatFromInt(mb.size.height); - - if (data.cursor == null) { - data.cursor = sdl.SDL_CreateTexture( - data.renderer, - idToSdlFormat(mb.format), - sdl.SDL_TEXTUREACCESS_STREAMING, - @intCast(mb.size.width), - @intCast(mb.size.height), - ); - _ = sdl.SDL_SetTextureBlendMode(data.cursor, sdl.SDL_BLENDMODE_BLEND); - } - - var cdata: [*c]u8 = undefined; - var cstride: c_int = undefined; - if (!sdl.SDL_LockTexture(data.cursor, null, &cdata, &cstride)) { - log.err("Couldn't lock cursor texture: {s}", .{sdl.SDL_GetError()}); - return; - } - defer sdl.SDL_UnlockTexture(data.cursor); - - // copy the cursor bitmap into the texture - var src: [*]u8 = @ptrFromInt(@intFromPtr(mb) + mb.offset); - var dst = cdata; - const ostride: usize = @intCast(@min(cstride, mb.stride)); - - for (0..mb.size.height) |_| { - @memcpy(dst[0..ostride], src[0..ostride]); - dst += @intCast(cstride); - src += @intCast(mb.stride); - } - - render_cursor = true; - } - } - - // copy video image in texture - if (data.is_yuv) { - var datas: [4]?[*]u8 = undefined; - const sstride = data.stride; - if (buf.n_datas == 1) { - _ = sdl.SDL_UpdateTexture(data.texture, null, sdata, sstride); - } else { - datas[0] = @ptrCast(sdata); - datas[1] = @ptrCast(buf.datas[1].data); - datas[2] = @ptrCast(buf.datas[2].data); - _ = sdl.SDL_UpdateYUVTexture( - data.texture, - null, - datas[0], - sstride, - datas[1], - @divExact(sstride, 2), - datas[2], - @divExact(sstride, 2), - ); - } - } else { - var dstride: c_int = 
undefined; - var ddata: ?*anyopaque = undefined; - if (!sdl.SDL_LockTexture(data.texture, null, &ddata, &dstride)) { - log.err("Couldn't lock texture: {s}", .{sdl.SDL_GetError()}); - } - defer sdl.SDL_UnlockTexture(data.texture); - - var sstride: u32 = @intCast(buf.datas[0].chunk.*.stride); - if (sstride == 0) sstride = buf.datas[0].chunk.*.size / data.size.height; - const ostride = @min(sstride, dstride); - - var src: [*]u8 = @ptrCast(sdata); - var dst: [*]u8 = @ptrCast(ddata); - - if (data.format.media_subtype == pw.c.SPA_MEDIA_SUBTYPE_dsp) { - for (0..data.size.height) |_| { - const pixel: [*]Pixel = @ptrCast(@alignCast(src)); - for (0..data.size.width) |j| { - dst[j * 4 + 0] = @intFromFloat(std.math.clamp(pixel[j].r * 255.0, 0, 255)); - dst[j * 4 + 1] = @intFromFloat(std.math.clamp(pixel[j].g * 255.0, 0, 255)); - dst[j * 4 + 2] = @intFromFloat(std.math.clamp(pixel[j].b * 255.0, 0, 255)); - dst[j * 4 + 3] = @intFromFloat(std.math.clamp(pixel[j].a * 255.0, 0, 255)); - } - src += sstride; - dst += @intCast(dstride); - } - } else { - for (0..data.size.height) |_| { - @memcpy(dst[0..@intCast(ostride)], src[0..@intCast(ostride)]); - src += sstride; - dst += @intCast(dstride); - } - } - } - - _ = sdl.SDL_RenderClear(data.renderer); - // now render the video and then the cursor if any - _ = sdl.SDL_RenderTexture(data.renderer, data.texture, &data.rect, null); - if (render_cursor) _ = sdl.SDL_RenderTexture( - data.renderer, - data.cursor, - null, - &data.cursor_rect, - ); - _ = sdl.SDL_RenderPresent(data.renderer); -} - -fn handleEvents(data: *Data) void { - var event: sdl.SDL_Event = undefined; - while (sdl.SDL_PollEvent(&event)) { - switch (event.type) { - sdl.SDL_EVENT_QUIT => { - _ = pw.c.pw_main_loop_quit(data.loop); - }, - else => {}, - } - } -} - -fn onStreamStateChanged( - userdata: ?*anyopaque, - old: pw.c.pw_stream_state, - state: pw.c.pw_stream_state, - err: [*c]const u8, -) callconv(.c) void { - _ = old; - _ = err; - const data: *Data = @ptrCast(@alignCast(userdata)); - log.info("stream state: \"{s}\"", .{pw.c.pw_stream_state_as_string(state)}); - switch (state) { - pw.c.PW_STREAM_STATE_UNCONNECTED => _ = pw.c.pw_main_loop_quit(data.loop), - // because we started inactive, activate ourselves now - pw.c.PW_STREAM_STATE_PAUSED => _ = pw.c.pw_stream_set_active(data.stream, true), - else => {}, - } -} - -fn onStreamIoChanged(userdata: ?*anyopaque, id: u32, area: ?*anyopaque, size: u32) callconv(.c) void { - _ = size; - const data: *Data = @ptrCast(@alignCast(userdata)); - if (id == pw.c.SPA_IO_Position) { - data.position = @ptrCast(@alignCast(area)); - } -} - -// Be notified when the stream param changes. We're only looking at the -// format changes. -// -// We are now supposed to call pw_stream_finish_format() with success or -// failure, depending on if we can support the format. Because we gave -// a list of supported formats, this should be ok. -// -// As part of pw_stream_finish_format() we can provide parameters that -// will control the buffer memory allocation. This includes the metadata -// that we would like on our buffer, the size, alignment, etp. 
-fn onStreamParamChanged(userdata: ?*anyopaque, id: u32, param: [*c]const pw.c.spa_pod) callconv(.c) void { - const data: *Data = @ptrCast(@alignCast(userdata)); - const stream = data.stream; - var params_buffer: [1024]u8 align(@alignOf(u32)) = undefined; - var b: pw.c.spa_pod_builder = .{ - .data = ¶ms_buffer, - .size = params_buffer.len, - ._padding = 0, - .state = .{ .offset = 0, .flags = 0, .frame = null }, - .callbacks = .{ .funcs = null, .data = null }, - }; - - if (param != null and id == pw.c.SPA_PARAM_Tag) { - log.err("invalid pod", .{}); - return; - } - if (param != null and id == pw.c.SPA_PARAM_Latency) { - var info: pw.c.spa_latency_info = undefined; - if (pw.c.spa_latency_parse(param, &info) >= 0) { - log.info("got latency: {}", .{@divTrunc((info.min_ns + info.max_ns), 2)}); - } - return; - } - // NULL means to clear the format - if (param == null or id != pw.c.SPA_PARAM_Format) return; - - log.info("got format:", .{}); - _ = pw.c.spa_debug_format(2, null, param); - - if (pw.c.spa_format_parse(param, &data.format.media_type, &data.format.media_subtype) < 0) { - return; - } - - if (data.format.media_type != pw.c.SPA_MEDIA_TYPE_video) return; - - const sdl_format, const mult: i32 = switch (data.format.media_subtype) { - pw.c.SPA_MEDIA_SUBTYPE_raw => b: { - // call a helper function to parse the format for us. - _ = pw.c.spa_format_video_raw_parse(param, &data.format.info.raw); - data.size = pw.c.SPA_RECTANGLE(data.format.info.raw.size.width, data.format.info.raw.size.height); - break :b .{ idToSdlFormat(data.format.info.raw.format), 1 }; - }, - pw.c.SPA_MEDIA_SUBTYPE_dsp => b: { - _ = pw.c.spa_format_video_dsp_parse(param, &data.format.info.dsp); - if (data.format.info.dsp.format != pw.c.SPA_VIDEO_FORMAT_DSP_F32) return; - data.size = pw.c.SPA_RECTANGLE(data.position.?.video.size.width, data.position.?.video.size.height); - break :b .{ sdl.SDL_PIXELFORMAT_RGBA32, 4 }; - }, - else => .{ sdl.SDL_PIXELFORMAT_UNKNOWN, 0 }, - }; - - if (sdl_format == sdl.SDL_PIXELFORMAT_UNKNOWN) { - _ = pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "unknown pixel format"); - return; - } - if (data.size.width == 0 or data.size.height == 0) { - _ = pw.c.pw_stream_set_error(stream, -pw.c.EINVAL, "invalid size"); - return; - } - - data.texture = sdl.SDL_CreateTexture( - data.renderer, - sdl_format, - sdl.SDL_TEXTUREACCESS_STREAMING, - @intCast(data.size.width), - @intCast(data.size.height), - ); - var d: ?*anyopaque = null; - const size: i32, const blocks: i32 = switch (sdl_format) { - sdl.SDL_PIXELFORMAT_YV12, sdl.SDL_PIXELFORMAT_IYUV => b: { - data.stride = @intCast(data.size.width); - data.is_yuv = true; - break :b .{ - @divExact((data.stride * @as(i32, @intCast(data.size.height))) * 3, 2), - 3, - }; - }, - sdl.SDL_PIXELFORMAT_YUY2 => b: { - data.is_yuv = true; - data.stride = @intCast(data.size.width * 2); - break :b .{ - data.stride * @as(i32, @intCast(data.size.height)), - 1, - }; - }, - else => b: { - if (!sdl.SDL_LockTexture(data.texture, null, &d, &data.stride)) { - log.err("Couldn't lock texture: {s}", .{sdl.SDL_GetError()}); - data.stride = @intCast(data.size.width * 2); - } else { - sdl.SDL_UnlockTexture(data.texture); - } - break :b .{ - data.stride * @as(i32, @intCast(data.size.height)), - 1, - }; - }, - }; - - data.rect.x = 0; - data.rect.y = 0; - data.rect.w = @floatFromInt(data.size.width); - data.rect.h = @floatFromInt(data.size.height); - - // a SPA_TYPE_OBJECT_ParamBuffers object defines the acceptable size, - // number, stride etc of the buffers - var params_buf: [5]?*const 
pw.c.spa_pod = undefined; - var params: std.ArrayList(?*const pw.c.spa_pod) = .initBuffer(¶ms_buf); - var f: pw.c.spa_pod_frame = undefined; - - _ = pw.c.spa_pod_builder_push_object( - &b, - &f, - pw.c.SPA_TYPE_OBJECT_ParamBuffers, - pw.c.SPA_PARAM_Buffers, - ); - _ = pw.c.spa_pod_builder_add( - &b, - - pw.c.SPA_PARAM_BUFFERS_buffers, - "?ri", - @as(c_int, 3), - @as(c_int, 8), - @as(c_int, 2), - @as(c_int, max_buffers), - - pw.c.SPA_PARAM_BUFFERS_blocks, - "i", - blocks, - - pw.c.SPA_PARAM_BUFFERS_size, - "i", - size * mult, - - pw.c.SPA_PARAM_BUFFERS_stride, - "i", - data.stride * mult, - - pw.c.SPA_PARAM_BUFFERS_dataType, - "?fi", - @as(c_int, 1), - @as(c_int, 1 << pw.c.SPA_DATA_MemPtr), - - @as(c_int, 0), - ); - params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB"); - - // a header metadata with timing information - _ = pw.c.spa_pod_builder_push_object( - &b, - &f, - pw.c.SPA_TYPE_OBJECT_ParamMeta, - pw.c.SPA_PARAM_Meta, - ); - _ = pw.c.spa_pod_builder_add( - &b, - - pw.c.SPA_PARAM_META_type, - "I", - pw.c.SPA_META_Header, - - pw.c.SPA_PARAM_META_size, - "i", - @as(usize, @sizeOf(pw.c.spa_meta_header)), - - @as(c_int, 0), - ); - params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB"); - - // video cropping information - _ = pw.c.spa_pod_builder_push_object( - &b, - &f, - pw.c.SPA_TYPE_OBJECT_ParamMeta, - pw.c.SPA_PARAM_Meta, - ); - _ = pw.c.spa_pod_builder_add( - &b, - - pw.c.SPA_PARAM_META_type, - "I", - pw.c.SPA_META_VideoCrop, - - pw.c.SPA_PARAM_META_size, - "i", - @as(usize, @sizeOf(pw.c.spa_meta_region)), - - @as(c_int, 0), - ); - params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB"); - - // cursor information - _ = pw.c.spa_pod_builder_push_object( - &b, - &f, - pw.c.SPA_TYPE_OBJECT_ParamMeta, - pw.c.SPA_PARAM_Meta, - ); - _ = pw.c.spa_pod_builder_add( - &b, - - pw.c.SPA_PARAM_META_type, - "I", - pw.c.SPA_META_Cursor, - - pw.c.SPA_PARAM_META_size, - "?ri", - @as(c_int, 3), - cursorMetaSize(64, 64), - cursorMetaSize(1, 1), - cursorMetaSize(256, 256), - - @as(c_int, 0), - ); - params.appendBounded(@ptrCast(@alignCast(pw.c.spa_pod_builder_pop(&b, &f)))) catch @panic("OOB"); - - // we are done - _ = pw.c.pw_stream_update_params(stream, params.items.ptr, @intCast(params.items.len)); -} - -fn cursorMetaSize(w: usize, h: usize) usize { - return @sizeOf(pw.c.spa_meta_cursor) + @sizeOf(pw.c.spa_meta_bitmap) + w * h * 4; -} - -fn buildFormat(data: *Data, b: *pw.c.spa_pod_builder, params: *std.ArrayList(?*const pw.c.spa_pod)) void { - { - const format = sdlBuildFormats(data.renderer.?, b); - log.info("supported SDL formats:", .{}); - _ = pw.c.spa_debug_format(2, null, format); - params.appendBounded(format) catch @panic("OOB"); - } - - { - var f: pw.c.spa_pod_frame = undefined; - _ = pw.c.spa_pod_builder_push_object(b, &f, pw.c.SPA_TYPE_OBJECT_Format, pw.c.SPA_PARAM_EnumFormat); - _ = pw.c.spa_pod_builder_add( - b, - pw.c.SPA_FORMAT_mediaType, - "I", - pw.c.SPA_MEDIA_TYPE_video, - - pw.c.SPA_FORMAT_mediaSubtype, - "I", - pw.c.SPA_MEDIA_SUBTYPE_dsp, - - pw.c.SPA_FORMAT_VIDEO_format, - - "I", - pw.c.SPA_VIDEO_FORMAT_DSP_F32, - - @as(c_int, 0), - ); - const format: *const pw.c.spa_pod = @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(b, &f))); - _ = pw.c.spa_debug_format(2, null, format); - params.appendBounded(format) catch @panic("OOB"); - } -} - -const FormatPair = struct { - format: u32, - id: u32, -}; - -const sdl_video_formats = [_]FormatPair{ - .{ .format = 
sdl.SDL_PIXELFORMAT_UNKNOWN, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX1LSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_UNKNOWN, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX1LSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX1MSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX4LSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX4MSB, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_INDEX8, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGB332, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_XRGB4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_XRGB1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_XBGR1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_ARGB4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGBA4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_ABGR4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_BGRA4444, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_ARGB1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGBA5551, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_ABGR1555, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_BGRA5551, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGB565, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_BGR565, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGB24, .id = pw.c.SPA_VIDEO_FORMAT_BGR }, - .{ .format = sdl.SDL_PIXELFORMAT_XRGB8888, .id = pw.c.SPA_VIDEO_FORMAT_BGR }, - .{ .format = sdl.SDL_PIXELFORMAT_RGBX8888, .id = pw.c.SPA_VIDEO_FORMAT_xBGR }, - .{ .format = sdl.SDL_PIXELFORMAT_BGR24, .id = pw.c.SPA_VIDEO_FORMAT_RGB }, - .{ .format = sdl.SDL_PIXELFORMAT_XBGR8888, .id = pw.c.SPA_VIDEO_FORMAT_RGB }, - .{ .format = sdl.SDL_PIXELFORMAT_BGRX8888, .id = pw.c.SPA_VIDEO_FORMAT_xRGB }, - .{ .format = sdl.SDL_PIXELFORMAT_ARGB2101010, .id = pw.c.SPA_VIDEO_FORMAT_UNKNOWN }, - .{ .format = sdl.SDL_PIXELFORMAT_RGBA8888, .id = pw.c.SPA_VIDEO_FORMAT_ABGR }, - .{ .format = sdl.SDL_PIXELFORMAT_ARGB8888, .id = pw.c.SPA_VIDEO_FORMAT_BGRA }, - .{ .format = sdl.SDL_PIXELFORMAT_BGRA8888, .id = pw.c.SPA_VIDEO_FORMAT_ARGB }, - .{ .format = sdl.SDL_PIXELFORMAT_ABGR8888, .id = pw.c.SPA_VIDEO_FORMAT_RGBA }, - .{ .format = sdl.SDL_PIXELFORMAT_YV12, .id = pw.c.SPA_VIDEO_FORMAT_YV12 }, - .{ .format = sdl.SDL_PIXELFORMAT_IYUV, .id = pw.c.SPA_VIDEO_FORMAT_I420 }, - .{ .format = sdl.SDL_PIXELFORMAT_YUY2, .id = pw.c.SPA_VIDEO_FORMAT_YUY2 }, - .{ .format = sdl.SDL_PIXELFORMAT_UYVY, .id = pw.c.SPA_VIDEO_FORMAT_UYVY }, - .{ .format = sdl.SDL_PIXELFORMAT_YVYU, .id = pw.c.SPA_VIDEO_FORMAT_YVYU }, - .{ .format = sdl.SDL_PIXELFORMAT_NV12, .id = pw.c.SPA_VIDEO_FORMAT_NV12 }, - .{ .format = sdl.SDL_PIXELFORMAT_NV21, .id = pw.c.SPA_VIDEO_FORMAT_NV21 }, -}; - -fn sdlFormatToId(format: u32) u32 { - for (sdl_video_formats) |f| { - if (f.format == format) { - return f.id; - } - } - return pw.c.SPA_VIDEO_FORMAT_UNKNOWN; -} - -fn idToSdlFormat(id: u32) u32 { - for (sdl_video_formats) |f| { - if (f.id == id) { - return f.format; - } - } - return 
sdl.SDL_PIXELFORMAT_UNKNOWN; -} - -fn sdlBuildFormats(renderer: *sdl.SDL_Renderer, b: *pw.c.spa_pod_builder) *pw.c.spa_pod { - var f: [2]pw.c.spa_pod_frame = undefined; - - // make an object of type SPA_TYPE_OBJECT_Format and id SPA_PARAM_EnumFormat. The object type is - // important because it defines the properties that are acceptable. The id gives more context - // about what the object is meant to contain. In this case we enumerate supported formats. - _ = pw.c.spa_pod_builder_push_object(b, &f[0], pw.c.SPA_TYPE_OBJECT_Format, pw.c.SPA_PARAM_EnumFormat); - // add media type and media subtype properties - _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_FORMAT_mediaType, 0); - _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_MEDIA_TYPE_video); - _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_FORMAT_mediaSubtype, 0); - _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_MEDIA_SUBTYPE_raw); - - // build an enumeration of formats - _ = pw.c.spa_pod_builder_prop(b, pw.c.SPA_FORMAT_VIDEO_format, 0); - _ = pw.c.spa_pod_builder_push_choice(b, &f[1], pw.c.SPA_CHOICE_Enum, 0); - - const props: sdl.SDL_PropertiesID = sdl.SDL_GetRendererProperties(renderer); - - const texture_formats: [*]sdl.SDL_PixelFormat = @ptrCast(@alignCast(sdl.SDL_GetPointerProperty( - props, - sdl.SDL_PROP_RENDERER_TEXTURE_FORMATS_POINTER, - null, - ))); - - // first the formats supported by the textures - var i: u32 = 0; - var ci: u32 = 0; - while (texture_formats[i] != sdl.SDL_PIXELFORMAT_UNKNOWN) : (i += 1) { - const id: u32 = sdlFormatToId(texture_formats[i]); - if (id == 0) continue; - if (ci == 0) _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_VIDEO_FORMAT_UNKNOWN); - ci += 1; - _ = pw.c.spa_pod_builder_id(b, id); - } - // then all the other ones SDL can convert from/to - for (sdl_video_formats) |format| { - const id: u32 = format.id; - if (id != pw.c.SPA_VIDEO_FORMAT_UNKNOWN) { - _ = pw.c.spa_pod_builder_id(b, id); - } - } - _ = pw.c.spa_pod_builder_id(b, pw.c.SPA_VIDEO_FORMAT_RGBA_F32); - _ = pw.c.spa_pod_builder_pop(b, &f[1]); - // add size and framerate ranges - const max_texture_size: u32 = @intCast(sdl.SDL_GetNumberProperty( - props, - sdl.SDL_PROP_RENDERER_MAX_TEXTURE_SIZE_NUMBER, - 0, - )); - _ = pw.c.spa_pod_builder_add( - b, - pw.c.SPA_FORMAT_VIDEO_size, - pw.c.SPA_POD_CHOICE_RANGE_Rectangle( - &pw.c.SPA_RECTANGLE(width, height), - &pw.c.SPA_RECTANGLE(1, 1), - &pw.c.SPA_RECTANGLE(max_texture_size, max_texture_size), - ), - pw.c.SPA_FORMAT_VIDEO_framerate, - pw.c.SPA_POD_CHOICE_RANGE_Fraction( - &pw.c.SPA_FRACTION(rate, 1), - &pw.c.SPA_FRACTION(0, 1), - &pw.c.SPA_FRACTION(30, 1), - ), - @as(c_int, 0), - ); - return @ptrCast(@alignCast(pw.c.spa_pod_builder_pop(b, &f[0]))); -}
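
Editorial appendix: a self-contained sanity check for the `yuyvToRgb` conversion introduced in patch 10. The function body below repeats the patch's integer BT.601 video-range approximation verbatim; the surrounding types, file, and test are illustrative and not part of the series, and the expected values were worked out by hand for video-range black (Y=16) and white (Y=235).

```zig
const std = @import("std");

const Rgb8 = struct { r: u8, g: u8, b: u8 };

fn clampUnorm(val: i32) u8 {
    return @intCast(std.math.clamp(val, 0, 255));
}

/// Decode one YUYV macropixel (Y0 U Y1 V) into two RGB pixels using the
/// same integer BT.601 approximation as the example.
fn yuyvToRgb(yuyv: [4]u8) [2]Rgb8 {
    const d = @as(i32, yuyv[1]) - 128;
    const e = @as(i32, yuyv[3]) - 128;
    const c0 = @as(i32, yuyv[0]) - 16;
    const c1 = @as(i32, yuyv[2]) - 16;
    return .{
        .{
            .r = clampUnorm(((298 * c0) + (409 * e) + 128) >> 8),
            .g = clampUnorm(((298 * c0) - (100 * d) - (208 * e) + 128) >> 8),
            .b = clampUnorm(((298 * c0) + (516 * d) + 128) >> 8),
        },
        .{
            .r = clampUnorm(((298 * c1) + (409 * e) + 128) >> 8),
            .g = clampUnorm(((298 * c1) - (100 * d) - (208 * e) + 128) >> 8),
            .b = clampUnorm(((298 * c1) + (516 * d) + 128) >> 8),
        },
    };
}

test "video-range black and white decode to RGB black and white" {
    // Video-range black: Y=16, U=V=128 (neutral chroma).
    const black = yuyvToRgb(.{ 16, 128, 16, 128 });
    try std.testing.expectEqual(Rgb8{ .r = 0, .g = 0, .b = 0 }, black[0]);
    try std.testing.expectEqual(black[0], black[1]);
    // Video-range white: Y=235, U=V=128.
    const white = yuyvToRgb(.{ 235, 128, 235, 128 });
    try std.testing.expectEqual(Rgb8{ .r = 255, .g = 255, .b = 255 }, white[0]);
    try std.testing.expectEqual(white[0], white[1]);
}
```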