Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion examples/sdk-core/video/video-editing.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,11 @@ run(async () => {
},
});

// Option 2: Use a reference image to guide the edit
// Option 2: Use a reference image to guide the edit (with empty prompt)
// const referenceImage = fs.readFileSync("reference.png");
// const result = await client.queue.submitAndPoll({
// model: models.video("lucy-2-v2v"),
// prompt: "",
// reference_image: new Blob([referenceImage]),
// data: new Blob([inputVideo]),
// onStatusChange: (job) => {
Expand Down
8 changes: 3 additions & 5 deletions packages/sdk/src/process/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -95,24 +95,22 @@ export interface VideoEditInputs {

/**
* Model-specific input documentation for lucy-2-v2v.
* Requires at least one of prompt or reference_image. Both can be provided together.
 * Requires a prompt (which can be an empty string). An optional reference_image can also be provided.
*/
export interface VideoEdit2Inputs {
/**
* Text description to use for the video editing.
* At least one of prompt or reference_image must be provided.
* Text prompt for the video editing. Send an empty string if you want no text prompt.
*
 * See our [Prompt Engineering](https://docs.platform.decart.ai/models/video/video-generation#prompt-engineering) guide for how to write prompts for Decart video models effectively.
*/
prompt?: string;
prompt: string;
/**
* Video file to process.
* Can be a File, Blob, ReadableStream, URL, or string URL.
*/
data: FileInput;
/**
* Optional reference image to guide what to add to the video.
* At least one of prompt or reference_image must be provided.
* Can be a File, Blob, ReadableStream, URL, or string URL.
*/
reference_image?: FileInput;
Expand Down
27 changes: 13 additions & 14 deletions packages/sdk/src/shared/model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -195,20 +195,19 @@ export const modelInputSchemas = {
.refine((data) => !(data.reference_image !== undefined && data.enhance_prompt !== undefined), {
message: "'enhance_prompt' is only valid when using 'prompt', not 'reference_image'",
}),
"lucy-2-v2v": z
.object({
prompt: z.string().min(1).max(1000).optional().describe("Text prompt for the video editing"),
reference_image: fileInputSchema
.optional()
.describe("Optional reference image to guide the edit (File, Blob, ReadableStream, URL, or string URL)"),
data: fileInputSchema.describe("Video file to process (File, Blob, ReadableStream, URL, or string URL)"),
seed: z.number().optional().describe("The seed to use for the generation"),
resolution: proV2vResolutionSchema,
enhance_prompt: z.boolean().optional().describe("Whether to enhance the prompt"),
})
.refine((data) => data.prompt !== undefined || data.reference_image !== undefined, {
message: "Must provide at least one of 'prompt' or 'reference_image'",
}),
"lucy-2-v2v": z.object({
prompt: z
.string()
.max(1000)
.describe("Text prompt for the video editing. Send an empty string if you want no text prompt."),
reference_image: fileInputSchema
.optional()
.describe("Optional reference image to guide the edit (File, Blob, ReadableStream, URL, or string URL)"),
data: fileInputSchema.describe("Video file to process (File, Blob, ReadableStream, URL, or string URL)"),
seed: z.number().optional().describe("The seed to use for the generation"),
resolution: proV2vResolutionSchema,
enhance_prompt: z.boolean().optional().describe("Whether to enhance the prompt"),
}),
} as const;

export type ModelInputSchemas = typeof modelInputSchemas;
Expand Down
1 change: 1 addition & 0 deletions packages/sdk/tests/e2e.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,7 @@ describe.concurrent("E2E Tests", { timeout: TIMEOUT, retry: 2 }, () => {
it("lucy-2-v2v: video editing (reference_image)", async () => {
const result = await client.queue.submitAndPoll({
model: models.video("lucy-2-v2v"),
prompt: "",
reference_image: imageBlob,
data: videoBlob,
seed: 42,
Expand Down
16 changes: 3 additions & 13 deletions packages/sdk/tests/unit.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -520,7 +520,7 @@ describe("Queue API", () => {
expect(dataFile).toBeInstanceOf(File);
});

it("submits lucy-2-v2v job with only reference_image (no prompt)", async () => {
it("submits lucy-2-v2v job with empty prompt and reference_image", async () => {
server.use(
http.post("http://localhost/v1/jobs/lucy-2-v2v", async ({ request }) => {
lastRequest = request;
Expand All @@ -537,13 +537,14 @@ describe("Queue API", () => {

const result = await decart.queue.submit({
model: models.video("lucy-2-v2v"),
prompt: "",
data: testVideoBlob,
reference_image: testImageBlob,
});

expect(result.job_id).toBe("job_lucy2_v2v_refonly");
expect(result.status).toBe("pending");
expect(lastFormData?.get("prompt")).toBeNull();
expect(lastFormData?.get("prompt")).toBe("");

const dataFile = lastFormData?.get("data") as File;
expect(dataFile).toBeInstanceOf(File);
Expand All @@ -552,17 +553,6 @@ describe("Queue API", () => {
expect(refImageFile).toBeInstanceOf(File);
});

it("rejects lucy-2-v2v job when neither prompt nor reference_image is provided", async () => {
const testVideoBlob = new Blob(["test-video"], { type: "video/mp4" });

await expect(
decart.queue.submit({
model: models.video("lucy-2-v2v"),
data: testVideoBlob,
} as Parameters<typeof decart.queue.submit>[0]),
).rejects.toThrow("Must provide at least one of 'prompt' or 'reference_image'");
});

it("submits lucy-2-v2v job with reference_image", async () => {
server.use(
http.post("http://localhost/v1/jobs/lucy-2-v2v", async ({ request }) => {
Expand Down
Loading