import{s as vr,o as Ur,n as yo}from"../chunks/scheduler.8c3d61f6.js";import{S as Zr,i as jr,g as a,s as i,r as p,A as Ir,h as l,f as n,c as s,j as v,u as c,x as d,k as b,y as r,a as o,v as m,d as f,t as u,w as g}from"../chunks/index.da70eac4.js";import{T as Wa}from"../chunks/Tip.1d9b8c37.js";import{D as j}from"../chunks/Docstring.c021b19a.js";import{C as I}from"../chunks/CodeBlock.a9c4becf.js";import{E as Va}from"../chunks/ExampleCodeBlock.56b4589c.js";import{H as w,E as Cr}from"../chunks/getInferenceSnippets.725ed3d4.js";function Gr(A){let h,T="AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting clip_sample=False in the scheduler as this can also have an adverse effect on generated samples. Additionally, the AnimateDiff checkpoints can be sensitive to the beta schedule of the scheduler. We recommend setting this to linear.";return{c(){h=a("p"),h.innerHTML=T},l(y){h=l(y,"P",{"data-svelte-h":!0}),d(h)!=="svelte-vnv4xn"&&(h.innerHTML=T)},m(y,M){o(y,h,M)},p:yo,d(y){y&&n(h)}}}function Wr(A){let h,T="FreeInit is not really free - the improved quality comes at the cost of extra computation. It requires sampling a few extra times depending on the num_iters parameter that is set when enabling it. Setting the use_fast_sampling parameter to True can improve the overall performance (at the cost of lower quality compared to when use_fast_sampling=False but still better results than vanilla video generation models).";return{c(){h=a("p"),h.innerHTML=T},l(y){h=l(y,"P",{"data-svelte-h":!0}),d(h)!=="svelte-xxwpr5"&&(h.innerHTML=T)},m(y,M){o(y,h,M)},p:yo,d(y){y&&n(h)}}}function Vr(A){let h,T='Make sure to check out the Schedulers guide to learn how to explore the tradeoff between scheduler speed and quality, and see the reuse components across pipelines section to learn how to efficiently load the same components into multiple pipelines.';return{c(){h=a("p"),h.innerHTML=T},l(y){h=l(y,"P",{"data-svelte-h":!0}),d(h)!=="svelte-1qn15hi"&&(h.innerHTML=T)},m(y,M){o(y,h,M)},p:yo,d(y){y&&n(h)}}}function xr(A){let h,T="Examples:",y,M,J;return M=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTW90aW9uQWRhcHRlciUyQyUyMEFuaW1hdGVEaWZmUGlwZWxpbmUlMkMlMjBERElNU2NoZWR1bGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMEElMEFhZGFwdGVyJTIwJTNEJTIwTW90aW9uQWRhcHRlci5mcm9tX3ByZXRyYWluZWQoJTIyZ3VveXd3JTJGYW5pbWF0ZWRpZmYtbW90aW9uLWFkYXB0ZXItdjEtNS0yJTIyKSUwQXBpcGUlMjAlM0QlMjBBbmltYXRlRGlmZlBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJmcmFua2pvc2h1YSUyRnRvb255b3VfYmV0YTYlMjIlMkMlMjBtb3Rpb25fYWRhcHRlciUzRGFkYXB0ZXIpJTBBcGlwZS5zY2hlZHVsZXIlMjAlM0QlMjBERElNU2NoZWR1bGVyKGJldGFfc2NoZWR1bGUlM0QlMjJsaW5lYXIlMjIlMkMlMjBzdGVwc19vZmZzZXQlM0QxJTJDJTIwY2xpcF9zYW1wbGUlM0RGYWxzZSklMEFvdXRwdXQlMjAlM0QlMjBwaXBlKHByb21wdCUzRCUyMkElMjBjb3JnaSUyMHdhbGtpbmclMjBpbiUyMHRoZSUyMHBhcmslMjIpJTBBZnJhbWVzJTIwJTNEJTIwb3V0cHV0LmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fZ2lmKGZyYW1lcyUyQyUyMCUyMmFuaW1hdGlvbi5naWYlMjIp",highlighted:`>>> import torch >>> from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler >>> from diffusers.utils import export_to_gif >>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2") >>> pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter) >>> pipe.scheduler = DDIMScheduler(beta_schedule="linear", steps_offset=1, clip_sample=False) >>> output = pipe(prompt="A corgi walking in the park") >>> frames = output.frames[0] >>> export_to_gif(frames, "animation.gif")`,wrap:!1}}),{c(){h=a("p"),h.textContent=T,y=i(),p(M.$$.fragment)},l(_){h=l(_,"P",{"data-svelte-h":!0}),d(h)!=="svelte-kvfsh7"&&(h.textContent=T),y=s(_),c(M.$$.fragment,_)},m(_,Z){o(_,h,Z),o(_,y,Z),m(M,_,Z),J=!0},p:yo,i(_){J||(f(M.$$.fragment,_),J=!0)},o(_){u(M.$$.fragment,_),J=!1},d(_){_&&(n(h),n(y)),g(M,_)}}}function kr(A){let h,T="Examples:",y,M,J;return M=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZTcGFyc2VDb250cm9sTmV0UGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLm1vZGVscyUyMGltcG9ydCUyMEF1dG9lbmNvZGVyS0wlMkMlMjBNb3Rpb25BZGFwdGVyJTJDJTIwU3BhcnNlQ29udHJvbE5ldE1vZGVsJTBBZnJvbSUyMGRpZmZ1c2Vycy5zY2hlZHVsZXJzJTIwaW1wb3J0JTIwRFBNU29sdmVyTXVsdGlzdGVwU2NoZWR1bGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMkMlMjBsb2FkX2ltYWdlJTBBJTBBbW9kZWxfaWQlMjAlM0QlMjAlMjJTRzE2MTIyMiUyRlJlYWxpc3RpY19WaXNpb25fVjUuMV9ub1ZBRSUyMiUwQW1vdGlvbl9hZGFwdGVyX2lkJTIwJTNEJTIwJTIyZ3VveXd3JTJGYW5pbWF0ZWRpZmYtbW90aW9uLWFkYXB0ZXItdjEtNS0zJTIyJTBBY29udHJvbG5ldF9pZCUyMCUzRCUyMCUyMmd1b3l3dyUyRmFuaW1hdGVkaWZmLXNwYXJzZWN0cmwtc2NyaWJibGUlMjIlMEFsb3JhX2FkYXB0ZXJfaWQlMjAlM0QlMjAlMjJndW95d3clMkZhbmltYXRlZGlmZi1tb3Rpb24tbG9yYS12MS01LTMlMjIlMEF2YWVfaWQlMjAlM0QlMjAlMjJzdGFiaWxpdHlhaSUyRnNkLXZhZS1mdC1tc2UlMjIlMEFkZXZpY2UlMjAlM0QlMjAlMjJjdWRhJTIyJTBBJTBBbW90aW9uX2FkYXB0ZXIlMjAlM0QlMjBNb3Rpb25BZGFwdGVyLmZyb21fcHJldHJhaW5lZChtb3Rpb25fYWRhcHRlcl9pZCUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNikudG8oZGV2aWNlKSUwQWNvbnRyb2xuZXQlMjAlM0QlMjBTcGFyc2VDb250cm9sTmV0TW9kZWwuZnJvbV9wcmV0cmFpbmVkKGNvbnRyb2xuZXRfaWQlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpLnRvKGRldmljZSklMEF2YWUlMjAlM0QlMjBBdXRvZW5jb2RlcktMLmZyb21fcHJldHJhaW5lZCh2YWVfaWQlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpLnRvKGRldmljZSklMEFzY2hlZHVsZXIlMjAlM0QlMjBEUE1Tb2x2ZXJNdWx0aXN0ZXBTY2hlZHVsZXIuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMG1vZGVsX2lkJTJDJTBBJTIwJTIwJTIwJTIwc3ViZm9sZGVyJTNEJTIyc2NoZWR1bGVyJTIyJTJDJTBBJTIwJTIwJTIwJTIwYmV0YV9zY2hlZHVsZSUzRCUyMmxpbmVhciUyMiUyQyUwQSUyMCUyMCUyMCUyMGFsZ29yaXRobV90eXBlJTNEJTIyZHBtc29sdmVyJTJCJTJCJTIyJTJDJTBBJTIwJTIwJTIwJTIwdXNlX2thcnJhc19zaWdtYXMlM0RUcnVlJTJDJTBBKSUwQXBpcGUlMjAlM0QlMjBBbmltYXRlRGlmZlNwYXJzZUNvbnRyb2xOZXRQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwbW9kZWxfaWQlMkMlMEElMjAlMjAlMjAlMjBtb3Rpb25fYWRhcHRlciUzRG1vdGlvbl9hZGFwdGVyJTJDJTBBJTIwJTIwJTIwJTIwY29udHJvbG5ldCUzRGNvbnRyb2xuZXQlMkMlMEElMjAlMjAlMjAlMjB2YWUlM0R2YWUlMkMlMEElMjAlMjAlMjAlMjBzY2hlZHVsZXIlM0RzY2hlZHVsZXIlMkMlMEElMjAlMjAlMjAlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMkMlMEEpLnRvKGRldmljZSklMEFwaXBlLmxvYWRfbG9yYV93ZWlnaHRzKGxvcmFfYWRhcHRlcl9pZCUyQyUyMGFkYXB0ZXJfbmFtZSUzRCUyMm1vdGlvbl9sb3JhJTIyKSUwQXBpcGUuZnVzZV9sb3JhKGxvcmFfc2NhbGUlM0QxLjApJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyYW4lMjBhZXJpYWwlMjB2aWV3JTIwb2YlMjBhJTIwY3liZXJwdW5rJTIwY2l0eSUyQyUyMG5pZ2h0JTIwdGltZSUyQyUyMG5lb24lMjBsaWdodHMlMkMlMjBtYXN0ZXJwaWVjZSUyQyUyMGhpZ2glMjBxdWFsaXR5JTIyJTBBbmVnYXRpdmVfcHJvbXB0JTIwJTNEJTIwJTIybG93JTIwcXVhbGl0eSUyQyUyMHdvcnN0JTIwcXVhbGl0eSUyQyUyMGxldHRlcmJveGVkJTIyJTBBJTBBaW1hZ2VfZmlsZXMlMjAlM0QlMjAlNUIlMEElMjAlMjAlMjAlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZhbmltYXRlZGlmZi1zY3JpYmJsZS0xLnBuZyUyMiUyQyUwQSUyMCUyMCUyMCUyMCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmh1Z2dpbmdmYWNlJTJGZG9jdW1lbnRhdGlvbi1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRmRpZmZ1c2VycyUyRmFuaW1hdGVkaWZmLXNjcmliYmxlLTIucG5nJTIyJTJDJTBBJTIwJTIwJTIwJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGZGlmZnVzZXJzJTJGYW5pbWF0ZWRpZmYtc2NyaWJibGUtMy5wbmclMjIlMkMlMEElNUQlMEFjb25kaXRpb25fZnJhbWVfaW5kaWNlcyUyMCUzRCUyMCU1QjAlMkMlMjA4JTJDJTIwMTUlNUQlMEFjb25kaXRpb25pbmdfZnJhbWVzJTIwJTNEJTIwJTVCbG9hZF9pbWFnZShpbWdfZmlsZSklMjBmb3IlMjBpbWdfZmlsZSUyMGluJTIwaW1hZ2VfZmlsZXMlNUQlMEElMEF2aWRlbyUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEbmVnYXRpdmVfcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDI1JTJDJTBBJTIwJTIwJTIwJTIwY29uZGl0aW9uaW5nX2ZyYW1lcyUzRGNvbmRpdGlvbmluZ19mcmFtZXMlMkMlMEElMjAlMjAlMjAlMjBjb250cm9sbmV0X2NvbmRpdGlvbmluZ19zY2FsZSUzRDEuMCUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xuZXRfZnJhbWVfaW5kaWNlcyUzRGNvbmRpdGlvbl9mcmFtZV9pbmRpY2VzJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCkubWFudWFsX3NlZWQoMTMzNyklMkMlMEEpLmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fZ2lmKHZpZGVvJTJDJTIwJTIyb3V0cHV0LmdpZiUyMik=",highlighted:`>>> import torch >>> from diffusers import AnimateDiffSparseControlNetPipeline >>> from diffusers.models import AutoencoderKL, MotionAdapter, SparseControlNetModel >>> from diffusers.schedulers import DPMSolverMultistepScheduler >>> from diffusers.utils import export_to_gif, load_image >>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE" >>> motion_adapter_id = "guoyww/animatediff-motion-adapter-v1-5-3" >>> controlnet_id = "guoyww/animatediff-sparsectrl-scribble" >>> lora_adapter_id = "guoyww/animatediff-motion-lora-v1-5-3" >>> vae_id = "stabilityai/sd-vae-ft-mse" >>> device = "cuda" >>> motion_adapter = MotionAdapter.from_pretrained(motion_adapter_id, torch_dtype=torch.float16).to(device) >>> controlnet = SparseControlNetModel.from_pretrained(controlnet_id, torch_dtype=torch.float16).to(device) >>> vae = AutoencoderKL.from_pretrained(vae_id, torch_dtype=torch.float16).to(device) >>> scheduler = DPMSolverMultistepScheduler.from_pretrained( ... model_id, ... subfolder="scheduler", ... beta_schedule="linear", ... algorithm_type="dpmsolver++", ... use_karras_sigmas=True, ... ) >>> pipe = AnimateDiffSparseControlNetPipeline.from_pretrained( ... model_id, ... motion_adapter=motion_adapter, ... controlnet=controlnet, ... vae=vae, ... scheduler=scheduler, ... torch_dtype=torch.float16, ... ).to(device) >>> pipe.load_lora_weights(lora_adapter_id, adapter_name="motion_lora") >>> pipe.fuse_lora(lora_scale=1.0) >>> prompt = "an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality" >>> negative_prompt = "low quality, worst quality, letterboxed" >>> image_files = [ ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-1.png", ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-2.png", ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-3.png", ... ] >>> condition_frame_indices = [0, 8, 15] >>> conditioning_frames = [load_image(img_file) for img_file in image_files] >>> video = pipe( ... prompt=prompt, ... negative_prompt=negative_prompt, ... num_inference_steps=25, ... conditioning_frames=conditioning_frames, ... controlnet_conditioning_scale=1.0, ... controlnet_frame_indices=condition_frame_indices, ... generator=torch.Generator().manual_seed(1337), ... ).frames[0] >>> export_to_gif(video, "output.gif")`,wrap:!1}}),{c(){h=a("p"),h.textContent=T,y=i(),p(M.$$.fragment)},l(_){h=l(_,"P",{"data-svelte-h":!0}),d(h)!=="svelte-kvfsh7"&&(h.textContent=T),y=s(_),c(M.$$.fragment,_)},m(_,Z){o(_,h,Z),o(_,y,Z),m(M,_,Z),J=!0},p:yo,i(_){J||(f(M.$$.fragment,_),J=!0)},o(_){u(M.$$.fragment,_),J=!1},d(_){_&&(n(h),n(y)),g(M,_)}}}function Br(A){let h,T="Examples:",y,M,J;return M=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzLm1vZGVscyUyMGltcG9ydCUyME1vdGlvbkFkYXB0ZXIlMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZTRFhMUGlwZWxpbmUlMkMlMjBERElNU2NoZWR1bGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMEElMEFhZGFwdGVyJTIwJTNEJTIwTW90aW9uQWRhcHRlci5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyYS1yLXItby13JTJGYW5pbWF0ZWRpZmYtbW90aW9uLWFkYXB0ZXItc2R4bC1iZXRhJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTBBKSUwQSUwQW1vZGVsX2lkJTIwJTNEJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLXhsLWJhc2UtMS4wJTIyJTBBc2NoZWR1bGVyJTIwJTNEJTIwRERJTVNjaGVkdWxlci5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwbW9kZWxfaWQlMkMlMEElMjAlMjAlMjAlMjBzdWJmb2xkZXIlM0QlMjJzY2hlZHVsZXIlMjIlMkMlMEElMjAlMjAlMjAlMjBjbGlwX3NhbXBsZSUzREZhbHNlJTJDJTBBJTIwJTIwJTIwJTIwdGltZXN0ZXBfc3BhY2luZyUzRCUyMmxpbnNwYWNlJTIyJTJDJTBBJTIwJTIwJTIwJTIwYmV0YV9zY2hlZHVsZSUzRCUyMmxpbmVhciUyMiUyQyUwQSUyMCUyMCUyMCUyMHN0ZXBzX29mZnNldCUzRDElMkMlMEEpJTBBcGlwZSUyMCUzRCUyMEFuaW1hdGVEaWZmU0RYTFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjBtb2RlbF9pZCUyQyUwQSUyMCUyMCUyMCUyMG1vdGlvbl9hZGFwdGVyJTNEYWRhcHRlciUyQyUwQSUyMCUyMCUyMCUyMHNjaGVkdWxlciUzRHNjaGVkdWxlciUyQyUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUwQSUyMCUyMCUyMCUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTBBKS50byglMjJjdWRhJTIyKSUwQSUwQSUyMyUyMGVuYWJsZSUyMG1lbW9yeSUyMHNhdmluZ3MlMEFwaXBlLmVuYWJsZV92YWVfc2xpY2luZygpJTBBcGlwZS5lbmFibGVfdmFlX3RpbGluZygpJTBBJTBBb3V0cHV0JTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0QlMjJhJTIwcGFuZGElMjBzdXJmaW5nJTIwaW4lMjB0aGUlMjBvY2VhbiUyQyUyMHJlYWxpc3RpYyUyQyUyMGhpZ2glMjBxdWFsaXR5JTIyJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEJTIybG93JTIwcXVhbGl0eSUyQyUyMHdvcnN0JTIwcXVhbGl0eSUyMiUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0QyMCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEOCUyQyUwQSUyMCUyMCUyMCUyMHdpZHRoJTNEMTAyNCUyQyUwQSUyMCUyMCUyMCUyMGhlaWdodCUzRDEwMjQlMkMlMEElMjAlMjAlMjAlMjBudW1fZnJhbWVzJTNEMTYlMkMlMEEpJTBBJTBBZnJhbWVzJTIwJTNEJTIwb3V0cHV0LmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fZ2lmKGZyYW1lcyUyQyUyMCUyMmFuaW1hdGlvbi5naWYlMjIp",highlighted:`>>> import torch >>> from diffusers.models import MotionAdapter >>> from diffusers import AnimateDiffSDXLPipeline, DDIMScheduler >>> from diffusers.utils import export_to_gif >>> adapter = MotionAdapter.from_pretrained( ... "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta", torch_dtype=torch.float16 ... ) >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0" >>> scheduler = DDIMScheduler.from_pretrained( ... model_id, ... subfolder="scheduler", ... clip_sample=False, ... timestep_spacing="linspace", ... beta_schedule="linear", ... steps_offset=1, ... ) >>> pipe = AnimateDiffSDXLPipeline.from_pretrained( ... model_id, ... motion_adapter=adapter, ... scheduler=scheduler, ... torch_dtype=torch.float16, ... variant="fp16", ... ).to("cuda") >>> # enable memory savings >>> pipe.enable_vae_slicing() >>> pipe.enable_vae_tiling() >>> output = pipe( ... prompt="a panda surfing in the ocean, realistic, high quality", ... negative_prompt="low quality, worst quality", ... num_inference_steps=20, ... guidance_scale=8, ... width=1024, ... height=1024, ... num_frames=16, ... ) >>> frames = output.frames[0] >>> export_to_gif(frames, "animation.gif")`,wrap:!1}}),{c(){h=a("p"),h.textContent=T,y=i(),p(M.$$.fragment)},l(_){h=l(_,"P",{"data-svelte-h":!0}),d(h)!=="svelte-kvfsh7"&&(h.textContent=T),y=s(_),c(M.$$.fragment,_)},m(_,Z){o(_,h,Z),o(_,y,Z),m(M,_,Z),J=!0},p:yo,i(_){J||(f(M.$$.fragment,_),J=!0)},o(_){u(M.$$.fragment,_),J=!1},d(_){_&&(n(h),n(y)),g(M,_)}}}function Nr(A){let h,T,y,M,J,_,Z,xa='LoRA',bo,fe,wo,ue,ka='AnimateDiff: Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning by Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, Bo Dai.',To,ge,Ba="The abstract of the paper is the following:",Jo,he,Na='With the advance of text-to-image models (e.g., Stable Diffusion) and corresponding personalization techniques such as DreamBooth and LoRA, everyone can manifest their imagination into high-quality images at an affordable cost. Subsequently, there is a great demand for image animation techniques to further combine generated static images with motion dynamics. In this report, we propose a practical framework to animate most of the existing personalized text-to-image models once and for all, saving efforts in model-specific tuning. At the core of the proposed framework is to insert a newly initialized motion modeling module into the frozen text-to-image model and train it on video clips to distill reasonable motion priors. Once trained, by simply injecting this motion modeling module, all personalized versions derived from the same base T2I readily become text-driven models that produce diverse and personalized animated images. We conduct our evaluation on several public representative personalized text-to-image models across anime pictures and realistic photographs, and demonstrate that our proposed framework helps these models generate temporally smooth animation clips while preserving the domain and diversity of their outputs. Code and pre-trained weights will be publicly available at this https URL.',vo,_e,Uo,ye,Aa='Pipeline Tasks Demo AnimateDiffPipeline Text-to-Video Generation with AnimateDiff AnimateDiffControlNetPipeline Controlled Video-to-Video Generation with AnimateDiff using ControlNet AnimateDiffSparseControlNetPipeline Controlled Video-to-Video Generation with AnimateDiff using SparseCtrl AnimateDiffSDXLPipeline Video-to-Video Generation with AnimateDiff AnimateDiffVideoToVideoPipeline Video-to-Video Generation with AnimateDiff AnimateDiffVideoToVideoControlNetPipeline Video-to-Video Generation with AnimateDiff using ControlNet ',Zo,Me,jo,be,Da='Motion Adapter checkpoints can be found under guoyww. These checkpoints are meant to work with any model based on Stable Diffusion 1.4/1.5.',Io,we,Co,Te,Go,Je,Ra="AnimateDiff works with a MotionAdapter checkpoint and a Stable Diffusion model checkpoint. The MotionAdapter is a collection of Motion Modules that are responsible for adding coherent motion across image frames. These modules are applied after the Resnet and Attention blocks in Stable Diffusion UNet.",Wo,ve,Xa="The following example demonstrates how to use a MotionAdapter checkpoint with Diffusers for inference based on StableDiffusion-1.4/1.5.",Vo,Ue,xo,Ze,Sa="Here are some sample outputs:",ko,je,La=`
masterpiece, bestquality, sunset.
masterpiece, bestquality, sunset
`,Bo,$,No,Ie,Ao,Ce,Ya='AnimateDiff can also be used with ControlNets ControlNet was introduced in Adding Conditional Control to Text-to-Image Diffusion Models by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. With a ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide depth maps, the ControlNet model generates a video that’ll preserve the spatial information from the depth maps. It is a more flexible and accurate way to control the video generation process.',Do,Ge,Ro,We,Ea="Here are some sample outputs:",Xo,q,Qa=`Source Video Output Video raccoon playing a guitar
racoon playing a guitar a panda, playing a guitar, sitting in a pink boat, in the ocean, mountains in background, realistic, high quality
a panda, playing a guitar, sitting in a pink boat, in the ocean, mountains in background, realistic, high quality`,So,Ve,Lo,xe,Pa='SparseCtrl: Adding Sparse Controls to Text-to-Video Diffusion Models for achieving controlled generation in text-to-video diffusion models by Yuwei Guo, Ceyuan Yang, Anyi Rao, Maneesh Agrawala, Dahua Lin, and Bo Dai.',Yo,ke,Fa="The abstract from the paper is:",Eo,Be,Ha='The development of text-to-video (T2V), i.e., generating videos with a given text prompt, has been significantly advanced in recent years. However, relying solely on text prompts often results in ambiguous frame composition due to spatial uncertainty. The research community thus leverages the dense structure signals, e.g., per-frame depth/edge sequences, to enhance controllability, whose collection accordingly increases the burden of inference. In this work, we present SparseCtrl to enable flexible structure control with temporally sparse signals, requiring only one or a few inputs, as shown in Figure 1. It incorporates an additional condition encoder to process these sparse signals while leaving the pre-trained T2V model untouched. The proposed approach is compatible with various modalities, including sketches, depth maps, and RGB images, providing more practical control for video generation and promoting applications such as storyboarding, depth rendering, keyframe animation, and interpolation. Extensive experiments demonstrate the generalization of SparseCtrl on both original and personalized T2V generators. Codes and models will be publicly available at this https URL.',Qo,Ne,za="SparseCtrl introduces the following checkpoints for controlled text-to-video generation:",Po,Ae,$a='
  • SparseCtrl Scribble
  • SparseCtrl RGB
  • ',Fo,De,Ho,Re,zo,Xe,qa="Here are some sample outputs:",$o,Se,Ka="an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality ",K,Oa='
    scribble-1
    scribble-2
    scribble-3
    an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality
    ',qo,Le,Ko,Ye,Oo,Ee,el="Here are some sample outputs:",ei,Qe,tl="closeup face photo of man in black clothes, night city street, bokeh, fireworks in background ",O,nl='
    closeup face photo of man in black clothes, night city street, bokeh, fireworks in background
    closeup face photo of man in black clothes, night city street, bokeh, fireworks in background
    ',ti,Pe,ni,Fe,ol="AnimateDiff can also be used with SDXL models. This is currently an experimental feature as only a beta release of the motion adapter checkpoint is available.",oi,He,ii,ze,si,$e,il="AnimateDiff can also be used to generate visually similar videos or enable style/character/background or other edits starting from an initial video, allowing you to seamlessly explore creative possibilities.",ai,qe,li,Ke,sl="Here are some sample outputs:",ri,Oe,al=`Source Video Output Video raccoon playing a guitar
    racoon playing a guitar panda playing a guitar
    panda playing a guitar closeup of margot robbie, fireworks in the background, high quality
    closeup of margot robbie, fireworks in the background, high quality closeup of tony stark, robert downey jr, fireworks
    closeup of tony stark, robert downey jr, fireworks`,di,et,pi,tt,ll='AnimateDiff can be used together with ControlNets to enhance video-to-video generation by allowing for precise control over the output. ControlNet was introduced in Adding Conditional Control to Text-to-Image Diffusion Models by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala, and allows you to condition Stable Diffusion with an additional control image to ensure that the spatial information is preserved throughout the video.',ci,nt,rl="This pipeline allows you to condition your generation both on the original video and on a sequence of control images.",mi,ot,fi,it,dl="Here are some sample outputs:",ui,ee,pl=`Source Video Output Video anime girl, dancing
    anime girl, dancing astronaut in space, dancing
    astronaut in space, dancing`,gi,st,cl="The lights and composition were transferred from the Source Video.",hi,at,_i,lt,ml="Motion LoRAs are a collection of LoRAs that work with the guoyww/animatediff-motion-adapter-v1-5-2 checkpoint. These LoRAs are responsible for adding specific types of motion to the animations.",yi,rt,Mi,dt,fl=`
    masterpiece, bestquality, sunset.
    masterpiece, bestquality, sunset
    `,bi,pt,wi,ct,ul='You can also leverage the PEFT backend to combine Motion LoRA’s and create more complex animations.',Ti,mt,gl="First install PEFT with",Ji,ft,vi,ut,hl="Then you can use the following code to combine Motion LoRAs.",Ui,gt,Zi,ht,_l=`
    masterpiece, bestquality, sunset.
    masterpiece, bestquality, sunset
    `,ji,_t,Ii,yt,yl='FreeInit: Bridging Initialization Gap in Video Diffusion Models by Tianxing Wu, Chenyang Si, Yuming Jiang, Ziqi Huang, Ziwei Liu.',Ci,Mt,Ml="FreeInit is an effective method that improves temporal consistency and overall quality of videos generated using video-diffusion-models without any addition training. It can be applied to AnimateDiff, ModelScope, VideoCrafter and various other video generation models seamlessly at inference time, and works by iteratively refining the latent-initialization noise. More details can be found it the paper.",Gi,bt,bl="The following example demonstrates the usage of FreeInit.",Wi,wt,Vi,te,xi,ne,ki,Tt,wl=`Without FreeInit enabled With FreeInit enabled panda playing a guitar
    panda playing a guitar panda playing a guitar
    panda playing a guitar`,Bi,Jt,Ni,vt,Tl='AnimateLCM is a motion module checkpoint and an LCM LoRA that have been created using a consistency learning strategy that decouples the distillation of the image generation priors and the motion generation priors.',Ai,Ut,Di,Zt,Jl=`
    A space rocket, 4K.
    A space rocket, 4K
    `,Ri,jt,vl='AnimateLCM is also compatible with existing Motion LoRAs.',Xi,It,Si,Ct,Ul=`
    A space rocket, 4K.
    A space rocket, 4K
    `,Li,Gt,Yi,Wt,Zl='FreeNoise: Tuning-Free Longer Video Diffusion via Noise Rescheduling by Haonan Qiu, Menghan Xia, Yong Zhang, Yingqing He, Xintao Wang, Ying Shan, Ziwei Liu.',Ei,Vt,jl="FreeNoise is a sampling mechanism that can generate longer videos with short-video generation models by employing noise-rescheduling, temporal attention over sliding windows, and weighted averaging of latent frames. It also can be used with multiple prompts to allow for interpolated video generations. More details are available in the paper.",Qi,xt,Il="The currently supported AnimateDiff pipelines that can be used with FreeNoise are:",Pi,kt,Cl='
  • AnimateDiffPipeline
  • AnimateDiffControlNetPipeline
  • AnimateDiffVideoToVideoPipeline
  • AnimateDiffVideoToVideoControlNetPipeline
  • ',Fi,Bt,Gl="In order to use FreeNoise, a single line needs to be added to the inference code after loading your pipelines.",Hi,Nt,zi,At,Wl="After this, either a single prompt could be used, or multiple prompts can be passed as a dictionary of integer-string pairs. The integer keys of the dictionary correspond to the frame index at which the influence of that prompt would be maximum. Each frame index should map to a single string prompt. The prompts for intermediate frame indices, that are not passed in the dictionary, are created by interpolating between the frame prompts that are passed. By default, simple linear interpolation is used. However, you can customize this behaviour with a callback to the prompt_interpolation_callback parameter when enabling FreeNoise.",$i,Dt,Vl="Full example:",qi,Rt,Ki,Xt,Oi,St,xl="Since FreeNoise processes multiple frames together, there are parts in the modeling where the memory required exceeds that available on normal consumer GPUs. The main memory bottlenecks that we identified are spatial and temporal attention blocks, upsampling and downsampling blocks, resnet blocks and feed-forward layers. Since most of these blocks operate effectively only on the channel/embedding dimension, one can perform chunked inference across the batch dimensions. The batch dimension in AnimateDiff are either spatial ([B x F, H x W, C]) or temporal (B x H x W, F, C) in nature (note that it may seem counter-intuitive, but the batch dimension here are correct, because spatial blocks process across the B x F dimension while the temporal blocks process across the B x H x W dimension). We introduce a SplitInferenceModule that makes it easier to chunk across any dimension and perform inference. This saves a lot of memory but comes at the cost of requiring more time for inference.",es,Lt,ts,Yt,kl="The call to pipe.enable_free_noise_split_inference method accepts two parameters: spatial_split_size (defaults to 256) and temporal_split_size (defaults to 16). These can be configured based on how much VRAM you have available. A lower split size results in lower memory usage but slower inference, whereas a larger split size results in faster inference at the cost of more memory.",ns,Et,os,Qt,Bl="diffusers>=0.30.0 supports loading the AnimateDiff checkpoints into the MotionAdapter in their original format via from_single_file",is,Pt,ss,Ft,as,C,Ht,js,vn,Nl="Pipeline for text-to-video generation.",Is,Un,Al=`This model inherits from DiffusionPipeline. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).`,Cs,Zn,Dl="The pipeline also inherits the following loading methods:",Gs,jn,Rl='
  • load_textual_inversion() for loading textual inversion embeddings
  • load_lora_weights() for loading LoRA weights
  • save_lora_weights() for saving LoRA weights
  • load_ip_adapter() for loading IP Adapters
  • ',Ws,L,zt,Vs,In,Xl="The call function to the pipeline for generation.",xs,oe,ks,ie,$t,Bs,Cn,Sl="Encodes the prompt into text encoder hidden states.",ls,qt,rs,G,Kt,Ns,Gn,Ll="Pipeline for text-to-video generation with ControlNet guidance.",As,Wn,Yl=`This model inherits from DiffusionPipeline. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).`,Ds,Vn,El="The pipeline also inherits the following loading methods:",Rs,xn,Ql='
  • load_textual_inversion() for loading textual inversion embeddings
  • load_lora_weights() for loading LoRA weights
  • save_lora_weights() for saving LoRA weights
  • load_ip_adapter() for loading IP Adapters
  • ',Xs,Y,Ot,Ss,kn,Pl="The call function to the pipeline for generation.",Ls,Bn,Fl="Examples:",Ys,se,en,Es,Nn,Hl="Encodes the prompt into text encoder hidden states.",ds,tn,ps,W,nn,Qs,An,zl=`Pipeline for controlled text-to-video generation using the method described in SparseCtrl: Adding Sparse Controls to Text-to-Video Diffusion Models.`,Ps,Dn,$l=`This model inherits from DiffusionPipeline. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).`,Fs,Rn,ql="The pipeline also inherits the following loading methods:",Hs,Xn,Kl='
  • load_textual_inversion() for loading textual inversion embeddings
  • load_lora_weights() for loading LoRA weights
  • save_lora_weights() for saving LoRA weights
  • load_ip_adapter() for loading IP Adapters
  • ',zs,E,on,$s,Sn,Ol="The call function to the pipeline for generation.",qs,ae,Ks,le,sn,Os,Ln,er="Encodes the prompt into text encoder hidden states.",cs,an,ms,U,ln,ea,Yn,tr="Pipeline for text-to-video generation using Stable Diffusion XL.",ta,En,nr=`This model inherits from DiffusionPipeline. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)`,na,Qn,or="The pipeline also inherits the following loading methods:",oa,Pn,ir='
  • load_textual_inversion() for loading textual inversion embeddings
  • from_single_file() for loading .ckpt files
  • load_lora_weights() for loading LoRA weights
  • save_lora_weights() for saving LoRA weights
  • load_ip_adapter() for loading IP Adapters
  • ',ia,Q,rn,sa,Fn,sr="Function invoked when calling the pipeline for generation.",aa,re,la,de,dn,ra,Hn,ar="Encodes the prompt into text encoder hidden states.",da,pe,pn,pa,zn,lr='See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298',fs,cn,us,V,mn,ca,$n,rr="Pipeline for video-to-video generation.",ma,qn,dr=`This model inherits from DiffusionPipeline. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).`,fa,Kn,pr="The pipeline also inherits the following loading methods:",ua,On,cr='
  • load_textual_inversion() for loading textual inversion embeddings
  • load_lora_weights() for loading LoRA weights
  • save_lora_weights() for saving LoRA weights
  • load_ip_adapter() for loading IP Adapters
  • ',ga,P,fn,ha,eo,mr="The call function to the pipeline for generation.",_a,to,fr="Examples:",ya,ce,un,Ma,no,ur="Encodes the prompt into text encoder hidden states.",gs,gn,hs,x,hn,ba,oo,gr="Pipeline for video-to-video generation with ControlNet guidance.",wa,io,hr=`This model inherits from DiffusionPipeline. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).`,Ta,so,_r="The pipeline also inherits the following loading methods:",Ja,ao,yr='
  • load_textual_inversion() for loading textual inversion embeddings
  • load_lora_weights() for loading LoRA weights
  • save_lora_weights() for saving LoRA weights
  • load_ip_adapter() for loading IP Adapters
  • ',va,F,_n,Ua,lo,Mr="The call function to the pipeline for generation.",Za,ro,br="Examples:",ja,me,yn,Ia,po,wr="Encodes the prompt into text encoder hidden states.",_s,Mn,ys,S,bn,Ca,co,Tr="Output class for AnimateDiff pipelines.",Ga,mo,Jr=`PIL image sequences of length num_frames. It can also be a NumPy array or Torch tensor of shape (batch_size, num_frames, channels, height, width)`,Ms,wn,bs,Mo,ws;return J=new w({props:{title:"Text-to-Video Generation with AnimateDiff",local:"text-to-video-generation-with-animatediff",headingTag:"h1"}}),fe=new w({props:{title:"Overview",local:"overview",headingTag:"h2"}}),_e=new w({props:{title:"Available Pipelines",local:"available-pipelines",headingTag:"h2"}}),Me=new w({props:{title:"Available checkpoints",local:"available-checkpoints",headingTag:"h2"}}),we=new w({props:{title:"Usage example",local:"usage-example",headingTag:"h2"}}),Te=new w({props:{title:"AnimateDiffPipeline",local:"animatediffpipeline",headingTag:"h3"}}),Ue=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZQaXBlbGluZSUyQyUyMERESU1TY2hlZHVsZXIlMkMlMjBNb3Rpb25BZGFwdGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMEElMEElMjMlMjBMb2FkJTIwdGhlJTIwbW90aW9uJTIwYWRhcHRlciUwQWFkYXB0ZXIlMjAlM0QlMjBNb3Rpb25BZGFwdGVyLmZyb21fcHJldHJhaW5lZCglMjJndW95d3clMkZhbmltYXRlZGlmZi1tb3Rpb24tYWRhcHRlci12MS01LTIlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBJTIzJTIwbG9hZCUyMFNEJTIwMS41JTIwYmFzZWQlMjBmaW5ldHVuZWQlMjBtb2RlbCUwQW1vZGVsX2lkJTIwJTNEJTIwJTIyU0cxNjEyMjIlMkZSZWFsaXN0aWNfVmlzaW9uX1Y1LjFfbm9WQUUlMjIlMEFwaXBlJTIwJTNEJTIwQW5pbWF0ZURpZmZQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQobW9kZWxfaWQlMkMlMjBtb3Rpb25fYWRhcHRlciUzRGFkYXB0ZXIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBc2NoZWR1bGVyJTIwJTNEJTIwRERJTVNjaGVkdWxlci5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwbW9kZWxfaWQlMkMlMEElMjAlMjAlMjAlMjBzdWJmb2xkZXIlM0QlMjJzY2hlZHVsZXIlMjIlMkMlMEElMjAlMjAlMjAlMjBjbGlwX3NhbXBsZSUzREZhbHNlJTJDJTBBJTIwJTIwJTIwJTIwdGltZXN0ZXBfc3BhY2luZyUzRCUyMmxpbnNwYWNlJTIyJTJDJTBBJTIwJTIwJTIwJTIwYmV0YV9zY2hlZHVsZSUzRCUyMmxpbmVhciUyMiUyQyUwQSUyMCUyMCUyMCUyMHN0ZXBzX29mZnNldCUzRDElMkMlMEEpJTBBcGlwZS5zY2hlZHVsZXIlMjAlM0QlMjBzY2hlZHVsZXIlMEElMEElMjMlMjBlbmFibGUlMjBtZW1vcnklMjBzYXZpbmdzJTBBcGlwZS5lbmFibGVfdmFlX3NsaWNpbmcoKSUwQXBpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFvdXRwdXQlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRCglMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjJtYXN0ZXJwaWVjZSUyQyUyMGJlc3RxdWFsaXR5JTJDJTIwaGlnaGx5ZGV0YWlsZWQlMkMlMjB1bHRyYWRldGFpbGVkJTJDJTIwc3Vuc2V0JTJDJTIwJTIyJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIyb3JhbmdlJTIwc2t5JTJDJTIwd2FybSUyMGxpZ2h0aW5nJTJDJTIwZmlzaGluZyUyMGJvYXRzJTJDJTIwb2NlYW4lMjB3YXZlcyUyMHNlYWd1bGxzJTJDJTIwJTIyJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIycmlwcGxpbmclMjB3YXRlciUyQyUyMHdoYXJmJTJDJTIwc2lsaG91ZXR0ZSUyQyUyMHNlcmVuZSUyMGF0bW9zcGhlcmUlMkMlMjBkdXNrJTJDJTIwZXZlbmluZyUyMGdsb3clMkMlMjAlMjIlMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjJnb2xkZW4lMjBob3VyJTJDJTIwY29hc3RhbCUyMGxhbmRzY2FwZSUyQyUyMHNlYXNpZGUlMjBzY2VuZXJ5JTIyJTBBJTIwJTIwJTIwJTIwKSUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdCUzRCUyMmJhZCUyMHF1YWxpdHklMkMlMjB3b3JzZSUyMHF1YWxpdHklMjIlMkMlMEElMjAlMjAlMjAlMjBudW1fZnJhbWVzJTNEMTYlMkMlMEElMjAlMjAlMjAlMjBndWlkYW5jZV9zY2FsZSUzRDcuNSUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0QyNSUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRHRvcmNoLkdlbmVyYXRvciglMjJjcHUlMjIpLm1hbnVhbF9zZWVkKDQyKSUyQyUwQSklMEFmcmFtZXMlMjAlM0QlMjBvdXRwdXQuZnJhbWVzJTVCMCU1RCUwQWV4cG9ydF90b19naWYoZnJhbWVzJTJDJTIwJTIyYW5pbWF0aW9uLmdpZiUyMik=",highlighted:`import torch from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter from diffusers.utils import export_to_gif # Load the motion adapter adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16) # load SD 1.5 based finetuned model model_id = "SG161222/Realistic_Vision_V5.1_noVAE" pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16) scheduler = DDIMScheduler.from_pretrained( model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1, ) pipe.scheduler = scheduler # enable memory savings pipe.enable_vae_slicing() pipe.enable_model_cpu_offload() output = pipe( prompt=( "masterpiece, bestquality, highlydetailed, ultradetailed, sunset, " "orange sky, warm lighting, fishing boats, ocean waves seagulls, " "rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, " "golden hour, coastal landscape, seaside scenery" ), negative_prompt="bad quality, worse quality", num_frames=16, guidance_scale=7.5, num_inference_steps=25, generator=torch.Generator("cpu").manual_seed(42), ) frames = output.frames[0] export_to_gif(frames, "animation.gif")`,wrap:!1}}),$=new Wa({props:{$$slots:{default:[Gr]},$$scope:{ctx:A}}}),Ie=new w({props:{title:"AnimateDiffControlNetPipeline",local:"animatediffcontrolnetpipeline",headingTag:"h3"}}),Ge=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZDb250cm9sTmV0UGlwZWxpbmUlMkMlMjBBdXRvZW5jb2RlcktMJTJDJTIwQ29udHJvbE5ldE1vZGVsJTJDJTIwTW90aW9uQWRhcHRlciUyQyUyMExDTVNjaGVkdWxlciUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBleHBvcnRfdG9fZ2lmJTJDJTIwbG9hZF92aWRlbyUwQSUwQSUyMyUyMEFkZGl0aW9uYWxseSUyQyUyMHlvdSUyMHdpbGwlMjBuZWVkJTIwYSUyMHByZXByb2Nlc3MlMjB2aWRlb3MlMjBiZWZvcmUlMjB0aGV5JTIwY2FuJTIwYmUlMjB1c2VkJTIwd2l0aCUyMHRoZSUyMENvbnRyb2xOZXQlMEElMjMlMjBIRiUyMG1haW50YWlucyUyMGp1c3QlMjB0aGUlMjByaWdodCUyMHBhY2thZ2UlMjBmb3IlMjBpdCUzQSUyMCU2MHBpcCUyMGluc3RhbGwlMjBjb250cm9sbmV0X2F1eCU2MCUwQWZyb20lMjBjb250cm9sbmV0X2F1eC5wcm9jZXNzb3IlMjBpbXBvcnQlMjBab2VEZXRlY3RvciUwQSUwQSUyMyUyMERvd25sb2FkJTIwY29udHJvbG5ldHMlMjBmcm9tJTIwaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmxsbHlhc3ZpZWwlMkZDb250cm9sTmV0LXYxLTElMjB0byUyMHVzZSUyMC5mcm9tX3NpbmdsZV9maWxlJTBBJTIzJTIwRG93bmxvYWQlMjBEaWZmdXNlcnMtZm9ybWF0JTIwY29udHJvbG5ldHMlMkMlMjBzdWNoJTIwYXMlMjBodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGbGxseWFzdmllbCUyRnNkLWNvbnRyb2xuZXQtZGVwdGglMkMlMjB0byUyMHVzZSUyMC5mcm9tX3ByZXRyYWluZWQoKSUwQWNvbnRyb2xuZXQlMjAlM0QlMjBDb250cm9sTmV0TW9kZWwuZnJvbV9zaW5nbGVfZmlsZSglMjJjb250cm9sX3YxMWYxcF9zZDE1X2RlcHRoLnB0aCUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEElMEElMjMlMjBXZSUyMHVzZSUyMEFuaW1hdGVMQ00lMjBmb3IlMjB0aGlzJTIwZXhhbXBsZSUyMGJ1dCUyMG9uZSUyMGNhbiUyMHVzZSUyMHRoZSUyMG9yaWdpbmFsJTIwbW90aW9uJTIwYWRhcHRlcnMlMjBhcyUyMHdlbGwlMjAoZm9yJTIwZXhhbXBsZSUyQyUyMGh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZndW95d3clMkZhbmltYXRlZGlmZi1tb3Rpb24tYWRhcHRlci12MS01LTMpJTBBbW90aW9uX2FkYXB0ZXIlMjAlM0QlMjBNb3Rpb25BZGFwdGVyLmZyb21fcHJldHJhaW5lZCglMjJ3YW5nZnV5dW4lMkZBbmltYXRlTENNJTIyKSUwQSUwQXZhZSUyMCUzRCUyMEF1dG9lbmNvZGVyS0wuZnJvbV9wcmV0cmFpbmVkKCUyMnN0YWJpbGl0eWFpJTJGc2QtdmFlLWZ0LW1zZSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlJTNBJTIwQW5pbWF0ZURpZmZDb250cm9sTmV0UGlwZWxpbmUlMjAlM0QlMjBBbmltYXRlRGlmZkNvbnRyb2xOZXRQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyU0cxNjEyMjIlMkZSZWFsaXN0aWNfVmlzaW9uX1Y1LjFfbm9WQUUlMjIlMkMlMEElMjAlMjAlMjAlMjBtb3Rpb25fYWRhcHRlciUzRG1vdGlvbl9hZGFwdGVyJTJDJTBBJTIwJTIwJTIwJTIwY29udHJvbG5ldCUzRGNvbnRyb2xuZXQlMkMlMEElMjAlMjAlMjAlMjB2YWUlM0R2YWUlMkMlMEEpLnRvKGRldmljZSUzRCUyMmN1ZGElMjIlMkMlMjBkdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBcGlwZS5zY2hlZHVsZXIlMjAlM0QlMjBMQ01TY2hlZHVsZXIuZnJvbV9jb25maWcocGlwZS5zY2hlZHVsZXIuY29uZmlnJTJDJTIwYmV0YV9zY2hlZHVsZSUzRCUyMmxpbmVhciUyMiklMEFwaXBlLmxvYWRfbG9yYV93ZWlnaHRzKCUyMndhbmdmdXl1biUyRkFuaW1hdGVMQ00lMjIlMkMlMjB3ZWlnaHRfbmFtZSUzRCUyMkFuaW1hdGVMQ01fc2QxNV90MnZfbG9yYS5zYWZldGVuc29ycyUyMiUyQyUyMGFkYXB0ZXJfbmFtZSUzRCUyMmxjbS1sb3JhJTIyKSUwQXBpcGUuc2V0X2FkYXB0ZXJzKCU1QiUyMmxjbS1sb3JhJTIyJTVEJTJDJTIwJTVCMC44JTVEKSUwQSUwQWRlcHRoX2RldGVjdG9yJTIwJTNEJTIwWm9lRGV0ZWN0b3IuZnJvbV9wcmV0cmFpbmVkKCUyMmxsbHlhc3ZpZWwlMkZBbm5vdGF0b3JzJTIyKS50byglMjJjdWRhJTIyKSUwQXZpZGVvJTIwJTNEJTIwbG9hZF92aWRlbyglMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZhbmltYXRlZGlmZi12aWQydmlkLWlucHV0LTEuZ2lmJTIyKSUwQWNvbmRpdGlvbmluZ19mcmFtZXMlMjAlM0QlMjAlNUIlNUQlMEElMEF3aXRoJTIwcGlwZS5wcm9ncmVzc19iYXIodG90YWwlM0RsZW4odmlkZW8pKSUyMGFzJTIwcHJvZ3Jlc3NfYmFyJTNBJTBBJTIwJTIwJTIwJTIwZm9yJTIwZnJhbWUlMjBpbiUyMHZpZGVvJTNBJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwY29uZGl0aW9uaW5nX2ZyYW1lcy5hcHBlbmQoZGVwdGhfZGV0ZWN0b3IoZnJhbWUpKSUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMHByb2dyZXNzX2Jhci51cGRhdGUoKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMmElMjBwYW5kYSUyQyUyMHBsYXlpbmclMjBhJTIwZ3VpdGFyJTJDJTIwc2l0dGluZyUyMGluJTIwYSUyMHBpbmslMjBib2F0JTJDJTIwaW4lMjB0aGUlMjBvY2VhbiUyQyUyMG1vdW50YWlucyUyMGluJTIwYmFja2dyb3VuZCUyQyUyMHJlYWxpc3RpYyUyQyUyMGhpZ2glMjBxdWFsaXR5JTIyJTBBbmVnYXRpdmVfcHJvbXB0JTIwJTNEJTIwJTIyYmFkJTIwcXVhbGl0eSUyQyUyMHdvcnN0JTIwcXVhbGl0eSUyMiUwQSUwQXZpZGVvJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHQlM0RuZWdhdGl2ZV9wcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBudW1fZnJhbWVzJTNEbGVuKHZpZGVvKSUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0QxMCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMi4wJTJDJTBBJTIwJTIwJTIwJTIwY29uZGl0aW9uaW5nX2ZyYW1lcyUzRGNvbmRpdGlvbmluZ19mcmFtZXMlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0R0b3JjaC5HZW5lcmF0b3IoKS5tYW51YWxfc2VlZCg0MiklMkMlMEEpLmZyYW1lcyU1QjAlNUQlMEElMEFleHBvcnRfdG9fZ2lmKHZpZGVvJTJDJTIwJTIyYW5pbWF0ZWRpZmZfY29udHJvbG5ldC5naWYlMjIlMkMlMjBmcHMlM0Q4KQ==",highlighted:`import torch from diffusers import AnimateDiffControlNetPipeline, AutoencoderKL, ControlNetModel, MotionAdapter, LCMScheduler from diffusers.utils import export_to_gif, load_video # Additionally, you will need a preprocess videos before they can be used with the ControlNet # HF maintains just the right package for it: \`pip install controlnet_aux\` from controlnet_aux.processor import ZoeDetector # Download controlnets from https://huggingface.co/lllyasviel/ControlNet-v1-1 to use .from_single_file # Download Diffusers-format controlnets, such as https://huggingface.co/lllyasviel/sd-controlnet-depth, to use .from_pretrained() controlnet = ControlNetModel.from_single_file("control_v11f1p_sd15_depth.pth", torch_dtype=torch.float16) # We use AnimateLCM for this example but one can use the original motion adapters as well (for example, https://huggingface.co/guoyww/animatediff-motion-adapter-v1-5-3) motion_adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM") vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16) pipe: AnimateDiffControlNetPipeline = AnimateDiffControlNetPipeline.from_pretrained( "SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=motion_adapter, controlnet=controlnet, vae=vae, ).to(device="cuda", dtype=torch.float16) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear") pipe.load_lora_weights("wangfuyun/AnimateLCM", weight_name="AnimateLCM_sd15_t2v_lora.safetensors", adapter_name="lcm-lora") pipe.set_adapters(["lcm-lora"], [0.8]) depth_detector = ZoeDetector.from_pretrained("lllyasviel/Annotators").to("cuda") video = load_video("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-1.gif") conditioning_frames = [] with pipe.progress_bar(total=len(video)) as progress_bar: for frame in video: conditioning_frames.append(depth_detector(frame)) progress_bar.update() prompt = "a panda, playing a guitar, sitting in a pink boat, in the ocean, mountains in background, realistic, high quality" negative_prompt = "bad quality, worst quality" video = pipe( prompt=prompt, negative_prompt=negative_prompt, num_frames=len(video), num_inference_steps=10, guidance_scale=2.0, conditioning_frames=conditioning_frames, generator=torch.Generator().manual_seed(42), ).frames[0] export_to_gif(video, "animatediff_controlnet.gif", fps=8)`,wrap:!1}}),Ve=new w({props:{title:"AnimateDiffSparseControlNetPipeline",local:"animatediffsparsecontrolnetpipeline",headingTag:"h3"}}),De=new w({props:{title:"Using SparseCtrl Scribble",local:"using-sparsectrl-scribble",headingTag:"h4"}}),Re=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZTcGFyc2VDb250cm9sTmV0UGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLm1vZGVscyUyMGltcG9ydCUyMEF1dG9lbmNvZGVyS0wlMkMlMjBNb3Rpb25BZGFwdGVyJTJDJTIwU3BhcnNlQ29udHJvbE5ldE1vZGVsJTBBZnJvbSUyMGRpZmZ1c2Vycy5zY2hlZHVsZXJzJTIwaW1wb3J0JTIwRFBNU29sdmVyTXVsdGlzdGVwU2NoZWR1bGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMkMlMjBsb2FkX2ltYWdlJTBBJTBBJTBBbW9kZWxfaWQlMjAlM0QlMjAlMjJTRzE2MTIyMiUyRlJlYWxpc3RpY19WaXNpb25fVjUuMV9ub1ZBRSUyMiUwQW1vdGlvbl9hZGFwdGVyX2lkJTIwJTNEJTIwJTIyZ3VveXd3JTJGYW5pbWF0ZWRpZmYtbW90aW9uLWFkYXB0ZXItdjEtNS0zJTIyJTBBY29udHJvbG5ldF9pZCUyMCUzRCUyMCUyMmd1b3l3dyUyRmFuaW1hdGVkaWZmLXNwYXJzZWN0cmwtc2NyaWJibGUlMjIlMEFsb3JhX2FkYXB0ZXJfaWQlMjAlM0QlMjAlMjJndW95d3clMkZhbmltYXRlZGlmZi1tb3Rpb24tbG9yYS12MS01LTMlMjIlMEF2YWVfaWQlMjAlM0QlMjAlMjJzdGFiaWxpdHlhaSUyRnNkLXZhZS1mdC1tc2UlMjIlMEFkZXZpY2UlMjAlM0QlMjAlMjJjdWRhJTIyJTBBJTBBbW90aW9uX2FkYXB0ZXIlMjAlM0QlMjBNb3Rpb25BZGFwdGVyLmZyb21fcHJldHJhaW5lZChtb3Rpb25fYWRhcHRlcl9pZCUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNikudG8oZGV2aWNlKSUwQWNvbnRyb2xuZXQlMjAlM0QlMjBTcGFyc2VDb250cm9sTmV0TW9kZWwuZnJvbV9wcmV0cmFpbmVkKGNvbnRyb2xuZXRfaWQlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpLnRvKGRldmljZSklMEF2YWUlMjAlM0QlMjBBdXRvZW5jb2RlcktMLmZyb21fcHJldHJhaW5lZCh2YWVfaWQlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpLnRvKGRldmljZSklMEFzY2hlZHVsZXIlMjAlM0QlMjBEUE1Tb2x2ZXJNdWx0aXN0ZXBTY2hlZHVsZXIuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMG1vZGVsX2lkJTJDJTBBJTIwJTIwJTIwJTIwc3ViZm9sZGVyJTNEJTIyc2NoZWR1bGVyJTIyJTJDJTBBJTIwJTIwJTIwJTIwYmV0YV9zY2hlZHVsZSUzRCUyMmxpbmVhciUyMiUyQyUwQSUyMCUyMCUyMCUyMGFsZ29yaXRobV90eXBlJTNEJTIyZHBtc29sdmVyJTJCJTJCJTIyJTJDJTBBJTIwJTIwJTIwJTIwdXNlX2thcnJhc19zaWdtYXMlM0RUcnVlJTJDJTBBKSUwQXBpcGUlMjAlM0QlMjBBbmltYXRlRGlmZlNwYXJzZUNvbnRyb2xOZXRQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwbW9kZWxfaWQlMkMlMEElMjAlMjAlMjAlMjBtb3Rpb25fYWRhcHRlciUzRG1vdGlvbl9hZGFwdGVyJTJDJTBBJTIwJTIwJTIwJTIwY29udHJvbG5ldCUzRGNvbnRyb2xuZXQlMkMlMEElMjAlMjAlMjAlMjB2YWUlM0R2YWUlMkMlMEElMjAlMjAlMjAlMjBzY2hlZHVsZXIlM0RzY2hlZHVsZXIlMkMlMEElMjAlMjAlMjAlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMkMlMEEpLnRvKGRldmljZSklMEFwaXBlLmxvYWRfbG9yYV93ZWlnaHRzKGxvcmFfYWRhcHRlcl9pZCUyQyUyMGFkYXB0ZXJfbmFtZSUzRCUyMm1vdGlvbl9sb3JhJTIyKSUwQXBpcGUuZnVzZV9sb3JhKGxvcmFfc2NhbGUlM0QxLjApJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyYW4lMjBhZXJpYWwlMjB2aWV3JTIwb2YlMjBhJTIwY3liZXJwdW5rJTIwY2l0eSUyQyUyMG5pZ2h0JTIwdGltZSUyQyUyMG5lb24lMjBsaWdodHMlMkMlMjBtYXN0ZXJwaWVjZSUyQyUyMGhpZ2glMjBxdWFsaXR5JTIyJTBBbmVnYXRpdmVfcHJvbXB0JTIwJTNEJTIwJTIybG93JTIwcXVhbGl0eSUyQyUyMHdvcnN0JTIwcXVhbGl0eSUyQyUyMGxldHRlcmJveGVkJTIyJTBBJTBBaW1hZ2VfZmlsZXMlMjAlM0QlMjAlNUIlMEElMjAlMjAlMjAlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZhbmltYXRlZGlmZi1zY3JpYmJsZS0xLnBuZyUyMiUyQyUwQSUyMCUyMCUyMCUyMCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmh1Z2dpbmdmYWNlJTJGZG9jdW1lbnRhdGlvbi1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRmRpZmZ1c2VycyUyRmFuaW1hdGVkaWZmLXNjcmliYmxlLTIucG5nJTIyJTJDJTBBJTIwJTIwJTIwJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGZGlmZnVzZXJzJTJGYW5pbWF0ZWRpZmYtc2NyaWJibGUtMy5wbmclMjIlMEElNUQlMEFjb25kaXRpb25fZnJhbWVfaW5kaWNlcyUyMCUzRCUyMCU1QjAlMkMlMjA4JTJDJTIwMTUlNUQlMEFjb25kaXRpb25pbmdfZnJhbWVzJTIwJTNEJTIwJTVCbG9hZF9pbWFnZShpbWdfZmlsZSklMjBmb3IlMjBpbWdfZmlsZSUyMGluJTIwaW1hZ2VfZmlsZXMlNUQlMEElMEF2aWRlbyUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEbmVnYXRpdmVfcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDI1JTJDJTBBJTIwJTIwJTIwJTIwY29uZGl0aW9uaW5nX2ZyYW1lcyUzRGNvbmRpdGlvbmluZ19mcmFtZXMlMkMlMEElMjAlMjAlMjAlMjBjb250cm9sbmV0X2NvbmRpdGlvbmluZ19zY2FsZSUzRDEuMCUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xuZXRfZnJhbWVfaW5kaWNlcyUzRGNvbmRpdGlvbl9mcmFtZV9pbmRpY2VzJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCkubWFudWFsX3NlZWQoMTMzNyklMkMlMEEpLmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fZ2lmKHZpZGVvJTJDJTIwJTIyb3V0cHV0LmdpZiUyMik=",highlighted:`import torch from diffusers import AnimateDiffSparseControlNetPipeline from diffusers.models import AutoencoderKL, MotionAdapter, SparseControlNetModel from diffusers.schedulers import DPMSolverMultistepScheduler from diffusers.utils import export_to_gif, load_image model_id = "SG161222/Realistic_Vision_V5.1_noVAE" motion_adapter_id = "guoyww/animatediff-motion-adapter-v1-5-3" controlnet_id = "guoyww/animatediff-sparsectrl-scribble" lora_adapter_id = "guoyww/animatediff-motion-lora-v1-5-3" vae_id = "stabilityai/sd-vae-ft-mse" device = "cuda" motion_adapter = MotionAdapter.from_pretrained(motion_adapter_id, torch_dtype=torch.float16).to(device) controlnet = SparseControlNetModel.from_pretrained(controlnet_id, torch_dtype=torch.float16).to(device) vae = AutoencoderKL.from_pretrained(vae_id, torch_dtype=torch.float16).to(device) scheduler = DPMSolverMultistepScheduler.from_pretrained( model_id, subfolder="scheduler", beta_schedule="linear", algorithm_type="dpmsolver++", use_karras_sigmas=True, ) pipe = AnimateDiffSparseControlNetPipeline.from_pretrained( model_id, motion_adapter=motion_adapter, controlnet=controlnet, vae=vae, scheduler=scheduler, torch_dtype=torch.float16, ).to(device) pipe.load_lora_weights(lora_adapter_id, adapter_name="motion_lora") pipe.fuse_lora(lora_scale=1.0) prompt = "an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality" negative_prompt = "low quality, worst quality, letterboxed" image_files = [ "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-1.png", "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-2.png", "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-3.png" ] condition_frame_indices = [0, 8, 15] conditioning_frames = [load_image(img_file) for img_file in image_files] video = pipe( prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=25, conditioning_frames=conditioning_frames, controlnet_conditioning_scale=1.0, controlnet_frame_indices=condition_frame_indices, generator=torch.Generator().manual_seed(1337), ).frames[0] export_to_gif(video, "output.gif")`,wrap:!1}}),Le=new w({props:{title:"Using SparseCtrl RGB",local:"using-sparsectrl-rgb",headingTag:"h4"}}),Ye=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZTcGFyc2VDb250cm9sTmV0UGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLm1vZGVscyUyMGltcG9ydCUyMEF1dG9lbmNvZGVyS0wlMkMlMjBNb3Rpb25BZGFwdGVyJTJDJTIwU3BhcnNlQ29udHJvbE5ldE1vZGVsJTBBZnJvbSUyMGRpZmZ1c2Vycy5zY2hlZHVsZXJzJTIwaW1wb3J0JTIwRFBNU29sdmVyTXVsdGlzdGVwU2NoZWR1bGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMkMlMjBsb2FkX2ltYWdlJTBBJTBBJTBBbW9kZWxfaWQlMjAlM0QlMjAlMjJTRzE2MTIyMiUyRlJlYWxpc3RpY19WaXNpb25fVjUuMV9ub1ZBRSUyMiUwQW1vdGlvbl9hZGFwdGVyX2lkJTIwJTNEJTIwJTIyZ3VveXd3JTJGYW5pbWF0ZWRpZmYtbW90aW9uLWFkYXB0ZXItdjEtNS0zJTIyJTBBY29udHJvbG5ldF9pZCUyMCUzRCUyMCUyMmd1b3l3dyUyRmFuaW1hdGVkaWZmLXNwYXJzZWN0cmwtcmdiJTIyJTBBbG9yYV9hZGFwdGVyX2lkJTIwJTNEJTIwJTIyZ3VveXd3JTJGYW5pbWF0ZWRpZmYtbW90aW9uLWxvcmEtdjEtNS0zJTIyJTBBdmFlX2lkJTIwJTNEJTIwJTIyc3RhYmlsaXR5YWklMkZzZC12YWUtZnQtbXNlJTIyJTBBZGV2aWNlJTIwJTNEJTIwJTIyY3VkYSUyMiUwQSUwQW1vdGlvbl9hZGFwdGVyJTIwJTNEJTIwTW90aW9uQWRhcHRlci5mcm9tX3ByZXRyYWluZWQobW90aW9uX2FkYXB0ZXJfaWQlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpLnRvKGRldmljZSklMEFjb250cm9sbmV0JTIwJTNEJTIwU3BhcnNlQ29udHJvbE5ldE1vZGVsLmZyb21fcHJldHJhaW5lZChjb250cm9sbmV0X2lkJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2KS50byhkZXZpY2UpJTBBdmFlJTIwJTNEJTIwQXV0b2VuY29kZXJLTC5mcm9tX3ByZXRyYWluZWQodmFlX2lkJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2KS50byhkZXZpY2UpJTBBc2NoZWR1bGVyJTIwJTNEJTIwRFBNU29sdmVyTXVsdGlzdGVwU2NoZWR1bGVyLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjBtb2RlbF9pZCUyQyUwQSUyMCUyMCUyMCUyMHN1YmZvbGRlciUzRCUyMnNjaGVkdWxlciUyMiUyQyUwQSUyMCUyMCUyMCUyMGJldGFfc2NoZWR1bGUlM0QlMjJsaW5lYXIlMjIlMkMlMEElMjAlMjAlMjAlMjBhbGdvcml0aG1fdHlwZSUzRCUyMmRwbXNvbHZlciUyQiUyQiUyMiUyQyUwQSUyMCUyMCUyMCUyMHVzZV9rYXJyYXNfc2lnbWFzJTNEVHJ1ZSUyQyUwQSklMEFwaXBlJTIwJTNEJTIwQW5pbWF0ZURpZmZTcGFyc2VDb250cm9sTmV0UGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMG1vZGVsX2lkJTJDJTBBJTIwJTIwJTIwJTIwbW90aW9uX2FkYXB0ZXIlM0Rtb3Rpb25fYWRhcHRlciUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xuZXQlM0Rjb250cm9sbmV0JTJDJTBBJTIwJTIwJTIwJTIwdmFlJTNEdmFlJTJDJTBBJTIwJTIwJTIwJTIwc2NoZWR1bGVyJTNEc2NoZWR1bGVyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBKS50byhkZXZpY2UpJTBBcGlwZS5sb2FkX2xvcmFfd2VpZ2h0cyhsb3JhX2FkYXB0ZXJfaWQlMkMlMjBhZGFwdGVyX25hbWUlM0QlMjJtb3Rpb25fbG9yYSUyMiklMEElMEFpbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGZGlmZnVzZXJzJTJGYW5pbWF0ZWRpZmYtZmlyZXdvcmsucG5nJTIyKSUwQSUwQXZpZGVvJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0QlMjJjbG9zZXVwJTIwZmFjZSUyMHBob3RvJTIwb2YlMjBtYW4lMjBpbiUyMGJsYWNrJTIwY2xvdGhlcyUyQyUyMG5pZ2h0JTIwY2l0eSUyMHN0cmVldCUyQyUyMGJva2VoJTJDJTIwZmlyZXdvcmtzJTIwaW4lMjBiYWNrZ3JvdW5kJTIyJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEJTIybG93JTIwcXVhbGl0eSUyQyUyMHdvcnN0JTIwcXVhbGl0eSUyMiUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0QyNSUyQyUwQSUyMCUyMCUyMCUyMGNvbmRpdGlvbmluZ19mcmFtZXMlM0RpbWFnZSUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xuZXRfZnJhbWVfaW5kaWNlcyUzRCU1QjAlNUQlMkMlMEElMjAlMjAlMjAlMjBjb250cm9sbmV0X2NvbmRpdGlvbmluZ19zY2FsZSUzRDEuMCUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRHRvcmNoLkdlbmVyYXRvcigpLm1hbnVhbF9zZWVkKDQyKSUyQyUwQSkuZnJhbWVzJTVCMCU1RCUwQWV4cG9ydF90b19naWYodmlkZW8lMkMlMjAlMjJvdXRwdXQuZ2lmJTIyKQ==",highlighted:`import torch from diffusers import AnimateDiffSparseControlNetPipeline from diffusers.models import AutoencoderKL, MotionAdapter, SparseControlNetModel from diffusers.schedulers import DPMSolverMultistepScheduler from diffusers.utils import export_to_gif, load_image model_id = "SG161222/Realistic_Vision_V5.1_noVAE" motion_adapter_id = "guoyww/animatediff-motion-adapter-v1-5-3" controlnet_id = "guoyww/animatediff-sparsectrl-rgb" lora_adapter_id = "guoyww/animatediff-motion-lora-v1-5-3" vae_id = "stabilityai/sd-vae-ft-mse" device = "cuda" motion_adapter = MotionAdapter.from_pretrained(motion_adapter_id, torch_dtype=torch.float16).to(device) controlnet = SparseControlNetModel.from_pretrained(controlnet_id, torch_dtype=torch.float16).to(device) vae = AutoencoderKL.from_pretrained(vae_id, torch_dtype=torch.float16).to(device) scheduler = DPMSolverMultistepScheduler.from_pretrained( model_id, subfolder="scheduler", beta_schedule="linear", algorithm_type="dpmsolver++", use_karras_sigmas=True, ) pipe = AnimateDiffSparseControlNetPipeline.from_pretrained( model_id, motion_adapter=motion_adapter, controlnet=controlnet, vae=vae, scheduler=scheduler, torch_dtype=torch.float16, ).to(device) pipe.load_lora_weights(lora_adapter_id, adapter_name="motion_lora") image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-firework.png") video = pipe( prompt="closeup face photo of man in black clothes, night city street, bokeh, fireworks in background", negative_prompt="low quality, worst quality", num_inference_steps=25, conditioning_frames=image, controlnet_frame_indices=[0], controlnet_conditioning_scale=1.0, generator=torch.Generator().manual_seed(42), ).frames[0] export_to_gif(video, "output.gif")`,wrap:!1}}),Pe=new w({props:{title:"AnimateDiffSDXLPipeline",local:"animatediffsdxlpipeline",headingTag:"h3"}}),He=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzLm1vZGVscyUyMGltcG9ydCUyME1vdGlvbkFkYXB0ZXIlMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZTRFhMUGlwZWxpbmUlMkMlMjBERElNU2NoZWR1bGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMEElMEFhZGFwdGVyJTIwJTNEJTIwTW90aW9uQWRhcHRlci5mcm9tX3ByZXRyYWluZWQoJTIyZ3VveXd3JTJGYW5pbWF0ZWRpZmYtbW90aW9uLWFkYXB0ZXItc2R4bC1iZXRhJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2KSUwQSUwQW1vZGVsX2lkJTIwJTNEJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLXhsLWJhc2UtMS4wJTIyJTBBc2NoZWR1bGVyJTIwJTNEJTIwRERJTVNjaGVkdWxlci5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwbW9kZWxfaWQlMkMlMEElMjAlMjAlMjAlMjBzdWJmb2xkZXIlM0QlMjJzY2hlZHVsZXIlMjIlMkMlMEElMjAlMjAlMjAlMjBjbGlwX3NhbXBsZSUzREZhbHNlJTJDJTBBJTIwJTIwJTIwJTIwdGltZXN0ZXBfc3BhY2luZyUzRCUyMmxpbnNwYWNlJTIyJTJDJTBBJTIwJTIwJTIwJTIwYmV0YV9zY2hlZHVsZSUzRCUyMmxpbmVhciUyMiUyQyUwQSUyMCUyMCUyMCUyMHN0ZXBzX29mZnNldCUzRDElMkMlMEEpJTBBcGlwZSUyMCUzRCUyMEFuaW1hdGVEaWZmU0RYTFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjBtb2RlbF9pZCUyQyUwQSUyMCUyMCUyMCUyMG1vdGlvbl9hZGFwdGVyJTNEYWRhcHRlciUyQyUwQSUyMCUyMCUyMCUyMHNjaGVkdWxlciUzRHNjaGVkdWxlciUyQyUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUwQSUyMCUyMCUyMCUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTBBKS50byglMjJjdWRhJTIyKSUwQSUwQSUyMyUyMGVuYWJsZSUyMG1lbW9yeSUyMHNhdmluZ3MlMEFwaXBlLmVuYWJsZV92YWVfc2xpY2luZygpJTBBcGlwZS5lbmFibGVfdmFlX3RpbGluZygpJTBBJTBBb3V0cHV0JTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0QlMjJhJTIwcGFuZGElMjBzdXJmaW5nJTIwaW4lMjB0aGUlMjBvY2VhbiUyQyUyMHJlYWxpc3RpYyUyQyUyMGhpZ2glMjBxdWFsaXR5JTIyJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEJTIybG93JTIwcXVhbGl0eSUyQyUyMHdvcnN0JTIwcXVhbGl0eSUyMiUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0QyMCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEOCUyQyUwQSUyMCUyMCUyMCUyMHdpZHRoJTNEMTAyNCUyQyUwQSUyMCUyMCUyMCUyMGhlaWdodCUzRDEwMjQlMkMlMEElMjAlMjAlMjAlMjBudW1fZnJhbWVzJTNEMTYlMkMlMEEpJTBBJTBBZnJhbWVzJTIwJTNEJTIwb3V0cHV0LmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fZ2lmKGZyYW1lcyUyQyUyMCUyMmFuaW1hdGlvbi5naWYlMjIp",highlighted:`import torch from diffusers.models import MotionAdapter from diffusers import AnimateDiffSDXLPipeline, DDIMScheduler from diffusers.utils import export_to_gif adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-sdxl-beta", torch_dtype=torch.float16) model_id = "stabilityai/stable-diffusion-xl-base-1.0" scheduler = DDIMScheduler.from_pretrained( model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1, ) pipe = AnimateDiffSDXLPipeline.from_pretrained( model_id, motion_adapter=adapter, scheduler=scheduler, torch_dtype=torch.float16, variant="fp16", ).to("cuda") # enable memory savings pipe.enable_vae_slicing() pipe.enable_vae_tiling() output = pipe( prompt="a panda surfing in the ocean, realistic, high quality", negative_prompt="low quality, worst quality", num_inference_steps=20, guidance_scale=8, width=1024, height=1024, num_frames=16, ) frames = output.frames[0] export_to_gif(frames, "animation.gif")`,wrap:!1}}),ze=new w({props:{title:"AnimateDiffVideoToVideoPipeline",local:"animatediffvideotovideopipeline",headingTag:"h3"}}),qe=new I({props:{code:"aW1wb3J0JTIwaW1hZ2VpbyUwQWltcG9ydCUyMHJlcXVlc3RzJTBBaW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZWaWRlb1RvVmlkZW9QaXBlbGluZSUyQyUyMERESU1TY2hlZHVsZXIlMkMlMjBNb3Rpb25BZGFwdGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMEFmcm9tJTIwaW8lMjBpbXBvcnQlMjBCeXRlc0lPJTBBZnJvbSUyMFBJTCUyMGltcG9ydCUyMEltYWdlJTBBJTBBJTIzJTIwTG9hZCUyMHRoZSUyMG1vdGlvbiUyMGFkYXB0ZXIlMEFhZGFwdGVyJTIwJTNEJTIwTW90aW9uQWRhcHRlci5mcm9tX3ByZXRyYWluZWQoJTIyZ3VveXd3JTJGYW5pbWF0ZWRpZmYtbW90aW9uLWFkYXB0ZXItdjEtNS0yJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2KSUwQSUyMyUyMGxvYWQlMjBTRCUyMDEuNSUyMGJhc2VkJTIwZmluZXR1bmVkJTIwbW9kZWwlMEFtb2RlbF9pZCUyMCUzRCUyMCUyMlNHMTYxMjIyJTJGUmVhbGlzdGljX1Zpc2lvbl9WNS4xX25vVkFFJTIyJTBBcGlwZSUyMCUzRCUyMEFuaW1hdGVEaWZmVmlkZW9Ub1ZpZGVvUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKG1vZGVsX2lkJTJDJTIwbW90aW9uX2FkYXB0ZXIlM0RhZGFwdGVyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2KSUwQXNjaGVkdWxlciUyMCUzRCUyMERESU1TY2hlZHVsZXIuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMG1vZGVsX2lkJTJDJTBBJTIwJTIwJTIwJTIwc3ViZm9sZGVyJTNEJTIyc2NoZWR1bGVyJTIyJTJDJTBBJTIwJTIwJTIwJTIwY2xpcF9zYW1wbGUlM0RGYWxzZSUyQyUwQSUyMCUyMCUyMCUyMHRpbWVzdGVwX3NwYWNpbmclM0QlMjJsaW5zcGFjZSUyMiUyQyUwQSUyMCUyMCUyMCUyMGJldGFfc2NoZWR1bGUlM0QlMjJsaW5lYXIlMjIlMkMlMEElMjAlMjAlMjAlMjBzdGVwc19vZmZzZXQlM0QxJTJDJTBBKSUwQXBpcGUuc2NoZWR1bGVyJTIwJTNEJTIwc2NoZWR1bGVyJTBBJTBBJTIzJTIwZW5hYmxlJTIwbWVtb3J5JTIwc2F2aW5ncyUwQXBpcGUuZW5hYmxlX3ZhZV9zbGljaW5nKCklMEFwaXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBJTIzJTIwaGVscGVyJTIwZnVuY3Rpb24lMjB0byUyMGxvYWQlMjB2aWRlb3MlMEFkZWYlMjBsb2FkX3ZpZGVvKGZpbGVfcGF0aCUzQSUyMHN0ciklM0ElMEElMjAlMjAlMjAlMjBpbWFnZXMlMjAlM0QlMjAlNUIlNUQlMEElMEElMjAlMjAlMjAlMjBpZiUyMGZpbGVfcGF0aC5zdGFydHN3aXRoKCgnaHR0cCUzQSUyRiUyRiclMkMlMjAnaHR0cHMlM0ElMkYlMkYnKSklM0ElMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjMlMjBJZiUyMHRoZSUyMGZpbGVfcGF0aCUyMGlzJTIwYSUyMFVSTCUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMHJlc3BvbnNlJTIwJTNEJTIwcmVxdWVzdHMuZ2V0KGZpbGVfcGF0aCklMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjByZXNwb25zZS5yYWlzZV9mb3Jfc3RhdHVzKCklMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjBjb250ZW50JTIwJTNEJTIwQnl0ZXNJTyhyZXNwb25zZS5jb250ZW50KSUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMHZpZCUyMCUzRCUyMGltYWdlaW8uZ2V0X3JlYWRlcihjb250ZW50KSUwQSUyMCUyMCUyMCUyMGVsc2UlM0ElMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjMlMjBBc3N1bWluZyUyMGl0J3MlMjBhJTIwbG9jYWwlMjBmaWxlJTIwcGF0aCUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMHZpZCUyMCUzRCUyMGltYWdlaW8uZ2V0X3JlYWRlcihmaWxlX3BhdGgpJTBBJTBBJTIwJTIwJTIwJTIwZm9yJTIwZnJhbWUlMjBpbiUyMHZpZCUzQSUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMHBpbF9pbWFnZSUyMCUzRCUyMEltYWdlLmZyb21hcnJheShmcmFtZSklMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjBpbWFnZXMuYXBwZW5kKHBpbF9pbWFnZSklMEElMEElMjAlMjAlMjAlMjByZXR1cm4lMjBpbWFnZXMlMEElMEF2aWRlbyUyMCUzRCUyMGxvYWRfdmlkZW8oJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGZGlmZnVzZXJzJTJGYW5pbWF0ZWRpZmYtdmlkMnZpZC1pbnB1dC0xLmdpZiUyMiklMEElMEFvdXRwdXQlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHZpZGVvJTIwJTNEJTIwdmlkZW8lMkMlMEElMjAlMjAlMjAlMjBwcm9tcHQlM0QlMjJwYW5kYSUyMHBsYXlpbmclMjBhJTIwZ3VpdGFyJTJDJTIwb24lMjBhJTIwYm9hdCUyQyUyMGluJTIwdGhlJTIwb2NlYW4lMkMlMjBoaWdoJTIwcXVhbGl0eSUyMiUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdCUzRCUyMmJhZCUyMHF1YWxpdHklMkMlMjB3b3JzZSUyMHF1YWxpdHklMjIlMkMlMEElMjAlMjAlMjAlMjBndWlkYW5jZV9zY2FsZSUzRDcuNSUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0QyNSUyQyUwQSUyMCUyMCUyMCUyMHN0cmVuZ3RoJTNEMC41JTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCUyMmNwdSUyMikubWFudWFsX3NlZWQoNDIpJTJDJTBBKSUwQWZyYW1lcyUyMCUzRCUyMG91dHB1dC5mcmFtZXMlNUIwJTVEJTBBZXhwb3J0X3RvX2dpZihmcmFtZXMlMkMlMjAlMjJhbmltYXRpb24uZ2lmJTIyKQ==",highlighted:`import imageio import requests import torch from diffusers import AnimateDiffVideoToVideoPipeline, DDIMScheduler, MotionAdapter from diffusers.utils import export_to_gif from io import BytesIO from PIL import Image # Load the motion adapter adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16) # load SD 1.5 based finetuned model model_id = "SG161222/Realistic_Vision_V5.1_noVAE" pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16) scheduler = DDIMScheduler.from_pretrained( model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1, ) pipe.scheduler = scheduler # enable memory savings pipe.enable_vae_slicing() pipe.enable_model_cpu_offload() # helper function to load videos def load_video(file_path: str): images = [] if file_path.startswith(('http://', 'https://')): # If the file_path is a URL response = requests.get(file_path) response.raise_for_status() content = BytesIO(response.content) vid = imageio.get_reader(content) else: # Assuming it's a local file path vid = imageio.get_reader(file_path) for frame in vid: pil_image = Image.fromarray(frame) images.append(pil_image) return images video = load_video("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-1.gif") output = pipe( video = video, prompt="panda playing a guitar, on a boat, in the ocean, high quality", negative_prompt="bad quality, worse quality", guidance_scale=7.5, num_inference_steps=25, strength=0.5, generator=torch.Generator("cpu").manual_seed(42), ) frames = output.frames[0] export_to_gif(frames, "animation.gif")`,wrap:!1}}),et=new w({props:{title:"AnimateDiffVideoToVideoControlNetPipeline",local:"animatediffvideotovideocontrolnetpipeline",headingTag:"h3"}}),ot=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwUElMJTIwaW1wb3J0JTIwSW1hZ2UlMEFmcm9tJTIwdHFkbS5hdXRvJTIwaW1wb3J0JTIwdHFkbSUwQSUwQWZyb20lMjBjb250cm9sbmV0X2F1eC5wcm9jZXNzb3IlMjBpbXBvcnQlMjBPcGVucG9zZURldGVjdG9yJTBBZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEFuaW1hdGVEaWZmVmlkZW9Ub1ZpZGVvQ29udHJvbE5ldFBpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMkMlMjBsb2FkX3ZpZGVvJTBBZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEF1dG9lbmNvZGVyS0wlMkMlMjBDb250cm9sTmV0TW9kZWwlMkMlMjBNb3Rpb25BZGFwdGVyJTJDJTIwTENNU2NoZWR1bGVyJTBBJTBBJTIzJTIwTG9hZCUyMHRoZSUyMENvbnRyb2xOZXQlMEFjb250cm9sbmV0JTIwJTNEJTIwQ29udHJvbE5ldE1vZGVsLmZyb21fcHJldHJhaW5lZCglMjJsbGx5YXN2aWVsJTJGc2QtY29udHJvbG5ldC1vcGVucG9zZSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEElMjMlMjBMb2FkJTIwdGhlJTIwbW90aW9uJTIwYWRhcHRlciUwQW1vdGlvbl9hZGFwdGVyJTIwJTNEJTIwTW90aW9uQWRhcHRlci5mcm9tX3ByZXRyYWluZWQoJTIyd2FuZ2Z1eXVuJTJGQW5pbWF0ZUxDTSUyMiklMEElMjMlMjBMb2FkJTIwU0QlMjAxLjUlMjBiYXNlZCUyMGZpbmV0dW5lZCUyMG1vZGVsJTBBdmFlJTIwJTNEJTIwQXV0b2VuY29kZXJLTC5mcm9tX3ByZXRyYWluZWQoJTIyc3RhYmlsaXR5YWklMkZzZC12YWUtZnQtbXNlJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2KSUwQXBpcGUlMjAlM0QlMjBBbmltYXRlRGlmZlZpZGVvVG9WaWRlb0NvbnRyb2xOZXRQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyU0cxNjEyMjIlMkZSZWFsaXN0aWNfVmlzaW9uX1Y1LjFfbm9WQUUlMjIlMkMlMEElMjAlMjAlMjAlMjBtb3Rpb25fYWRhcHRlciUzRG1vdGlvbl9hZGFwdGVyJTJDJTBBJTIwJTIwJTIwJTIwY29udHJvbG5ldCUzRGNvbnRyb2xuZXQlMkMlMEElMjAlMjAlMjAlMjB2YWUlM0R2YWUlMkMlMEEpLnRvKGRldmljZSUzRCUyMmN1ZGElMjIlMkMlMjBkdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBJTBBJTIzJTIwRW5hYmxlJTIwTENNJTIwdG8lMjBzcGVlZCUyMHVwJTIwaW5mZXJlbmNlJTBBcGlwZS5zY2hlZHVsZXIlMjAlM0QlMjBMQ01TY2hlZHVsZXIuZnJvbV9jb25maWcocGlwZS5zY2hlZHVsZXIuY29uZmlnJTJDJTIwYmV0YV9zY2hlZHVsZSUzRCUyMmxpbmVhciUyMiklMEFwaXBlLmxvYWRfbG9yYV93ZWlnaHRzKCUyMndhbmdmdXl1biUyRkFuaW1hdGVMQ00lMjIlMkMlMjB3ZWlnaHRfbmFtZSUzRCUyMkFuaW1hdGVMQ01fc2QxNV90MnZfbG9yYS5zYWZldGVuc29ycyUyMiUyQyUyMGFkYXB0ZXJfbmFtZSUzRCUyMmxjbS1sb3JhJTIyKSUwQXBpcGUuc2V0X2FkYXB0ZXJzKCU1QiUyMmxjbS1sb3JhJTIyJTVEJTJDJTIwJTVCMC44JTVEKSUwQSUwQXZpZGVvJTIwJTNEJTIwbG9hZF92aWRlbyglMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZkYW5jZS5naWYlMjIpJTBBdmlkZW8lMjAlM0QlMjAlNUJmcmFtZS5jb252ZXJ0KCUyMlJHQiUyMiklMjBmb3IlMjBmcmFtZSUyMGluJTIwdmlkZW8lNUQlMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJhc3Ryb25hdXQlMjBpbiUyMHNwYWNlJTJDJTIwZGFuY2luZyUyMiUwQW5lZ2F0aXZlX3Byb21wdCUyMCUzRCUyMCUyMmJhZCUyMHF1YWxpdHklMkMlMjB3b3JzdCUyMHF1YWxpdHklMkMlMjBqcGVnJTIwYXJ0aWZhY3RzJTJDJTIwdWdseSUyMiUwQSUwQSUyMyUyMENyZWF0ZSUyMGNvbnRyb2xuZXQlMjBwcmVwcm9jZXNzb3IlMEFvcGVuX3Bvc2UlMjAlM0QlMjBPcGVucG9zZURldGVjdG9yLmZyb21fcHJldHJhaW5lZCglMjJsbGx5YXN2aWVsJTJGQW5ub3RhdG9ycyUyMikudG8oJTIyY3VkYSUyMiklMEElMEElMjMlMjBQcmVwcm9jZXNzJTIwY29udHJvbG5ldCUyMGltYWdlcyUwQWNvbmRpdGlvbmluZ19mcmFtZXMlMjAlM0QlMjAlNUIlNUQlMEFmb3IlMjBmcmFtZSUyMGluJTIwdHFkbSh2aWRlbyklM0ElMEElMjAlMjAlMjAlMjBjb25kaXRpb25pbmdfZnJhbWVzLmFwcGVuZChvcGVuX3Bvc2UoZnJhbWUpKSUwQSUwQXN0cmVuZ3RoJTIwJTNEJTIwMC44JTBBd2l0aCUyMHRvcmNoLmluZmVyZW5jZV9tb2RlKCklM0ElMEElMjAlMjAlMjAlMjB2aWRlbyUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwdmlkZW8lM0R2aWRlbyUyQyUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdCUzRG5lZ2F0aXZlX3Byb21wdCUyQyUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0QxMCUyQyUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMi4wJTJDJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwY29udHJvbG5ldF9jb25kaXRpb25pbmdfc2NhbGUlM0QwLjc1JTJDJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwY29uZGl0aW9uaW5nX2ZyYW1lcyUzRGNvbmRpdGlvbmluZ19mcmFtZXMlMkMlMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjBzdHJlbmd0aCUzRHN0cmVuZ3RoJTJDJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCkubWFudWFsX3NlZWQoNDIpJTJDJTBBJTIwJTIwJTIwJTIwKS5mcmFtZXMlNUIwJTVEJTBBJTBBdmlkZW8lMjAlM0QlMjAlNUJmcmFtZS5yZXNpemUoY29uZGl0aW9uaW5nX2ZyYW1lcyU1QjAlNUQuc2l6ZSklMjBmb3IlMjBmcmFtZSUyMGluJTIwdmlkZW8lNUQlMEFleHBvcnRfdG9fZ2lmKHZpZGVvJTJDJTIwZiUyMmFuaW1hdGVkaWZmX3ZpZDJ2aWRfY29udHJvbG5ldC5naWYlMjIlMkMlMjBmcHMlM0Q4KQ==",highlighted:`import torch from PIL import Image from tqdm.auto import tqdm from controlnet_aux.processor import OpenposeDetector from diffusers import AnimateDiffVideoToVideoControlNetPipeline from diffusers.utils import export_to_gif, load_video from diffusers import AutoencoderKL, ControlNetModel, MotionAdapter, LCMScheduler # Load the ControlNet controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16) # Load the motion adapter motion_adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM") # Load SD 1.5 based finetuned model vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16) pipe = AnimateDiffVideoToVideoControlNetPipeline.from_pretrained( "SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=motion_adapter, controlnet=controlnet, vae=vae, ).to(device="cuda", dtype=torch.float16) # Enable LCM to speed up inference pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear") pipe.load_lora_weights("wangfuyun/AnimateLCM", weight_name="AnimateLCM_sd15_t2v_lora.safetensors", adapter_name="lcm-lora") pipe.set_adapters(["lcm-lora"], [0.8]) video = load_video("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/dance.gif") video = [frame.convert("RGB") for frame in video] prompt = "astronaut in space, dancing" negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly" # Create controlnet preprocessor open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators").to("cuda") # Preprocess controlnet images conditioning_frames = [] for frame in tqdm(video): conditioning_frames.append(open_pose(frame)) strength = 0.8 with torch.inference_mode(): video = pipe( video=video, prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=10, guidance_scale=2.0, controlnet_conditioning_scale=0.75, conditioning_frames=conditioning_frames, strength=strength, generator=torch.Generator().manual_seed(42), ).frames[0] video = [frame.resize(conditioning_frames[0].size) for frame in video] export_to_gif(video, f"animatediff_vid2vid_controlnet.gif", fps=8)`,wrap:!1}}),at=new w({props:{title:"Using Motion LoRAs",local:"using-motion-loras",headingTag:"h2"}}),rt=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZQaXBlbGluZSUyQyUyMERESU1TY2hlZHVsZXIlMkMlMjBNb3Rpb25BZGFwdGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMEElMEElMjMlMjBMb2FkJTIwdGhlJTIwbW90aW9uJTIwYWRhcHRlciUwQWFkYXB0ZXIlMjAlM0QlMjBNb3Rpb25BZGFwdGVyLmZyb21fcHJldHJhaW5lZCglMjJndW95d3clMkZhbmltYXRlZGlmZi1tb3Rpb24tYWRhcHRlci12MS01LTIlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBJTIzJTIwbG9hZCUyMFNEJTIwMS41JTIwYmFzZWQlMjBmaW5ldHVuZWQlMjBtb2RlbCUwQW1vZGVsX2lkJTIwJTNEJTIwJTIyU0cxNjEyMjIlMkZSZWFsaXN0aWNfVmlzaW9uX1Y1LjFfbm9WQUUlMjIlMEFwaXBlJTIwJTNEJTIwQW5pbWF0ZURpZmZQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQobW9kZWxfaWQlMkMlMjBtb3Rpb25fYWRhcHRlciUzRGFkYXB0ZXIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBcGlwZS5sb2FkX2xvcmFfd2VpZ2h0cyglMEElMjAlMjAlMjAlMjAlMjJndW95d3clMkZhbmltYXRlZGlmZi1tb3Rpb24tbG9yYS16b29tLW91dCUyMiUyQyUyMGFkYXB0ZXJfbmFtZSUzRCUyMnpvb20tb3V0JTIyJTBBKSUwQSUwQXNjaGVkdWxlciUyMCUzRCUyMERESU1TY2hlZHVsZXIuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMG1vZGVsX2lkJTJDJTBBJTIwJTIwJTIwJTIwc3ViZm9sZGVyJTNEJTIyc2NoZWR1bGVyJTIyJTJDJTBBJTIwJTIwJTIwJTIwY2xpcF9zYW1wbGUlM0RGYWxzZSUyQyUwQSUyMCUyMCUyMCUyMGJldGFfc2NoZWR1bGUlM0QlMjJsaW5lYXIlMjIlMkMlMEElMjAlMjAlMjAlMjB0aW1lc3RlcF9zcGFjaW5nJTNEJTIybGluc3BhY2UlMjIlMkMlMEElMjAlMjAlMjAlMjBzdGVwc19vZmZzZXQlM0QxJTJDJTBBKSUwQXBpcGUuc2NoZWR1bGVyJTIwJTNEJTIwc2NoZWR1bGVyJTBBJTBBJTIzJTIwZW5hYmxlJTIwbWVtb3J5JTIwc2F2aW5ncyUwQXBpcGUuZW5hYmxlX3ZhZV9zbGljaW5nKCklMEFwaXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBb3V0cHV0JTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0QoJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIybWFzdGVycGllY2UlMkMlMjBiZXN0cXVhbGl0eSUyQyUyMGhpZ2hseWRldGFpbGVkJTJDJTIwdWx0cmFkZXRhaWxlZCUyQyUyMHN1bnNldCUyQyUyMCUyMiUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMm9yYW5nZSUyMHNreSUyQyUyMHdhcm0lMjBsaWdodGluZyUyQyUyMGZpc2hpbmclMjBib2F0cyUyQyUyMG9jZWFuJTIwd2F2ZXMlMjBzZWFndWxscyUyQyUyMCUyMiUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMnJpcHBsaW5nJTIwd2F0ZXIlMkMlMjB3aGFyZiUyQyUyMHNpbGhvdWV0dGUlMkMlMjBzZXJlbmUlMjBhdG1vc3BoZXJlJTJDJTIwZHVzayUyQyUyMGV2ZW5pbmclMjBnbG93JTJDJTIwJTIyJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIyZ29sZGVuJTIwaG91ciUyQyUyMGNvYXN0YWwlMjBsYW5kc2NhcGUlMkMlMjBzZWFzaWRlJTIwc2NlbmVyeSUyMiUwQSUyMCUyMCUyMCUyMCklMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHQlM0QlMjJiYWQlMjBxdWFsaXR5JTJDJTIwd29yc2UlMjBxdWFsaXR5JTIyJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2ZyYW1lcyUzRDE2JTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2Vfc2NhbGUlM0Q3LjUlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNEMjUlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0R0b3JjaC5HZW5lcmF0b3IoJTIyY3B1JTIyKS5tYW51YWxfc2VlZCg0MiklMkMlMEEpJTBBZnJhbWVzJTIwJTNEJTIwb3V0cHV0LmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fZ2lmKGZyYW1lcyUyQyUyMCUyMmFuaW1hdGlvbi5naWYlMjIp",highlighted:`import torch from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter from diffusers.utils import export_to_gif # Load the motion adapter adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16) # load SD 1.5 based finetuned model model_id = "SG161222/Realistic_Vision_V5.1_noVAE" pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16) pipe.load_lora_weights( "guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out" ) scheduler = DDIMScheduler.from_pretrained( model_id, subfolder="scheduler", clip_sample=False, beta_schedule="linear", timestep_spacing="linspace", steps_offset=1, ) pipe.scheduler = scheduler # enable memory savings pipe.enable_vae_slicing() pipe.enable_model_cpu_offload() output = pipe( prompt=( "masterpiece, bestquality, highlydetailed, ultradetailed, sunset, " "orange sky, warm lighting, fishing boats, ocean waves seagulls, " "rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, " "golden hour, coastal landscape, seaside scenery" ), negative_prompt="bad quality, worse quality", num_frames=16, guidance_scale=7.5, num_inference_steps=25, generator=torch.Generator("cpu").manual_seed(42), ) frames = output.frames[0] export_to_gif(frames, "animation.gif")`,wrap:!1}}),pt=new w({props:{title:"Using Motion LoRAs with PEFT",local:"using-motion-loras-with-peft",headingTag:"h2"}}),ft=new I({props:{code:"cGlwJTIwaW5zdGFsbCUyMHBlZnQ=",highlighted:"pip install peft",wrap:!1}}),gt=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZQaXBlbGluZSUyQyUyMERESU1TY2hlZHVsZXIlMkMlMjBNb3Rpb25BZGFwdGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMEElMEElMjMlMjBMb2FkJTIwdGhlJTIwbW90aW9uJTIwYWRhcHRlciUwQWFkYXB0ZXIlMjAlM0QlMjBNb3Rpb25BZGFwdGVyLmZyb21fcHJldHJhaW5lZCglMjJndW95d3clMkZhbmltYXRlZGlmZi1tb3Rpb24tYWRhcHRlci12MS01LTIlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBJTIzJTIwbG9hZCUyMFNEJTIwMS41JTIwYmFzZWQlMjBmaW5ldHVuZWQlMjBtb2RlbCUwQW1vZGVsX2lkJTIwJTNEJTIwJTIyU0cxNjEyMjIlMkZSZWFsaXN0aWNfVmlzaW9uX1Y1LjFfbm9WQUUlMjIlMEFwaXBlJTIwJTNEJTIwQW5pbWF0ZURpZmZQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQobW9kZWxfaWQlMkMlMjBtb3Rpb25fYWRhcHRlciUzRGFkYXB0ZXIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBJTBBcGlwZS5sb2FkX2xvcmFfd2VpZ2h0cyglMEElMjAlMjAlMjAlMjAlMjJkaWZmdXNlcnMlMkZhbmltYXRlZGlmZi1tb3Rpb24tbG9yYS16b29tLW91dCUyMiUyQyUyMGFkYXB0ZXJfbmFtZSUzRCUyMnpvb20tb3V0JTIyJTJDJTBBKSUwQXBpcGUubG9hZF9sb3JhX3dlaWdodHMoJTBBJTIwJTIwJTIwJTIwJTIyZGlmZnVzZXJzJTJGYW5pbWF0ZWRpZmYtbW90aW9uLWxvcmEtcGFuLWxlZnQlMjIlMkMlMjBhZGFwdGVyX25hbWUlM0QlMjJwYW4tbGVmdCUyMiUyQyUwQSklMEFwaXBlLnNldF9hZGFwdGVycyglNUIlMjJ6b29tLW91dCUyMiUyQyUyMCUyMnBhbi1sZWZ0JTIyJTVEJTJDJTIwYWRhcHRlcl93ZWlnaHRzJTNEJTVCMS4wJTJDJTIwMS4wJTVEKSUwQSUwQXNjaGVkdWxlciUyMCUzRCUyMERESU1TY2hlZHVsZXIuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMG1vZGVsX2lkJTJDJTBBJTIwJTIwJTIwJTIwc3ViZm9sZGVyJTNEJTIyc2NoZWR1bGVyJTIyJTJDJTBBJTIwJTIwJTIwJTIwY2xpcF9zYW1wbGUlM0RGYWxzZSUyQyUwQSUyMCUyMCUyMCUyMHRpbWVzdGVwX3NwYWNpbmclM0QlMjJsaW5zcGFjZSUyMiUyQyUwQSUyMCUyMCUyMCUyMGJldGFfc2NoZWR1bGUlM0QlMjJsaW5lYXIlMjIlMkMlMEElMjAlMjAlMjAlMjBzdGVwc19vZmZzZXQlM0QxJTJDJTBBKSUwQXBpcGUuc2NoZWR1bGVyJTIwJTNEJTIwc2NoZWR1bGVyJTBBJTBBJTIzJTIwZW5hYmxlJTIwbWVtb3J5JTIwc2F2aW5ncyUwQXBpcGUuZW5hYmxlX3ZhZV9zbGljaW5nKCklMEFwaXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBb3V0cHV0JTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0QoJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIybWFzdGVycGllY2UlMkMlMjBiZXN0cXVhbGl0eSUyQyUyMGhpZ2hseWRldGFpbGVkJTJDJTIwdWx0cmFkZXRhaWxlZCUyQyUyMHN1bnNldCUyQyUyMCUyMiUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMm9yYW5nZSUyMHNreSUyQyUyMHdhcm0lMjBsaWdodGluZyUyQyUyMGZpc2hpbmclMjBib2F0cyUyQyUyMG9jZWFuJTIwd2F2ZXMlMjBzZWFndWxscyUyQyUyMCUyMiUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMnJpcHBsaW5nJTIwd2F0ZXIlMkMlMjB3aGFyZiUyQyUyMHNpbGhvdWV0dGUlMkMlMjBzZXJlbmUlMjBhdG1vc3BoZXJlJTJDJTIwZHVzayUyQyUyMGV2ZW5pbmclMjBnbG93JTJDJTIwJTIyJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIyZ29sZGVuJTIwaG91ciUyQyUyMGNvYXN0YWwlMjBsYW5kc2NhcGUlMkMlMjBzZWFzaWRlJTIwc2NlbmVyeSUyMiUwQSUyMCUyMCUyMCUyMCklMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHQlM0QlMjJiYWQlMjBxdWFsaXR5JTJDJTIwd29yc2UlMjBxdWFsaXR5JTIyJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2ZyYW1lcyUzRDE2JTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2Vfc2NhbGUlM0Q3LjUlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNEMjUlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0R0b3JjaC5HZW5lcmF0b3IoJTIyY3B1JTIyKS5tYW51YWxfc2VlZCg0MiklMkMlMEEpJTBBZnJhbWVzJTIwJTNEJTIwb3V0cHV0LmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fZ2lmKGZyYW1lcyUyQyUyMCUyMmFuaW1hdGlvbi5naWYlMjIp",highlighted:`import torch from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter from diffusers.utils import export_to_gif # Load the motion adapter adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16) # load SD 1.5 based finetuned model model_id = "SG161222/Realistic_Vision_V5.1_noVAE" pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16) pipe.load_lora_weights( "diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out", ) pipe.load_lora_weights( "diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left", ) pipe.set_adapters(["zoom-out", "pan-left"], adapter_weights=[1.0, 1.0]) scheduler = DDIMScheduler.from_pretrained( model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1, ) pipe.scheduler = scheduler # enable memory savings pipe.enable_vae_slicing() pipe.enable_model_cpu_offload() output = pipe( prompt=( "masterpiece, bestquality, highlydetailed, ultradetailed, sunset, " "orange sky, warm lighting, fishing boats, ocean waves seagulls, " "rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, " "golden hour, coastal landscape, seaside scenery" ), negative_prompt="bad quality, worse quality", num_frames=16, guidance_scale=7.5, num_inference_steps=25, generator=torch.Generator("cpu").manual_seed(42), ) frames = output.frames[0] export_to_gif(frames, "animation.gif")`,wrap:!1}}),_t=new w({props:{title:"Using FreeInit",local:"using-freeinit",headingTag:"h2"}}),wt=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTW90aW9uQWRhcHRlciUyQyUyMEFuaW1hdGVEaWZmUGlwZWxpbmUlMkMlMjBERElNU2NoZWR1bGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b19naWYlMEElMEFhZGFwdGVyJTIwJTNEJTIwTW90aW9uQWRhcHRlci5mcm9tX3ByZXRyYWluZWQoJTIyZ3VveXd3JTJGYW5pbWF0ZWRpZmYtbW90aW9uLWFkYXB0ZXItdjEtNS0yJTIyKSUwQW1vZGVsX2lkJTIwJTNEJTIwJTIyU0cxNjEyMjIlMkZSZWFsaXN0aWNfVmlzaW9uX1Y1LjFfbm9WQUUlMjIlMEFwaXBlJTIwJTNEJTIwQW5pbWF0ZURpZmZQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQobW9kZWxfaWQlMkMlMjBtb3Rpb25fYWRhcHRlciUzRGFkYXB0ZXIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpLnRvKCUyMmN1ZGElMjIpJTBBcGlwZS5zY2hlZHVsZXIlMjAlM0QlMjBERElNU2NoZWR1bGVyLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjBtb2RlbF9pZCUyQyUwQSUyMCUyMCUyMCUyMHN1YmZvbGRlciUzRCUyMnNjaGVkdWxlciUyMiUyQyUwQSUyMCUyMCUyMCUyMGJldGFfc2NoZWR1bGUlM0QlMjJsaW5lYXIlMjIlMkMlMEElMjAlMjAlMjAlMjBjbGlwX3NhbXBsZSUzREZhbHNlJTJDJTBBJTIwJTIwJTIwJTIwdGltZXN0ZXBfc3BhY2luZyUzRCUyMmxpbnNwYWNlJTIyJTJDJTBBJTIwJTIwJTIwJTIwc3RlcHNfb2Zmc2V0JTNEMSUwQSklMEElMEElMjMlMjBlbmFibGUlMjBtZW1vcnklMjBzYXZpbmdzJTBBcGlwZS5lbmFibGVfdmFlX3NsaWNpbmcoKSUwQXBpcGUuZW5hYmxlX3ZhZV90aWxpbmcoKSUwQSUwQSUyMyUyMGVuYWJsZSUyMEZyZWVJbml0JTBBJTIzJTIwUmVmZXIlMjB0byUyMHRoZSUyMGVuYWJsZV9mcmVlX2luaXQlMjBkb2N1bWVudGF0aW9uJTIwZm9yJTIwYSUyMGZ1bGwlMjBsaXN0JTIwb2YlMjBjb25maWd1cmFibGUlMjBwYXJhbWV0ZXJzJTBBcGlwZS5lbmFibGVfZnJlZV9pbml0KG1ldGhvZCUzRCUyMmJ1dHRlcndvcnRoJTIyJTJDJTIwdXNlX2Zhc3Rfc2FtcGxpbmclM0RUcnVlKSUwQSUwQSUyMyUyMHJ1biUyMGluZmVyZW5jZSUwQW91dHB1dCUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEJTIyYSUyMHBhbmRhJTIwcGxheWluZyUyMGElMjBndWl0YXIlMkMlMjBvbiUyMGElMjBib2F0JTJDJTIwaW4lMjB0aGUlMjBvY2VhbiUyQyUyMGhpZ2glMjBxdWFsaXR5JTIyJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEJTIyYmFkJTIwcXVhbGl0eSUyQyUyMHdvcnNlJTIwcXVhbGl0eSUyMiUyQyUwQSUyMCUyMCUyMCUyMG51bV9mcmFtZXMlM0QxNiUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNENy41JTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDIwJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCUyMmNwdSUyMikubWFudWFsX3NlZWQoNjY2KSUyQyUwQSklMEElMEElMjMlMjBkaXNhYmxlJTIwRnJlZUluaXQlMEFwaXBlLmRpc2FibGVfZnJlZV9pbml0KCklMEElMEFmcmFtZXMlMjAlM0QlMjBvdXRwdXQuZnJhbWVzJTVCMCU1RCUwQWV4cG9ydF90b19naWYoZnJhbWVzJTJDJTIwJTIyYW5pbWF0aW9uLmdpZiUyMik=",highlighted:`import torch from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler from diffusers.utils import export_to_gif adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2") model_id = "SG161222/Realistic_Vision_V5.1_noVAE" pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16).to("cuda") pipe.scheduler = DDIMScheduler.from_pretrained( model_id, subfolder="scheduler", beta_schedule="linear", clip_sample=False, timestep_spacing="linspace", steps_offset=1 ) # enable memory savings pipe.enable_vae_slicing() pipe.enable_vae_tiling() # enable FreeInit # Refer to the enable_free_init documentation for a full list of configurable parameters pipe.enable_free_init(method="butterworth", use_fast_sampling=True) # run inference output = pipe( prompt="a panda playing a guitar, on a boat, in the ocean, high quality", negative_prompt="bad quality, worse quality", num_frames=16, guidance_scale=7.5, num_inference_steps=20, generator=torch.Generator("cpu").manual_seed(666), ) # disable FreeInit pipe.disable_free_init() frames = output.frames[0] export_to_gif(frames, "animation.gif")`,wrap:!1}}),te=new Wa({props:{warning:!0,$$slots:{default:[Wr]},$$scope:{ctx:A}}}),ne=new Wa({props:{$$slots:{default:[Vr]},$$scope:{ctx:A}}}),Jt=new w({props:{title:"Using AnimateLCM",local:"using-animatelcm",headingTag:"h2"}}),Ut=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZQaXBlbGluZSUyQyUyMExDTVNjaGVkdWxlciUyQyUyME1vdGlvbkFkYXB0ZXIlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwZXhwb3J0X3RvX2dpZiUwQSUwQWFkYXB0ZXIlMjAlM0QlMjBNb3Rpb25BZGFwdGVyLmZyb21fcHJldHJhaW5lZCglMjJ3YW5nZnV5dW4lMkZBbmltYXRlTENNJTIyKSUwQXBpcGUlMjAlM0QlMjBBbmltYXRlRGlmZlBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJlbWlsaWFuSlIlMkZlcGlDUmVhbGlzbSUyMiUyQyUyMG1vdGlvbl9hZGFwdGVyJTNEYWRhcHRlciklMEFwaXBlLnNjaGVkdWxlciUyMCUzRCUyMExDTVNjaGVkdWxlci5mcm9tX2NvbmZpZyhwaXBlLnNjaGVkdWxlci5jb25maWclMkMlMjBiZXRhX3NjaGVkdWxlJTNEJTIybGluZWFyJTIyKSUwQSUwQXBpcGUubG9hZF9sb3JhX3dlaWdodHMoJTIyd2FuZ2Z1eXVuJTJGQW5pbWF0ZUxDTSUyMiUyQyUyMHdlaWdodF9uYW1lJTNEJTIyc2QxNV9sb3JhX2JldGEuc2FmZXRlbnNvcnMlMjIlMkMlMjBhZGFwdGVyX25hbWUlM0QlMjJsY20tbG9yYSUyMiklMEElMEFwaXBlLmVuYWJsZV92YWVfc2xpY2luZygpJTBBcGlwZS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQW91dHB1dCUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEJTIyQSUyMHNwYWNlJTIwcm9ja2V0JTIwd2l0aCUyMHRyYWlscyUyMG9mJTIwc21va2UlMjBiZWhpbmQlMjBpdCUyMGxhdW5jaGluZyUyMGludG8lMjBzcGFjZSUyMGZyb20lMjB0aGUlMjBkZXNlcnQlMkMlMjA0ayUyQyUyMGhpZ2glMjByZXNvbHV0aW9uJTIyJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEJTIyYmFkJTIwcXVhbGl0eSUyQyUyMHdvcnNlJTIwcXVhbGl0eSUyQyUyMGxvdyUyMHJlc29sdXRpb24lMjIlMkMlMEElMjAlMjAlMjAlMjBudW1fZnJhbWVzJTNEMTYlMkMlMEElMjAlMjAlMjAlMjBndWlkYW5jZV9zY2FsZSUzRDEuNSUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q2JTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCUyMmNwdSUyMikubWFudWFsX3NlZWQoMCklMkMlMEEpJTBBZnJhbWVzJTIwJTNEJTIwb3V0cHV0LmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fZ2lmKGZyYW1lcyUyQyUyMCUyMmFuaW1hdGVsY20uZ2lmJTIyKQ==",highlighted:`import torch from diffusers import AnimateDiffPipeline, LCMScheduler, MotionAdapter from diffusers.utils import export_to_gif adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM") pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear") pipe.load_lora_weights("wangfuyun/AnimateLCM", weight_name="sd15_lora_beta.safetensors", adapter_name="lcm-lora") pipe.enable_vae_slicing() pipe.enable_model_cpu_offload() output = pipe( prompt="A space rocket with trails of smoke behind it launching into space from the desert, 4k, high resolution", negative_prompt="bad quality, worse quality, low resolution", num_frames=16, guidance_scale=1.5, num_inference_steps=6, generator=torch.Generator("cpu").manual_seed(0), ) frames = output.frames[0] export_to_gif(frames, "animatelcm.gif")`,wrap:!1}}),It=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQW5pbWF0ZURpZmZQaXBlbGluZSUyQyUyMExDTVNjaGVkdWxlciUyQyUyME1vdGlvbkFkYXB0ZXIlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwZXhwb3J0X3RvX2dpZiUwQSUwQWFkYXB0ZXIlMjAlM0QlMjBNb3Rpb25BZGFwdGVyLmZyb21fcHJldHJhaW5lZCglMjJ3YW5nZnV5dW4lMkZBbmltYXRlTENNJTIyKSUwQXBpcGUlMjAlM0QlMjBBbmltYXRlRGlmZlBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJlbWlsaWFuSlIlMkZlcGlDUmVhbGlzbSUyMiUyQyUyMG1vdGlvbl9hZGFwdGVyJTNEYWRhcHRlciklMEFwaXBlLnNjaGVkdWxlciUyMCUzRCUyMExDTVNjaGVkdWxlci5mcm9tX2NvbmZpZyhwaXBlLnNjaGVkdWxlci5jb25maWclMkMlMjBiZXRhX3NjaGVkdWxlJTNEJTIybGluZWFyJTIyKSUwQSUwQXBpcGUubG9hZF9sb3JhX3dlaWdodHMoJTIyd2FuZ2Z1eXVuJTJGQW5pbWF0ZUxDTSUyMiUyQyUyMHdlaWdodF9uYW1lJTNEJTIyc2QxNV9sb3JhX2JldGEuc2FmZXRlbnNvcnMlMjIlMkMlMjBhZGFwdGVyX25hbWUlM0QlMjJsY20tbG9yYSUyMiklMEFwaXBlLmxvYWRfbG9yYV93ZWlnaHRzKCUyMmd1b3l3dyUyRmFuaW1hdGVkaWZmLW1vdGlvbi1sb3JhLXRpbHQtdXAlMjIlMkMlMjBhZGFwdGVyX25hbWUlM0QlMjJ0aWx0LXVwJTIyKSUwQSUwQXBpcGUuc2V0X2FkYXB0ZXJzKCU1QiUyMmxjbS1sb3JhJTIyJTJDJTIwJTIydGlsdC11cCUyMiU1RCUyQyUyMCU1QjEuMCUyQyUyMDAuOCU1RCklMEFwaXBlLmVuYWJsZV92YWVfc2xpY2luZygpJTBBcGlwZS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQW91dHB1dCUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEJTIyQSUyMHNwYWNlJTIwcm9ja2V0JTIwd2l0aCUyMHRyYWlscyUyMG9mJTIwc21va2UlMjBiZWhpbmQlMjBpdCUyMGxhdW5jaGluZyUyMGludG8lMjBzcGFjZSUyMGZyb20lMjB0aGUlMjBkZXNlcnQlMkMlMjA0ayUyQyUyMGhpZ2glMjByZXNvbHV0aW9uJTIyJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEJTIyYmFkJTIwcXVhbGl0eSUyQyUyMHdvcnNlJTIwcXVhbGl0eSUyQyUyMGxvdyUyMHJlc29sdXRpb24lMjIlMkMlMEElMjAlMjAlMjAlMjBudW1fZnJhbWVzJTNEMTYlMkMlMEElMjAlMjAlMjAlMjBndWlkYW5jZV9zY2FsZSUzRDEuNSUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q2JTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCUyMmNwdSUyMikubWFudWFsX3NlZWQoMCklMkMlMEEpJTBBZnJhbWVzJTIwJTNEJTIwb3V0cHV0LmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fZ2lmKGZyYW1lcyUyQyUyMCUyMmFuaW1hdGVsY20tbW90aW9uLWxvcmEuZ2lmJTIyKQ==",highlighted:`import torch from diffusers import AnimateDiffPipeline, LCMScheduler, MotionAdapter from diffusers.utils import export_to_gif adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM") pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear") pipe.load_lora_weights("wangfuyun/AnimateLCM", weight_name="sd15_lora_beta.safetensors", adapter_name="lcm-lora") pipe.load_lora_weights("guoyww/animatediff-motion-lora-tilt-up", adapter_name="tilt-up") pipe.set_adapters(["lcm-lora", "tilt-up"], [1.0, 0.8]) pipe.enable_vae_slicing() pipe.enable_model_cpu_offload() output = pipe( prompt="A space rocket with trails of smoke behind it launching into space from the desert, 4k, high resolution", negative_prompt="bad quality, worse quality, low resolution", num_frames=16, guidance_scale=1.5, num_inference_steps=6, generator=torch.Generator("cpu").manual_seed(0), ) frames = output.frames[0] export_to_gif(frames, "animatelcm-motion-lora.gif")`,wrap:!1}}),Gt=new w({props:{title:"Using FreeNoise",local:"using-freenoise",headingTag:"h2"}}),Nt=new I({props:{code:"JTJCJTIwcGlwZS5lbmFibGVfZnJlZV9ub2lzZSgp",highlighted:'+ pipe.enable_free_noise()',wrap:!1}}),Rt=new I({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQXV0b2VuY29kZXJLTCUyQyUyMEFuaW1hdGVEaWZmUGlwZWxpbmUlMkMlMjBMQ01TY2hlZHVsZXIlMkMlMjBNb3Rpb25BZGFwdGVyJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGV4cG9ydF90b192aWRlbyUyQyUyMGxvYWRfaW1hZ2UlMEElMEElMjMlMjBMb2FkJTIwcGlwZWxpbmUlMEFkdHlwZSUyMCUzRCUyMHRvcmNoLmZsb2F0MTYlMEFtb3Rpb25fYWRhcHRlciUyMCUzRCUyME1vdGlvbkFkYXB0ZXIuZnJvbV9wcmV0cmFpbmVkKCUyMndhbmdmdXl1biUyRkFuaW1hdGVMQ00lMjIlMkMlMjB0b3JjaF9kdHlwZSUzRGR0eXBlKSUwQXZhZSUyMCUzRCUyMEF1dG9lbmNvZGVyS0wuZnJvbV9wcmV0cmFpbmVkKCUyMnN0YWJpbGl0eWFpJTJGc2QtdmFlLWZ0LW1zZSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEZHR5cGUpJTBBJTBBcGlwZSUyMCUzRCUyMEFuaW1hdGVEaWZmUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMmVtaWxpYW5KUiUyRmVwaUNSZWFsaXNtJTIyJTJDJTIwbW90aW9uX2FkYXB0ZXIlM0Rtb3Rpb25fYWRhcHRlciUyQyUyMHZhZSUzRHZhZSUyQyUyMHRvcmNoX2R0eXBlJTNEZHR5cGUpJTBBcGlwZS5zY2hlZHVsZXIlMjAlM0QlMjBMQ01TY2hlZHVsZXIuZnJvbV9jb25maWcocGlwZS5zY2hlZHVsZXIuY29uZmlnJTJDJTIwYmV0YV9zY2hlZHVsZSUzRCUyMmxpbmVhciUyMiklMEElMEFwaXBlLmxvYWRfbG9yYV93ZWlnaHRzKCUwQSUyMCUyMCUyMCUyMCUyMndhbmdmdXl1biUyRkFuaW1hdGVMQ00lMjIlMkMlMjB3ZWlnaHRfbmFtZSUzRCUyMkFuaW1hdGVMQ01fc2QxNV90MnZfbG9yYS5zYWZldGVuc29ycyUyMiUyQyUyMGFkYXB0ZXJfbmFtZSUzRCUyMmxjbV9sb3JhJTIyJTBBKSUwQXBpcGUuc2V0X2FkYXB0ZXJzKCU1QiUyMmxjbV9sb3JhJTIyJTVEJTJDJTIwJTVCMC44JTVEKSUwQSUwQSUyMyUyMEVuYWJsZSUyMEZyZWVOb2lzZSUyMGZvciUyMGxvbmclMjBwcm9tcHQlMjBnZW5lcmF0aW9uJTBBcGlwZS5lbmFibGVfZnJlZV9ub2lzZShjb250ZXh0X2xlbmd0aCUzRDE2JTJDJTIwY29udGV4dF9zdHJpZGUlM0Q0KSUwQXBpcGUudG8oJTIyY3VkYSUyMiklMEElMEElMjMlMjBDYW4lMjBiZSUyMGElMjBzaW5nbGUlMjBwcm9tcHQlMkMlMjBvciUyMGElMjBkaWN0aW9uYXJ5JTIwd2l0aCUyMGZyYW1lJTIwdGltZXN0ZXBzJTBBcHJvbXB0JTIwJTNEJTIwJTdCJTBBJTIwJTIwJTIwJTIwMCUzQSUyMCUyMkElMjBjYXRlcnBpbGxhciUyMG9uJTIwYSUyMGxlYWYlMkMlMjBoaWdoJTIwcXVhbGl0eSUyQyUyMHBob3RvcmVhbGlzdGljJTIyJTJDJTBBJTIwJTIwJTIwJTIwNDAlM0ElMjAlMjJBJTIwY2F0ZXJwaWxsYXIlMjB0cmFuc2Zvcm1pbmclMjBpbnRvJTIwYSUyMGNvY29vbiUyQyUyMG9uJTIwYSUyMGxlYWYlMkMlMjBuZWFyJTIwZmxvd2VycyUyQyUyMHBob3RvcmVhbGlzdGljJTIyJTJDJTBBJTIwJTIwJTIwJTIwODAlM0ElMjAlMjJBJTIwY29jb29uJTIwb24lMjBhJTIwbGVhZiUyQyUyMGZsb3dlcnMlMjBpbiUyMHRoZSUyMGJhY2tncm91bmQlMkMlMjBwaG90b3JlYWxpc3RpYyUyMiUyQyUwQSUyMCUyMCUyMCUyMDEyMCUzQSUyMCUyMkElMjBjb2Nvb24lMjBtYXR1cmluZyUyMGFuZCUyMGElMjBidXR0ZXJmbHklMjBiZWluZyUyMGJvcm4lMkMlMjBmbG93ZXJzJTIwYW5kJTIwbGVhdmVzJTIwdmlzaWJsZSUyMGluJTIwdGhlJTIwYmFja2dyb3VuZCUyQyUyMHBob3RvcmVhbGlzdGljJTIyJTJDJTBBJTIwJTIwJTIwJTIwMTYwJTNBJTIwJTIyQSUyMGJlYXV0aWZ1bCUyMGJ1dHRlcmZseSUyQyUyMHZpYnJhbnQlMjBjb2xvcnMlMkMlMjBzaXR0aW5nJTIwb24lMjBhJTIwbGVhZiUyQyUyMGZsb3dlcnMlMjBpbiUyMHRoZSUyMGJhY2tncm91bmQlMkMlMjBwaG90b3JlYWxpc3RpYyUyMiUyQyUwQSUyMCUyMCUyMCUyMDIwMCUzQSUyMCUyMkElMjBiZWF1dGlmdWwlMjBidXR0ZXJmbHklMkMlMjBmbHlpbmclMjBhd2F5JTIwaW4lMjBhJTIwZm9yZXN0JTJDJTIwcGhvdG9yZWFsaXN0aWMlMjIlMkMlMEElMjAlMjAlMjAlMjAyNDAlM0ElMjAlMjJBJTIwY3liZXJwdW5rJTIwYnV0dGVyZmx5JTJDJTIwbmVvbiUyMGxpZ2h0cyUyQyUyMGdsb3dpbmclMjIlMkMlMEElN0QlMEFuZWdhdGl2ZV9wcm9tcHQlMjAlM0QlMjAlMjJiYWQlMjBxdWFsaXR5JTJDJTIwd29yc3QlMjBxdWFsaXR5JTJDJTIwanBlZyUyMGFydGlmYWN0cyUyMiUwQSUwQSUyMyUyMFJ1biUyMGluZmVyZW5jZSUwQW91dHB1dCUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEbmVnYXRpdmVfcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwbnVtX2ZyYW1lcyUzRDI1NiUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMi41JTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDEwJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCUyMmNwdSUyMikubWFudWFsX3NlZWQoMCklMkMlMEEpJTBBJTBBJTIzJTIwU2F2ZSUyMHZpZGVvJTBBZnJhbWVzJTIwJTNEJTIwb3V0cHV0LmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fdmlkZW8oZnJhbWVzJTJDJTIwJTIyb3V0cHV0Lm1wNCUyMiUyQyUyMGZwcyUzRDE2KQ==",highlighted:`import torch from diffusers import AutoencoderKL, AnimateDiffPipeline, LCMScheduler, MotionAdapter from diffusers.utils import export_to_video, load_image # Load pipeline dtype = torch.float16 motion_adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM", torch_dtype=dtype) vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=dtype) pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=motion_adapter, vae=vae, torch_dtype=dtype) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear") pipe.load_lora_weights( "wangfuyun/AnimateLCM", weight_name="AnimateLCM_sd15_t2v_lora.safetensors", adapter_name="lcm_lora" ) pipe.set_adapters(["lcm_lora"], [0.8]) # Enable FreeNoise for long prompt generation pipe.enable_free_noise(context_length=16, context_stride=4) pipe.to("cuda") # Can be a single prompt, or a dictionary with frame timesteps prompt = { 0: "A caterpillar on a leaf, high quality, photorealistic", 40: "A caterpillar transforming into a cocoon, on a leaf, near flowers, photorealistic", 80: "A cocoon on a leaf, flowers in the background, photorealistic", 120: "A cocoon maturing and a butterfly being born, flowers and leaves visible in the background, photorealistic", 160: "A beautiful butterfly, vibrant colors, sitting on a leaf, flowers in the background, photorealistic", 200: "A beautiful butterfly, flying away in a forest, photorealistic", 240: "A cyberpunk butterfly, neon lights, glowing", } negative_prompt = "bad quality, worst quality, jpeg artifacts" # Run inference output = pipe( prompt=prompt, negative_prompt=negative_prompt, num_frames=256, guidance_scale=2.5, num_inference_steps=10, generator=torch.Generator("cpu").manual_seed(0), ) # Save video frames = output.frames[0] export_to_video(frames, "output.mp4", fps=16)`,wrap:!1}}),Xt=new w({props:{title:"FreeNoise memory savings",local:"freenoise-memory-savings",headingTag:"h3"}}),Lt=new I({props:{code:"JTIzJTIwTG9hZCUyMHBpcGVsaW5lJTIwYW5kJTIwYWRhcHRlcnMlMEElMjMlMjAuLi4lMEElMkIlMjBwaXBlLmVuYWJsZV9mcmVlX25vaXNlX3NwbGl0X2luZmVyZW5jZSgpJTBBJTJCJTIwcGlwZS51bmV0LmVuYWJsZV9mb3J3YXJkX2NodW5raW5nKDE2KQ==",highlighted:`# Load pipeline and adapters # ... + pipe.enable_free_noise_split_inference() + pipe.unet.enable_forward_chunking(16)`,wrap:!1}}),Et=new w({props:{title:"Using from_single_file with the MotionAdapter",local:"using-fromsinglefile-with-the-motionadapter",headingTag:"h2"}}),Pt=new I({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyME1vdGlvbkFkYXB0ZXIlMEElMEFja3B0X3BhdGglMjAlM0QlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGTGlnaHRyaWNrcyUyRkxvbmdBbmltYXRlRGlmZiUyRmJsb2IlMkZtYWluJTJGbHRfbG9uZ19tbV8zMl9mcmFtZXMuY2twdCUyMiUwQSUwQWFkYXB0ZXIlMjAlM0QlMjBNb3Rpb25BZGFwdGVyLmZyb21fc2luZ2xlX2ZpbGUoY2twdF9wYXRoJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2KSUwQXBpcGUlMjAlM0QlMjBBbmltYXRlRGlmZlBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJlbWlsaWFuSlIlMkZlcGlDUmVhbGlzbSUyMiUyQyUyMG1vdGlvbl9hZGFwdGVyJTNEYWRhcHRlcik=",highlighted:`from diffusers import MotionAdapter ckpt_path = "https://huggingface.co/Lightricks/LongAnimateDiff/blob/main/lt_long_mm_32_frames.ckpt" adapter = MotionAdapter.from_single_file(ckpt_path, torch_dtype=torch.float16) pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter)`,wrap:!1}}),Ft=new w({props:{title:"AnimateDiffPipeline",local:"diffusers.AnimateDiffPipeline",headingTag:"h2"}}),Ht=new j({props:{name:"class diffusers.AnimateDiffPipeline",anchor:"diffusers.AnimateDiffPipeline",parameters:[{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"unet",val:": typing.Union[diffusers.models.unets.unet_2d_condition.UNet2DConditionModel, diffusers.models.unets.unet_motion_model.UNetMotionModel]"},{name:"motion_adapter",val:": MotionAdapter"},{name:"scheduler",val:": typing.Union[diffusers.schedulers.scheduling_ddim.DDIMScheduler, diffusers.schedulers.scheduling_pndm.PNDMScheduler, diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler, diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler, diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler, diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler]"},{name:"feature_extractor",val:": CLIPImageProcessor = None"},{name:"image_encoder",val:": CLIPVisionModelWithProjection = None"}],parametersDescription:[{anchor:"diffusers.AnimateDiffPipeline.vae",description:`vae (AutoencoderKL) — Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.AnimateDiffPipeline.text_encoder",description:`text_encoder (CLIPTextModel) — Frozen text-encoder (clip-vit-large-patch14).`,name:"text_encoder"},{anchor:"diffusers.AnimateDiffPipeline.tokenizer",description:`tokenizer (CLIPTokenizer) — A CLIPTokenizer to tokenize text.`,name:"tokenizer"},{anchor:"diffusers.AnimateDiffPipeline.unet",description:`unet (UNet2DConditionModel) — A UNet2DConditionModel used to create a UNetMotionModel to denoise the encoded video latents.`,name:"unet"},{anchor:"diffusers.AnimateDiffPipeline.motion_adapter",description:`motion_adapter (MotionAdapter) — A MotionAdapter to be used in combination with unet to denoise the encoded video latents.`,name:"motion_adapter"},{anchor:"diffusers.AnimateDiffPipeline.scheduler",description:`scheduler (SchedulerMixin) — A scheduler to be used in combination with unet to denoise the encoded image latents. Can be one of DDIMScheduler, LMSDiscreteScheduler, or PNDMScheduler.`,name:"scheduler"}],source:"https://github.com/huggingface/diffusers/blob/v0.35.1/src/diffusers/pipelines/animatediff/pipeline_animatediff.py#L78"}}),zt=new j({props:{name:"__call__",anchor:"diffusers.AnimateDiffPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_frames",val:": typing.Optional[int] = 16"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"num_inference_steps",val:": int = 50"},{name:"guidance_scale",val:": float = 7.5"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_videos_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"clip_skip",val:": typing.Optional[int] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"decode_chunk_size",val:": int = 16"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.AnimateDiffPipeline.__call__.prompt",description:`prompt (str or List[str], optional) — The prompt or prompts to guide image generation. If not defined, you need to pass prompt_embeds.`,name:"prompt"},{anchor:"diffusers.AnimateDiffPipeline.__call__.height",description:`height (int, optional, defaults to self.unet.config.sample_size * self.vae_scale_factor) — The height in pixels of the generated video.`,name:"height"},{anchor:"diffusers.AnimateDiffPipeline.__call__.width",description:`width (int, optional, defaults to self.unet.config.sample_size * self.vae_scale_factor) — The width in pixels of the generated video.`,name:"width"},{anchor:"diffusers.AnimateDiffPipeline.__call__.num_frames",description:`num_frames (int, optional, defaults to 16) — The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds amounts to 2 seconds of video.`,name:"num_frames"},{anchor:"diffusers.AnimateDiffPipeline.__call__.num_inference_steps",description:`num_inference_steps (int, optional, defaults to 50) — The number of denoising steps. More denoising steps usually lead to a higher quality videos at the expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.AnimateDiffPipeline.__call__.guidance_scale",description:`guidance_scale (float, optional, defaults to 7.5) — A higher guidance scale value encourages the model to generate images closely linked to the text prompt at the expense of lower image quality. Guidance scale is enabled when guidance_scale > 1.`,name:"guidance_scale"},{anchor:"diffusers.AnimateDiffPipeline.__call__.negative_prompt",description:`negative_prompt (str or List[str], optional) — The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass negative_prompt_embeds instead. Ignored when not using guidance (guidance_scale < 1).`,name:"negative_prompt"},{anchor:"diffusers.AnimateDiffPipeline.__call__.eta",description:`eta (float, optional, defaults to 0.0) — Corresponds to parameter eta (η) from the DDIM paper. Only applies to the DDIMScheduler, and is ignored in other schedulers.`,name:"eta"},{anchor:"diffusers.AnimateDiffPipeline.__call__.generator",description:`generator (torch.Generator or List[torch.Generator], optional) — A torch.Generator to make generation deterministic.`,name:"generator"},{anchor:"diffusers.AnimateDiffPipeline.__call__.latents",description:`latents (torch.Tensor, optional) — Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random generator. Latents should be of shape (batch_size, num_channel, num_frames, height, width).`,name:"latents"},{anchor:"diffusers.AnimateDiffPipeline.__call__.prompt_embeds",description:`prompt_embeds (torch.Tensor, optional) — Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the prompt input argument.`,name:"prompt_embeds"},{anchor:"diffusers.AnimateDiffPipeline.__call__.negative_prompt_embeds",description:`negative_prompt_embeds (torch.Tensor, optional) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, negative_prompt_embeds are generated from the negative_prompt input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.AnimateDiffPipeline.__call__.ip_adapter_image",description:`ip_adapter_image — (PipelineImageInput, optional): Optional image input to work with IP Adapters.`,name:"ip_adapter_image"},{anchor:"diffusers.AnimateDiffPipeline.__call__.ip_adapter_image_embeds",description:`ip_adapter_image_embeds (List[torch.Tensor], optional) — Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. Each element should be a tensor of shape (batch_size, num_images, emb_dim). It should contain the negative image embedding if do_classifier_free_guidance is set to True. If not provided, embeddings are computed from the ip_adapter_image input argument.`,name:"ip_adapter_image_embeds"},{anchor:"diffusers.AnimateDiffPipeline.__call__.output_type",description:`output_type (str, optional, defaults to "pil") — The output format of the generated video. Choose between torch.Tensor, PIL.Image or np.array.`,name:"output_type"},{anchor:"diffusers.AnimateDiffPipeline.__call__.return_dict",description:`return_dict (bool, optional, defaults to True) — Whether or not to return a TextToVideoSDPipelineOutput instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.AnimateDiffPipeline.__call__.cross_attention_kwargs",description:`cross_attention_kwargs (dict, optional) — A kwargs dictionary that if specified is passed along to the AttentionProcessor as defined in self.processor.`,name:"cross_attention_kwargs"},{anchor:"diffusers.AnimateDiffPipeline.__call__.clip_skip",description:`clip_skip (int, optional) — Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"},{anchor:"diffusers.AnimateDiffPipeline.__call__.callback_on_step_end",description:`callback_on_step_end (Callable, optional) — A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict). callback_kwargs will include a list of all tensors as specified by callback_on_step_end_tensor_inputs.`,name:"callback_on_step_end"},{anchor:"diffusers.AnimateDiffPipeline.__call__.callback_on_step_end_tensor_inputs",description:`callback_on_step_end_tensor_inputs (List, optional) — The list of tensor inputs for the callback_on_step_end function. The tensors specified in the list will be passed as callback_kwargs argument. You will only be able to include variables listed in the ._callback_tensor_inputs attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.AnimateDiffPipeline.__call__.decode_chunk_size",description:`decode_chunk_size (int, defaults to 16) — The number of frames to decode at a time when calling decode_latents method.`,name:"decode_chunk_size"}],source:"https://github.com/huggingface/diffusers/blob/v0.35.1/src/diffusers/pipelines/animatediff/pipeline_animatediff.py#L573",returnDescription:`