# Effective and efficient diffusion

Getting the [DiffusionPipeline](/docs/diffusers/v0.18.2/en/api/diffusion_pipeline#diffusers.DiffusionPipeline) to generate images in a certain style, or to include exactly what you want, can be tricky. Often you have to run the pipeline several times before you end up with an image you're happy with. But generating something out of nothing is a computationally intensive process, especially if you're running inference over and over again.

This is why it's important to get the most *computational* (speed) and *memory* (GPU RAM) efficiency from the pipeline, to reduce the time between inference cycles so you can iterate faster.

This tutorial walks you through how to generate faster and better with the DiffusionPipeline.

Begin by loading the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model:
```py
from diffusers import DiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipeline = DiffusionPipeline.from_pretrained(model_id)
```

The example prompt you'll use is a portrait of an old warrior chief, but feel free to use your own prompt:

```py
prompt = "portrait photo of a old warrior chief"
```

## Speed

> 💡 If you don't have access to a GPU, you can use one for free from a GPU provider like [Colab](https://colab.research.google.com/)!

One of the simplest ways to speed up inference is to place the pipeline on a GPU the same way you would with any PyTorch module:

```py
pipeline = pipeline.to("cuda")
```

To make sure you can use the same image and improve on it, use a [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed for [reproducibility](./using-diffusers/reproducibility):

```py
import torch

generator = torch.Generator("cuda").manual_seed(0)
```

Now you can generate an image:

```py
image = pipeline(prompt, generator=generator).images[0]
image
```

![portrait photo of an old warrior chief](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_1.png)

This process took ~30 seconds on a T4 GPU (it might be faster if your allocated GPU is better than a T4). By default, the DiffusionPipeline runs inference with full `float32` precision for 50 inference steps. You can speed this up by switching to a lower precision like `float16` or running fewer inference steps.

Let's start by loading the model in `float16` and generating an image:
```py
import torch

pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator).images[0]
image
```

![float16 result](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_2.png)

This time, it only took ~11 seconds to generate the image, which is almost 3x faster than before!

> 💡 We strongly suggest always running your pipelines in `float16`, and so far, we've rarely seen any degradation in output quality.
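The timings quoted in this tutorial are rough, informal numbers. If you want to measure your own hardware, here is a minimal sketch using the standard library's `time.perf_counter`; the `torch.cuda.synchronize()` calls matter because CUDA work is asynchronous, so without them you could time the kernel launch rather than the actual generation:

```py
import time

import torch

torch.cuda.synchronize()  # finish any pending GPU work before starting the clock
start = time.perf_counter()
image = pipeline(prompt, generator=generator).images[0]
torch.cuda.synchronize()  # wait for generation to finish before stopping the clock
print(f"Inference took {time.perf_counter() - start:.1f} s")
```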
Another option is to reduce the number of inference steps. Choosing a more efficient scheduler could help decrease the number of steps without sacrificing output quality. You can find which schedulers are compatible with the current model in the DiffusionPipeline through the `compatibles` attribute:

```py
pipeline.scheduler.compatibles
[
    diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler,
    diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler,
    diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler,
    diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler,
    diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler,
    diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler,
    diffusers.schedulers.scheduling_ddpm.DDPMScheduler,
    diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler,
    diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler,
    diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler,
    diffusers.schedulers.scheduling_pndm.PNDMScheduler,
    diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler,
    diffusers.schedulers.scheduling_ddim.DDIMScheduler,
]
```

The Stable Diffusion model uses the [PNDMScheduler](/docs/diffusers/v0.18.2/en/api/schedulers/pndm#diffusers.PNDMScheduler) by default, which usually requires ~50 inference steps, but more performant schedulers like the [DPMSolverMultistepScheduler](/docs/diffusers/v0.18.2/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler) require only ~20 or 25 inference steps. Use the [ConfigMixin.from_config()](/docs/diffusers/v0.18.2/en/api/configuration#diffusers.ConfigMixin.from_config) method to load a new scheduler:
```py
from diffusers import DPMSolverMultistepScheduler

pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
```

Now set `num_inference_steps` to 20:

```py
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
image
```

![20-step result](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_3.png)

Great, you've managed to cut the inference time to just 4 seconds! ⚡️

## Memory

The other key to improving pipeline performance is consuming less memory, which indirectly implies more speed, since you're often trying to maximize the number of images generated per second. The easiest way to see how many images you can generate at once is to try out different batch sizes until you get an `OutOfMemoryError` (OOM).

Create a function that'll generate a batch of images from a list of prompts and `Generator`s. Make sure to assign each `Generator` a seed so you can reuse it if it produces a good result.
```py
def get_inputs(batch_size=1):
    generator = [torch.Generator("cuda").manual_seed(i) for i in range(batch_size)]
    prompts = batch_size * [prompt]
    num_inference_steps = 20

    return {"prompt": prompts, "generator": generator, "num_inference_steps": num_inference_steps}
```
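Because each `Generator` above is seeded with its index in the batch, any image you like can be reproduced later from its position alone. A minimal sketch, assuming the third image (index `2`) of a batch turned out well:

```py
# Image i of a batch was generated with seed i, so reusing that seed
# reproduces the same image on its own.
good_seed = 2
generator = torch.Generator("cuda").manual_seed(good_seed)
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
```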
You'll also need a function that'll display each batch of images:

```py
from PIL import Image


def image_grid(imgs, rows=2, cols=2):
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
```

Start with `batch_size=4` and see how much memory you've consumed:
```py
images = pipeline(**get_inputs(batch_size=4)).images
image_grid(images)
```
Unless you have a GPU with more RAM, the code above probably returned an `OOM` error! Most of the memory is taken up by the cross-attention layers. Instead of running this operation in a batch, you can run it sequentially to save a significant amount of memory. All you have to do is configure the pipeline to use the [enable_attention_slicing()](/docs/diffusers/v0.18.2/en/api/pipelines/stable_diffusion/upscale#diffusers.StableDiffusionUpscalePipeline.enable_attention_slicing) function:

```py
pipeline.enable_attention_slicing()
```

Now try increasing the `batch_size` to 8!

```py
images = pipeline(**get_inputs(batch_size=8)).images
image_grid(images, rows=2, cols=4)
```

![batch of 8 images](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_5.png)

Whereas before you couldn't even generate a batch of 4 images, now you can generate a batch of 8 images at ~3.5 seconds per image! This is probably the fastest you can go on a T4 GPU without sacrificing quality.
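If you'd rather probe for the limit programmatically than by hand, here is a minimal sketch (not part of the original tutorial) that doubles the batch size until PyTorch raises `torch.cuda.OutOfMemoryError`, reporting the peak memory of each successful run:

```py
import torch


def find_max_batch_size(limit=64):
    """Hypothetical helper: largest power-of-two batch that fits on this GPU."""
    batch_size = 1
    while batch_size <= limit:
        try:
            torch.cuda.reset_peak_memory_stats()
            pipeline(**get_inputs(batch_size=batch_size))
            peak_gb = torch.cuda.max_memory_allocated() / 1024**3
            print(f"batch_size={batch_size} fits (peak memory {peak_gb:.2f} GB)")
            batch_size *= 2
        except torch.cuda.OutOfMemoryError:
            torch.cuda.empty_cache()  # release the failed allocation
            break
    return batch_size // 2
```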
## Quality

In the last two sections, you learned how to optimize the speed of your pipeline by using `fp16`, reducing the number of inference steps by using a more performant scheduler, and enabling attention slicing to reduce memory consumption. Now you're going to focus on how to improve the quality of generated images.

### Better checkpoints

The most obvious step is to use better checkpoints. The Stable Diffusion model is a good starting point, and since its official launch, several improved versions have also been released. However, using a newer version doesn't automatically mean you'll get better results. You'll still have to experiment with different checkpoints yourself, and do a little research (such as using [negative prompts](https://minimaxir.com/2022/11/stable-diffusion-negative-prompt/)) to get the best results.
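As a quick illustration of that last point: a Stable Diffusion pipeline's call accepts a `negative_prompt` argument describing what you *don't* want in the image. A minimal sketch, with an example negative prompt of our own invention:

```py
# Steer generation away from the listed traits rather than towards them.
image = pipeline(
    prompt,
    negative_prompt="blurry, low quality, deformed",  # hypothetical example
    generator=torch.Generator("cuda").manual_seed(0),
    num_inference_steps=20,
).images[0]
```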
As the field grows, there are more and more high-quality checkpoints finetuned to produce certain styles. Try exploring the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) and [Diffusers Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) to find one you're interested in!

### Better pipeline components

You can also try replacing the current pipeline components with a newer version. Let's try loading the latest [autoencoder](https://huggingface.co/stabilityai/stable-diffusion-2-1/tree/main/vae) from Stability AI into the pipeline and generating some images:

```py
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda")
pipeline.vae = vae
images = pipeline(**get_inputs(batch_size=8)).images
image_grid(images, rows=2, cols=4)
```

![batch with improved VAE](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_6.png)

### Better prompt engineering

The text prompt you use to generate an image is super important, so much so that it is called *prompt engineering*. Some considerations to keep in mind during prompt engineering are:

- How is the image, or similar images to the one I want to generate, stored on the internet?
- What additional detail can I give that steers the model towards the style I want?

With this in mind, let's improve the prompt to include color and higher-quality details:
```py
prompt += ", tribal panther make up, blue on red, side profile, looking away, serious eyes"
prompt += " 50mm portrait photography, hard rim lighting photography--beta --ar 2:3  --beta --upbeta"
```

Generate a batch of images with the new prompt:

```py
images = pipeline(**get_inputs(batch_size=8)).images
image_grid(images, rows=2, cols=4)
```

![batch with improved prompt](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_7.png)

Pretty impressive! Let's tweak the second image (corresponding to the `Generator` with a seed of `1`) a bit more by adding some text about the age of the subject:

```py
prompts = [
    "portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3  --beta --upbeta",
    "portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3  --beta --upbeta",
    "portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3  --beta --upbeta",
    "portrait photo of a young warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3  --beta --upbeta",
]

generator = [torch.Generator("cuda").manual_seed(1) for _ in range(len(prompts))]
images = pipeline(prompt=prompts, generator=generator, num_inference_steps=25).images
image_grid(images)
```

![age variations of the warrior chief](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_8.png)
## Next steps

In this tutorial, you learned how to optimize a DiffusionPipeline for computational and memory efficiency, as well as how to improve the quality of generated outputs. If you're interested in making your pipeline even faster, take a look at the following resources:

- Learn how [PyTorch 2.0](./optimization/torch2.0) and [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) can yield 5-300% faster inference speed; on an A100 GPU, inference can be up to 50% faster! (See the sketch after this list.)
- If you can't use PyTorch 2, we recommend you install [xFormers](./optimization/xformers). Its memory-efficient attention mechanism works great with PyTorch 1.13.1 for faster speed and reduced memory consumption.
- Other optimization techniques, such as model offloading, are covered in [this guide](./optimization/fp16).
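To give a flavor of those first two options, here is a minimal sketch; compiling the UNet follows the pattern the PyTorch 2.0 guide describes, and `enable_xformers_memory_efficient_attention()` is the diffusers hook for xFormers (which must be installed separately):

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# PyTorch 2.0: compile the UNet, where most of the denoising compute happens.
pipeline.unet = torch.compile(pipeline.unet)

# PyTorch 1.13.x alternative: memory-efficient attention via xFormers.
# pipeline.enable_xformers_memory_efficient_attention()
```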