
    +h              
          S SK r S SKJrJrJrJrJrJr  S SKr	S SK
rS SKrS SKJs  Jr  S SKJrJrJr  SSKJrJr  SSKJrJr  SSKJrJrJr  SSKJ r J!r!J"r"J#r#  SS	K$J%r%  SS
K&J'r'  SSK(J)r)J*r*J+r+J,r,J-r-J.r.J/r/  SSK0J1r1J2r2J3r3J4r4  SSK5J6r6J7r7J8r8  SSK9J:r:  SSK;J<r<  \+" 5       (       a  S SK=J>s  J?r@  SrAOSrA\,R                  " \C5      rDSrE " S S\6\7\8\\\5      rFg)    N)AnyCallableDictListOptionalUnion)CLIPImageProcessorCLIPTextModelCLIPTokenizer   )MultiPipelineCallbacksPipelineCallback)PipelineImageInputVaeImageProcessor)FromSingleFileMixinStableDiffusionLoraLoaderMixinTextualInversionLoaderMixin)AutoencoderKLControlNetXSAdapterUNet2DConditionModelUNetControlNetXSModel)adjust_lora_scale_text_encoder)KarrasDiffusionSchedulers)USE_PEFT_BACKEND	deprecateis_torch_xla_availableloggingreplace_example_docstringscale_lora_layersunscale_lora_layers)empty_device_cacheis_compiled_moduleis_torch_versionrandn_tensor   )DeprecatedPipelineMixinDiffusionPipelineStableDiffusionMixin)StableDiffusionPipelineOutput)StableDiffusionSafetyCheckerTFa  
    Examples:
        ```py
        >>> # !pip install opencv-python transformers accelerate
        >>> from diffusers import StableDiffusionControlNetXSPipeline, ControlNetXSAdapter
        >>> from diffusers.utils import load_image
        >>> import numpy as np
        >>> import torch

        >>> import cv2
        >>> from PIL import Image

        >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
        >>> negative_prompt = "low quality, bad quality, sketches"

        >>> # download an image
        >>> image = load_image(
        ...     "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
        ... )

        >>> # initialize the models and pipeline
        >>> controlnet_conditioning_scale = 0.5

        >>> controlnet = ControlNetXSAdapter.from_pretrained(
        ...     "UmerHA/Testing-ConrolNetXS-SD2.1-canny", torch_dtype=torch.float16
        ... )
        >>> pipe = StableDiffusionControlNetXSPipeline.from_pretrained(
        ...     "stabilityai/stable-diffusion-2-1-base", controlnet=controlnet, torch_dtype=torch.float16
        ... )
        >>> pipe.enable_model_cpu_offload()

        >>> # get canny image
        >>> image = np.array(image)
        >>> image = cv2.Canny(image, 100, 200)
        >>> image = image[:, :, None]
        >>> image = np.concatenate([image, image, image], axis=2)
        >>> canny_image = Image.fromarray(image)
        >>> # generate image
        >>> image = pipe(
        ...     prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image
        ... ).images[0]
        ```
c            2         ^  \ rS rSrSrSrSrSS/rS/r/ SQr	 S?S	\
S
\S\S\\\4   S\S\S\S\S\4U 4S jjjr    S@S\\R0                     S\\R0                     S\\   4S jjr     SAS\\R0                     S\\R0                     S\\   S\\   4S jjrS rS rS r       SBS jr S r! SCS jr"SDS  jr#\$S! 5       r%\$S" 5       r&\$S# 5       r'\$S$ 5       r(\$S% 5       r)\RT                  " 5       \+" \,5      SSSSS&S'SS(SSSSSS)SSSSSSSS*/4S+\\-\.\-   4   S,\/S-\\   S.\\   S/\S0\S1\\\-\.\-   4      S2\\   S3\S4\\\R`                  \.\R`                     4      S*\\R0                     S\\R0                     S\\R0                     S5\\-   S6\S7\\1\-\24      S8\\\.\   4   S9\S:\S\\   S;\\\3\\\1/S4   \4\54      S<\.\-   4,S= jj5       5       r6S>r7U =r8$ )E#StableDiffusionControlNetXSPipelined   aa  
Pipeline for text-to-image generation using Stable Diffusion with ControlNet-XS guidance.

This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).

The pipeline also inherits the following loading methods:
    - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
    - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
    - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
    - [`loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files

Args:
    vae ([`AutoencoderKL`]):
        Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
    text_encoder ([`~transformers.CLIPTextModel`]):
        Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
    tokenizer ([`~transformers.CLIPTokenizer`]):
        A `CLIPTokenizer` to tokenize text.
    unet ([`UNet2DConditionModel`]):
        A [`UNet2DConditionModel`] used to create a UNetControlNetXSModel to denoise the encoded image latents.
    controlnet ([`ControlNetXSAdapter`]):
        A [`ControlNetXSAdapter`] to be used in combination with `unet` to denoise the encoded image latents.
    scheduler ([`SchedulerMixin`]):
        A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
        [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
    safety_checker ([`StableDiffusionSafetyChecker`]):
        Classification module that estimates whether generated images could be considered offensive or harmful.
        Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
        about a model's potential harms.
    feature_extractor ([`~transformers.CLIPImageProcessor`]):
        A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
z0.33.1ztext_encoder->unet->vaesafety_checkerfeature_extractor)latentsprompt_embedsnegative_prompt_embedsTvaetext_encoder	tokenizerunet
controlnet	schedulerrequires_safety_checkerc
                 4  > [         T
U ]  5         [        U[        5      (       a  [        R
                  " XE5      nUc*  U	(       a#  [        R                  SU R                   S35        Ub  Uc  [        S5      eU R                  UUUUUUUUS9  [        U SS 5      (       a/  S[        U R                  R                  R                  5      S-
  -  OSU l        [#        U R                   S	S
9U l        [#        U R                   S	SS9U l        U R)                  U	S9  g )Nz)You have disabled the safety checker for a   by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .zMake sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.)r3   r4   r5   r6   r7   r8   r.   r/   r3   r%         T)vae_scale_factordo_convert_rgbF)r=   r>   do_normalize)r9   )super__init__
isinstancer   r   	from_unetloggerwarning	__class__
ValueErrorregister_modulesgetattrlenr3   configblock_out_channelsr=   r   image_processorcontrol_image_processorregister_to_config)selfr3   r4   r5   r6   r7   r8   r.   r/   r9   rF   s             r/home/james-whalen/.local/lib/python3.13/site-packages/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.pyrA   ,StableDiffusionControlNetXSPipeline.__init__   s-    	d011(224DD!&=NN;DNN;K Lj j %*;*Cx 
 	%!)/ 	 		
 W^^bdikoVpVpc$((//*L*L&MPQ&Q Rvw0$BWBWhlm'8!224V[(
$ 	8OP    Nr1   r2   
lora_scalec	                     Sn
[        SSU
SS9  U R                  " S	UUUUUUUUS.U	D6n[        R                  " US   US   /5      nU$ )
Nz`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.z_encode_prompt()1.0.0Fstandard_warn)promptdevicenum_images_per_promptdo_classifier_free_guidancenegative_promptr1   r2   rT   r;   r    )r   encode_prompttorchcat)rP   rY   rZ   r[   r\   r]   r1   r2   rT   kwargsdeprecation_messageprompt_embeds_tuples               rQ   _encode_prompt2StableDiffusionControlNetXSPipeline._encode_prompt   s{     a$g/BRWX"00 

"7(C+'#9!

 

 		#6q#9;Nq;Q"RSrS   	clip_skipc
                 
   UbS  [        U [        5      (       a>  Xl        [        (       d  [	        U R
                  U5        O[        U R
                  U5        Ub  [        U[        5      (       a  Sn
O3Ub!  [        U[        5      (       a  [        U5      n
OUR                  S   n
UGc  [        U [        5      (       a  U R                  XR                  5      nU R                  USU R                  R                  SSS9nUR                  nU R                  USSS	9R                  nUR                  S
   UR                  S
   :  a  [         R"                  " X5      (       dj  U R                  R%                  USS2U R                  R                  S-
  S
24   5      n[&        R)                  SU R                  R                   SU 35        [+        U R
                  R,                  S5      (       aA  U R
                  R,                  R.                  (       a  UR0                  R3                  U5      nOSnU	c%  U R                  UR3                  U5      US9nUS   nOQU R                  UR3                  U5      USS9nUS
   U	S-   *    nU R
                  R4                  R7                  U5      nU R
                  b  U R
                  R8                  nO0U R:                  b  U R:                  R8                  nOUR8                  nUR3                  UUS9nUR                  u  nnnUR=                  SUS5      nUR?                  UU-  US
5      nU(       Ga  UGc|  Uc  S/U
-  nOUb;  [A        U5      [A        U5      La$  [C        S[A        U5       S[A        U5       S35      e[        U[        5      (       a  U/nO2U
[        U5      :w  a!  [E        SU S[        U5       SU SU
 S3	5      eUn[        U [        5      (       a  U R                  UU R                  5      nUR                  S   nU R                  USUSSS9n[+        U R
                  R,                  S5      (       aA  U R
                  R,                  R.                  (       a  UR0                  R3                  U5      nOSnU R                  UR                  R3                  U5      US9nUS   nU(       aG  UR                  S   nUR3                  UUS9nUR=                  SUS5      nUR?                  X-  US
5      nU R
                  b6  [        U [        5      (       a!  [        (       a  [G        U R
                  U5        Xg4$ )a,  
Encodes the prompt into text encoder hidden states.

Args:
    prompt (`str` or `List[str]`, *optional*):
        prompt to be encoded
    device: (`torch.device`):
        torch device
    num_images_per_prompt (`int`):
        number of images that should be generated per prompt
    do_classifier_free_guidance (`bool`):
        whether to use classifier free guidance or not
    negative_prompt (`str` or `List[str]`, *optional*):
        The prompt or prompts not to guide the image generation. If not defined, one has to pass
        `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
        less than `1`).
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from `prompt` input argument.
    negative_prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
        weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
        argument.
    lora_scale (`float`, *optional*):
        A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
    clip_skip (`int`, *optional*):
        Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
        the output of the pre-final layer will be used for computing the prompt embeddings.
Nr;   r   
max_lengthTpt)paddingri   
truncationreturn_tensorslongest)rk   rm   z\The following part of your input was truncated because CLIP can only handle sequences up to z	 tokens: use_attention_mask)attention_mask)rq   output_hidden_states)dtyperZ    z?`negative_prompt` should be the same type to `prompt`, but got z != .z`negative_prompt`: z has batch size z, but `prompt`: zT. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.)$rB   r   _lora_scaler   r   r4   r   strlistrJ   shaper   maybe_convert_promptr5   model_max_length	input_idsr`   equalbatch_decoderD   rE   hasattrrK   rp   rq   to
text_modelfinal_layer_normrs   r6   repeatviewtype	TypeErrorrG   r    )rP   rY   rZ   r[   r\   r]   r1   r2   rT   rg   
batch_sizetext_inputstext_input_idsuntruncated_idsremoved_textrq   prompt_embeds_dtypebs_embedseq_len_uncond_tokensri   uncond_inputs                          rQ   r_   1StableDiffusionControlNetXSPipeline.encode_prompt   sQ   V !j7U&V&V) $#.t/@/@*M!$"3"3Z@*VS"9"9JJvt$<$<VJ&,,Q/J $ ;<<226>>J..$>>::# ) K )22N"nnVYW[n\ffO$$R(N,@,@,DDU[[N N  $~~::#At~~'F'F'JR'O$OP  778	,Q
 t((//1EFF4K\K\KcKcKvKv!,!;!;!>!>v!F!%  $ 1 1.2C2CF2K\j 1 k -a 0 $ 1 1"%%f-ncg !2 ! !.b 1IM2B C
 !% 1 1 < < M Mm \("&"3"3"9"9YY""&))//"/"5"5%((/B6(R,22'1%,,Q0EqI%**86K+KWVXY '+A+I&!#z 1#VD<Q(QUVZ[jVkUl mV~Q(  OS11!0 1s?33 )/)::J3K_J` ax/
| <33  !0 $ ;<< $ 9 9- X&,,Q/J>>$%# * L t((//1EFF4K\K\KcKcKvKv!-!<!<!?!?!G!%%)%6%6&&))&1- &7 &" &<A%>"&,2215G%;%>%>EXag%>%h"%;%B%B1F[]^%_"%;%@%@Acelnp%q"($ >??DTDT#D$5$5zB44rS   c                 `   U R                   c  S nX4$ [        R                  " U5      (       a  U R                  R	                  USS9nOU R                  R                  U5      nU R                  USS9R                  U5      nU R                  XR                  R                  U5      S9u  pX4$ )Npil)output_typerj   )rm   )images
clip_input)	r.   r`   	is_tensorrM   postprocessnumpy_to_pilr/   r   pixel_values)rP   imagerZ   rs   has_nsfw_conceptfeature_extractor_inputsafety_checker_inputs          rQ   run_safety_checker6StableDiffusionControlNetXSPipeline.run_safety_checker  s    &# && u%%*.*>*>*J*J5^c*J*d'*.*>*>*K*KE*R'#'#9#9:Qbf#9#g#j#jkq#r &*&9&9)J)J)M)Me)T ': '#E &&rS   c                 T   Sn[        SSUSS9  SU R                  R                  R                  -  U-  nU R                  R	                  USS9S   nUS	-  S
-   R                  SS5      nUR                  5       R                  SS	SS5      R                  5       R                  5       nU$ )Nz{The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) insteaddecode_latentsrV   FrW   r;   )return_dictr   r%   g      ?r   )
r   r3   rK   scaling_factordecodeclampcpupermutefloatnumpy)rP   r0   rc   r   s       rQ   r   2StableDiffusionControlNetXSPipeline.decode_latents  s     \"G-@PUVdhhoo444w>U;A>S''1-		##Aq!Q/557==?rS   c                 n   S[        [        R                  " U R                  R                  5      R
                  R                  5       5      ;   n0 nU(       a  X$S'   S[        [        R                  " U R                  R                  5      R
                  R                  5       5      ;   nU(       a  XS'   U$ )Neta	generator)setinspect	signaturer8   step
parameterskeys)rP   r   r   accepts_etaextra_step_kwargsaccepts_generators         rQ   prepare_extra_step_kwargs=StableDiffusionControlNetXSPipeline.prepare_extra_step_kwargs  s     s7#4#4T^^5H5H#I#T#T#Y#Y#[\\'*e$ (3w/@/@ATAT/U/`/`/e/e/g+hh-6k*  rS         ?        c
           
        ^  U	bW  [        U 4S jU	 5       5      (       d=  [        ST R                   SU	 V
s/ s H  oT R                  ;  d  M  U
PM     sn
 35      eUb  Ub  [        SU SU S35      eUc  Uc  [        S5      eUbA  [        U[        5      (       d,  [        U[
        5      (       d  [        S[        U5       35      eUb  Ub  [        S	U S
U S35      eUbC  Ub@  UR                  UR                  :w  a&  [        SUR                   SUR                   S35      e[        [        S5      =(       a8    [        T R                  [        R                  R                  R                  5      n[        T R                  [        5      (       d0  U(       a\  [        T R                  R                   [        5      (       a3  T R#                  X!U5        [        U[$        5      (       d  ['        S5      eO eXxpX:  a  [        SU SU S35      eUS:  a  [        SU S35      eUS:  a  [        SU S35      eg s  sn
f )Nc              3   @   >#    U  H  oTR                   ;   v   M     g 7fN)_callback_tensor_inputs).0krP   s     rQ   	<genexpr>CStableDiffusionControlNetXSPipeline.check_inputs.<locals>.<genexpr>  s      F
7Y!---7Ys   z2`callback_on_step_end_tensor_inputs` has to be in z, but found zCannot forward both `prompt`: z and `prompt_embeds`: z2. Please make sure to only forward one of the two.zeProvide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.z2`prompt` has to be of type `str` or `list` but is z'Cannot forward both `negative_prompt`: z and `negative_prompt_embeds`: zu`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` z != `negative_prompt_embeds` ru   scaled_dot_product_attentionzLFor single controlnet: `controlnet_conditioning_scale` must be type `float`.zcontrol guidance start: z4 cannot be larger or equal to control guidance end: r   z can't be smaller than 0.r   zcontrol guidance end: z can't be larger than 1.0.)allrG   r   rB   rw   rx   r   ry   r   Fr6   r`   _dynamo
eval_frameOptimizedModuler   	_orig_modcheck_imager   r   )rP   rY   r   r]   r1   r2   controlnet_conditioning_scalecontrol_guidance_startcontrol_guidance_end"callback_on_step_end_tensor_inputsr   is_compiledstartends   `             rQ   check_inputs0StableDiffusionControlNetXSPipeline.check_inputs  s    .9# F
7YF
 C
 C
 DTEaEaDbbn  |^  pH  |^vw  ko  kG  kG  bGpq  |^  pH  oI  J  -";08N}o ^0 0  ^ 5w  FC)@)@TZ\`IaIaQRVW]R^Q_`aa&+A+M9/9J K*++]_ 
 $)?)K""&<&B&BB --:-@-@,A B.445Q8  a!?@ 
ZIIu}}//??F
 tyy"788499..0EFFUM:;UCC noo D 5+s<*5'1efiejjkl  3;7w>WXYY95cU:TUVV g pHs   H>H>c                    [        U[        R                  R                  5      n[        U[        R                  5      n[        U[
        R                  5      n[        U[        5      =(       a'    [        US   [        R                  R                  5      n[        U[        5      =(       a    [        US   [        R                  5      n[        U[        5      =(       a    [        US   [
        R                  5      n	U(       d:  U(       d3  U(       d,  U(       d%  U(       d  U	(       d  [        S[        U5       35      eU(       a  Sn
O[        U5      n
Ub  [        U[        5      (       a  SnO6Ub!  [        U[        5      (       a  [        U5      nOUb  UR                  S   nU
S:w  a  U
W:w  a  [        SU
 SU 35      eg g )Nr   zimage must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is r;   zdIf image batch size is not 1, image batch size must be same as prompt batch size. image batch size: z, prompt batch size: )rB   PILImager`   Tensornpndarrayrx   r   r   rJ   rw   ry   rG   )rP   r   rY   r1   image_is_pilimage_is_tensorimage_is_npimage_is_pil_listimage_is_tensor_listimage_is_np_listimage_batch_sizeprompt_batch_sizes               rQ   r   /StableDiffusionControlNetXSPipeline.check_image  s   !%9$UELL9 

3&ud3]
58SYY__8])%6]:eAhPUP\P\;]%eT2Wz%(BJJ7W #%($ f  gk  lq  gr  fs  t   "5z*VS"9"9 !Jvt$<$< #F& - 3 3A 6q %59J%Jv  xH  wI  I^  _p  ^q  r  &K rS   c	                 $   U R                   R                  XUS9R                  [        R                  S9nUR
                  S   n	U	S:X  a  Un
OUn
UR                  U
SS9nUR                  XgS9nU(       a  [        R                  " U/S-  5      nU$ )N)heightwidth)rs   r   r;   )dim)rZ   rs   r%   )rN   
preprocessr   r`   float32ry   repeat_interleavera   )rP   r   r   r   r   r[   rZ   rs   r\   r   	repeat_bys              rQ   prepare_image1StableDiffusionControlNetXSPipeline.prepare_image6  s     ,,77TY7Z]]didqdq]r ;;q>q "I .I''	q'94&IIugk*ErS   c	                 V   UU[        U5      U R                  -  [        U5      U R                  -  4n	[        U[        5      (       a*  [	        U5      U:w  a  [        S[	        U5       SU S35      eUc  [        XXeS9nOUR                  U5      nXR                  R                  -  nU$ )Nz/You have passed a list of generators of length z+, but requested an effective batch size of z@. Make sure the batch size matches the length of the generators.)r   rZ   rs   )
intr=   rB   rx   rJ   rG   r$   r   r8   init_noise_sigma)
rP   r   num_channels_latentsr   r   rs   rZ   r   r0   ry   s
             rQ   prepare_latents3StableDiffusionControlNetXSPipeline.prepare_latentsT  s     K4000J$///	
 i&&3y>Z+GA#i.AQ R&<'gi 
 ?"5fZGjj(G NN;;;rS   c                     U R                   $ r   )_guidance_scalerP   s    rQ   guidance_scale2StableDiffusionControlNetXSPipeline.guidance_scalej  s     ###rS   c                     U R                   $ r   )
_clip_skipr   s    rQ   rg   -StableDiffusionControlNetXSPipeline.clip_skipo  s     rS   c                 r    U R                   S:  =(       a"    U R                  R                  R                  S L $ )Nr;   )r   r6   rK   time_cond_proj_dimr   s    rQ   r\   ?StableDiffusionControlNetXSPipeline.do_classifier_free_guidancet  s0     ##a'WDII,<,<,O,OSW,WWrS   c                     U R                   $ r   )_cross_attention_kwargsr   s    rQ   cross_attention_kwargs:StableDiffusionControlNetXSPipeline.cross_attention_kwargsy  s     +++rS   c                     U R                   $ r   )_num_timestepsr   s    rQ   num_timesteps1StableDiffusionControlNetXSPipeline.num_timesteps~  s     """rS   2   g      @r;   r   r0   rY   r   r   r   num_inference_stepsr   r]   r[   r   r   r   r   r  r   r   r   callback_on_step_endr   c                 n   [        U[        [        45      (       a  UR                  n[	        U R
                  5      (       a  U R
                  R                  OU R
                  nU R                  UUUUUUUUU5	        X`l        UU l	        UU l
        SU l        Ub  [        U[        5      (       a  SnO3Ub!  [        U[        5      (       a  [        U5      nOUR                  S   nU R                   nUS:  nUb  UR#                  SS5      OSnU R%                  UUUUUUUUUS9	u  pU(       a  [&        R(                  " X/5      nU R+                  UUUUU-  UUUR,                  US9nUR                  S	S u  p4U R.                  R1                  UUS
9  U R.                  R2                  nU R
                  R4                  nU R7                  UU-  UUUUR,                  UU
U5      nU R9                  X5      n[        U5      XPR.                  R:                  -  -
  n[        U5      U l        [	        U R
                  5      n [?        SS5      n!U RA                  US9 n"[C        U5       GH  u  n#n$[&        RD                  RG                  5       (       a,  U (       a%  U!(       a  [&        RH                  RK                  5         U(       a  [&        R(                  " U/S-  5      OUn%U R.                  RM                  U%U$5      n%U#[        U5      -  U:  =(       a    U#S-   [        U5      -  U:*  n&U R                  U%U$UUUUSU&S9RN                  n'U(       a  U'RQ                  S5      u  n(n)U(UU)U(-
  -  -   n'U R.                  RR                  " U'U$U40 UDSS0D6S   nUb\  0 n*U H  n+[U        5       U+   U*U+'   M     U" U U#U$U*5      n,U,RW                  SU5      nU,RW                  SU5      nU,RW                  SU5      nU#[        U5      S-
  :X  d)  U#S-   U:  a0  U#S-   U R.                  R:                  -  S:X  a  U"RY                  5         [Z        (       d  GM  [\        R^                  " 5         GM     SSS5        [a        U S5      (       aM  U Rb                  b@  U R
                  Re                  S5        U Rf                  Re                  S5        [i        5         US:X  d_  U Rj                  Rm                  XRj                  Rn                  Rp                  -  SU
S9S   nU Rs                  UUUR,                  5      u  nn-OUnSn-U-c  S/UR                  S   -  n.OU- V/s/ s H
  n/U/(       + PM     n.n/U Rt                  Rw                  X.U.S9nU Ry                  5         U(       d  UU-4$ [{        UU-S9$ ! , (       d  f       GNC= fs  sn/f )u  
The call function to the pipeline for generation.

Args:
    prompt (`str` or `List[str]`, *optional*):
        The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
    image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
            `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
        The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
        specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted
        as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or
        width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`,
        images must be passed as a list such that each element of the list can be correctly batched for input
        to a single ControlNet.
    height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
        The height in pixels of the generated image.
    width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
        The width in pixels of the generated image.
    num_inference_steps (`int`, *optional*, defaults to 50):
        The number of denoising steps. More denoising steps usually lead to a higher quality image at the
        expense of slower inference.
    guidance_scale (`float`, *optional*, defaults to 7.5):
        A higher guidance scale value encourages the model to generate images closely linked to the text
        `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
    negative_prompt (`str` or `List[str]`, *optional*):
        The prompt or prompts to guide what to not include in image generation. If not defined, you need to
        pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
    num_images_per_prompt (`int`, *optional*, defaults to 1):
        The number of images to generate per prompt.
    eta (`float`, *optional*, defaults to 0.0):
        Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only
        applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
    generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
        A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
        generation deterministic.
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
        tensor is generated by sampling using the supplied random `generator`.
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
        provided, text embeddings are generated from the `prompt` input argument.
    negative_prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
        not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generated image. Choose between `PIL.Image` or `np.array`.
    return_dict (`bool`, *optional*, defaults to `True`):
        Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
        plain tuple.
    cross_attention_kwargs (`dict`, *optional*):
        A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
        [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
    controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
        The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
        to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
        the corresponding scale as a list.
    control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
        The percentage of total steps at which the ControlNet starts applying.
    control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
        The percentage of total steps at which the ControlNet stops applying.
    clip_skip (`int`, *optional*):
        Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
        the output of the pre-final layer will be used for computing the prompt embeddings.
    callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
        A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
        each denoising step during inference, with the following arguments: `callback_on_step_end(self:
        DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
        list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
    callback_on_step_end_tensor_inputs (`List`, *optional*):
        The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
        will be passed as the `callback_kwargs` argument. You will only be able to include variables listed
        in the `._callback_tensor_inputs` attribute of your pipeline class.
Examples:

Returns:
    [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
        If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
        otherwise a `tuple` is returned where the first element is a list with the generated images and the
        second element is a list of `bool`s indicating whether the corresponding generated image contains
        "not-safe-for-work" (nsfw) content.
FNr;   r   r   scale)r1   r2   rT   rg   )r   r   r   r   r[   rZ   rs   r\   )rZ   z>=z2.1)totalr%   T)sampletimestepencoder_hidden_statescontrolnet_condconditioning_scaler  r   apply_controlr   r0   r1   r2   final_offload_hookr   latent)r   r   )r   do_denormalize)r   nsfw_content_detected)>rB   r   r   tensor_inputsr"   r6   r   r   r   r   r  
_interruptrw   rx   rJ   ry   _execution_devicegetr_   r`   ra   r   rs   r8   set_timesteps	timestepsin_channelsr   r   orderr  r#   progress_bar	enumeratecudais_available	_inductorcudagraph_mark_step_beginscale_model_inputr  chunkr   localspopupdateXLA_AVAILABLExm	mark_stepr   r  r   r7   r!   r3   r   rK   r   r   rM   r   maybe_free_model_hooksr)   )0rP   rY   r   r   r   r	  r   r]   r[   r   r   r0   r1   r2   r   r   r  r   r   r   rg   r
  r   r6   r   rZ   r\   text_encoder_lora_scaler  r   r   num_warmup_stepsis_controlnet_compiledis_torch_higher_equal_2_1r!  itlatent_model_inputr  
noise_prednoise_pred_uncondnoise_pred_textcallback_kwargsr   callback_outputsr   r  has_nsfws0                                                   rQ   __call__,StableDiffusionControlNetXSPipeline.__call__  s   ` *-=?U,VWW1E1S1S.&8&C&Ctyy"" 	")" .
	
  .#'=$ *VS"9"9JJvt$<$<VJ&,,Q/J'' '5s&:# :P9["&&w5ae 	  150B0B!''#9. 1C 
1
- '!II'=&MNM ""!$99"7**(C # 	
 BC( 	$$%8$HNN,,	  $yy44&&.. 	
 !::9J y>,?..BVBV,VV!)n!3DII!>$4T5$A!%89\!),1 ::**,,1GLeOO==?A\UYYy1}%=bi"%)^^%E%EFXZ[%\" I&*@@ua!esS\~E]auEu  "YY-*7$)'D+A $"/ ' 	 &  /9C9I9I!9L6%!2^YjGj5k!kJ..--j!WmHYmglmnop'3&(O?-3Xa[* @';D!Q'X$.229gFG$4$8$8-$XM-=-A-ABZ\r-s*I**A9I/IqSTuX\XfXfXlXlNlpqNq '') =LLN[ - :d 4-..43J3J3VIILLOOu% h&HHOOGhhoo.L.L$LZ_ktOuE '+&=&=eV]M`M`&a#E#E##"Vekk!n4N;KL;Kx(l;KNL$$00`n0o 	##%+,,,EQabb[ :9F Ms   G,V 
V V2 
V/)	r   r  r   r  rv   r  rN   rM   r=   )T)NNNN)NNNNN)NNNr   r   r   N)Fr   )9__name__
__module____qualname____firstlineno____doc___last_supported_versionmodel_cpu_offload_seq_optional_components_exclude_from_cpu_offloadr   r   r
   r   r   r   r   r   r   r*   r	   boolrA   r   r`   r   r   re   r   r_   r   r   r   r   r   r   r   propertyr   rg   r\   r  r  no_gradr   EXAMPLE_DOC_STRINGrw   r   r   	Generatorr   r   r   r   r   r=  __static_attributes____classcell__)rF   s   @rQ   r,   r,   d   s*    D '5,.AB!1 2T )-0Q0Q $0Q !	0Q
 (*??@0Q (0Q -0Q 50Q .0Q "&0Q 0Qr 049=&*  - !) 6 UON 049=&*#'t5  -t5 !) 6t5 UOt5 C=t5n'	!* #&)" +/DWL#\ %*<, $ $   X X , , # # ]]_12 )-$( $##% #;?/0MQ*.049=%* ;?CF(+&)#' 9B3\cc49n%\c "\c 	\c
 }\c !\c \c "%T#Y"78\c  (}\c \c E%//43H"HIJ\c %,,'\c  -\c !) 6\c c]\c  !\c" !)c3h 8#\c$ (-UDK-?'@%\c& !&'\c( $)\c* C=+\c, '(Cd+T124DF\\]
-\c2 -1I3\c 3 \crS   r,   )Gr   typingr   r   r   r   r   r   r   r   	PIL.Imager   r`   torch.nn.functionalnn
functionalr   transformersr	   r
   r   	callbacksr   r   rM   r   r   loadersr   r   r   modelsr   r   r   r   models.lorar   
schedulersr   utilsr   r   r   r   r   r   r    utils.torch_utilsr!   r"   r#   r$   pipeline_utilsr&   r'   r(    stable_diffusion.pipeline_outputr)   stable_diffusion.safety_checkerr*   torch_xla.core.xla_modelcore	xla_modelr-  r,  
get_loggerr?  rD   rK  r,   r^   rS   rQ   <module>rc     s     = =      I I A D g g e e 9 3   h g ] ] L J ))MM			H	%* Z}c"}crS   