
    wi.8                       d dl Z d dlZd dlZd dlmZ d dlmZmZmZm	Z	m
Z
 d dlZd dlZd dlmZ d dlmZ d dlmZ d dlmZ dd	lmZ dd
lmZmZmZmZmZmZmZmZmZm Z m!Z!m"Z"m#Z#m$Z$m%Z% ddl&m'Z'm(Z(  e             rd dl)m*Z* ddl+m,Z,m-Z-  e            r
d dl.m/Z/m0Z0m1Z1  e!j2        e3          Z4dZ5dZ6dZ7dZ8dZ9dZ: G d d          Z; G d de;          Z< G d d          Z=dS )    N)Path)CallableDictListOptionalUnion)
model_info)HF_HUB_OFFLINE)validate_hf_hub_args)nn   )load_state_dict)USE_PEFT_BACKEND_get_model_fileconvert_state_dict_to_diffusersconvert_state_dict_to_peftdelete_adapter_layersget_adapter_nameget_peft_kwargsis_accelerate_availableis_peft_versionis_transformers_availableloggingrecurse_remove_peft_layersscale_lora_layersset_adapter_layers!set_weights_and_activate_adapters   ) _convert_kohya_lora_to_diffusers"_maybe_map_sgm_blocks_to_diffusers)PreTrainedModel)text_encoder_attn_modulestext_encoder_mlp_modules)AlignDevicesHook
CpuOffloadremove_hook_from_moduletext_encoderunettransformerzpytorch_lora_weights.binz pytorch_lora_weights.safetensorszYou are using an old version of LoRA backend. This will be deprecated in the next releases in favor of PEFT make sure to install the latest PEFT and transformers packages in the future.c                      e Zd ZdZeZeZeZ	dZ
	 d7deeeeej        f         f         fdZeedeeeeej        f         f         fd                        Ze	 d8d	            Zed
             Zed9d            Ze	 	 	 	 d:d            Zed9d            Zedefd            Zd Ze	 	 	 	 	 	 	 d;deeej        f         deeeej        j         ej        f         f         deeej        j         f         deeej        j         f         de!dede"de!fd            Z#e$deeej        f         dede!dede"de!fd            Z%d Z&	 	 	 	 	 d<de!d e!d!ed"e!d#e'e(e                  f
d$Z)d=d%e!d&e!fd'Z*	 	 d9d#ee(e         ef         d(e'd)         d*e'eee(e         e(d         f                  fd+Z+d7d(e'd)         fd,Z,d7d(e'd)         fd-Z-	 d7d#ee(e         ef         d.e'eeee(e         e(e         f                  fd/Z.d0 Z/d1 Z0d#ee(e         ef         fd2Z1de(e         fd3Z2deee(e         f         fd4Z3d#e(e         d5eej4        ee5f         ddfd6Z6dS )>LoraLoaderMixinz
    Load LoRA layers into [`UNet2DConditionModel`] and
    [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel).
    r   N%pretrained_model_name_or_path_or_dictc                 :   t           st          d          t          |t                    r|                                } | j        |fi |\  }}t          d |                                D                       }|st          d          |                     ||t          | d          st          | | j                  n| j        ||            |                     ||t          | d          st          | | j                  n| j        | j        ||            dS )	a_  
        Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
        `self.text_encoder`.

        All kwargs are forwarded to `self.lora_state_dict`.

        See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.

        See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
        `self.unet`.

        See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
        into `self.text_encoder`.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
            kwargs (`dict`, *optional*):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
        )PEFT backend is required for this method.c              3   &   K   | ]}d |v pd|v V  dS lora
dora_scaleN .0keys     f/root/.openclaw/workspace/chatterbox_venv_py311/lib/python3.11/site-packages/diffusers/loaders/lora.py	<genexpr>z4LoraLoaderMixin.load_lora_weights.<locals>.<genexpr>s   0      bb# D1Dbbbbbb    Invalid LoRA checkpoint.r(   network_alphasr(   adapter_name	_pipeliner'   )r=   r'   
lora_scaler>   r?   N)r   
ValueError
isinstancedictcopylora_state_dictallkeysload_lora_into_unethasattrgetattr	unet_namer(   load_lora_into_text_encodertext_encoder_namer'   r@   )selfr,   r>   kwargs
state_dictr=   is_correct_formats          r7   load_lora_weightsz!LoraLoaderMixin.load_lora_weightsO   s\   4   	JHIII ;TBB 	a4Y4^4^4`4`1 &:T%9:_%j%jci%j%j"
NbbPZP_P_PaPabbbbb  	97888  )6=dF6K6KZt~...QUQZ% 	! 	
 	
 	
 	(()400#t'=>>>"% 	) 		
 		
 		
 		
 		
r:   c                    |                     dd          }|                     dd          }|                     dd          }|                     dd          }|                     dd          }|                     dd          }|                     d	d          }	|                     d
d          }
|                     dd          }|                     dd          }|                     dd          }d}|d}d}ddd}d}t          |t                    s|r|||                    d          r	 ||                     |d|          }t          ||pt          |||||||	|
|          }t          j        	                    |d          }n)# t          t          j        f$ r}|s|d}Y d}~nd}~ww xY w|J||                     |d|          }t          ||pt          |||||||	|
|          }t          |          }n|}d}t          d |                                D                       r$|t!          ||          }t#          |          \  }}||fS )a
  
        Return state dict for lora weights and the network alphas.

        <Tip warning={true}>

        We support loading A1111 formatted LoRA checkpoints in a limited capacity.

        This function is experimental and might change in the future.

        </Tip>

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1
                of Diffusers.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            weight_name (`str`, *optional*, defaults to None):
                Name of the serialized state dict file.
        	cache_dirNforce_downloadFresume_downloadproxieslocal_files_onlytokenrevision	subfolderweight_nameunet_configuse_safetensorsTattn_procs_weightspytorch	file_type	framework.safetensors)file_extensionrX   
weights_namerT   rU   rV   rW   rX   rY   rZ   r[   
user_agentcpudevicez.binc              3      K   | ]X}|                     d           p>|                     d          p)|                     d          p|                     d          V  YdS )lora_te_
lora_unet_	lora_te1_	lora_te2_N)
startswith)r5   ks     r7   r8   z2LoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  s       
 
  Z(( -<<---<<,,- <<,,
 
 
 
 
 
r:   )poprB   rC   endswith_best_guess_weight_namer   LORA_WEIGHT_NAME_SAFEsafetensorstorch	load_fileIOErrorSafetensorErrorLORA_WEIGHT_NAMEr   rF   rG   r    r   )clsr,   rO   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   allow_picklerh   
model_filerP   er=   s                       r7   rE   zLoraLoaderMixin.lora_state_dict   s   r JJ{D11	$4e<< **%6==**Y--!::&8$??

7D))::j$//JJ{D11	jj55jj55 **%6==""OL ."
 


 
?FF 8	?  K$7'K,@,@,P,P' #*&)&A&AA+9-= 'B ' '
 "1=%0%I4I"+'5(7 ')9#!)"+#-" " "J "-!2!<!<ZPU!<!V!VJJ!<=   '  !%JDDDD !&"%"="==fgw #> # #K -9!,!@0@'#1$3#%5%')  
 -Z88
>J 
 
  __&&
 
 
 
 
 	V &?
KXX
)I*)U)U&J>))s   3AF F6&F11F6rd   Fc                    |st           rt          d          g }t          j                            |          rd S t          j                            |          r!fdt          j        |          D             }n"t          |          j        }fd|D             }t          |          dk    rd S h dt          t          fd|                    }t          d |D                       rt          t          d |                    }n7t          d	 |D                       rt          t          d
 |                    }t          |          dk    rt          d d| d          |d         }|S )Nz>When using the offline mode, you must specify a `weight_name`.c                 >    g | ]}|                               |S r3   )rt   r5   fre   s     r7   
<listcomp>z;LoraLoaderMixin._best_guess_weight_name.<locals>.<listcomp>2  s>       PQPZPZ[iPjPj  r:   c                 R    g | ]#}|j                                       |j         $S r3   )	rfilenamert   r   s     r7   r   z;LoraLoaderMixin._best_guess_weight_name.<locals>.<listcomp>7  s1    iiiaAKDXDXYgDhDhiakiiir:   r   >   	optimizer	scheduler
checkpointc                 <     t           fdD                       S )Nc              3       K   | ]}|vV  	d S Nr3   )r5   	substringxs     r7   r8   zLLoraLoaderMixin._best_guess_weight_name.<locals>.<lambda>.<locals>.<genexpr>@  s(       Z Z	!!3 Z Z Z Z Z Zr:   )rF   )r   unallowed_substringss   `r7   <lambda>z9LoraLoaderMixin._best_guess_weight_name.<locals>.<lambda>@  s&    S Z Z Z ZEY Z Z ZZZ r:   c              3   J   K   | ]}|                     t                    V  d S r   rt   r|   r5   r   s     r7   r8   z:LoraLoaderMixin._best_guess_weight_name.<locals>.<genexpr>C  s/      DDqzz*++DDDDDDr:   c                 6    |                      t                    S r   r   r   s    r7   r   z9LoraLoaderMixin._best_guess_weight_name.<locals>.<lambda>D  s    1::>N3O3O r:   c              3   J   K   | ]}|                     t                    V  d S r   rt   rv   r   s     r7   r8   z:LoraLoaderMixin._best_guess_weight_name.<locals>.<genexpr>E  s/      KKq122KKKKKKr:   c                 6    |                      t                    S r   r   r   s    r7   r   z9LoraLoaderMixin._best_guess_weight_name.<locals>.<lambda>F  s    1::>S3T3T r:   r   z9Provided path contains more than one weights file in the z~ format. Either specify `weight_name` in `load_lora_weights` or make sure there's only one  `.safetensors` or `.bin` file in  .)r
   rA   ospathisfileisdirlistdirr	   siblingslenlistfilterany)r}   r,   re   rX   targeted_filesfiles_in_repor\   r   s     `    @r7   ru   z'LoraLoaderMixin._best_guess_weight_name&  s     	_~ 	_]^^^7>>?@@ 	jFW]]@AA 	j   :&KLL  NN ''LMMVMiiii=iiiN~!##F
  HGGZZZZ\jkk
 
 DD^DDDDD 	g!&)O)OQ_"`"`aaNNKKNKKKKK 	g!&)T)TVd"e"effN~"" CN  C  C  [@  C  C  C   %Q'r:   c                    d}d}||j         |j                                        D ]\  }}t          |t          j                  rt          |d          r|st          |j        t                    }|sTt          |j        t                    p9t          |j        d          o$t          |j        j
        d         t                    }t                              d           t          ||           ||fS ar  
        Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU.

        Args:
            _pipeline (`DiffusionPipeline`):
                The pipeline to disable offloading for.

        Returns:
            tuple:
                A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True.
        FN_hf_hookhooksr   zAccelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again.)recursehf_device_map
componentsitemsrB   r   ModulerI   r   r%   r$   r   loggerinfor&   r}   r?   is_model_cpu_offloadis_sequential_cpu_offload_	components         r7   _optionally_disable_offloadingz.LoraLoaderMixin._optionally_disable_offloadingO  s     %$)! Y%<%D ) 4 : : < < Z Z9i33 Z	:8V8V Z/ Z/9):Lj/Y/Y,4 &y'9;KLL Z&y'97CC  Z *9+=+CA+FHX Y Y 2 KK Y   ,I?XYYYY$&?@@r:   c                 f    t           st          d          t          |                                          }t	           fd|D                       }t           fd|D                       r@|s@t                              d j         d           |	                    ||||           dS dS dS )a  
        This will load the LoRA layers specified in `state_dict` into `unet`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the unet or prefixed with an additional `unet` which can be used to distinguish between text
                encoder lora layers.
            network_alphas (`Dict[str, float]`):
                The value of the network alpha used for stable learning and preventing underflow. This value has the
                same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this
                link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning).
            unet (`UNet2DConditionModel`):
                The UNet model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
        r.   c              3   L   K   | ]}|                     j                  V  d S r   )rq   rM   r5   r6   r}   s     r7   r8   z6LoraLoaderMixin.load_lora_into_unet.<locals>.<genexpr>  s2      VV#s/D E EVVVVVVr:   c              3   L   K   | ]}|                     j                  V  d S r   )rq   rK   r   s     r7   r8   z6LoraLoaderMixin.load_lora_into_unet.<locals>.<genexpr>  s1      ==s~~cm,,======r:   Loading r   )r=   r>   r?   N)
r   rA   r   rG   rF   r   r   r   rK   load_attn_procs)r}   rP   r=   r(   r>   r?   rG   only_text_encoders   `       r7   rH   z#LoraLoaderMixin.load_lora_into_unetr  s    (   	JHIII
 JOO%%&&VVVVQUVVVVV========= 	FW 	KK33=333444  >`i !     	 	 	 	r:         ?c                     t           st          d          ddlm} t	          |                                          }	 j        nt           fd|	D                       r{fd|	D             fd|                                D             }
t          |
          dk    r:t                              d d	           i }t          |
          }
t          |
          }
t          |          D ] \  }}| d
}|
|         j        d         ||<   !t          d |
                                D                       }|rKt!          |          D ];\  }}| d}| d}|
|         j        d         ||<   |
|         j        d         ||<   <|Afd|                                D             fd|                                D             }t#          |||
d          }d|v rM|d         r t%          dd          rt          d          n%t%          dd          r|                    d            |di |}|t)          |          }                     |          \  }}|                    ||
|           t/          ||           |                    |j        |j                   |r|                                 dS |r|                                 dS dS dS dS )a?  
        This will load the LoRA layers specified in `state_dict` into `text_encoder`

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The key should be prefixed with an
                additional `text_encoder` to distinguish between unet lora layers.
            network_alphas (`Dict[str, float]`):
                See `LoRALinearLayer` for more details.
            text_encoder (`CLIPTextModel`):
                The text encoder model to load the LoRA layers into.
            prefix (`str`):
                Expected prefix of the `text_encoder` in the `state_dict`.
            lora_scale (`float`):
                How much to scale the output of the lora linear layer before it is added with the output of the regular
                lora layer.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
        r.   r   )
LoraConfigNc              3   *   K   | ]}j         |v V  d S r   )rM   r   s     r7   r8   z>LoraLoaderMixin.load_lora_into_text_encoder.<locals>.<genexpr>  s+      <<s$+<<<<<<r:   c                 |    g | ]8}|                               |                    d           d         k    6|9S r   r   rq   splitr5   rr   prefixs     r7   r   z?LoraLoaderMixin.load_lora_into_text_encoder.<locals>.<listcomp>  sE     g g gqALL4H4H gQWWUX\\Z[_`fMfMfMfMfMfr:   c                 R    i | ]#\  }}|v 	|                      d d          |$S r    replace)r5   rr   vr   text_encoder_keyss      r7   
<dictcomp>z?LoraLoaderMixin.load_lora_into_text_encoder.<locals>.<dictcomp>  sF     , , ,371aQRVgQgQg		V,,,++QQgQgQgr:   r   r   z.out_proj.lora_B.weightr   c              3      K   | ]}d |v V  	dS )z.mlp.Nr3   r4   s     r7   r8   z>LoraLoaderMixin.load_lora_into_text_encoder.<locals>.<genexpr>  s&      ^^33^^^^^^r:   z.fc1.lora_B.weightz.fc2.lora_B.weightc                 |    g | ]8}|                               r!|                    d           d         k    6|9S r   r   r   s     r7   r   z?LoraLoaderMixin.load_lora_into_text_encoder.<locals>.<listcomp>  sT     " " "ALL<P<P"UVU\U\]`UaUabcUdhnUnUnUnUnUnr:   c                 R    i | ]#\  }}|v 	|                      d d          |$S r   r   )r5   rr   r   
alpha_keysr   s      r7   r   z?LoraLoaderMixin.load_lora_into_text_encoder.<locals>.<dictcomp>  sF     & & &;?1a]^bl]l]l		V,,,33Q]l]l]lr:   F)is_unetuse_dora<0.9.0eYou need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`.)r>   adapter_state_dictpeft_config)weight)rk   dtyper3   )r   rA   peftr   r   rG   rM   r   r   r   r   r   r   r   r"   shaper#   r   r   rs   r   r   load_adapterr   tork   r   enable_model_cpu_offloadenable_sequential_cpu_offload)r}   rP   r=   r'   r   r@   r>   r?   r   rG   text_encoder_lora_state_dictranknamer   rank_key	patch_mlprank_key_fc1rank_key_fc2lora_config_kwargslora_configr   r   r   r   s   `   `                 @@r7   rL   z+LoraLoaderMixin.load_lora_into_text_encoder  s   >   	JHIII######
 JOO%%&&*0.&&f <<<<t<<<<< G	> g g g gD g g g, , , , ,;E;K;K;M;M, , ,( /001440v000111/NOk/l/l, 0JJf/g/g,8FF U UGD!"&???H%A(%K%QRS%TDNN^^:V:[:[:]:]^^^^^	 a#;L#I#I a aa*.'B'B'B*.'B'B'B-I,-W-]^_-`\*-I,-W-]^_-`\**!-" " " "#1#6#6#8#8" " "J& & & & &CQCWCWCYCY& & &N &5T>Kgqv%w%w%w"!333)*5 ?*388 ", !H# # 
 +388 ?.22:>>>(j>>+=>>  '#3L#A#ALBEBdBdenBoBo?$&? ))!-'C + *    ",zBBBB|':,BTUUU ( >6688888. >;;=====OG	> G	> 54~> >r:   c                 B    ddl m}m}m} t	          |                                          }	 fd|	D              fd|                                D             }|A fd|                                D              fd|                                D             }t          |                                          dk    rV|t          |di           v rt          d	| d
          i }
|                                D ]\  }}d|v r|j
        d         |
|<   t          |
||          }d|v r<|d         rt          dd          rt          d          |                    d            |di |}|t          |          }                     |          \  }} ||||            ||||          }|1t          |dd          }|rt                               d| d           |r|                                 dS |r|                                 dS dS dS )ab  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the unet or prefixed with an additional `unet` which can be used to distinguish between text
                encoder lora layers.
            network_alphas (`Dict[str, float]`):
                See `LoRALinearLayer` for more details.
            unet (`UNet2DConditionModel`):
                The UNet model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
        r   r   inject_adapter_in_modelset_peft_model_state_dictc                 H    g | ]}|                     j                  |S r3   rq   transformer_namer5   rr   r}   s     r7   r   z>LoraLoaderMixin.load_lora_into_transformer.<locals>.<listcomp>!  -    RRR!q||C<P/Q/QRARRRr:   c                 \    i | ](\  }}|v 	|                     j         d d          |)S r   r   r   r5   rr   r   r}   transformer_keyss      r7   r   z>LoraLoaderMixin.load_lora_into_transformer.<locals>.<dictcomp>"  L     
 
 
=AQ[\`p[p[pAII-000"55q[p[p[pr:   Nc                 H    g | ]}|                     j                  |S r3   r   r   s     r7   r   z>LoraLoaderMixin.load_lora_into_transformer.<locals>.<listcomp>'  s-    aaaall3K_>`>`a!aaar:   c                 \    i | ](\  }}|v 	|                     j         d d          |)S r   r   )r5   rr   r   r   r}   s      r7   r   z>LoraLoaderMixin.load_lora_into_transformer.<locals>.<dictcomp>(  sL       AEAcdhrcrcr		S1444b991crcrcrr:   r   Adapter name F already in use in the transformer - please select a new adapter name.lora_Br   r   r   r   r   r>   unexpected_keysXLoading adapter weights from state_dict led to unexpected keys not found in the model:  . r3   r   r   r   r   r   rG   r   r   rJ   rA   r   r   r   rs   r   r   r   warningr   r   )r}   rP   r=   r)   r>   r?   r   r   r   rG   r   r6   valr   r   r   r   incompatible_keysr  r   r   s   `                  @@r7   load_lora_into_transformerz*LoraLoaderMixin.load_lora_into_transformer  s   $ 	XWWWWWWWWWJOO%%&&RRRRtRRR
 
 
 
 
EOEUEUEWEW
 
 

 %aaaa^%8%8%:%:aaaJ    IWI]I]I_I_  N z  !!A%%w{M2FFFF xLxxx   D&,,.. - -Ss?? #	!DI!0~z!R!R///%j1 7oc76S6S 7$   '**:666$*::'9::K #/<< ?B>`>`aj>k>k; ";##K<XXXX 9 9+zS_ ` ` ,")*;=NPT"U"U" NN0+0 0 0   $ :2244444* :7799999[ &%X: :r:   returnc                 4    t          | d          r| j        ndS )N_lora_scaler   )rI   r  rN   s    r7   r@   zLoraLoaderMixin.lora_scale\  s!     $+4#?#?HtSHr:   c                 @   t           }t          | d          r9 || j                   t          | j        dd           | j        `d | j        _        t          | d          r; || j                   t          | j        dd           | j        `d | j        _        d S d S d S )Nr'   r   text_encoder_2)r   rI   r'   rJ   r   _hf_peft_config_loadedr  )rN   remove_methods     r7   !_remove_text_encoder_monkey_patchz1LoraLoaderMixin._remove_text_encoder_monkey_patchb  s    24(( 	@M$+,,,t(->>J%1;?!84)** 	BM$-...t*M4@@L'3=A#:::		B 	BLLr:   Tsave_directoryunet_lora_layerstext_encoder_lora_layerstransformer_lora_layersis_main_processr\   save_functionsafe_serializationc	                 J   i }	d }
|s|s|st          d          |r$|	                     |
|| j                             |r$|	                     |
|| j                             |r|	                     |
|d                     |                     |	|||||           dS )u  
        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `unet`.
            text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training and you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
        c                     t          | t          j        j                  r|                                 n| }fd|                                D             }|S )Nc                 &    i | ]\  }} d | |S r   r3   r5   module_nameparamr   s      r7   r   zKLoraLoaderMixin.save_lora_weights.<locals>.pack_weights.<locals>.<dictcomp>  /     s s sFXkSXF!:!:[!:!:E s s sr:   rB   rx   r   r   rP   r   layersr   layers_weightslayers_state_dicts    `  r7   pack_weightsz7LoraLoaderMixin.save_lora_weights.<locals>.pack_weights  Z    4>vux4W4WcV..000]cN s s s s\j\p\p\r\r s s s$$r:   zkYou must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers`, or `transformer_lora_layers`.r)   rP   r  r  r\   r  r  N)rA   updaterK   rM   write_lora_layers)r}   r  r  r  r  r  r\   r  r  rP   r'  s              r7   save_lora_weightsz!LoraLoaderMixin.save_lora_weightsq  s   D 
	% 	% 	%
 ! 	$< 	@W 	}    	Mll+;S]KKLLL# 	]ll+CSEZ[[\\\" 	Tll+BMRRSSS 	!)+#'1 	 	
 	
 	
 	
 	
r:   rP   c                    t           j                            |          r t                              d| d           d S ||rd }nt
          j        }t          j        |d           ||rt          }nt          }t          ||                                          } || |           t                              d|            d S )NProvided path (#) should be a directory, not a filec                 J    t           j                            | |ddi          S Nformatpt)metadatarw   rx   	save_fileweightsfilenames     r7   r  z8LoraLoaderMixin.write_lora_layers.<locals>.save_function  '    &,66wT\^bSc6dddr:   Texist_okModel weights saved in r   r   r   r   errorrx   savemakedirsrv   r|   r   as_posixr   rP   r  r  r\   r  r  	save_paths          r7   r+  z!LoraLoaderMixin.write_lora_layers       7>>.)) 	LL^>^^^___F ! +e e e e !&

NT2222! /3.55>>@@	j),,,9i99:::::r:   c                     t           st          d          t          | d          st          | | j                  n| j        }|                                 |                                  dS )
        Unloads the LoRA parameters.

        Examples:

        ```python
        >>> # Assuming `pipeline` is already loaded with the LoRA parameters.
        >>> pipeline.unload_lora_weights()
        >>> ...
        ```
        r.   r(   N)r   rA   rI   rJ   rK   r(   unload_lorar  rN   r(   s     r7   unload_lora_weightsz#LoraLoaderMixin.unload_lora_weights  so       	JHIII4;D&4I4IXwtT^,,,ty 	..00000r:   	fuse_unetfuse_text_encoderr@   safe_fusingadapter_namesc                    ddl m |s|r5| xj        dz  c_        | j        dk    rt                              d           |rDt          | d          st          | | j                  n| j        }|	                    |||           dfd
	}|rJt          | d          r || j
        |||           t          | d          r || j        |||           d	S d	S d	S )a  
        Fuses the LoRA parameters into the original parameters of the corresponding blocks.

        <Tip warning={true}>

        This is an experimental API.

        </Tip>

        Args:
            fuse_unet (`bool`, defaults to `True`): Whether to fuse the UNet LoRA parameters.
            fuse_text_encoder (`bool`, defaults to `True`):
                Whether to fuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
                LoRA parameters then it won't have any effect.
            lora_scale (`float`, defaults to 1.0):
                Controls how much to influence the outputs with the LoRA parameters.
            safe_fusing (`bool`, defaults to `False`):
                Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
            adapter_names (`List[str]`, *optional*):
                Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.

        Example:

        ```py
        from diffusers import DiffusionPipeline
        import torch

        pipeline = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
        pipeline.fuse_lora(lora_scale=0.7)
        ```
        r   BaseTunerLayerr   zThe current API is supported for operating with a single LoRA file. You are trying to load and fuse more than one LoRA which is not well-supported.r(   )rM  rN  r   FNc                 B   d|i}|                                  D ]}t          |          rr|dk    r|                    |           t          t	          j        |j                  j                  }d|v r||d<   nd|vr|t          d           |j        di | d S )N
safe_merger   rN  zThe `adapter_names` argument is not supported with your PEFT version. Please upgrade to the latest version of PEFT. `pip install -U peft`r3   )	modulesrB   scale_layerr   inspect	signaturemerge
parametersrA   )r'   r@   rM  rN  merge_kwargsmodulesupported_merge_kwargsrQ  s          r7   fuse_text_encoder_loraz9LoraLoaderMixin.fuse_lora.<locals>.fuse_text_encoder_lora   s    (+6L&..00 1 1fn55 1!S((**:666 .2'2CFL2Q2Q2\-]-]*&*@@@8E_55(0FFF=Kd(b  
 !FL00<000!1 1r:   r'   )rN  r  )r   FN)peft.tuners.tuners_utilsrQ  num_fused_lorasr   r  rI   rJ   rK   r(   	fuse_lorar'   r  )	rN   rK  rL  r@   rM  rN  r(   r]  rQ  s	           @r7   r`  zLoraLoaderMixin.fuse_lora  s\   T 	<;;;;; 	) 	  A%  #a'' j    	]8?f8M8M\74000SWS\DNN:;mN\\\	1 	1 	1 	1 	1 	1*  	rt^,, p&&t'8*kanoooot-.. r&&t':Jcpqqqqqq		r 	rr rr:   unfuse_unetunfuse_text_encoderc                    ddl m t          | d          st          | | j                  n| j        }|r;|                                D ]&}t          |          r|                                 'fd}|r@t          | d          r || j	                   t          | d          r || j
                   | xj        dz  c_        dS )	af  
        Reverses the effect of
        [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.fuse_lora).

        <Tip warning={true}>

        This is an experimental API.

        </Tip>

        Args:
            unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
            unfuse_text_encoder (`bool`, defaults to `True`):
                Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
                LoRA parameters then it won't have any effect.
        r   rP  r(   c                 ~    |                                  D ]&}t          |          r|                                 'd S r   )rT  rB   unmerge)r'   r[  rQ  s     r7   unfuse_text_encoder_loraz=LoraLoaderMixin.unfuse_lora.<locals>.unfuse_text_encoder_loraT  sK    &..00 % %fn55 %NN$$$% %r:   r'   r  r   N)r^  rQ  rI   rJ   rK   r(   rT  rB   re  r'   r  r_  )rN   ra  rb  r(   r[  rf  rQ  s         @r7   unfuse_lorazLoraLoaderMixin.unfuse_lora;  s
   " 	<;;;;;4;D&4I4IXwtT^,,,ty 	%,,.. % %fn55 %NN$$$	% 	% 	% 	% 	%
  	>t^,, <(():;;;t-.. >(()<===!r:   r'   r!   text_encoder_weightsc                     t           st          d          d }t          |t                    r|gn|} |||          }|pt	          | dd          }|t          d          t          |||           dS )a4  
        Sets the adapter layers for the text encoder.

        Args:
            adapter_names (`List[str]` or `str`):
                The names of the adapters to use.
            text_encoder (`torch.nn.Module`, *optional*):
                The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
                attribute.
            text_encoder_weights (`List[float]`, *optional*):
                The weights to use for the text encoder. If `None`, the weights are set to `1.0` for all the adapters.
        r.   c                    t          |t                    s|gt          |           z  }t          |           t          |          k    r/t          dt          |            dt          |                     d |D             }|S )NLength of adapter names + is not equal to the length of the weights c                     g | ]}||nd	S )Nr   r3   )r5   ws     r7   r   zZLoraLoaderMixin.set_adapters_for_text_encoder.<locals>.process_weights.<locals>.<listcomp>  s     DDDqAMqqsDDDr:   )rB   r   r   rA   )rN  r8  s     r7   process_weightszFLoraLoaderMixin.set_adapters_for_text_encoder.<locals>.process_weightsv  s     gt,, 9")c-&8&88=!!S\\11 |s=/A/A||nqrynznz||   EDGDDDGNr:   r'   NzrThe pipeline does not have a default `pipe.text_encoder` class. Please make sure to pass a `text_encoder` instead.)r   rA   rB   strrJ   r   )rN   rN  r'   rh  ro  s        r7   set_adapters_for_text_encoderz-LoraLoaderMixin.set_adapters_for_text_encodera  s    $   	JHIII	 	 	" ,6mS+I+I\}.}>RSS#Jwt^T'J'J E   	*,G[\\\\\r:   c                     t           st          d          |pt          | dd          }|t          d          t          |d           dS )a  
        Disables the LoRA layers for the text encoder.

        Args:
            text_encoder (`torch.nn.Module`, *optional*):
                The text encoder module to disable the LoRA layers for. If `None`, it will try to get the
                `text_encoder` attribute.
        r.   r'   NText Encoder not found.Fenabled)r   rA   rJ   r   rN   r'   s     r7   disable_lora_for_text_encoderz-LoraLoaderMixin.disable_lora_for_text_encoder  s`       	JHIII#Jwt^T'J'J6777<777777r:   c                     t           st          d          |pt          | dd          }|t          d          t          | j        d           dS )a  
        Enables the LoRA layers for the text encoder.

        Args:
            text_encoder (`torch.nn.Module`, *optional*):
                The text encoder module to enable the LoRA layers for. If `None`, it will try to get the `text_encoder`
                attribute.
        r.   r'   Nrs  Trt  )r   rA   rJ   r   r'   rv  s     r7   enable_lora_for_text_encoderz,LoraLoaderMixin.enable_lora_for_text_encoder  sc       	JHIII#Jwt^T'J'J67774,d;;;;;;r:   adapter_weightsc                    t          |t                    r|gn|}t          j        |          }t          |t                    s|gt          |          z  }t          |          t          |          k    r/t          dt          |           dt          |                     g g g }}}|                                 d                                 D             }fd|D             }t          ||          D ]e\  }}	t          |	t                    r|	                    dd           }
|	                    dd           }|	                    dd           }t          |	          dk    r(t          d	|	                                 d
| d          |*t          | d          st                              d           t          |
||gg d          D ]D\  }}|=|||         vr3t                              d| d| d| d| d| d||          d           En|	}
|	}|	}|                    |
           |                    |           |                    |           gt          | d          st#          | | j                  n| j        }|                    ||           t          | d          r|                     || j        |           t          | d          r|                     || j        |           d S d S )Nrk  rl  c                     h | ]	}|D ]}|
S r3   r3   )r5   adaptersadapters      r7   	<setcomp>z/LoraLoaderMixin.set_adapters.<locals>.<setcomp>  s9     
 
 
 (
 
?FG
 
 
 
r:   c                 T    i | ]#fd                                  D             $S )c                 "    g | ]\  }}|v 	|S r3   r3   )r5   partr}  r~  s      r7   r   z;LoraLoaderMixin.set_adapters.<locals>.<dictcomp>.<listcomp>  s(    ^^^~tX'U]J]J]dJ]J]J]r:   )r   )r5   r~  list_adapterss    @r7   r   z0LoraLoaderMixin.set_adapters.<locals>.<dictcomp>  sM      
  
  
 ^^^^1D1D1F1F^^^ 
  
  
r:   r(   r'   r  r   zGot invalid key 'z"' in lora weight dict for adapter r   zsLora weight dict contains text_encoder_2 weights but will be ignored because pipeline does not have text_encoder_2.)r(   r'   r  zLora weight dict for adapter 'z' contains z#, but this will be ignored because z does not contain weights for z. Valid parts for z are: )rB   rp  rD   deepcopyr   r   rA   get_list_adaptersvaluesziprC   rs   rG   rI   r   r  appendrJ   rK   r(   set_adaptersrq  r'   r  )rN   rN  rz  unet_lora_weightstext_encoder_lora_weightstext_encoder_2_lora_weightsall_adaptersinvert_list_adaptersr>   r8  unet_lora_weighttext_encoder_lora_weighttext_encoder_2_lora_weightpart_weight	part_namer(   r  s                   @r7   r  zLoraLoaderMixin.set_adapters  s   
 ,6mS+I+I\}-88 /400 	E./#m2D2DDO}_!5!555 A3}+=+=  A  Ajmn}j~j~  A  A  
 UWXZ\^6Q4..00
 
$1$8$8$:$:
 
 
 
  
  
  
' 
  
  

 &)%H%H !	K !	K!L''4(( 5#*;;vt#<#< +2;;~t+L+L(-4[[9I4-P-P*w<<!##$mGLLNNmm^jmmm   .9'$P`BaBa9NN N  
 /2%'?A[\>>>/ /  *K #.9DXYeDf3f3f G\  G  GV_  G  G  EQ  G  G  qz  G  G  NZ  G  G  bv  wC  bD  G  G  G   $+ +2(-4*$$%5666%,,-EFFF'../IJJJJ4;D&4I4IXwtT^,,,ty-):;;; 4(( 	l..}d>OQjkkk4)** 	p..}d>QSnooooo	p 	pr:   c                 ^   t           st          d          t          | d          st          | | j                  n| j        }|                                 t          | d          r|                     | j                   t          | d          r|                     | j	                   d S d S Nr.   r(   r'   r  )
r   rA   rI   rJ   rK   r(   disable_lorarw  r'   r  rI  s     r7   r  zLoraLoaderMixin.disable_lora  s     	JHIII 5<D&4I4IXwtT^,,,ty 4(( 	B..t/@AAA4)** 	D..t/BCCCCC	D 	Dr:   c                 ^   t           st          d          t          | d          st          | | j                  n| j        }|                                 t          | d          r|                     | j                   t          | d          r|                     | j	                   d S d S r  )
r   rA   rI   rJ   rK   r(   enable_lorary  r'   r  rI  s     r7   r  zLoraLoaderMixin.enable_lora
  s     	JHIII 5<D&4I4IXwtT^,,,ty 4(( 	A--d.?@@@4)** 	C--d.ABBBBB	C 	Cr:   c                    t           st          d          t          |t                    r|g}t	          | d          st          | | j                  n| j        }|                    |           |D ]L}t	          | d          rt          | j
        |           t	          | d          rt          | j        |           MdS )a   
        Args:
        Deletes the LoRA layers of `adapter_name` for the unet and text-encoder(s).
            adapter_names (`Union[List[str], str]`):
                The names of the adapter to delete. Can be a single string or a list of strings
        r.   r(   r'   r  N)r   rA   rB   rp  rI   rJ   rK   r(   delete_adaptersr   r'   r  )rN   rN  r(   r>   s       r7   r  zLoraLoaderMixin.delete_adapters  s       	JHIIImS)) 	,*OM 5<D&4I4IXwtT^,,,ty]+++) 	I 	ILt^,, G%d&7FFFt-.. I%d&9<HHH	I 	Ir:   c                     t           st          d          ddlm} g }t	          | d          st          | | j                  n| j        }|                                D ]}t          ||          r	|j
        } n|S )a  
        Gets the list of the current active adapters.

        Example:

        ```python
        from diffusers import DiffusionPipeline

        pipeline = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0",
        ).to("cuda")
        pipeline.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
        pipeline.get_active_adapters()
        ```
        iPEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`r   rP  r(   )r   rA   r^  rQ  rI   rJ   rK   r(   rT  rB   active_adapters)rN   rQ  r  r(   r[  s        r7   get_active_adaptersz#LoraLoaderMixin.get_active_adapters0  s        	{   	<;;;;;4;D&4I4IXwtT^,,,tyllnn 	 	F&.11 "("8 r:   c                    t           st          d          i }t          | d          rCt          | j        d          r.t	          | j        j                                                  |d<   t          | d          rCt          | j        d          r.t	          | j        j                                                  |d<   t          | d          st          | | j	                  n| j
        }t          | | j	                  rCt          |d          r3t	          | j
        j                                                  || j	        <   |S )zR
        Gets the current list of all available adapters in the pipeline.
        r  r'   r   r  r(   )r   rA   rI   r'   r   r   rG   r  rJ   rK   r(   )rN   r  r(   s      r7   r  z!LoraLoaderMixin.get_list_adaptersP  s;      	{   4(( 	VWT5F-V-V 	V+/0A0M0R0R0T0T+U+UL(4)** 	Zwt7JM/Z/Z 	Z-1$2E2Q2V2V2X2X-Y-YL)*4;D&4I4IXwtT^,,,ty4(( 	NWT=-I-I 	N+/	0E0J0J0L0L+M+ML(r:   rk   c                    t           st          d          ddlm} t	          | d          st          | | j                  n| j        }|                                D ]}t          ||          r|D ]}|j
        |                             |           |j        |                             |           t	          |d          r/|j        (|j        |                             |          |j        |<   g }t	          | d          r|                    | j                   t	          | d          r|                    | j                   |D ]}|                                D ]}	t          |	|          r|D ]}|	j
        |                             |           |	j        |                             |           t	          |	d          r/|	j        (|	j        |                             |          |	j        |<   dS )	a  
        Moves the LoRAs listed in `adapter_names` to a target device. Useful for offloading the LoRA to the CPU in case
        you want to load multiple adapters and free some GPU memory.

        Args:
            adapter_names (`List[str]`):
                List of adapters to send device to.
            device (`Union[torch.device, str, int]`):
                Device to send the adapters to. Can be either a torch device, a str or an integer.
        r.   r   rP  r(   lora_magnitude_vectorNr'   r  )r   rA   r^  rQ  rI   rJ   rK   r(   rT  rB   lora_Ar   r   r  r  r'   r  )
rN   rN  rk   rQ  r(   unet_moduler>   modules_to_processr'   text_encoder_modules
             r7   set_lora_devicezLoraLoaderMixin.set_lora_deviceg  sR      	JHIII;;;;;; 5<D&4I4IXwtT^,,,ty<<>> 		% 		%K+~66 %$1 % %L&|477???&|477???{,CDD %IjIvJUJk(K"V** $9,G
  4(( 	9%%d&78884)** 	;%%d&9:::. 	c 	cL'3';';'='= c c#1>BB c(5 
c 
c+2<@CCFKKK+2<@CCFKKK $$79PQQc 3 I U !4 I, W Z Z[a b b 0E ,c	c 	cr:   r   )rd   FNN)Nr   NNNNNTNNT)TTr   FN)TT)7__name__
__module____qualname____doc__TEXT_ENCODER_NAMErM   	UNET_NAMErK   TRANSFORMER_NAMEr   r_  r   rp  r   rx   TensorrR   classmethodr   rE   ru   r   rH   rL   r	  propertyfloatr@   r  r   PathLiker   r   boolr   r,  staticmethodr+  rJ  r   r   r`  rg  rq  rw  ry  r  r  r  r  r  r  rk   intr  r3   r:   r7   r+   r+   D   s        
 *I'O hl8
 8
5:3S%,EV@W;W5X8
 8
 8
 8
t Y*/4S$sEL?P:Q5Q/RY* Y* Y*  [Y*v di& & & [&P  A  A [ AD ! ! ! [!F  q> q> q> [q>h M: M: M: [M:` IE I I I XI
B B B  MQ?C>B $"&#'>
 >
c2;./>
 sE%(/5<*G$HHI>
 #'sEHO';"<	>

 "&c58?&:!;>
 >
 >
  >
 !>
 >
 >
 [>
@ ;el*+;; ; 	;
  ; !; ; ; \;B1 1 1. "&!-1Pr PrPr  Pr 	Pr
 Pr  S	*Pr Pr Pr Prd$" $"t $" $" $" $" $"R 59PT	-] -]T#Y^,-] 01-] 'uUDKd-K'LM	-] -] -] -]^8 8(CT:U 8 8 8 8"< <BS9T < < < <& RVIp IpT#Y^,Ip "%tT%[$t*(L"MNIp Ip Ip IpVD D DC C CIU49c>-B I I I I0T#Y    @4T#Y#7    .3cT#Y 3celTWY\F\@] 3cbf 3c 3c 3c 3c 3c 3cr:   r+   c                   ~   e Zd ZdZ	 ddeeeeej        f         f         de	e         fdZ
e	 	 	 	 	 	 	 ddeeej        f         deeeej        j        ej        f         f         d	eeeej        j        ej        f         f         d
eeeej        j        ej        f         f         dedededefd            Zd ZdS ) StableDiffusionXLLoraLoaderMixinz\This class overrides `LoraLoaderMixin` with LoRA loading/saving code that's specific to SDXLNr,   r>   c           	         t           st          d          t          |t                    r|                                } | j        |fd| j        j        i|\  }}t          d |	                                D                       }|st          d          | 
                    ||| j        ||            d |                                D             }t          |          dk    r&|                     ||| j        d| j        || 	           d
 |                                D             }t          |          dk    r(|                     ||| j        d| j        || 	           dS dS )a_  
        Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
        `self.text_encoder`.

        All kwargs are forwarded to `self.lora_state_dict`.

        See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.

        See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
        `self.unet`.

        See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
        into `self.text_encoder`.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            kwargs (`dict`, *optional*):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
        r.   r]   c              3   &   K   | ]}d |v pd|v V  dS r0   r3   r4   s     r7   r8   zEStableDiffusionXLLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>  r9   r:   r;   r<   c                 "    i | ]\  }}d |v 	||S )ztext_encoder.r3   r5   rr   r   s      r7   r   zFStableDiffusionXLLoraLoaderMixin.load_lora_weights.<locals>.<dictcomp>  s)    "_"_"_DAq/]^J^J^1aJ^J^J^r:   r   r'   )r=   r'   r   r@   r>   r?   c                 "    i | ]\  }}d |v 	||S )ztext_encoder_2.r3   r  s      r7   r   zFStableDiffusionXLLoraLoaderMixin.load_lora_weights.<locals>.<dictcomp>  s*    $c$c$cdaL]abLbLbQLbLbLbr:   r  N)r   rA   rB   rC   rD   rE   r(   configrF   rG   rH   r   r   rL   r'   r@   r  )	rN   r,   r>   rO   rP   r=   rQ   text_encoder_state_dicttext_encoder_2_state_dicts	            r7   rR   z2StableDiffusionXLLoraLoaderMixin.load_lora_weights  s   :   	JHIII ;TBB 	a4Y4^4^4`4`1 &:T%91&
 &
	(&
 &
 &
"
N
  bbPZP_P_PaPabbbbb  	97888  ~DIT`lp 	! 	
 	
 	
 #`"_J4D4D4F4F"_"_"_&''!++,,'-!.%?) -    %d$cj6F6F6H6H$c$c$c!())A--,,)-!0'?) -      .-r:   Tr  r  r  text_encoder_2_lora_layersr  r\   r  r  c	                 6   i }	d }
|s|s|st          d          |r|	                     |
|d                     |r|	                     |
|d                     |r|	                     |
|d                     |                     |	|||||           dS )u6  
        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `unet`.
            text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            text_encoder_2_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder_2`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training and you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
        c                     t          | t          j        j                  r|                                 n| }fd|                                D             }|S )Nc                 &    i | ]\  }} d | |S r  r3   r  s      r7   r   z\StableDiffusionXLLoraLoaderMixin.save_lora_weights.<locals>.pack_weights.<locals>.<dictcomp>  r!  r:   r"  r#  s    `  r7   r'  zHStableDiffusionXLLoraLoaderMixin.save_lora_weights.<locals>.pack_weights  r(  r:   zmYou must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`.r(   r'   r  r)  N)rA   r*  r+  )r}   r  r  r  r  r  r\   r  r  rP   r'  s              r7   r,  z2StableDiffusionXLLoraLoaderMixin.save_lora_weights  s   J 
	% 	% 	%
 ! 	$< 	@Z 	    	Fll+;VDDEEE# 	Vll+C^TTUUU% 	Zll+EGWXXYYY!)+#'1 	 	
 	
 	
 	
 	
r:   c                     t          | j                   t          | j        dd           | j        `d | j        _        t          | j                   t          | j        dd           | j        `d | j        _        d S d S )Nr   )r   r'   rJ   r   r  r  r  s    r7   r  zBStableDiffusionXLLoraLoaderMixin._remove_text_encoder_monkey_patch1  s    "4#45554$mT::F!-7;D4"4#67774&t<<H#/9=D666 IHr:   r   r  )r  r  r  r  r   rp  r   rx   r  r   rR   r  r   r  r   r   r  r   r,  r  r3   r:   r7   r  r    so       ff '+K K/4S$sEL?P:Q5Q/RK smK K K KZ  MQTXVZ $"&#'@
 @
c2;./@
 sE%(/5<*G$HHI@
 #'sE%(/5<2O,P'P"Q	@

 %)eEHOU\4Q.R)R$S@
 @
 @
  @
 !@
 @
 @
 [@
D
> 
> 
> 
> 
>r:   r  c                      e Zd ZdZeZdZ	 ddeee	ee
j        f         f         fdZeedeee	ee
j        f         f         fd                        Zedd            Ze	 	 	 	 	 dd	eeej        f         d
e	ee
j        j        f         dedededefd            Zede	ee
j        f         d	ededededefd            Zd Zed             ZdS )SD3LoraLoaderMixinz:
    Load LoRA layers into [`SD3Transformer2DModel`].
    r   Nr,   c                    t           st          d          t          |t                    r|                                } | j        |fi |}t          d |                                D                       }|st          d          |                     |t          | d          st          | | j                  n| j        ||            dS )a  
        Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
        `self.text_encoder`.

        All kwargs are forwarded to `self.lora_state_dict`.

        See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.

        See [`~loaders.LoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded
        into `self.transformer`.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
            kwargs (`dict`, *optional*):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
        r.   c              3   &   K   | ]}d |v pd|v V  dS r0   r3   r4   s     r7   r8   z7SD3LoraLoaderMixin.load_lora_weights.<locals>.<genexpr>g  r9   r:   r;   r)   )r)   r>   r?   N)r   rA   rB   rC   rD   rE   rF   rG   r	  rI   rJ   r   r)   )rN   r,   r>   rO   rP   rQ   s         r7   rR   z$SD3LoraLoaderMixin.load_lora_weightsF  s    .   	JHIII ;TBB 	a4Y4^4^4`4`1 *T)*OZZSYZZ
bbPZP_P_PaPabbbbb  	97888''DKDR_D`D`vd&;<<<fjfv%	 	( 	
 	
 	
 	
 	
r:   c                    |                     dd          }|                     dd          }|                     dd          }|                     dd          }|                     dd          }|                     dd          }|                     d	d          }	|                     d
d          }
|                     dd          }|                     dd          }d}|d}d}ddd}d}t          |t                    s|r|||                    d          rm	 t	          ||pt
          |||||||	|
|          }t          j                            |d          }n)# t          t          j
        f$ r}|s|d}Y d}~nd}~ww xY w|0t	          ||pt          |||||||	|
|          }t          |          }n|}|S )a
  
        Return state dict for lora weights and the network alphas.

        <Tip warning={true}>

        We support loading A1111 formatted LoRA checkpoints in a limited capacity.

        This function is experimental and might change in the future.

        </Tip>

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.

        rT   NrU   FrV   rW   rX   rY   rZ   r[   r\   r^   Tr_   r`   ra   rd   rf   ri   rj   )rs   rB   rC   rt   r   rv   rw   rx   ry   rz   r{   r|   r   )r}   r,   rO   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r^   r~   rh   r   rP   r   s                     r7   rE   z"SD3LoraLoaderMixin.lora_state_dictr  s3   p JJ{D11	$4e<< **%6>>**Y--!::&8$??

7D))::j$//JJ{D11	jj55 **%6==""OL ."
 


 
?FF +	? K$7'K,@,@,P,P'!0=%0%I4I"+'5(7 ')9#!)"+#-" " "J "-!2!<!<ZPU!<!V!VJJ!<=   '  !%JDDDD !,9!,!@0@'#1$3#%5%')  
 -Z88
>Js   AE   F6FFc                     ddl m}m}m} t	          |                                          } fd|D              fd|                                D             }t          |                                          dk    rW|t          |di           v rt          d| d          i }	|                                D ]\  }
}d|
v r|j
        d	         |	|
<   t          |	d
|          }d|v r<|d         rt          dd          rt          d          |                    d            |di |}|t          |          }                     |          \  }} ||||            ||||          }|1t          |dd
          }|rt                               d| d           |r|                                 d
S |r|                                 d
S d
S d
S )a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the unet or prefixed with an additional `unet` which can be used to distinguish between text
                encoder lora layers.
            transformer (`SD3Transformer2DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
        r   r   c                 H    g | ]}|                     j                  |S r3   r   r   s     r7   r   zASD3LoraLoaderMixin.load_lora_into_transformer.<locals>.<listcomp>  r   r:   c                 \    i | ](\  }}|v 	|                     j         d d          |)S r   r   r   s      r7   r   zASD3LoraLoaderMixin.load_lora_into_transformer.<locals>.<dictcomp>  r   r:   r   r   r   r   r   N)network_alpha_dictpeft_state_dictr   r   r   r   r  r  r  r  r3   r  )r}   rP   r)   r>   r?   r   r   r   rG   r   r6   r  r   r   r   r   r  r  r   s   `                 @r7   r	  z-SD3LoraLoaderMixin.load_lora_into_transformer  s     	XWWWWWWWWWJOO%%&&RRRRtRRR
 
 
 
 
EOEUEUEWEW
 
 

 z  !!A%%w{M2FFFF xLxxx   D&,,.. - -Ss?? #	!DI!0$`j!k!k!k///%j1 7oc76S6S 7$   '**:666$*::'9::K #/<< ?B>`>`aj>k>k; ";##K<XXXX 9 9+zS_ ` ` ,")*;=NPT"U"U" NN0+0 0 0   $ :2244444* :7799999[ &%X: :r:   Tr  r  r  r\   r  r  c                     i }d }|st          d          |r$|                     ||| j                             |                     ||||||           dS )a  
        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `transformer`.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training and you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
        c                     t          | t          j        j                  r|                                 n| }fd|                                D             }|S )Nc                 &    i | ]\  }} d | |S r  r3   r  s      r7   r   zNSD3LoraLoaderMixin.save_lora_weights.<locals>.pack_weights.<locals>.<dictcomp>Y  r!  r:   r"  r#  s    `  r7   r'  z:SD3LoraLoaderMixin.save_lora_weights.<locals>.pack_weightsW  r(  r:   z(You must pass `transformer_lora_layers`.r)  N)rA   r*  r   r+  )	r}   r  r  r  r\   r  r  rP   r'  s	            r7   r,  z$SD3LoraLoaderMixin.save_lora_weights8  s    : 
	% 	% 	%
 ' 	IGHHH" 	[ll+BCDXYYZZZ 	!)+#'1 	 	
 	
 	
 	
 	
r:   rP   c                    t           j                            |          r t                              d| d           d S ||rd }nt
          j        }t          j        |d           ||rt          }nt          }t          ||                                          } || |           t                              d|            d S )Nr.  r/  c                 J    t           j                            | |ddi          S r1  r5  r7  s     r7   r  z;SD3LoraLoaderMixin.write_lora_layers.<locals>.save_function|  r:  r:   Tr;  r=  r>  rC  s          r7   r+  z$SD3LoraLoaderMixin.write_lora_layersl  rE  r:   c                     t          | d          st          | | j                  n| j        }t	          |           t          |d          r|`dS dS )rG  r)   r   N)rI   rJ   r   r)   r   r   )rN   r)   s     r7   rJ  z&SD3LoraLoaderMixin.unload_lora_weights  sg     CJ$P]B^B^tgdD$9:::dhdt";///;.. 	('''	( 	(r:   c                    d}d}||j         |j                                        D ]\  }}t          |t          j                  rt          |d          r|st          |j        t                    }|sTt          |j        t                    p9t          |j        d          o$t          |j        j
        d         t                    }t                              d           t          ||           ||fS r   r   r   s         r7   r   z1SD3LoraLoaderMixin._optionally_disable_offloading  s     %$)! Y%<%D ) 4 : : < < Z Z9i33 Z	:8V8V Z/ Z/9):Lj/Y/Y,4 &y'9;KLL Z&y'97CC  Z *9+=+CA+FHX Y Y 2 KK Y   ,I?XYYYY$&?@@r:   r   r  )NTNNT)r  r  r  r  r  r   r_  r   rp  r   rx   r  rR   r  r   rE   r	  r   r  r   r   r  r   r,  r  r+  rJ  r   r3   r:   r7   r  r  >  s         (O hl*
 *
5:3S%,EV@W;W5X*
 *
 *
 *
X y/4S$sEL?P:Q5Q/Ry y y  [yv E: E: E: [E:P  ?C $"&#'1
 1
c2;./1
 "&c58?&:!;1
 	1

 1
  1
 !1
 1
 1
 [1
f ;el*+;; ; 	;
  ; !; ; ; \;B( ( ("  A  A [ A  A  Ar:   r  )>rD   rV  r   pathlibr   typingr   r   r   r   r   rw   rx   huggingface_hubr	   huggingface_hub.constantsr
   huggingface_hub.utilsr   r   models.modeling_utilsr   utilsr   r   r   r   r   r   r   r   r   r   r   r   r   r   r   lora_conversion_utilsr   r    transformersr!   models.lorar"   r#   accelerate.hooksr$   r%   r&   
get_loggerr  r   r  r  r  r|   rv   LORA_DEPRECATION_MESSAGEr+   r  r  r3   r:   r7   <module>r     s     				       8 8 8 8 8 8 8 8 8 8 8 8 8 8      & & & & & & 4 4 4 4 4 4 6 6 6 6 6 6       3 3 3 3 3 3                                 " h g g g g g g g  R,,,,,,QQQQQQQQ WVVVVVVVVVV		H	%	%" 	  - :  W Vc Vc Vc Vc Vc Vc Vc Vcr"^> ^> ^> ^> ^> ^> ^> ^>BCA CA CA CA CA CA CA CA CA CAr:   