
import html
from typing import Callable, Dict, List, Optional, Union

import regex as re
import torch
from torch.nn import functional as F
from transformers import CLIPTextModel, CLIPTokenizer, Qwen2_5_VLForConditionalGeneration, Qwen2VLProcessor

from ...callbacks import MultiPipelineCallbacks, PipelineCallback
from ...image_processor import PipelineImageInput
from ...loaders import KandinskyLoraLoaderMixin
from ...models import AutoencoderKLHunyuanVideo
from ...models.transformers import Kandinsky5Transformer3DModel
from ...schedulers import FlowMatchEulerDiscreteScheduler
from ...utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ...video_processor import VideoProcessor
from ..pipeline_utils import DiffusionPipeline
from .pipeline_output import KandinskyPipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

if is_ftfy_available():
    import ftfy


EXAMPLE_DOC_STRING = """
    Examples:

        ```python
        >>> import torch
        >>> from diffusers import Kandinsky5I2VPipeline
        >>> from diffusers.utils import export_to_video, load_image

        >>> # Available models:
        >>> # kandinskylab/Kandinsky-5.0-I2V-Pro-sft-5s-Diffusers

        >>> model_id = "kandinskylab/Kandinsky-5.0-I2V-Pro-sft-5s-Diffusers"
        >>> pipe = Kandinsky5I2VPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)
        >>> pipe = pipe.to("cuda")

        >>> image = load_image(
        ...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg"
        ... )
        >>> prompt = "An astronaut floating in space with Earth in the background, cinematic shot"
        >>> negative_prompt = "Static, 2D cartoon, cartoon, 2d animation, paintings, images, worst quality, low quality, ugly, deformed, walking backwards"

        >>> output = pipe(
        ...     image=image,
        ...     prompt=prompt,
        ...     negative_prompt=negative_prompt,
        ...     height=512,
        ...     width=768,
        ...     num_frames=121,
        ...     num_inference_steps=50,
        ...     guidance_scale=5.0,
        ... ).frames[0]

        >>> export_to_video(output, "output.mp4", fps=24, quality=9)
        ```
"""


def basic_clean(text):
    """
    Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py

    Clean text using ftfy if available and unescape HTML entities.
    """
    if is_ftfy_available():
        text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()


def whitespace_clean(text):
    """
    Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py

    Normalize whitespace in text by replacing multiple spaces with single space.
    z\s+ )resubr$   r%   s    r'   whitespace_cleanr.   k   s&     66&#t$D::<DKr)   c                 .    t        t        |             } | S )z
    Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py

    Apply both basic cleaning and whitespace normalization to prompts.
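
    Example (illustrative; the spacing collapse follows the regex in `whitespace_clean`):

        >>> prompt_clean("  A   cat \t jumps  ")
        'A cat jumps'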
    """
    text = whitespace_clean(basic_clean(text))
    return text


class Kandinsky5I2VPipeline(DiffusionPipeline, KandinskyLoraLoaderMixin):
    r"""
    Pipeline for image-to-video generation using Kandinsky 5.0.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Args:
        transformer ([`Kandinsky5Transformer3DModel`]):
            Conditional Transformer to denoise the encoded video latents.
        vae ([`AutoencoderKLHunyuanVideo`]):
            Variational Auto-Encoder Model [hunyuanvideo-community/HunyuanVideo
            (vae)](https://huggingface.co/hunyuanvideo-community/HunyuanVideo) to encode and decode videos to and from
            latent representations.
        text_encoder ([`Qwen2_5_VLForConditionalGeneration`]):
            Frozen text-encoder [Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct).
        tokenizer ([`AutoProcessor`]):
            Tokenizer for Qwen2.5-VL.
        text_encoder_2 ([`CLIPTextModel`]):
            Frozen [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel),
            specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer_2 ([`CLIPTokenizer`]):
            Tokenizer for CLIP.
        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
    """

    model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
    _callback_tensor_inputs = [
        "latents",
        "prompt_embeds_qwen",
        "prompt_embeds_clip",
        "negative_prompt_embeds_qwen",
        "negative_prompt_embeds_clip",
    ]

    def __init__(
        self,
        transformer: Kandinsky5Transformer3DModel,
        vae: AutoencoderKLHunyuanVideo,
        text_encoder: Qwen2_5_VLForConditionalGeneration,
        tokenizer: Qwen2VLProcessor,
        text_encoder_2: CLIPTextModel,
        tokenizer_2: CLIPTokenizer,
        scheduler: FlowMatchEulerDiscreteScheduler,
    ):
        super().__init__()

        self.register_modules(
            transformer=transformer,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            text_encoder_2=text_encoder_2,
            tokenizer_2=tokenizer_2,
            scheduler=scheduler,
        )
        self.prompt_template = "\n".join(
            [
                "<|im_start|>system\nYou are a promt engineer. Describe the video in detail.",
                "Describe how the camera moves or shakes, describe the zoom and view angle, whether it follows the objects.",
                "Describe the location of the video, main characters or objects and their action.",
                "Describe the dynamism of the video and presented actions.",
                "Name the visual style of the video: whether it is a professional footage, user generated content, some kind of animation, video game or scren content.",
                "Describe the visual effects, postprocessing and transitions if they are presented in the video.",
                "Pay attention to the order of key actions shown in the scene.<|im_end|>",
                "<|im_start|>user\n{}<|im_end|>",
            ]
        )
        # NOTE: the exact integer did not survive the damaged extraction; it marks how many
        # chat-template tokens are cropped from the Qwen hidden states ahead of the user prompt.
        self.prompt_template_encode_start_idx = 58

        self.vae_scale_factor_temporal = self.vae.config.temporal_compression_ratio if getattr(self, "vae", None) else 4
        self.vae_scale_factor_spatial = self.vae.config.spatial_compression_ratio if getattr(self, "vae", None) else 8
        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)

    def _get_scale_factor(self, height: int, width: int) -> tuple:
        """
        Calculate the scale factor based on resolution.

        Args:
            height (int): Video height
            width (int): Video width

        Returns:
            tuple: Scale factor as (temporal_scale, height_scale, width_scale)
        """

        def between_480p(x):
            return 480 <= x <= 854

        if between_480p(height) and between_480p(width):
            return (1.0, 1.0, 1.0)
        # 3.16 is the one float constant that survived extraction; applying it to both
        # spatial axes for resolutions above the 480p band is an assumption.
        return (1.0, 3.16, 3.16)

    @staticmethod
    def fast_sta_nabla(
        T: int, H: int, W: int, wT: int = 3, wH: int = 3, wW: int = 3, device: str = "cuda"
    ) -> torch.Tensor:
        r"""
        Create a sparse temporal attention (STA) mask for efficient video generation.

        This method generates a mask that limits attention to nearby frames and spatial positions, reducing
        computational complexity for video generation.

        Args:
            T (int): Number of temporal frames
            H (int): Height in latent space
            W (int): Width in latent space
            wT (int): Temporal attention window size
            wH (int): Height attention window size
            wW (int): Width attention window size
            device (str): Device to create tensor on

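        Example:
            A minimal sketch on CPU; the window sizes here are illustrative, the real
            ones come from the transformer config:

            >>> mask = Kandinsky5I2VPipeline.fast_sta_nabla(4, 6, 6, wT=1, wH=3, wW=3, device="cpu")
            >>> mask.shape
            torch.Size([144, 144])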

        Returns:
            torch.Tensor: Sparse attention mask of shape (T*H*W, T*H*W)
        """
        size = int(torch.Tensor([T, H, W]).amax().item())
        rr = torch.arange(0, size, dtype=torch.int16, device=device)
        mat = (rr.unsqueeze(1) - rr.unsqueeze(0)).abs()
        sta_t = mat[:T, :T] <= wT // 2
        sta_h = mat[:H, :H] <= wH // 2
        sta_w = mat[:W, :W] <= wW // 2

        # Combine the two spatial windows into one (H*W, H*W) block mask.
        sta_hw = (
            (sta_h.flatten().unsqueeze(1) * sta_w.flatten().unsqueeze(0))
            .reshape(H, H, W, W)
            .transpose(1, 2)
            .reshape(H * W, H * W)
        )
        # Combine the temporal window with the spatial block mask.
        sta = (
            (sta_t.flatten().unsqueeze(1) * sta_hw.flatten().unsqueeze(0))
            .reshape(T, T, H * W, H * W)
            .transpose(1, 2)
            .reshape(T * H * W, T * H * W)
        )
        return sta

    def get_sparse_params(self, sample, device) -> Dict:
        """
        Generate sparse attention parameters for the transformer based on sample dimensions.

        This method computes the sparse attention configuration needed for efficient video processing in the
        transformer model.

        Args:
            sample (torch.Tensor): Input sample tensor
            device (torch.device): Device to place tensors on

        Returns:
            Dict: Dictionary containing sparse attention parameters
        """
        assert self.transformer.config.patch_size[0] == 1
        B, T, H, W, _ = sample.shape
        T = T // self.transformer.config.patch_size[0]
        H = H // self.transformer.config.patch_size[1]
        W = W // self.transformer.config.patch_size[2]

        if self.transformer.config.attention_type == "nabla":
            sta_mask = self.fast_sta_nabla(
                T,
                H // 8,
                W // 8,
                self.transformer.config.attention_wT,
                self.transformer.config.attention_wH,
                self.transformer.config.attention_wW,
                device=device,
            )
            sparse_params = {
                "sta_mask": sta_mask.unsqueeze(0).unsqueeze(0),
                "attention_type": self.transformer.config.attention_type,
                "to_fractal": True,
                "P": self.transformer.config.attention_P,
                "wT": self.transformer.config.attention_wT,
                "wW": self.transformer.config.attention_wW,
                "wH": self.transformer.config.attention_wH,
                "add_sta": self.transformer.config.attention_add_sta,
                "visual_shape": (T, H, W),
                "method": self.transformer.config.attention_method,
            }
        else:
            sparse_params = None
        return sparse_params

    def _encode_prompt_qwen(
        self,
        prompt: Union[str, List[str]],
        device: Optional[torch.device] = None,
        max_sequence_length: int = 512,
        dtype: Optional[torch.dtype] = None,
    ):
        """
        Encode prompt using Qwen2.5-VL text encoder.

        This method processes the input prompt through the Qwen2.5-VL model to generate text embeddings suitable for
        video generation.

        Args:
            prompt (Union[str, List[str]]): Input prompt or list of prompts
            device (torch.device): Device to run encoding on
            max_sequence_length (int): Maximum sequence length for tokenization
            dtype (torch.dtype): Data type for embeddings

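        Example:
            Illustrative shapes only; assumes a loaded `pipe` instance of this pipeline:

            >>> embeds, cu_seqlens = pipe._encode_prompt_qwen(["A cat"], device=pipe.device)
            >>> embeds.ndim, cu_seqlens.dtype
            (3, torch.int32)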

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: Text embeddings and cumulative sequence lengths
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        full_texts = [self.prompt_template.format(p) for p in prompt]
        max_allowed_len = self.prompt_template_encode_start_idx + max_sequence_length

        untruncated_ids = self.tokenizer(
            text=full_texts, images=None, videos=None, return_tensors="pt", padding="longest"
        )["input_ids"]
        if untruncated_ids.shape[-1] > max_allowed_len:
            for i, text in enumerate(full_texts):
                tokens = untruncated_ids[i][self.prompt_template_encode_start_idx :]
                removed_text = self.tokenizer.decode(tokens[max_sequence_length - 1 : -1])
                if len(removed_text) > 0:
                    full_texts[i] = text[: -len(removed_text)]
                    logger.warning(
                        "The following part of your input was truncated because `max_sequence_length` is set to "
                        f"{max_sequence_length} tokens: {removed_text}"
                    )

        inputs = self.tokenizer(
            text=full_texts,
            images=None,
            videos=None,
            max_length=max_allowed_len,
            truncation=True,
            padding=True,
            return_tensors="pt",
        ).to(device)

        embeds = self.text_encoder(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            output_hidden_states=True,
        )["hidden_states"][-1][:, self.prompt_template_encode_start_idx :]
        attention_mask = inputs["attention_mask"][:, self.prompt_template_encode_start_idx :]

        cu_seqlens = torch.cumsum(attention_mask.sum(1), dim=0)
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0).to(torch.int32)
        return embeds.to(dtype), cu_seqlens

    def _encode_prompt_clip(
        self,
        prompt: Union[str, List[str]],
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        """
        Encode prompt using CLIP text encoder.

        This method processes the input prompt through the CLIP model to generate pooled embeddings that capture
        semantic information.

        Args:
            prompt (Union[str, List[str]]): Input prompt or list of prompts
            device (torch.device): Device to run encoding on
            dtype (torch.dtype): Data type for embeddings

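        Example:
            Illustrative; assumes a loaded `pipe` with the CLIP-L text encoder (768-dim
            pooled output):

            >>> pooled = pipe._encode_prompt_clip(["A cat"], device=pipe.device)
            >>> tuple(pooled.shape)
            (1, 768)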

        Returns:
            torch.Tensor: Pooled text embeddings from CLIP
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder_2.dtype

        inputs = self.tokenizer_2(
            prompt,
            max_length=77,
            padding="max_length",
            add_special_tokens=True,
            truncation=True,
            return_tensors="pt",
        ).to(device)

        pooled_embed = self.text_encoder_2(**inputs)["pooler_output"]
        return pooled_embed.to(dtype)

    @staticmethod
    def adaptive_mean_std_normalization(source, reference):
        source_mean = source.mean(dim=(1, 2, 3, 4), keepdim=True)
        source_std = source.std(dim=(1, 2, 3, 4), keepdim=True)
        # NOTE: the original clamp-band constants did not survive extraction; the 0.2
        # bands below are assumptions that keep the reference statistics near the source.
        clump_mean_low, clump_mean_high = 0.2, 0.2
        clump_std_low, clump_std_high = 0.2, 0.2

        reference_mean = torch.clamp(
            reference.mean(), source_mean - clump_mean_low, source_mean + clump_mean_high
        )
        reference_std = torch.clamp(
            reference.std(), source_std - clump_std_low, source_std + clump_std_high
        )

        normalized = (source - source_mean) / source_std
        normalized = normalized * reference_std + reference_mean
        return normalized

    def normalize_first_frame(self, latents, reference_frames, clump_values=False):
        latents_copy = latents.clone()
        samples = latents

        if samples.shape[1] <= 1:
            logger.debug("Only one frame, no normalization needed")
            return latents

        nF = 1  # only the first latent frame is renormalized
        first_frames = samples[:, :nF]
        # assumption: compare against the first few reference frames that are available
        reference_frames_data = reference_frames[:, : max(1, min(reference_frames.shape[1] - 1, 8))]
        normalized_first = self.adaptive_mean_std_normalization(first_frames, reference_frames_data)

        if clump_values:
            min_val = first_frames.min()
            max_val = first_frames.max()
            normalized_first = torch.clamp(normalized_first, min_val, max_val)

        latents_copy[:, :nF] = normalized_first
        return latents_copy

    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        num_videos_per_prompt: int = 1,
        max_sequence_length: int = 512,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        r"""
        Encodes a single prompt (positive or negative) into text encoder hidden states.

        This method combines embeddings from both Qwen2.5-VL and CLIP text encoders to create comprehensive text
        representations for video generation.

        Args:
            prompt (`str` or `List[str]`):
                Prompt to be encoded.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos to generate per prompt.
            max_sequence_length (`int`, *optional*, defaults to 512):
                Maximum sequence length for text encoding.
            device (`torch.device`, *optional*):
                Torch device.
            dtype (`torch.dtype`, *optional*):
                Torch dtype.

        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
                - Qwen text embeddings of shape (batch_size * num_videos_per_prompt, sequence_length, embedding_dim)
                - CLIP pooled embeddings of shape (batch_size * num_videos_per_prompt, clip_embedding_dim)
                - Cumulative sequence lengths (`cu_seqlens`) for Qwen embeddings of shape (batch_size *
                  num_videos_per_prompt + 1,)
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        if not isinstance(prompt, list):
            prompt = [prompt]
        batch_size = len(prompt)
        prompt = [prompt_clean(p) for p in prompt]

        prompt_embeds_qwen, prompt_cu_seqlens = self._encode_prompt_qwen(
            prompt=prompt,
            device=device,
            max_sequence_length=max_sequence_length,
            dtype=dtype,
        )
        prompt_embeds_clip = self._encode_prompt_clip(prompt=prompt, device=device, dtype=dtype)

        # Duplicate the embeddings for every video generated per prompt.
        prompt_embeds_qwen = prompt_embeds_qwen.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds_qwen = prompt_embeds_qwen.view(
            batch_size * num_videos_per_prompt, -1, prompt_embeds_qwen.shape[-1]
        )
        prompt_embeds_clip = prompt_embeds_clip.repeat(1, num_videos_per_prompt)
        prompt_embeds_clip = prompt_embeds_clip.view(batch_size * num_videos_per_prompt, -1)

        original_lengths = prompt_cu_seqlens.diff()
        repeated_lengths = original_lengths.repeat_interleave(num_videos_per_prompt)
        repeated_cu_seqlens = torch.cat(
            [torch.tensor([0], device=device, dtype=torch.int32), repeated_lengths.cumsum(0)]
        )

        return prompt_embeds_qwen, prompt_embeds_clip, repeated_cu_seqlens

    def check_inputs(
        self,
        prompt,
        negative_prompt,
        image,
        height,
        width,
        prompt_embeds_qwen=None,
        prompt_embeds_clip=None,
        negative_prompt_embeds_qwen=None,
        negative_prompt_embeds_clip=None,
        prompt_cu_seqlens=None,
        negative_prompt_cu_seqlens=None,
        callback_on_step_end_tensor_inputs=None,
        max_sequence_length=None,
    ):
        """
        Validate input parameters for the pipeline.

        Args:
            prompt: Input prompt
            negative_prompt: Negative prompt for guidance
            image: Input image for conditioning
            height: Video height
            width: Video width
            prompt_embeds_qwen: Pre-computed Qwen prompt embeddings
            prompt_embeds_clip: Pre-computed CLIP prompt embeddings
            negative_prompt_embeds_qwen: Pre-computed Qwen negative prompt embeddings
            negative_prompt_embeds_clip: Pre-computed CLIP negative prompt embeddings
            prompt_cu_seqlens: Pre-computed cumulative sequence lengths for Qwen positive prompt
            negative_prompt_cu_seqlens: Pre-computed cumulative sequence lengths for Qwen negative prompt
            callback_on_step_end_tensor_inputs: Callback tensor inputs

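        Example:
            Illustrative; a height that is not a multiple of 16 fails fast:

            >>> pipe.check_inputs("A cat", None, image, height=500, width=768)
            Traceback (most recent call last):
                ...
            ValueError: `height` and `width` have to be divisible by 16 but are 500 and 768.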

        Raises:
            ValueError: If inputs are invalid
        """
        if max_sequence_length is not None and max_sequence_length > 1024:
            raise ValueError("max_sequence_length must be less than 1024")

        if image is None:
            raise ValueError("`image` must be provided for image-to-video generation")

        if height % 16 != 0 or width % 16 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found"
                f" {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        positive_group = (prompt_embeds_qwen, prompt_embeds_clip, prompt_cu_seqlens)
        if any(x is not None for x in positive_group) and not all(x is not None for x in positive_group):
            raise ValueError(
                "If any of `prompt_embeds_qwen`, `prompt_embeds_clip`, or `prompt_cu_seqlens` is provided, all three"
                " must be provided."
            )

        negative_group = (negative_prompt_embeds_qwen, negative_prompt_embeds_clip, negative_prompt_cu_seqlens)
        if any(x is not None for x in negative_group) and not all(x is not None for x in negative_group):
            raise ValueError(
                "If any of `negative_prompt_embeds_qwen`, `negative_prompt_embeds_clip`, or"
                " `negative_prompt_cu_seqlens` is provided, all three must be provided."
            )

        if prompt is None and prompt_embeds_qwen is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds_qwen` (and corresponding `prompt_embeds_clip` and"
                " `prompt_cu_seqlens`). Cannot leave all undefined."
            )

        if prompt is not None and not isinstance(prompt, (str, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if negative_prompt is not None and not isinstance(negative_prompt, (str, list)):
            raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")

    def prepare_latents(
        self,
        image: PipelineImageInput,
        batch_size: int,
        num_channels_latents: int = 16,
        height: int = 512,
        width: int = 768,
        num_frames: int = 121,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        Prepare initial latent variables for image-to-video generation.

        This method creates random noise latents for all frames except the first frame, which is replaced with the
        encoded input image.

        Args:
            image (PipelineImageInput): Input image to condition the generation on
            batch_size (int): Number of videos to generate
            num_channels_latents (int): Number of channels in latent space
            height (int): Height of generated video
            width (int): Width of generated video
            num_frames (int): Number of frames in video
            dtype (torch.dtype): Data type for latents
            device (torch.device): Device to create latents on
            generator (torch.Generator): Random number generator
            latents (torch.Tensor): Pre-existing latents to use

        Returns:
            torch.Tensor: Prepared latent tensor with first frame as encoded image
        """
        if latents is not None:
            return latents.to(device=device, dtype=dtype)

        num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
        shape = (
            batch_size,
            num_latent_frames,
            int(height) // self.vae_scale_factor_spatial,
            int(width) // self.vae_scale_factor_spatial,
            num_channels_latents,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        image_tensor = self.video_processor.preprocess(image, height=height, width=width)
        image_video = image_tensor.unsqueeze(2).to(device, dtype=self.vae.dtype)

        with torch.no_grad():
            image_latents = self.vae.encode(image_video).latent_dist.sample(generator)
            if hasattr(self.vae.config, "scaling_factor"):
                image_latents = image_latents * self.vae.config.scaling_factor
            # (B, C, T, H, W) -> (B, T, H, W, C), matching the channels-last latent layout
            image_latents = image_latents.permute(0, 2, 3, 4, 1)
            latents[:, :1] = image_latents

        if self.transformer.config.visual_cond:
            visual_cond = torch.zeros_like(latents)
            visual_cond_mask = torch.zeros(
                [batch_size, num_latent_frames, latents.shape[2], latents.shape[3], 1],
                dtype=latents.dtype,
                device=latents.device,
            )
            visual_cond[:, :1] = image_latents
            visual_cond_mask[:, :1] = 1.0
            latents = torch.cat([latents, visual_cond, visual_cond_mask], dim=-1)

        return latents

    @property
    def guidance_scale(self):
        """Get the current guidance scale value."""
        return self._guidance_scale

    @property
    def num_timesteps(self):
        """Get the number of denoising timesteps."""
        return self._num_timesteps

    @property
    def interrupt(self):
        """Check if generation has been interrupted."""
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput,
        prompt: Union[str, List[str]] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 768,
        num_frames: int = 121,
        num_inference_steps: int = 50,
        guidance_scale: float = 5.0,
        num_videos_per_prompt: Optional[int] = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds_qwen: Optional[torch.Tensor] = None,
        prompt_embeds_clip: Optional[torch.Tensor] = None,
        negative_prompt_embeds_qwen: Optional[torch.Tensor] = None,
        negative_prompt_embeds_clip: Optional[torch.Tensor] = None,
        prompt_cu_seqlens: Optional[torch.Tensor] = None,
        negative_prompt_cu_seqlens: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 512,
    ):
        r"""
        The call function to the pipeline for image-to-video generation.

        Args:
            image (`PipelineImageInput`):
                The input image to condition the generation on. Must be an image, a list of images or a `torch.Tensor`.
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the video generation. If not defined, pass `prompt_embeds` instead.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to avoid during video generation. If not defined, pass `negative_prompt_embeds`
                instead. Ignored when not using guidance (`guidance_scale` < `1`).
            height (`int`, defaults to `512`):
                The height in pixels of the generated video.
            width (`int`, defaults to `768`):
                The width in pixels of the generated video.
            num_frames (`int`, defaults to `121`):
                The number of frames in the generated video.
            num_inference_steps (`int`, defaults to `50`):
                The number of denoising steps.
            guidance_scale (`float`, defaults to `5.0`):
                Guidance scale as defined in classifier-free guidance.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A torch generator to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents.
            prompt_embeds_qwen (`torch.Tensor`, *optional*):
                Pre-generated Qwen text embeddings.
            prompt_embeds_clip (`torch.Tensor`, *optional*):
                Pre-generated CLIP text embeddings.
            negative_prompt_embeds_qwen (`torch.Tensor`, *optional*):
                Pre-generated Qwen negative text embeddings.
            negative_prompt_embeds_clip (`torch.Tensor`, *optional*):
                Pre-generated CLIP negative text embeddings.
            prompt_cu_seqlens (`torch.Tensor`, *optional*):
                Pre-generated cumulative sequence lengths for Qwen positive prompt.
            negative_prompt_cu_seqlens (`torch.Tensor`, *optional*):
                Pre-generated cumulative sequence lengths for Qwen negative prompt.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated video.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`KandinskyPipelineOutput`].
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function that is called at the end of each denoising step.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function.
            max_sequence_length (`int`, defaults to `512`):
                The maximum sequence length for text encoding.

        Examples:

        Returns:
            [`~KandinskyPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`KandinskyPipelineOutput`] is returned, otherwise a `tuple` is returned
                where the first element is a list with the generated videos.
        )r   r  r  rT   rU   r4   r5   r6   r7   r   r  r	  r   r   z(`num_frames - 1` has to be divisible by z!. Rounding to the nearest number.FNr   )r   r   r   rh   rg   g      ?z{Static, 2D cartoon, cartoon, 2d animation, paintings, images, worst quality, low quality, ugly, deformed, walking backwardsz9`negative_prompt` must have same length as `prompt`. Got z vs r   r~   )
r  r   r  rT   rU   r  rg   rh   r  r3   r   )totalT)	r   encoder_hidden_statespooled_projectionstimestepvisual_rope_postext_rope_posscale_factorr   r   )r   r3   r4   r5   r6   r7   latent   r   rB   )r.  )frames)<r   r   r   tensor_inputsr
  rN   r   r   r   r!  r(  r   r8   rg   r  r   r   r   r   r#  r  r>   set_timesteps	timestepsrL   in_visual_dimr  ri   rl   rP   r   itemr^   r   orderr%  progress_barr   r)  rn   r   r   r   steplocalspopupdateXLA_AVAILABLExm	mark_stepr   r9   rq   r  r  r   rQ   postprocess_videomaybe_free_model_hooksr   )-rR   r  r   r  rT   rU   r  r-  r#  r   r  r3   r4   r5   r6   r7   r   r  r.  r   r/  r	  r   rh   rg   r   r=  r  r  r5  r6  negative_text_rope_posr7  r   num_warmup_stepsrA  r   tr4  pred_velocityuncond_pred_velocitycallback_kwargsr   callback_outputsvideos-                                                r'   __call__zKandinsky5I2VPipeline.__call__  s   l *-=?U,VW1E1S1S. 	+11(C(C/'A/Q 3 	 	
  666!;NN:4;Y;Y:ZZ{| $t'E'EEHfHffijjJQ'
-''  && *VS"9JXFJvt$<VJ+11!4J %HLHZHZ&;$7 I[ IE 24E $& #`/3/EKEW?"3c&k"A^m]n_%V4 OPSTcPdOeeijmntjuivvwx  +2&&..C,?%# '  e+-HJd 	$$%8$HNN,,	  $//66DD&&!$99!5! ' 
 (!^0N0NNQRRLL*6:LL4#@#@@AEfULL$"?"??1DVT
 %6%;%;%=%A%A%C%H%H%JSYZ *5 LL388:>>@EEGPVW 	 --fe< ..w? y>,?$..BVBV,VV!)n%89 ;	#\!), :#1>>;;q>00>S1ST !% 0 0")**U"3*<*?*?*F'9'<'<U'C%[[/$3"/!-"/ $ !1 
! &  &&,1L1X+/+;+;&-jj&7.I.L.LU.S+F+I+I%+P!)U!3(7&<%1&3$( ,< 
, f ) %9>]]qMq;r$rM>Bnn>Q>Q!!QR%(!WQAqBWCWBW5W-Xfk ?R ??12q!%:&:%::; (3&(O? 9-3Xa[*9';D!Q'X$.229gFG)9)=)=>RTf)g&)9)=)=>RTf)g&2B2F2F57R3/ 3C2F2F57R3/ I**A9I/IqSTuX\XfXfXlXlNlpqNq '') LLNu:#;	#| !Q1&;';&;;< ,,W5 ("jj0GOO%aD$B$BBQF$777666$E MM!Q1a3EMM22$aD$B$BBQF$777666E DHHOO:::EHHOOE*11E((::5k:ZEE 	##%8O&e44K;	# ;	#s   H<]1]11];)r   r   r   cuda)N   N)NN)r9  F)r   r   NN)NNNNNNNN)r   rY   i@  Q   NNNN)4__name__
__module____qualname____doc__model_cpu_offload_seqr   r   r   r   r   r	   r
   r   rF   r  tupler^   staticmethodri   rj   r{   r   r   r  r   r   rh   rg   r   r   r   r   r   r
  r   	Generatorr  propertyr#  r&  r)  r  r   EXAMPLE_DOC_STRINGfloatboolr   r   r   r   rS  __classcell__)rS   s   @r'   r2   r2      sC   4 M(^1(^ '(^ 9	(^
 $(^ &(^ #(^ 3(^T# #C #E #(  1#  1#  1#  13  1  1S  1hmhtht  1  1D/h *.#&'+B,c49n%B, &B, !	B,
 $B,N *.'+	"&c49n%"& &"& $	"&H  $0 &'#&)-'+YKc49n%YK  #YK !	YK
 &YK $YKD  $($(#'+/ Yt~ %''+)-MQ*._!_ _ "	_
 _ _ _ $_ &_ E%//43H"HIJ_ %,,'_ 
_B $ $ # #   U]]_12 )-;?#% #/0MQ*.5959>B>B48=A%*  9B#&3p5!p5 c49n%p5 "%T#Y"78	p5
 p5 p5 p5 !p5 p5  (}p5 E%//43H"HIJp5 %,,'p5 %U\\2p5 %U\\2p5 &.ell%;p5  &.ell%;!p5" $ELL1#p5$ %-U\\$:%p5& c]'p5( )p5* '(Cd+T124DF\\]
+p50 -1I1p52 !3p5 3 p5r)   r2   ):r"   typingr   r   r   r   r   regexr,   ri   torch.nnr   r   transformersr	   r
   r   r   	callbacksr   r   image_processorr   loadersr   modelsr   models.transformersr   
schedulersr   utilsr   r   r   r   utils.torch_utilsr   rQ   r   pipeline_utilsr   pipeline_outputr   torch_xla.core.xla_modelcore	xla_modelrG  rF  
get_loggerrW  r   r    r`  r(   r.   r0   r2   rZ   r)   r'   <module>rv     s     8 8   $ k k A 1 / / ? 9  . - . 4 ))MM			H	% 
		H	%" J	^5-/G ^5r)   