import copy
import io
import wave
from queue import Queue
from threading import Thread
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union

import gradio as gr
import numpy as np
import spaces
import torch
import torchaudio
from demucs import pretrained
from demucs.apply import apply_model
from demucs.audio import convert_audio
from transformers import (
    AutoProcessor,
    MusicgenForConditionalGeneration,
    MusicgenMelodyForConditionalGeneration,
    set_seed,
)
from transformers.generation.configuration_utils import GenerationConfig
from transformers.generation.logits_process import ClassifierFreeGuidanceLogitsProcessor, LogitsProcessorList
from transformers.generation.stopping_criteria import StoppingCriteriaList
from transformers.generation.streamers import BaseStreamer
from transformers.modeling_outputs import BaseModelOutput
from transformers.utils import logging

logger = logging.get_logger(__name__)


class MusicgenMelodyForLongFormConditionalGeneration(MusicgenMelodyForConditionalGeneration):
    # number of audio codes kept from the previous window and re-used to prompt the next one
    stride_longform = 750

    def _prepare_audio_encoder_kwargs_for_longform_generation(self, audio_codes, model_kwargs):
        frames, bsz, codebooks, seq_len = audio_codes.shape
        if frames != 1:
            raise ValueError(
                f"Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is "
                "disabled by setting `chunk_length=None` in the audio encoder."
            )

        decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len)
        model_kwargs["decoder_input_ids"] = decoder_input_ids
        return model_kwargs

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        synced_gpus: Optional[bool] = None,
        max_longform_generation_length: Optional[int] = 4000,
        streamer: Optional["BaseStreamer"] = None,
        **kwargs,
    ):
        """
        Generates sequences of token ids for models with a language modeling head.

        Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
        model's default generation configuration. You can override any `generation_config` by passing the
        corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.

        For an overview of generation strategies and code examples, check out the [following
        guide](./generation_strategies).

        Parameters:
            inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
                The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
                method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
                should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of
                `input_ids`, `input_values`, `input_features`, or `pixel_values`.
            generation_config (`~generation.GenerationConfig`, *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration.
                Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default
                values, whose documentation should be checked to parameterize generation.
            logits_processor (`LogitsProcessorList`, *optional*):
                Custom logits processors that complement the default logits processors built from arguments and
                generation config. If a logit processor is passed that is already created with the arguments or a
                generation config an error is thrown. This feature is intended for advanced users.
            stopping_criteria (`StoppingCriteriaList`, *optional*):
                Custom stopping criteria that complement the default stopping criteria built from arguments and a
                generation config. If a stopping criteria is passed that is already created with the arguments or a
                generation config an error is thrown. This feature is intended for advanced users.
            synced_gpus (`bool`, *optional*, defaults to `False`):
                Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
            streamer (`BaseStreamer`, *optional*):
                Streamer object that will be used to stream the generated sequences. Generated tokens are passed
                through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
            kwargs (`Dict[str, Any]`, *optional*):
                Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
                forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
                specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with
                *decoder_*.

        Return:
            [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
            or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.

                If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
                [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateDecoderOnlyOutput`],
                    - [`~generation.GenerateBeamDecoderOnlyOutput`]

                If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
                [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateEncoderDecoderOutput`],
                    - [`~generation.GenerateBeamEncoderDecoderOutput`]
        """
        # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects
        if generation_config is None:
            generation_config = self.generation_config

        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)  # All unused kwargs must be model kwargs
        generation_config.validate()
        self._validate_model_kwargs(model_kwargs.copy())

        # 2. Set generation parameters if not already defined
        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

        if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
            if model_kwargs.get("attention_mask", None) is None:
                logger.warning(
                    "The attention mask and the pad token id were not set. As a consequence, you may observe "
                    "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
                )
            eos_token_id = generation_config.eos_token_id
            if isinstance(eos_token_id, list):
                eos_token_id = eos_token_id[0]
            logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
            generation_config.pad_token_id = eos_token_id
        # 3. Define model inputs
        # inputs_tensor has to be defined
        # model_input_name is defined if model-specific keyword input is passed
        # otherwise model_input_name is None
        # all model-specific keyword inputs are removed from `model_kwargs`
        inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
            inputs, generation_config.bos_token_id, model_kwargs
        )
        batch_size = inputs_tensor.shape[0]

        # 4. Define other model kwargs
        model_kwargs["output_attentions"] = generation_config.output_attentions
        model_kwargs["output_hidden_states"] = generation_config.output_hidden_states
        model_kwargs["use_cache"] = generation_config.use_cache
        model_kwargs["guidance_scale"] = generation_config.guidance_scale

        if model_kwargs.get("attention_mask", None) is None:
            model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
                inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id
            )

        if "encoder_hidden_states" not in model_kwargs:
            # encoder_hidden_states are created and added to `model_kwargs`
            model_kwargs = self._prepare_encoder_hidden_states_kwargs_for_generation(
                inputs_tensor,
                model_kwargs,
                model_input_name,
                guidance_scale=generation_config.guidance_scale,
            )

        # 5. Prepare `input_ids` which will be used for auto-regressive generation
        input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
            batch_size=batch_size,
            model_input_name=model_input_name,
            model_kwargs=model_kwargs,
            decoder_start_token_id=generation_config.decoder_start_token_id,
            bos_token_id=generation_config.bos_token_id,
            device=inputs_tensor.device,
        )

        # 6. Prepare `max_length` depending on other stopping criteria.
        input_ids_seq_length = input_ids.shape[-1]
        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        if has_default_max_length and generation_config.max_new_tokens is None:
            logger.warning(
                f"Using the model-agnostic default `max_length` (={generation_config.max_length}) "
                "to control the generation length. We recommend setting `max_new_tokens` to control the maximum length of the generation."
            )
        elif generation_config.max_new_tokens is not None:
            if not has_default_max_length:
                logger.warning(
                    f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
                    f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                    "Please refer to the documentation for more information. "
                    "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
                )
            generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length

        if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
            raise ValueError(
                f"Unfeasible length constraints: the minimum length ({generation_config.min_length}) is larger than"
                f" the maximum length ({generation_config.max_length})"
            )
        if input_ids_seq_length >= generation_config.max_length:
            logger.warning(
                f"Input length of decoder_input_ids is {input_ids_seq_length}, but `max_length` is set to"
                f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
                " increasing `max_new_tokens`."
            )

        # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to Musicgen Melody)
        input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
            input_ids,
            pad_token_id=generation_config.decoder_start_token_id,
            max_length=generation_config.max_length,
        )
        # stash the delay mask so that we don't have to recompute in each forward pass
        model_kwargs["decoder_delay_pattern_mask"] = decoder_delay_pattern_mask

        # input_ids are ready to be placed on the streamer (if used)
        if streamer is not None:
            streamer.put(input_ids.cpu())

        # 7. determine generation mode
        is_greedy_gen_mode = (
            (generation_config.num_beams == 1)
            and (generation_config.num_beam_groups == 1)
            and generation_config.do_sample is False
        )
        is_sample_gen_mode = (
            (generation_config.num_beams == 1)
            and (generation_config.num_beam_groups == 1)
            and generation_config.do_sample is True
        )

        # 8. prepare batched CFG externally (to enable coexistence with the unbatched CFG)
        if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1:
            logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale))
            generation_config.guidance_scale = None

        # 9. prepare distribution pre_processing samplers
        logits_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_seq_length,
            encoder_input_ids=inputs_tensor,
            prefix_allowed_tokens_fn=None,
            logits_processor=logits_processor,
        )

        # 10. prepare stopping criteria
        stopping_criteria = self._get_stopping_criteria(
            generation_config=generation_config, stopping_criteria=stopping_criteria
        )

        # ENTER LONGFORM GENERATION LOOP
        generated_tokens = []
        # the first timestep corresponds to the decoder_start_token
        current_generated_length = input_ids.shape[1] - 1
        max_new_tokens = generation_config.max_new_tokens

        while current_generated_length + 4 <= max_longform_generation_length:
            generation_config.max_new_tokens = min(max_new_tokens, max_longform_generation_length - current_generated_length)

            if is_greedy_gen_mode:
                if generation_config.num_return_sequences > 1:
                    raise ValueError(
                        "num_return_sequences has to be 1 when doing greedy search, "
                        f"but is {generation_config.num_return_sequences}."
                    )

                # 11. run greedy search
                outputs = self._greedy_search(
                    input_ids,
                    logits_processor=logits_processor,
                    stopping_criteria=stopping_criteria,
                    pad_token_id=generation_config.pad_token_id,
                    eos_token_id=generation_config.eos_token_id,
                    output_scores=generation_config.output_scores,
                    return_dict_in_generate=generation_config.return_dict_in_generate,
                    synced_gpus=synced_gpus,
                    streamer=streamer,
                    **model_kwargs,
                )

            elif is_sample_gen_mode:
                # 11. prepare logits warper
                logits_warper = self._get_logits_warper(generation_config)

                # expand input_ids with `num_return_sequences` additional sequences per batch
                input_ids, model_kwargs = self._expand_inputs_for_generation(
                    input_ids=input_ids,
                    expand_size=generation_config.num_return_sequences,
                    is_encoder_decoder=self.config.is_encoder_decoder,
                    **model_kwargs,
                )
                # 12. run sample
                outputs = self._sample(
                    input_ids,
                    logits_processor=logits_processor,
                    logits_warper=logits_warper,
                    stopping_criteria=stopping_criteria,
                    pad_token_id=generation_config.pad_token_id,
                    eos_token_id=generation_config.eos_token_id,
                    output_scores=generation_config.output_scores,
                    return_dict_in_generate=generation_config.return_dict_in_generate,
                    synced_gpus=synced_gpus,
                    streamer=streamer,
                    **model_kwargs,
                )

            else:
                raise ValueError(
                    "Got incompatible mode for generation, should be one of greedy or sampling. "
                    "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`."
                )

            if generation_config.return_dict_in_generate:
                output_ids = outputs.sequences
            else:
                output_ids = outputs

            # apply the pattern mask to the final ids
            output_ids = self.decoder.apply_delay_pattern_mask(output_ids, model_kwargs["decoder_delay_pattern_mask"])

            # revert the pattern delay mask by filtering the pad token id
            output_ids = output_ids[output_ids != generation_config.pad_token_id].reshape(
                batch_size, self.decoder.num_codebooks, -1
            )

            if len(generated_tokens) >= 1:
                # keep only the tokens generated beyond the overlap with the previous window
                generated_tokens.append(output_ids[:, :, self.stride_longform:])
            else:
                generated_tokens.append(output_ids)

            current_generated_length += generated_tokens[-1].shape[-1]

            # append the frame dimension back to the audio codes
            # use the last generated tokens as the beginning of the next generation window
            output_ids = output_ids[None, :, :, -self.stride_longform:]

            model_kwargs = self._prepare_audio_encoder_kwargs_for_longform_generation(output_ids, model_kwargs)

            # Prepare new `input_ids` which will be used for auto-regressive generation
            input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
                batch_size=batch_size,
                model_input_name="input_ids",
                model_kwargs=model_kwargs,
                decoder_start_token_id=self.generation_config.decoder_start_token_id,
                bos_token_id=self.generation_config.bos_token_id,
                device=input_ids.device,
            )

            # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to Musicgen Melody)
            input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
                input_ids,
                pad_token_id=generation_config.decoder_start_token_id,
                max_length=generation_config.max_length,
            )
            # stash the delay mask so that we don't have to recompute in each forward pass
            model_kwargs["decoder_delay_pattern_mask"] = decoder_delay_pattern_mask

            # TODO(YL): periodic prompt song
            # encoder_hidden_states are created and added to `model_kwargs`
            # model_kwargs = self._prepare_encoder_hidden_states_kwargs_for_generation(
            #     inputs_tensor,
            #     model_kwargs,
            #     model_input_name,
            #     guidance_scale=generation_config.guidance_scale,
            # )

        # append the frame dimension back to the audio codes
        output_ids = torch.cat(generated_tokens, dim=-1)[None, ...]
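        # `output_ids` now has shape (1, batch_size, num_codebooks, total_length): each long-form window past the
        # first contributed only the codes generated beyond its `stride_longform`-token overlap with the previous one.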
        # Specific to this gradio demo
        if streamer is not None:
            streamer.end(final_end=True)

        audio_scales = model_kwargs.get("audio_scales")
        if audio_scales is None:
            audio_scales = [None] * batch_size

        if self.decoder.config.audio_channels == 1:
            output_values = self.audio_encoder.decode(
                output_ids,
                audio_scales=audio_scales,
            ).audio_values
        else:
            codec_outputs_left = self.audio_encoder.decode(output_ids[:, :, ::2, :], audio_scales=audio_scales)
            output_values_left = codec_outputs_left.audio_values

            codec_outputs_right = self.audio_encoder.decode(output_ids[:, :, 1::2, :], audio_scales=audio_scales)
            output_values_right = codec_outputs_right.audio_values

            output_values = torch.cat([output_values_left, output_values_right], dim=1)

        if generation_config.return_dict_in_generate:
            outputs.sequences = output_values
            return outputs
        else:
            return output_values


model = MusicgenMelodyForLongFormConditionalGeneration.from_pretrained("facebook/musicgen-melody", revision="refs/pr/14")  # , attn_implementation="sdpa")
processor = AutoProcessor.from_pretrained("facebook/musicgen-melody", revision="refs/pr/14")

demucs = pretrained.get_model('htdemucs')

title = "Streaming Long-form MusicGen"

description = """
Stream the outputs of the MusicGen Melody text-to-music model by playing the generated audio as soon as the first chunk is ready.

The generation loop is adapted to perform **long-form** music generation. In this demo, the duration of the generated music is limited to 1 minute 20 seconds, but in theory it could run **endlessly**.

Demo uses [MusicGen Melody](https://huggingface.co/facebook/musicgen-melody) in the 🤗 Transformers library. Note that the demo works best in the Chrome browser: if there is no audio output, try switching to Chrome.
"""

article = """
## FAQ

### How Does It Work?

MusicGen is an auto-regressive transformer-based model, meaning it generates audio codes (tokens) in a causal fashion.
At each decoding step, the model generates a new set of audio codes, conditional on the text input and all previous audio codes. Given the frame rate of the [EnCodec model](https://huggingface.co/facebook/encodec_32khz)
used to decode the generated codes to audio waveform, each set of generated audio codes corresponds to 0.02 seconds of audio. This means we require a total of 1000 decoding steps to generate
20 seconds of audio.

Rather than waiting for the entire audio sequence to be generated, which would require the full 1000 decoding steps, we can start playing the audio after a specified number of decoding steps have been reached,
a technique known as [*streaming*](https://huggingface.co/docs/transformers/main/en/generation_strategies#streaming). For example, after 250 steps we have the first 5 seconds of audio ready, and so can play
this without waiting for the remaining 750 decoding steps to be complete. As we continue to generate with the MusicGen model, we append new chunks of generated audio to our output waveform on-the-fly.
After the full 1000 decoding steps, the generated audio is complete, and is composed of four chunks of audio, each corresponding to 250 tokens.

This method of playing incremental generations **reduces the latency** of the MusicGen model from the total time to generate 1000 tokens, to the time taken to play the first chunk of audio (250 tokens).
This can result in **significant improvements** to perceived latency, particularly when the chunk size is chosen to be small.
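
As a rough sketch of the arithmetic (assuming the ~50 Hz frame rate of the EnCodec model quoted above, i.e. 0.02 seconds of audio per set of codes):

```python
frame_rate = 50                                  # audio code sets generated per second of audio
play_steps_in_s = 5.0                            # play the first chunk once ~5 s of audio is ready
play_steps = int(frame_rate * play_steps_in_s)   # 250 decoding steps per chunk
total_steps = int(frame_rate * 20)               # 1000 decoding steps for 20 s of audio
```
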
In practice, the chunk size should be tuned to your device: using a smaller chunk size will mean that the first chunk is ready faster, but it should not be chosen so small that the model generates
more slowly than the time it takes to play the audio.

For details on how the streaming class works, check out the source code for the [MusicgenStreamer](https://huggingface.co/spaces/sanchit-gandhi/musicgen-streaming/blob/main/app.py#L52).

### Could this be used for stereo music generation?

In theory, yes, but you would have to adapt the current demo a bit and use a checkpoint specifically made for stereo generation, for example this [one](https://huggingface.co/facebook/musicgen-stereo-melody).

### Why is there a delay between the moment the first chunk is generated and the moment the audio starts playing?

This behaviour is specific to Gradio and the different components it uses. If you ever adapt this demo for another streaming use case, you could achieve lower latency.
"""


class MusicgenStreamer(BaseStreamer):
    def __init__(
        self,
        model: MusicgenMelodyForConditionalGeneration,
        device: Optional[str] = None,
        play_steps: Optional[int] = 10,
        stride: Optional[int] = None,
        timeout: Optional[float] = None,
        is_longform: Optional[bool] = False,
        longform_stride: Optional[float] = 10,
    ):
        """
        Streamer that stores playback-ready audio in a queue, to be used by a downstream application as an iterator.
        This is useful for applications that benefit from accessing the generated audio in a non-blocking way (e.g.
        in an interactive Gradio demo).

        Parameters:
            model (`MusicgenForConditionalGeneration`):
                The MusicGen model used to generate the audio waveform.
            device (`str`, *optional*):
                The torch device on which to run the computation. If `None`, will default to the device of the model.
            play_steps (`int`, *optional*, defaults to 10):
                The number of generation steps with which to return the generated audio array. Using fewer steps will
                mean the first chunk is ready faster, but will require more codec decoding steps overall. This value
                should be tuned to your device and latency requirements.
            stride (`int`, *optional*):
                The window (stride) between adjacent audio samples. Using a stride between adjacent audio samples
                reduces the hard boundary between them, giving smoother playback. If `None`, will default to a value
                equivalent to play_steps // 6 in the audio space.
            timeout (`int`, *optional*):
                The timeout for the audio queue. If `None`, the queue will block indefinitely. Useful to handle
                exceptions in `.generate()`, when it is called in a separate thread.
            is_longform (`bool`, *optional*, defaults to `False`):
                If `is_longform`, takes into account the long-form stride and the non-regular ending signal.
        """
        self.decoder = model.decoder
        self.audio_encoder = model.audio_encoder
        self.generation_config = model.generation_config
        self.device = device if device is not None else model.device
        self.longform_stride = longform_stride

        # variables used in the streaming process
        self.play_steps = play_steps
        if stride is not None:
            self.stride = stride
        else:
            hop_length = np.prod(self.audio_encoder.config.upsampling_ratios)
            self.stride = hop_length * (play_steps - self.decoder.num_codebooks) // 6
        self.token_cache = None
        self.to_yield = 0
        self.is_longform = is_longform
        self.previous_len = -1

        # variables used in the thread process
        self.audio_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def apply_delay_pattern_mask(self, input_ids):
        # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to MusicGen)
        _, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
            input_ids[:, :1],
            pad_token_id=self.generation_config.decoder_start_token_id,
            max_length=input_ids.shape[-1],
        )
        # apply the pattern mask to the input ids
        input_ids = self.decoder.apply_delay_pattern_mask(input_ids, decoder_delay_pattern_mask)

        # revert the pattern delay mask by filtering the pad token id
        input_ids = input_ids[input_ids != self.generation_config.pad_token_id].reshape(
            1, self.decoder.num_codebooks, -1
        )

        # append the frame dimension back to the audio codes
        input_ids = input_ids[None, ...]

        # send the input_ids to the correct device
        input_ids = input_ids.to(self.audio_encoder.device)

        if self.decoder.config.audio_channels == 1:
            output_values = self.audio_encoder.decode(
                input_ids,
                audio_scales=[None],
            ).audio_values
        else:
            codec_outputs_left = self.audio_encoder.decode(input_ids[:, :, ::2, :], audio_scales=[None])
            output_values_left = codec_outputs_left.audio_values

            codec_outputs_right = self.audio_encoder.decode(input_ids[:, :, 1::2, :], audio_scales=[None])
            output_values_right = codec_outputs_right.audio_values

            output_values = torch.cat([output_values_left, output_values_right], dim=1)

        audio_values = output_values[0, 0]
        return audio_values.cpu().float().numpy()

    def put(self, value):
        batch_size = value.shape[0] // self.decoder.num_codebooks
        if batch_size > 1:
            raise ValueError("MusicgenStreamer only supports batch size 1")

        if self.token_cache is None:
            self.token_cache = value
        else:
            self.token_cache = torch.concatenate([self.token_cache, value[:, None]], dim=-1)

        if self.token_cache.shape[-1] % self.play_steps == 0:
            audio_values = self.apply_delay_pattern_mask(self.token_cache)
            self.on_finalized_audio(audio_values[self.to_yield : -self.stride])
            self.to_yield = len(audio_values) - self.stride
            self.previous_len = len(audio_values)

    def end(self, stream_end=False, final_end=False):
        """Flushes any remaining cache and appends the stop symbol."""
        if self.token_cache is not None:
            audio_values = self.apply_delay_pattern_mask(self.token_cache)
        else:
            audio_values = np.zeros(self.to_yield)

        if final_end:
            self.on_finalized_audio(audio_values[self.to_yield :], stream_end=True)

    def on_finalized_audio(self, audio: np.ndarray, stream_end: bool = False):
        """Put the new audio in the queue.
        If the stream is ending, also put a stop signal in the queue."""
        self.audio_queue.put(audio, timeout=self.timeout)
        if stream_end:
            self.audio_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.audio_queue.get(timeout=self.timeout)
        if not isinstance(value, np.ndarray) and value == self.stop_signal:
            raise StopIteration()
        else:
            return value


sampling_rate = model.audio_encoder.config.sampling_rate
frame_rate = model.audio_encoder.config.frame_rate

target_dtype = np.int16
max_range = np.iinfo(target_dtype).max


def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=24000):
    # This creates a WAV header and then appends the frame input.
    # It should come first in a streaming WAV file; subsequent chunks should not include it
    # (otherwise you will hear artifacts at the start of each chunk).
    wav_buf = io.BytesIO()
    with wave.open(wav_buf, "wb") as vfout:
        vfout.setnchannels(channels)
        vfout.setsampwidth(sample_width)
        vfout.setframerate(sample_rate)
        vfout.writeframes(frame_input)

    wav_buf.seek(0)
    return wav_buf.read()


@spaces.GPU(duration=90)
def generate_audio(text_prompt, audio, seed=0):
    audio_length_in_s = 60
    max_new_tokens = int(frame_rate * audio_length_in_s)
    play_steps_in_s = 2.0
    play_steps = int(frame_rate * play_steps_in_s)

    if audio is not None:
        audio = torchaudio.load(audio)
        audio = convert_audio(audio[0], audio[1], demucs.samplerate, demucs.audio_channels)
        audio = apply_model(demucs, audio[None])

    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    if device != model.device:
        model.to(device)
        if device == "cuda:0":
            model.half()

    if audio is not None:
        inputs = processor(
            text=text_prompt,
            padding=True,
            return_tensors="pt",
            audio=audio,
            sampling_rate=demucs.samplerate,
        )
        if device == "cuda:0":
            inputs["input_features"] = inputs["input_features"].to(torch.float16)
    else:
        inputs = processor(
            text=text_prompt,
            padding=True,
            return_tensors="pt",
        )

    streamer = MusicgenStreamer(model, device=device, play_steps=play_steps, is_longform=True, longform_stride=15 * 32000)

    generation_kwargs = dict(
        **inputs.to(device),
        temperature=1.2,
        streamer=streamer,
        max_new_tokens=min(max_new_tokens, 1503),
        max_longform_generation_length=max_new_tokens,
    )
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    # send the WAV header first, declaring the model's sampling rate
    yield wave_header_chunk(sample_rate=sampling_rate)

    set_seed(seed)
    for new_audio in streamer:
        print(f"Sample of length: {round(new_audio.shape[0] / sampling_rate, 2)} seconds")
        new_audio = (new_audio * max_range).astype(np.int16)
        yield new_audio.tobytes()


demo = gr.Interface(
    fn=generate_audio,
    inputs=[
        gr.Text(label="Prompt", value="80s pop track with synth and instrumentals"),
        gr.Audio(type="filepath", label="Conditioning audio. Use this for melody-guided generation."),
        gr.Number(value=5, precision=0, step=1, minimum=0, label="Seed for random generations."),
    ],
    outputs=[
        gr.Audio(label="Generated Music", autoplay=True, interactive=False, streaming=True)
    ],
    examples=[
        ["An 80s driving pop song with heavy drums and synth pads in the background", None, 5],
        ["Bossa nova with guitars and synthesizer", "./assets/assets_bolero_ravel.mp3", 5],
        ["90s rock song with electric guitar and heavy drums", "./assets/assets_bach.mp3", 5],
        ["a light and cheerful EDM track, with syncopated drums, airy pads, and strong emotions, bpm: 130", None, 5],
        ["lofi slow bpm electro chill with organic samples", None, 5],
    ],
    title=title,
    description=description,
    allow_flagging=False,
    article=article,
    cache_examples=False,
)

demo.queue().launch(debug=True)
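# A minimal sketch (not part of the demo) of how the streamer could be driven outside of Gradio,
# assuming a GPU or CPU is available; the values below are illustrative and should be tuned to your hardware:
#
#     streamer = MusicgenStreamer(model, play_steps=100)
#     inputs = processor(text=["80s pop track with synth"], padding=True, return_tensors="pt")
#     thread = Thread(
#         target=model.generate,
#         kwargs=dict(**inputs, streamer=streamer, max_new_tokens=500, max_longform_generation_length=500),
#     )
#     thread.start()
#     for chunk in streamer:
#         ...  # play or save each numpy chunk of audio as soon as it is ready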