|
import os
import posixpath
import time
from typing import Sequence

import jax
from absl import app
from google.cloud import storage

from maxdiffusion import pyconfig, max_logging, max_utils
from maxdiffusion.pipelines.wan.wan_pipeline import WanPipeline
from maxdiffusion.utils import export_to_video
| 24 | + |
def upload_video_to_gcs(output_dir: str, video_path: str):
  """Uploads a local video file to a Google Cloud Storage bucket.

  Args:
    output_dir: Destination prefix of the form ``gs://bucket[/folder]``.
    video_path: Filename of the video, relative to the current directory.

  Best-effort: any failure (auth, network, missing file) is logged and
  swallowed so an upload hiccup does not abort the generation run.
  """
  try:
    path_without_scheme = output_dir.removeprefix("gs://")
    parts = path_without_scheme.split('/', 1)
    bucket_name = parts[0]
    folder_name = parts[1] if len(parts) > 1 else ''

    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)

    source_file_path = f"./{video_path}"
    # GCS object names always use '/'. os.path.join would emit '\' on
    # Windows and corrupt the blob name, so join with posixpath instead.
    destination_blob_name = posixpath.join(folder_name, "videos", video_path)

    blob = bucket.blob(destination_blob_name)

    max_logging.log(f"Uploading {source_file_path} to {bucket_name}/{destination_blob_name}...")
    blob.upload_from_filename(source_file_path)
    max_logging.log(f"Upload complete {source_file_path}.")

  except Exception as e:
    max_logging.log(f"An error occurred: {e}")
| 49 | + |
def delete_file(file_path: str):
  """Deletes a local file, logging the outcome.

  Uses EAFP (attempt the remove, handle the exception) rather than an
  exists-then-remove check, which is race-prone if another process deletes
  the file between the check and the removal.
  """
  try:
    os.remove(file_path)
    max_logging.log(f"Successfully deleted file: {file_path}")
  except FileNotFoundError:
    # Matches the original "missing file" message; must precede OSError
    # since FileNotFoundError is a subclass of it.
    max_logging.log(f"The file '{file_path}' does not exist.")
  except OSError as e:
    max_logging.log(f"Error deleting file '{file_path}': {e}")
22 | 59 |
|
23 | 60 | jax.config.update("jax_use_shardy_partitioner", True) |
24 | 61 |
|
def inference_generate_video(config, pipeline, filename_prefix=""):
  """Runs one text-to-video generation pass and exports the results.

  Args:
    config: Run configuration; reads prompt/negative_prompt, batch size,
      video geometry (height/width/num_frames), sampling settings, seed,
      fps, and output_dir.
    pipeline: A constructed WanPipeline used to generate the videos.
    filename_prefix: Optional prefix for the exported .mp4 filenames.

  Videos are written locally as
  ``{filename_prefix}wan_output_{seed}_{i}.mp4``; when output_dir is a
  ``gs://`` path each video is uploaded to GCS and the local copy removed.
  """
  s0 = time.perf_counter()
  prompt = [config.prompt] * config.global_batch_size_to_train_on
  negative_prompt = [config.negative_prompt] * config.global_batch_size_to_train_on

  max_logging.log(
      f"Num steps: {config.num_inference_steps}, height: {config.height}, width: {config.width}, frames: {config.num_frames}, video: {filename_prefix}"
  )

  videos = pipeline(
      prompt=prompt,
      negative_prompt=negative_prompt,
      height=config.height,
      width=config.width,
      num_frames=config.num_frames,
      num_inference_steps=config.num_inference_steps,
      guidance_scale=config.guidance_scale,
  )

  # NOTE: elapsed time includes JIT compilation on the first invocation.
  max_logging.log(f"video {filename_prefix}, compile time: {(time.perf_counter() - s0)}")
  for i, video in enumerate(videos):
    video_path = f"{filename_prefix}wan_output_{config.seed}_{i}.mp4"
    export_to_video(video, video_path, fps=config.fps)
    if config.output_dir.startswith("gs://"):
      upload_video_to_gcs(config.output_dir, video_path)
      # Delete the local copy after upload to avoid accumulating videos on disk.
      delete_file(f"./{video_path}")
25 | 90 |
|
26 | 91 | def run(config, pipeline=None, filename_prefix=""): |
27 | 92 | print("seed: ", config.seed) |
@@ -57,6 +122,8 @@ def run(config, pipeline=None, filename_prefix=""): |
57 | 122 | video_path = f"{filename_prefix}wan_output_{config.seed}_{i}.mp4" |
58 | 123 | export_to_video(videos[i], video_path, fps=config.fps) |
59 | 124 | saved_video_path.append(video_path) |
| 125 | + if config.output_dir.startswith("gs://"): |
| 126 | + upload_video_to_gcs(config.output_dir, video_path) |
60 | 127 |
|
61 | 128 | s0 = time.perf_counter() |
62 | 129 | videos = pipeline( |
|
0 commit comments