伏羲 (Fuxi) 1.0 Trial Version (Text-to-Image)

This program covers configuration loading, data loading and cleaning, text preprocessing, data augmentation, style transfer, feature extraction, color jitter, a text encoder, image/video/audio generators, a discriminator, model definition and loading, image/video/audio saving, a dataset class, model training, batch generation, a graphical user interface, project-directory listing, data encryption, model explainability, attention visualization, and automated tests. Together these pieces cover the complete train-generate-save-use workflow.

python
import os
import yaml
import torch
import torch.optim as optim
import torch.nn as nn
import torchvision.transforms as transforms
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModel
import random
import numpy as np
import logging
from tqdm import tqdm
from tensorboardX import SummaryWriter
import threading
import tkinter as tk
from tkinter import filedialog, messagebox, ttk
from PIL import Image, ImageTk
from cryptography.fernet import Fernet
import unittest
import matplotlib.pyplot as plt
import torchvision.models as models
import torch.nn.functional as F
from torch.cuda.amp import GradScaler, autocast
from io import BytesIO

# Default compute device; helpers such as style_transfer() and
# generate_images_batch() reference this module-level `device`.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Configuration loading
def load_config(config_path):
    """
    Load configuration parameters from a YAML file.
    :param config_path: path to the configuration file
    :return: dict of configuration parameters
    """
    try:
        with open(config_path, 'r', encoding='utf-8') as file:
            config = yaml.safe_load(file)
        return config
    except FileNotFoundError:
        logging.error(f"Config file {config_path} not found")
        raise
    except yaml.YAMLError as e:
        logging.error(f"Failed to parse config file: {e}")
        raise

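# A minimal config.yaml sketch, inferred from the keys this script reads
# (the values here are assumptions; adjust paths and model names to your setup):
#
#   device: cuda
#   model:
#     path: checkpoints/model.pt
#     text_encoder_model_name: bert-base-uncased
#     audio_generator_model_name: tacotron2   # placeholder, see AudioGenerator
#   data:
#     dataset_path: data/dataset.csv
#     input_file: data/prompts.txt
#     output_dir: outputs
#     image_output_dir: outputs/images
#     video_output_dir: outputs/videos
#     audio_output_dir: outputs/audio
#   training:
#     batch_size: 16
#     learning_rate: 0.0002
#     epochs: 50
#     log_dir: runs
#
# The GUI additionally expects numbered variants (model.path1..path3 and the
# matching text_encoder_model_name1..3 / audio_generator_model_name1..3).
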
# Data loading
def load_text_data(file_path):
    """
    Load prompts from a plain-text file, one per line.
    :param file_path: path to the text file
    :return: list of text lines
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            text_data = file.readlines()
        return [line.strip() for line in text_data]
    except FileNotFoundError:
        logging.error(f"Text file {file_path} not found")
        raise
    except IOError as e:
        logging.error(f"Error while reading text file: {e}")
        raise

# Data cleaning
def clean_data(data):
    """
    Clean a DataFrame by dropping rows with missing values and duplicates.
    :param data: input DataFrame
    :return: cleaned DataFrame
    """
    return data.dropna().drop_duplicates()

# Text preprocessing
def preprocess_text(text, tokenizer, max_length=64):
    """
    Tokenize text into model inputs. Fixed-length padding is used so that
    individual dataset items can be batched by the default DataLoader collate.
    :param text: input text (string or list of strings)
    :param tokenizer: HuggingFace tokenizer
    :param max_length: sequences are padded/truncated to this token length
    :return: dict of input tensors (input_ids, attention_mask, ...)
    """
    return tokenizer(text, return_tensors='pt', padding='max_length',
                     truncation=True, max_length=max_length)
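# e.g. preprocess_text(["a beautiful sunset"], tokenizer)['input_ids'] has
# shape (1, 64) with the default max_length, as does 'attention_mask'.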

# Data augmentation
def augment_data(image, mode, style_image=None):
    """
    Apply data augmentation to an image.
    :param image: input PIL image
    :param mode: augmentation mode ('train' or 'test')
    :param style_image: optional style image for style transfer
    :return: augmented image tensor
    """
    if mode == 'train':
        transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(10),
            transforms.RandomResizedCrop(64, scale=(0.8, 1.0)),
            transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    else:
        transform = transforms.Compose([
            transforms.Resize((64, 64)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    
    image = transform(image)
    if style_image is not None:
        # The style image must pass through the same tensor transform as the content image.
        style_image = transform(style_image)
        image = style_transfer(image, style_image)
    return image

# Style transfer
def style_transfer(image, style_image):
    """
    Gatys-style neural style transfer using VGG-19 features.
    :param image: content image tensor, (C, H, W) or (1, C, H, W)
    :param style_image: style image tensor with the same layout
    :return: stylized image tensor on the CPU
    """
    vgg = models.vgg19(pretrained=True).features
    vgg = vgg.to(device)
    vgg.eval()

    content_weight = 1
    style_weight = 1e6

    # VGG expects batched 4-D input.
    if image.dim() == 3:
        image = image.unsqueeze(0)
    if style_image.dim() == 3:
        style_image = style_image.unsqueeze(0)
    image = image.to(device)
    style_image = style_image.to(device)

    content_features = extract_features(image, vgg)
    style_features = extract_features(style_image, vgg)
    style_grams = {layer: gram_matrix(style_features[layer]) for layer in style_features}

    # Clone on the target device first so the optimized tensor is a leaf.
    target = image.clone().to(device).requires_grad_(True)

    optimizer = optim.LBFGS([target])
    run = [0]
    while run[0] <= 300:
        def closure():
            optimizer.zero_grad()
            target_features = extract_features(target, vgg)
            content_loss = F.mse_loss(target_features['conv_4'], content_features['conv_4']) * content_weight
            style_loss = 0
            for layer in style_grams:
                target_feature = target_features[layer]
                target_gram = gram_matrix(target_feature)
                style_gram = style_grams[layer]
                layer_style_loss = F.mse_loss(target_gram, style_gram) * style_weight
                style_loss += layer_style_loss
            total_loss = content_loss + style_loss
            total_loss.backward()
            run[0] += 1
            return total_loss

        optimizer.step(closure)

    return target.squeeze(0).cpu().detach()

# Feature extraction
def extract_features(image, model, layers=None):
    """
    Extract intermediate feature maps from a model.
    :param image: input image tensor (batched)
    :param model: feature-extraction model (e.g. VGG features)
    :param layers: mapping from module index to feature name
    :return: dict of named feature maps
    """
    if layers is None:
        layers = {'0': 'conv_1',
                  '5': 'conv_2',
                  '10': 'conv_3',
                  '19': 'conv_4',
                  '28': 'conv_5'}
    features = {}
    x = image
    for name, layer in model._modules.items():
        x = layer(x)
        if name in layers:
            features[layers[name]] = x
    return features

# Gram matrix
def gram_matrix(tensor):
    """
    Compute the (normalized) Gram matrix of a feature map.
    :param tensor: feature tensor of shape (B, C, H, W)
    :return: Gram matrix
    """
    b, d, h, w = tensor.size()
    tensor = tensor.view(b * d, h * w)
    gram = torch.mm(tensor, tensor.t())
    return gram.div(b * d * h * w)
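# For a single feature map of shape (1, C, H, W) this returns a (C, C) matrix of
# channel co-activations, divided by the total element count so that style
# losses stay comparable across layers of different sizes.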

# Color jitter
def color_jitter(image):
    """
    Apply a fixed brightness/contrast/saturation adjustment to an image.
    :param image: input image
    :return: color-jittered image
    """
    image = transforms.functional.adjust_saturation(image, 1.2)
    image = transforms.functional.adjust_contrast(image, 1.2)
    return transforms.functional.adjust_brightness(image, 1.2)

# Text encoder
class TextEncoder(nn.Module):
    """
    Text encoder built on a pretrained BERT-style model.
    """
    def __init__(self, model_name):
        super(TextEncoder, self).__init__()
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModel.from_pretrained(model_name)

    def forward(self, input_ids, attention_mask):
        """
        Encode tokenized text into one feature vector per example by
        mean-pooling the last hidden states.
        :param input_ids: token id tensor (B, L)
        :param attention_mask: attention mask tensor (B, L)
        :return: encoded feature vectors (B, hidden_size)
        """
        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)
        return outputs.last_hidden_state.mean(dim=1)

# Image generator
class ImageGenerator(nn.Module):
    """
    Image generator: a stack of transposed convolutions that upsamples a
    feature vector from 1x1 to a 64x64 RGB image.
    """
    def __init__(self, in_channels):
        super(ImageGenerator, self).__init__()
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(in_channels, 512, kernel_size=4, stride=1, padding=0),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2, padding=1),
            nn.Tanh()
        )

    def forward(self, x):
        """
        Generate an image from a text feature vector.
        :param x: input feature vector (B, in_channels)
        :return: generated image (B, 3, 64, 64) in [-1, 1]
        """
        x = x.view(-1, x.size(1), 1, 1)
        return self.decoder(x)
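# Shape check: ImageGenerator(768)(torch.randn(2, 768)) has shape (2, 3, 64, 64);
# the 1x1 input is upsampled 1 -> 4 -> 8 -> 16 -> 32 -> 64, and Tanh bounds the
# output to [-1, 1], matching the Normalize((0.5, ...), (0.5, ...)) applied to real images.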

# Video generator
class VideoGenerator(nn.Module):
    """
    Video generator: 3-D transposed convolutions that upsample a feature
    vector into a short RGB clip (frames, height and width all grow 1 -> 64).
    """
    def __init__(self, in_channels):
        super(VideoGenerator, self).__init__()
        self.decoder = nn.Sequential(
            nn.ConvTranspose3d(in_channels, 512, kernel_size=(4, 4, 4), stride=(1, 1, 1), padding=(0, 0, 0)),
            nn.BatchNorm3d(512),
            nn.ReLU(True),
            nn.ConvTranspose3d(512, 256, kernel_size=(4, 4, 4), stride=(2, 2, 2), padding=(1, 1, 1)),
            nn.BatchNorm3d(256),
            nn.ReLU(True),
            nn.ConvTranspose3d(256, 128, kernel_size=(4, 4, 4), stride=(2, 2, 2), padding=(1, 1, 1)),
            nn.BatchNorm3d(128),
            nn.ReLU(True),
            nn.ConvTranspose3d(128, 64, kernel_size=(4, 4, 4), stride=(2, 2, 2), padding=(1, 1, 1)),
            nn.BatchNorm3d(64),
            nn.ReLU(True),
            nn.ConvTranspose3d(64, 3, kernel_size=(4, 4, 4), stride=(2, 2, 2), padding=(1, 1, 1)),
            nn.Tanh()
        )

    def forward(self, x):
        x = x.view(-1, x.size(1), 1, 1, 1)
        return self.decoder(x)
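# Shape check: VideoGenerator(768)(torch.randn(1, 768)) has shape (1, 3, 64, 64, 64),
# i.e. 64 frames of 64x64 RGB in [-1, 1].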

# Audio generator
class AudioGenerator(nn.Module):
    """
    Audio generator wrapping a pretrained text-to-speech model.
    Note: `Tacotron2` is not provided by any import in this file; it is assumed
    to come from an external TTS package that exposes a from_pretrained-style
    constructor and whose model maps token ids to audio.
    """
    def __init__(self, model_name):
        super(AudioGenerator, self).__init__()
        self.model = Tacotron2.from_pretrained(model_name)

    def forward(self, input_ids):
        return self.model(input_ids)

# Discriminator
class Discriminator(nn.Module):
    """
    DCGAN-style discriminator that scores 64x64 images as real or fake.
    The final Sigmoid is omitted so the raw logits can be paired with
    BCEWithLogitsLoss, which (unlike BCELoss) is safe under autocast.
    """
    def __init__(self):
        super(Discriminator, self).__init__()
        self.main = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(512, 1, kernel_size=4, stride=1, padding=0)
        )

    def forward(self, x):
        """
        Score a batch of images.
        :param x: input images (B, 3, 64, 64)
        :return: real/fake logits of shape (B, 1)
        """
        return self.main(x).view(-1, 1)

# Model definition
class TextToMultimodalModel(nn.Module):
    """
    Text-to-multimodal generation model: one text encoder feeding an image
    generator, a video generator, and an audio generator.
    """
    def __init__(self, text_encoder_model_name, audio_generator_model_name):
        super(TextToMultimodalModel, self).__init__()
        self.text_encoder = TextEncoder(text_encoder_model_name)
        self.image_generator = ImageGenerator(768)  # 768 = BERT-base hidden size
        self.video_generator = VideoGenerator(768)
        self.audio_generator = AudioGenerator(audio_generator_model_name)

    def forward(self, input_ids, attention_mask):
        """
        Generate an image, a video and audio from tokenized text.
        :param input_ids: token id tensor (B, L)
        :param attention_mask: attention mask tensor (B, L)
        :return: generated image, video and audio
        """
        text_features = self.text_encoder(input_ids, attention_mask)
        image = self.image_generator(text_features)
        video = self.video_generator(text_features)
        # The assumed Tacotron2-style TTS model consumes token ids directly.
        audio = self.audio_generator(input_ids)
        return image, video, audio
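# Call sketch:
#   inputs = preprocess_text(["a beautiful sunset"], model.text_encoder.tokenizer)
#   image, video, audio = model(inputs['input_ids'], inputs['attention_mask'])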

# Model loading
def load_model(model_path, text_encoder_model_name, audio_generator_model_name):
    """
    Load a pretrained model from disk, or fall back to random initialization.
    :param model_path: path to the model checkpoint
    :param text_encoder_model_name: text encoder model name
    :param audio_generator_model_name: audio generator model name
    :return: the loaded model in eval mode
    """
    model = TextToMultimodalModel(text_encoder_model_name, audio_generator_model_name)
    if os.path.exists(model_path):
        model.load_state_dict(torch.load(model_path, map_location='cpu'))
    else:
        logging.warning(f"Model file {model_path} not found; using a randomly initialized model")
    model.eval()
    return model

# Image saving
def save_image(image, path, key=None):
    """
    Save a generated image, optionally encrypted.
    :param image: generated PIL image
    :param path: output path
    :param key: optional Fernet encryption key
    """
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    if key:
        # Serialize the PIL image to PNG bytes before encrypting.
        buffer = BytesIO()
        image.save(buffer, format='PNG')
        encrypted_image = encrypt_data(buffer.getvalue(), key)
        with open(path, 'wb') as f:
            f.write(encrypted_image)
    else:
        image.save(path)

# Video saving
def save_video(video, path, key=None):
    """
    Save a generated video, optionally encrypted.
    :param video: generated video as a float numpy array of shape (3, T, H, W) in [-1, 1]
    :param path: output path
    :param key: optional Fernet encryption key
    """
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    if key:
        encrypted_video = encrypt_data(video, key)
        with open(path, 'wb') as f:
            f.write(encrypted_video)
    else:
        import cv2
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        frames = video.transpose(1, 2, 3, 0)  # (3, T, H, W) -> (T, H, W, 3)
        height, width = frames.shape[1], frames.shape[2]
        out = cv2.VideoWriter(path, fourcc, 20.0, (width, height))
        for frame in frames:
            frame = (frame * 127.5 + 127.5).clip(0, 255).astype('uint8')
            out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        out.release()

# Audio saving
def save_audio(audio, path, key=None):
    """
    Save generated audio, optionally encrypted.
    :param audio: generated audio as a numpy array
    :param path: output path
    :param key: optional Fernet encryption key
    """
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    if key:
        encrypted_audio = encrypt_data(audio, key)
        with open(path, 'wb') as f:
            f.write(encrypted_audio)
    else:
        from scipy.io.wavfile import write
        # 22050 Hz is the sample rate Tacotron-style models commonly use.
        write(path, 22050, audio)

# Dataset class
class TextToImageDataset(Dataset):
    """
    Text-to-image dataset backed by a CSV file with 'text' and 'image_path' columns.
    """
    def __init__(self, csv_file, transform=None, mode='train'):
        self.data = pd.read_csv(csv_file)
        self.data = clean_data(self.data)
        self.transform = transform
        self.mode = mode
        self.tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        text = self.data.iloc[idx]['text']
        image_path = self.data.iloc[idx]['image_path']
        image = Image.open(image_path).convert('RGB')
        if self.transform:
            image = self.transform(image, self.mode)
        text_inputs = preprocess_text([text], self.tokenizer)
        # Drop the per-item batch dimension so the default collate can stack items.
        text_inputs = {k: v.squeeze(0) for k, v in text_inputs.items()}
        return text_inputs, image
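# With the fixed-length tokenization above, a DataLoader batch yields
# text_inputs['input_ids'] of shape (B, 64) and images of shape (B, 3, 64, 64).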

# Model training
def train_model(config):
    """
    Train the text-to-multimodal generation model with a GAN objective.
    :param config: configuration dict
    """
    # Augmentation is handled by augment_data, passed as the dataset transform.
    dataset = TextToImageDataset(config['data']['dataset_path'], transform=augment_data, mode='train')
    dataloader = DataLoader(dataset, batch_size=config['training']['batch_size'], shuffle=True)

    device = torch.device(config['device'])

    model = TextToMultimodalModel(config['model']['text_encoder_model_name'], config['model']['audio_generator_model_name']).to(device)
    discriminator = Discriminator().to(device)

    optimizer_g = optim.Adam(model.parameters(), lr=config['training']['learning_rate'])
    optimizer_d = optim.Adam(discriminator.parameters(), lr=config['training']['learning_rate'])

    # BCEWithLogitsLoss pairs with the logit-output discriminator and is autocast-safe.
    criterion_gan = nn.BCEWithLogitsLoss()
    criterion_l1 = nn.L1Loss()

    scheduler_g = optim.lr_scheduler.ReduceLROnPlateau(optimizer_g, 'min', patience=5)
    scheduler_d = optim.lr_scheduler.ReduceLROnPlateau(optimizer_d, 'min', patience=5)

    writer = SummaryWriter(log_dir=config['training']['log_dir'])

    best_loss = float('inf')
    patience = 0
    max_patience = 10
    scaler = GradScaler()

    for epoch in range(config['training']['epochs']):
        model.train()
        discriminator.train()
        running_loss_g = 0.0
        running_loss_d = 0.0

        pbar = tqdm(dataloader, desc=f"Epoch {epoch + 1}")
        for i, (text_inputs, images) in enumerate(pbar):
            images = images.to(device)
            real_labels = torch.ones(images.size(0), 1).to(device)
            fake_labels = torch.zeros(images.size(0), 1).to(device)

            # Train the discriminator
            optimizer_d.zero_grad()
            with autocast():
                real_outputs = discriminator(images)
                d_loss_real = criterion_gan(real_outputs, real_labels)

                generated_images, _, _ = model(text_inputs['input_ids'].to(device), text_inputs['attention_mask'].to(device))
                fake_outputs = discriminator(generated_images.detach())
                d_loss_fake = criterion_gan(fake_outputs, fake_labels)

                d_loss = (d_loss_real + d_loss_fake) / 2

            scaler.scale(d_loss).backward()
            scaler.step(optimizer_d)

            # Train the generator
            optimizer_g.zero_grad()
            with autocast():
                generated_images, generated_videos, generated_audios = model(text_inputs['input_ids'].to(device), text_inputs['attention_mask'].to(device))
                g_outputs = discriminator(generated_images)
                g_loss_gan = criterion_gan(g_outputs, real_labels)
                g_loss_l1 = criterion_l1(generated_images, images)
                # The dataset provides no ground-truth videos or audio, so only the
                # adversarial and image-reconstruction terms are trained here.
                g_loss = g_loss_gan + 100 * g_loss_l1

            scaler.scale(g_loss).backward()
            scaler.step(optimizer_g)
            # One scaler update per iteration, after both optimizer steps.
            scaler.update()

            running_loss_g += g_loss.item()
            running_loss_d += d_loss.item()

            pbar.set_postfix({'G Loss': g_loss.item(), 'D Loss': d_loss.item()})

        avg_loss_g = running_loss_g / len(dataloader)
        avg_loss_d = running_loss_d / len(dataloader)

        writer.add_scalar('Generator Loss', avg_loss_g, epoch)
        writer.add_scalar('Discriminator Loss', avg_loss_d, epoch)
        writer.add_scalar('Learning Rate (G)', optimizer_g.param_groups[0]['lr'], epoch)
        writer.add_scalar('Learning Rate (D)', optimizer_d.param_groups[0]['lr'], epoch)

        scheduler_g.step(avg_loss_g)
        scheduler_d.step(avg_loss_d)

        if avg_loss_g < best_loss:
            best_loss = avg_loss_g
            torch.save(model.state_dict(), config['model']['path'])
            patience = 0
        else:
            patience += 1
            if patience >= max_patience:
                logging.info(f"Early stopping at epoch {epoch + 1}")
                break

        logging.info(f"Epoch {epoch + 1}, Generator Loss: {avg_loss_g}, Discriminator Loss: {avg_loss_d}")

    writer.close()

# Image generation
def generate_images_batch(model, text_data, output_dir, key=None):
    """
    Generate and save multimodal outputs for a batch of prompts.
    :param model: the generation model
    :param text_data: list of input prompts
    :param output_dir: output directory
    :param key: optional encryption key
    """
    model.eval()
    with torch.no_grad():
        for text in text_data:
            input_tensor = preprocess_text([text], model.text_encoder.tokenizer)
            input_tensor = {k: v.to(device) for k, v in input_tensor.items()}
            image, video, audio = model(input_tensor['input_ids'], input_tensor['attention_mask'])
            image = image.squeeze(0).detach().cpu().numpy()
            image = (image * 127.5 + 127.5).astype('uint8')
            image = Image.fromarray(image.transpose(1, 2, 0))

            # Save the image (the prompt doubles as the file name)
            save_image(image, f"{output_dir}/{text}.png", key)

            # Save the video; save_video handles the [-1, 1] -> uint8 conversion
            video = video.squeeze(0).detach().cpu().numpy()
            save_video(video, f"{output_dir}/{text}.mp4", key)

            # Save the audio
            audio = audio.squeeze(0).detach().cpu().numpy()
            save_audio(audio, f"{output_dir}/{text}.wav", key)

# Graphical user interface
class TextToImageGUI:
    """
    GUI for text-to-multimodal generation.
    """
    def __init__(self, root):
        self.root = root
        self.root.title("Text-to-Multimodal Generation")
        self.config = load_config('config.yaml')
        self.device = torch.device(self.config['device'])

        self.models = {
            'Model 1': load_model(self.config['model']['path1'], self.config['model']['text_encoder_model_name1'], self.config['model']['audio_generator_model_name1']),
            'Model 2': load_model(self.config['model']['path2'], self.config['model']['text_encoder_model_name2'], self.config['model']['audio_generator_model_name2']),
            'Model 3': load_model(self.config['model']['path3'], self.config['model']['text_encoder_model_name3'], self.config['model']['audio_generator_model_name3'])
        }

        self.selected_model = tk.StringVar(value='Model 1')
        self.model_menu = tk.OptionMenu(root, self.selected_model, *self.models.keys(), command=self.change_model)
        self.model_menu.pack(pady=10)
        # Activate the default selection so self.model exists before any menu change.
        self.change_model(self.selected_model.get())

        self.text_input = tk.Text(root, height=10, width=50)
        self.text_input.pack(pady=10)

        self.train_button = tk.Button(root, text="Train Model", command=self.train_model)
        self.train_button.pack(pady=10)

        self.epochs_label = tk.Label(root, text="Training epochs:")
        self.epochs_label.pack(pady=5)
        self.epochs_entry = tk.Entry(root)
        self.epochs_entry.insert(0, str(self.config['training']['epochs']))
        self.epochs_entry.pack(pady=5)

        self.generate_button = tk.Button(root, text="Generate Multimodal Data", command=self.generate_multimodal)
        self.generate_button.pack(pady=10)

        self.image_label = tk.Label(root)
        self.image_label.pack(pady=10)

        self.progress_var = tk.IntVar()
        self.progress_bar = ttk.Progressbar(root, variable=self.progress_var, maximum=100)
        self.progress_bar.pack(pady=10)

        self.history = []

    def change_model(self, model_name):
        self.model = self.models[model_name].to(self.device)

    def train_model(self):
        """
        Start model training in a background thread.
        """
        try:
            epochs = int(self.epochs_entry.get())
            self.config['training']['epochs'] = epochs
            threading.Thread(target=self._train_model_thread).start()
        except ValueError:
            messagebox.showerror("Error", "Please enter a valid number of epochs")

    def _train_model_thread(self):
        """
        Worker thread that runs training and reloads the trained weights.
        """
        try:
            train_model(self.config)
            self.model = load_model(self.config['model']['path'], self.config['model']['text_encoder_model_name'], self.config['model']['audio_generator_model_name'])
            self.model.to(self.device)
            messagebox.showinfo("Success", "Model training complete")
        except Exception as e:
            messagebox.showerror("Error", str(e))

    def generate_multimodal(self):
        """
        Generate multimodal outputs from the text in the input box.
        """
        text = self.text_input.get("1.0", tk.END).strip()
        if not text:
            messagebox.showwarning("Warning", "Please enter some text")
            return

        self.model.eval()
        with torch.no_grad():
            input_tensor = preprocess_text([text], self.model.text_encoder.tokenizer)
            input_tensor = {k: v.to(self.device) for k, v in input_tensor.items()}
            image, video, audio = self.model(input_tensor['input_ids'], input_tensor['attention_mask'])

            image = image.squeeze(0).detach().cpu().numpy()
            image = (image * 127.5 + 127.5).astype('uint8')
            image = Image.fromarray(image.transpose(1, 2, 0))

            # Display the image
            img_tk = ImageTk.PhotoImage(image)
            self.image_label.config(image=img_tk)
            self.image_label.image = img_tk

            # Convert the remaining outputs to numpy before saving
            video = video.squeeze(0).detach().cpu().numpy()
            audio = audio.squeeze(0).detach().cpu().numpy()

            # Save all outputs
            save_image(image, f"{self.config['data']['image_output_dir']}/{text}.png")
            save_video(video, f"{self.config['data']['video_output_dir']}/{text}.mp4")
            save_audio(audio, f"{self.config['data']['audio_output_dir']}/{text}.wav")

            self.history.append((text, image, video, audio))
            messagebox.showinfo("Success", "Multimodal data generated and saved")

# List the project directory and all files
def list_files(startpath):
    """
    Print the project directory tree.
    :param startpath: project root directory
    """
    for root, dirs, files in os.walk(startpath):
        level = root.replace(startpath, '').count(os.sep)
        indent = ' ' * 4 * level
        print('{}{}/'.format(indent, os.path.basename(root)))
        subindent = ' ' * 4 * (level + 1)
        for f in files:
            print('{}{}'.format(subindent, f))

# Data encryption
def encrypt_data(data, key):
    """Encrypt text, bytes, or a numpy array with a Fernet key."""
    fernet = Fernet(key)
    if isinstance(data, str):
        data = data.encode()
    elif isinstance(data, np.ndarray):
        data = data.tobytes()
    return fernet.encrypt(data)

def decrypt_data(encrypted, key):
    """Decrypt a Fernet token; returns the decrypted text."""
    fernet = Fernet(key)
    return fernet.decrypt(encrypted).decode()
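# Usage sketch:
#   key = Fernet.generate_key()
#   token = encrypt_data("hello", key)
#   assert decrypt_data(token, key) == "hello"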

# Model explainability
def explain_image(model, text, device):
    model.eval()
    with torch.no_grad():
        input_tensor = preprocess_text([text], model.text_encoder.tokenizer)
        input_tensor = {k: v.to(device) for k, v in input_tensor.items()}
        image, video, audio = model(input_tensor['input_ids'], input_tensor['attention_mask'])
        image = image.squeeze(0).detach().cpu().numpy()
        image = (image * 127.5 + 127.5).astype('uint8')
        image = Image.fromarray(image.transpose(1, 2, 0))

        # Describe the generation pipeline
        explanation = "The image is generated as follows:\n"
        explanation += "1. The text is encoded with BERT.\n"
        explanation += "2. The encoded text features are passed to the image generator.\n"
        explanation += "3. The generated image is post-processed into a standard format."

        return image, explanation

# Attention visualization
def visualize_attention(model, text, device):
    model.eval()
    with torch.no_grad():
        input_tensor = preprocess_text([text], model.text_encoder.tokenizer)
        input_tensor = {k: v.to(device) for k, v in input_tensor.items()}
        # Attention weights are only returned when explicitly requested.
        outputs = model.text_encoder.model(**input_tensor, output_attentions=True)
        # Last layer is (heads, L, L) after squeezing the batch dim; average over heads.
        attention = outputs.attentions[-1].squeeze(0).mean(dim=0).cpu().numpy()

        # Use the actual input tokens (including special tokens and padding)
        # so the labels line up with the attention matrix.
        tokens = model.text_encoder.tokenizer.convert_ids_to_tokens(input_tensor['input_ids'][0])
        fig, ax = plt.subplots()
        cax = ax.matshow(attention, cmap='viridis')
        fig.colorbar(cax)

        ax.set_xticks(range(len(tokens)))
        ax.set_yticks(range(len(tokens)))
        ax.set_xticklabels(tokens, rotation=90)
        ax.set_yticklabels(tokens)

        plt.show()

# Automated tests
class TestTextToImageModel(unittest.TestCase):
    def setUp(self):
        self.config = load_config('config.yaml')
        self.device = torch.device(self.config['device'])
        self.model = load_model(self.config['model']['path'], self.config['model']['text_encoder_model_name'], self.config['model']['audio_generator_model_name']).to(self.device)

    def test_generate_image(self):
        text = "a beautiful sunset"
        input_tensor = preprocess_text([text], self.model.text_encoder.tokenizer)
        input_tensor = {k: v.to(self.device) for k, v in input_tensor.items()}
        image, video, audio = self.model(input_tensor['input_ids'], input_tensor['attention_mask'])
        self.assertIsNotNone(image)
        self.assertIsNotNone(video)
        self.assertIsNotNone(audio)

    def test_save_image(self):
        text = "a beautiful sunset"
        input_tensor = preprocess_text([text], self.model.text_encoder.tokenizer)
        input_tensor = {k: v.to(self.device) for k, v in input_tensor.items()}
        image, video, audio = self.model(input_tensor['input_ids'], input_tensor['attention_mask'])
        # Convert model outputs to the formats the save helpers expect.
        image = image.squeeze(0).detach().cpu().numpy()
        image = Image.fromarray(((image * 127.5 + 127.5).astype('uint8')).transpose(1, 2, 0))
        video = video.squeeze(0).detach().cpu().numpy()
        audio = audio.squeeze(0).detach().cpu().numpy()
        save_image(image, "test_output.png")
        save_video(video, "test_output.mp4")
        save_audio(audio, "test_output.wav")
        self.assertTrue(os.path.exists("test_output.png"))
        self.assertTrue(os.path.exists("test_output.mp4"))
        self.assertTrue(os.path.exists("test_output.wav"))

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

    config = load_config('config.yaml')

    # Print the project directory and all files
    project_root = os.path.dirname(os.path.abspath(__file__))
    print("Project directory and files:")
    list_files(project_root)

    # Check the dataset path
    if not os.path.exists(config['data']['dataset_path']):
        raise FileNotFoundError(f"Dataset path {config['data']['dataset_path']} does not exist")

    # Load the model
    device = torch.device(config['device'])
    model = load_model(config['model']['path'], config['model']['text_encoder_model_name'], config['model']['audio_generator_model_name']).to(device)

    # Load the text data
    text_data = load_text_data(config['data']['input_file'])

    # Generate multimodal outputs
    generate_images_batch(model, text_data, config['data']['output_dir'])

    # Launch the graphical user interface
    root = tk.Tk()
    app = TextToImageGUI(root)
    root.mainloop()

    # The automated tests can be run separately with:
    #   python -m unittest text_to_multimodal

Save the code above as text_to_multimodal.py, then run python text_to_multimodal.py from the command line to start the program. Make sure your environment has all required dependencies installed, such as torch, transformers, Pillow, matplotlib, opencv-python, scipy, cryptography, tensorboardX, tqdm, and tkinter (the code also imports pandas and PyYAML).
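
A one-line install covering the pip-installable dependencies (tkinter ships with most Python distributions and is not installed via pip):

pip install torch transformers pillow matplotlib opencv-python scipy cryptography tensorboardX tqdm pandas pyyaml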
