Lesion Detection Without a Doctor: Implementing U-Net Segmentation in JupyterLab

From Doctors to AI

In the reading room of a cancer hospital, Dr. Wang stares intently at CT images. From hundreds of slices she must outline, by hand, the tumor regions in a patient's lungs. The work is not only time-consuming, it also demands sustained concentration: a single subtle miss can change the choice of treatment plan. Scenes like this play out every day in hospitals around the world, and it is the arrival of AI that has begun to change them.

Medical image segmentation uses deep learning to automatically identify specific anatomical structures or lesion regions in an image. Among segmentation models, U-Net has become the standard tool in biomedical imaging thanks to its strong performance. In this walkthrough we will build and apply such an "AI doctor's assistant" end to end in a JupyterLab environment.

Environment Setup and Data Preparation

2.1 Environment Setup

Create a new Python notebook in JupyterLab and run the following command to install the dependencies:

!pip install tensorflow keras numpy matplotlib opencv-python scikit-image
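
To confirm that TensorFlow installed correctly and to see whether a GPU is visible, a quick sanity check in the same notebook can look like this (the exact version numbers will depend on your environment):

import tensorflow as tf

print("TensorFlow version:", tf.__version__)
print("GPUs visible:", tf.config.list_physical_devices('GPU'))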

2.2 Data Acquisition and Preprocessing

We use the public ISIC skin-lesion dataset for this demonstration. It contains dermoscopy images together with the corresponding lesion masks:

Download links are below. (PS: the real dataset is too heavy for my server, so this article uses simulated data; readers with enough compute are encouraged to try the real data.)

isic-challenge-data.s3.amazonaws.com/2016/ISBI20...

challenge.isic-archive.com/data/

import numpy as np
import matplotlib.pyplot as plt
from skimage import io, transform
from sklearn.model_selection import train_test_split
import zipfile

# Download the sample dataset (replace with the real data in practice)
!wget https://isic-challenge-data.s3.amazonaws.com/2016/ISBI2016_ISIC_Part2B_Training_Data.zip

# Unzip the dataset
with zipfile.ZipFile('ISBI2016_ISIC_Part2B_Training_Data.zip', 'r') as zip_ref:
    zip_ref.extractall('dataset')

# Load the data
def load_dataset():
    images = []
    masks = []

    # Generate simulated data (load the real data in practice)
    for i in range(200):
        img = np.random.rand(256, 256)  # simulated grayscale image
        mask = np.zeros((256, 256))
        mask[50:150, 50:150] = 1  # simulated square lesion
        images.append(img)
        masks.append(mask)

    return np.array(images), np.array(masks)

X, y = load_dataset()

# Visualize a sample
plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
plt.imshow(X[0], cmap='gray')
plt.title('Input image')
plt.subplot(1,2,2)
plt.imshow(y[0], cmap='gray')
plt.title('Ground-truth mask')
plt.show()
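
As a reference for readers who do download the real archive, here is a minimal sketch of how the ISIC images and masks could be loaded from the extracted dataset folder. The folder layout, the .jpg / _Segmentation.png naming pattern, and the load_isic_dataset name are assumptions about the ISIC 2016 release and may need adjusting to match the actual contents:

import os
from skimage import color

def load_isic_dataset(image_dir='dataset', mask_dir='dataset', size=(256, 256)):
    """Hypothetical loader: pairs each JPEG image with its *_Segmentation.png mask."""
    images, masks = [], []
    for fname in sorted(os.listdir(image_dir)):
        if not fname.lower().endswith('.jpg'):
            continue
        mask_path = os.path.join(mask_dir, fname.rsplit('.', 1)[0] + '_Segmentation.png')
        if not os.path.exists(mask_path):
            continue  # skip images without a matching mask
        img = color.rgb2gray(io.imread(os.path.join(image_dir, fname)))
        mask = io.imread(mask_path, as_gray=True)
        images.append(transform.resize(img, size, anti_aliasing=True))
        masks.append((transform.resize(mask, size) > 0.5).astype(np.float32))
    return np.array(images), np.array(masks)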

Building the U-Net

3.1 Architecture Overview

The core idea of U-Net is an encoder-decoder structure combined with skip connections, which captures local detail and global context at the same time, much like a doctor who first surveys the whole CT slice and then zooms in on the lesion.

3.2 Implementation

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate

def build_unet(input_shape=(256, 256, 1)):
    # Input layer
    inputs = Input(input_shape)
    
    # Encoder (downsampling path)
    c1 = Conv2D(64, 3, activation='relu', padding='same')(inputs)
    c1 = Conv2D(64, 3, activation='relu', padding='same')(c1)
    p1 = MaxPooling2D((2, 2))(c1)
    
    c2 = Conv2D(128, 3, activation='relu', padding='same')(p1)
    c2 = Conv2D(128, 3, activation='relu', padding='same')(c2)
    p2 = MaxPooling2D((2, 2))(c2)
    
    # Bottleneck
    c3 = Conv2D(256, 3, activation='relu', padding='same')(p2)
    c3 = Conv2D(256, 3, activation='relu', padding='same')(c3)
    
    # Decoder (upsampling path)
    u4 = UpSampling2D((2, 2))(c3)
    u4 = concatenate([u4, c2])
    c4 = Conv2D(128, 3, activation='relu', padding='same')(u4)
    c4 = Conv2D(128, 3, activation='relu', padding='same')(c4)
    
    u5 = UpSampling2D((2, 2))(c4)
    u5 = concatenate([u5, c1])
    c5 = Conv2D(64, 3, activation='relu', padding='same')(u5)
    c5 = Conv2D(64, 3, activation='relu', padding='same')(c5)
    
    # Output layer
    outputs = Conv2D(1, 1, activation='sigmoid')(c5)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model

model = build_unet()
model.summary()
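
Plain pixel accuracy can look deceptively high when the lesion covers only a small part of the image, so segmentation projects usually also track the Dice coefficient. Below is a minimal sketch of a Dice metric that could be passed to model.compile; the dice_coefficient name and the smooth term (which avoids division by zero) are our own additions, not part of Keras:

import tensorflow as tf

def dice_coefficient(y_true, y_pred, smooth=1.0):
    # Dice = 2*|A ∩ B| / (|A| + |B|), computed over the flattened masks
    y_true_f = tf.reshape(tf.cast(y_true, tf.float32), [-1])
    y_pred_f = tf.reshape(y_pred, [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)

# Optional: recompile so the metric is reported during training
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy', dice_coefficient])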

Training Your AI Doctor

4.1 Data Preprocessing

# Split into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

# Add a channel dimension
X_train = X_train[..., np.newaxis]
X_val = X_val[..., np.newaxis]
y_train = y_train[..., np.newaxis]
y_val = y_val[..., np.newaxis]

# Data augmentation configuration
from tensorflow.keras.preprocessing.image import ImageDataGenerator

data_gen_args = dict(
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

image_generator = ImageDataGenerator(**data_gen_args)
mask_generator = ImageDataGenerator(**data_gen_args)

# Create paired image/mask generators
def create_generator(image_generator, mask_generator, X, y, batch_size):
    seed = 42
    image_flow = image_generator.flow(X, batch_size=batch_size, seed=seed)
    mask_flow = mask_generator.flow(y, batch_size=batch_size, seed=seed)
    while True:
        yield (next(image_flow), next(mask_flow))  # use built-in next(); the .next() method is unavailable in newer Keras versions

train_generator = create_generator(image_generator, mask_generator, X_train, y_train, 8)
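
Before starting a long training run, it is worth pulling a single batch from the generator to check that the images and masks still line up after augmentation (this is only a sanity check; the expected shapes follow from the batch size of 8 used above):

sample_images, sample_masks = next(train_generator)
print(sample_images.shape, sample_masks.shape)  # expected: (8, 256, 256, 1) and (8, 256, 256, 1)
print(sample_masks.min(), sample_masks.max())   # mask values should stay roughly within [0, 1]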

4.2 Model Training

from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping

callbacks = [
    EarlyStopping(patience=15, restore_best_weights=True),
    ModelCheckpoint('best_model.h5', save_best_only=True)
]

history = model.fit(
    train_generator,
    steps_per_epoch=len(X_train)//8,
    epochs=100,
    validation_data=(X_val, y_val),
    callbacks=callbacks
)
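
EarlyStopping with restore_best_weights=True already puts the best weights back into the model in memory; if you come back in a later session, the saved checkpoint can be reloaded instead. A small usage note (if you compiled with a custom metric such as the Dice sketch above, it must also be passed via custom_objects):

from tensorflow.keras.models import load_model

# Reload the best checkpoint saved by ModelCheckpoint
model = load_model('best_model.h5')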

Evaluation

5.1 Visualizing the Training Process

plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.plot(history.history['loss'], label='Training loss')
plt.plot(history.history['val_loss'], label='Validation loss')
plt.title('Loss curves')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['accuracy'], label='Training accuracy')
plt.plot(history.history['val_accuracy'], label='Validation accuracy')
plt.title('Accuracy curves')
plt.legend()
plt.show()

5.2 Segmentation Results

def show_predictions(model, X, y, num=3):
    preds = model.predict(X[:num])
    
    plt.figure(figsize=(15, 5*num))
    for i in range(num):
        plt.subplot(num,3,i*3+1)
        plt.imshow(X[i].squeeze(), cmap='gray')
        plt.title('Input image')
        
        plt.subplot(num,3,i*3+2)
        plt.imshow(y[i].squeeze(), cmap='gray')
        plt.title('Ground truth')
        
        plt.subplot(num,3,i*3+3)
        plt.imshow(preds[i].squeeze() > 0.5, cmap='gray')  # threshold the probabilities
        plt.title('Prediction')
    
    plt.tight_layout()
    plt.show()

show_predictions(model, X_val, y_val)
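
Visual inspection is a good first check, but a single quantitative score over the whole validation set is easier to compare between runs. A simple NumPy sketch of Dice and IoU on thresholded predictions (threshold 0.5, matching the plot above; evaluate_masks is our own helper name) might look like this:

import numpy as np

def evaluate_masks(model, X, y, threshold=0.5):
    preds = (model.predict(X) > threshold).astype(np.float32)
    y = y.astype(np.float32)
    intersection = np.sum(preds * y)
    union = np.sum(np.clip(preds + y, 0, 1))
    dice = 2 * intersection / (np.sum(preds) + np.sum(y) + 1e-7)
    iou = intersection / (union + 1e-7)
    print(f"Validation Dice: {dice:.3f}, IoU: {iou:.3f}")

evaluate_masks(model, X_val, y_val)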

Looking Ahead

This tutorial has walked through a complete medical image segmentation project, but AI in healthcare reaches far beyond it: multi-modal fusion, 3D segmentation, real-time segmentation, disease prediction, and more.

As the technology advances, future AI doctors will not only detect lesions but also combine clinical data to offer diagnostic and treatment suggestions. Every developer can take part in this medical revolution. Starting from these lines of code, let us write the next chapter of smart healthcare together.

(The complete code example is below:)

import numpy as np
import matplotlib.pyplot as plt
from skimage import io, transform
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate

# Load the dataset (simulated data generated for this example)
def load_dataset():
    images = []
    masks = []
    
    # Generate simulated data (load the real data in practice)
    for i in range(200):
        img = np.random.rand(256, 256)  # simulated grayscale image
        mask = np.zeros((256, 256))
        mask[50:150, 50:150] = 1  # simulated square lesion
        images.append(img)
        masks.append(mask)
    
    return np.array(images), np.array(masks)

X, y = load_dataset()

# Split into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

# Make sure the arrays are 4-D: (samples, height, width, channels)
X_train = np.squeeze(X_train)[..., np.newaxis]  # add the channel axis
X_val = np.squeeze(X_val)[..., np.newaxis]      # add the channel axis
y_train = np.squeeze(y_train)[..., np.newaxis]  # add the channel axis
y_val = np.squeeze(y_val)[..., np.newaxis]      # add the channel axis

# Check the array shapes
print("X_train shape:", X_train.shape)  # expected (samples, 256, 256, 1)
print("y_train shape:", y_train.shape)  # expected (samples, 256, 256, 1)

# Data augmentation configuration
data_gen_args = dict(
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

image_generator = ImageDataGenerator(**data_gen_args)
mask_generator = ImageDataGenerator(**data_gen_args)

# Create paired image/mask generators
def create_generator(image_generator, mask_generator, X, y, batch_size):
    seed = 42
    image_flow = image_generator.flow(X, batch_size=batch_size, seed=seed)
    mask_flow = mask_generator.flow(y, batch_size=batch_size, seed=seed)
    while True:
        # use the built-in next() instead of the .next() method
        yield (next(image_flow), next(mask_flow))

# U-Net model definition
def build_unet(input_shape=(256, 256, 1)):
    inputs = Input(input_shape)
    
    # Encoder (downsampling path)
    c1 = Conv2D(64, 3, activation='relu', padding='same')(inputs)
    c1 = Conv2D(64, 3, activation='relu', padding='same')(c1)
    p1 = MaxPooling2D((2, 2))(c1)
    
    c2 = Conv2D(128, 3, activation='relu', padding='same')(p1)
    c2 = Conv2D(128, 3, activation='relu', padding='same')(c2)
    p2 = MaxPooling2D((2, 2))(c2)
    
    # Bottleneck
    c3 = Conv2D(256, 3, activation='relu', padding='same')(p2)
    c3 = Conv2D(256, 3, activation='relu', padding='same')(c3)
    
    # Decoder (upsampling path)
    u4 = UpSampling2D((2, 2))(c3)
    u4 = concatenate([u4, c2])
    c4 = Conv2D(128, 3, activation='relu', padding='same')(u4)
    c4 = Conv2D(128, 3, activation='relu', padding='same')(c4)
    
    u5 = UpSampling2D((2, 2))(c4)
    u5 = concatenate([u5, c1])
    c5 = Conv2D(64, 3, activation='relu', padding='same')(u5)
    c5 = Conv2D(64, 3, activation='relu', padding='same')(c5)
    
    # Output layer
    outputs = Conv2D(1, 1, activation='sigmoid')(c5)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model

# Build and inspect the model
model = build_unet()
model.summary()

# Training callbacks
callbacks = [
    EarlyStopping(patience=15, restore_best_weights=True),
    ModelCheckpoint('best_model.h5', save_best_only=True)
]

# Create the training data generator
train_generator = create_generator(image_generator, mask_generator, X_train, y_train, batch_size=8)

# Train the model
history = model.fit(
    train_generator,
    steps_per_epoch=len(X_train) // 8,
    epochs=100,
    validation_data=(X_val, y_val),
    callbacks=callbacks
)

# Visualize the training history
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.plot(history.history['loss'], label='Training loss')
plt.plot(history.history['val_loss'], label='Validation loss')
plt.title('Loss curves')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['accuracy'], label='Training accuracy')
plt.plot(history.history['val_accuracy'], label='Validation accuracy')
plt.title('Accuracy curves')
plt.legend()
plt.show()

# Display segmentation results
def show_predictions(model, X, y, num=3):
    preds = model.predict(X[:num])

    plt.figure(figsize=(15, 5*num))
    for i in range(num):
        plt.subplot(num,3,i*3+1)
        plt.imshow(X[i].squeeze(), cmap='gray')
        plt.title('Input image')

        plt.subplot(num,3,i*3+2)
        plt.imshow(y[i].squeeze(), cmap='gray')
        plt.title('Ground truth')

        plt.subplot(num,3,i*3+3)
        plt.imshow(preds[i].squeeze() > 0.5, cmap='gray')  # threshold the probabilities
        plt.title('Prediction')
    
    plt.tight_layout()
    plt.show()

show_predictions(model, X_val, y_val)