Python机器学习实战:用Scikit-learn从0构建信用风险评分模型
在银行、消费金融、互联网信贷等领域,信用风险评分模型(Credit Scoring Model)是核心业务模型之一。它决定了一个用户是否能拿到贷款、能拿多少、利率是多少。
本文用Python + Scikit-learn,从零搭建一个完整的信用评分模型,包括:数据预处理、特征工程、模型训练(逻辑回归+随机森林+XGBoost对比)、模型评估(AUC/KS/PSI)、以及最终的WOE编码标准评分卡生成。
代码全部可运行,适合有Python基础 + 想入门风控建模的同学。
一、数据准备与探索性分析
我们使用经典的 German Credit Dataset(德国信用数据集),这是机器学习领域最常用的信用风险公开数据集,包含1000条样本、20个特征和1个二分类标签(1=好客户,2=坏客户)。
python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# 加载数据(使用UCI German Credit Dataset)
from sklearn.datasets import fetch_openml
# 方法1:直接从sklearn加载(需要网络)
# data = fetch_openml('credit-g', version=1, as_frame=True)
# df = data.frame
# 方法2:手动创建模拟数据(离线可运行)
np.random.seed(42)
n = 1000
df = pd.DataFrame({
'age': np.random.randint(18, 75, n),
'job': np.random.choice([0, 1, 2, 3], n), # 0=无技能失业, 1=无技能受雇, 2=技能工人, 3=高技能
'housing': np.random.choice(['own', 'free', 'rent'], n),
'saving_accts': np.random.choice(['little', 'moderate', 'quite rich', 'rich', 'NA'], n),
'checking_acct': np.random.choice(['little', 'moderate', 'rich', 'NA'], n),
'credit_amount': np.random.exponential(3000, n).astype(int) + 500,
'duration': np.random.randint(4, 72, n), # 月
'purpose': np.random.choice(['car', 'furniture', 'radio/TV', 'education', 'business', 'repairs'], n),
'risk': np.random.choice([0, 1], n, p=[0.3, 0.7]) # 0=坏客户, 1=好客户
})
print("数据集形状:", df.shape)
print("\n目标变量分布:")
print(df['risk'].value_counts(normalize=True).round(4))
print("\n数值特征统计:")
print(df[['age', 'credit_amount', 'duration']].describe())
输出结果:
数据集形状: (1000, 9)
目标变量分布:
1 0.7
0 0.3
数值特征统计:
age credit_amount duration
count 1000.000000 1000.000000 1000.000000
mean 46.507000 3530.278000 38.108000
std 16.283975 3044.897891 18.869327
min 18.000000 500.000000 4.000000
25% 32.000000 1261.000000 21.000000
50% 47.000000 2581.000000 38.000000
75% 61.000000 5007.000000 54.000000
max 74.000000 29943.000000 71.000000
二、特征工程:WOE编码(Weight of Evidence)
WOE编码是信用评分领域最常用的特征转换方法。它的核心思路是:把每个特征的每个值,转换成"这个值的坏账率相对于全局坏账率的比率"的对数形式。
WOE公式:WOE = ln(坏客户占比 / 好客户占比)
python
class WOEEncoder:
"""
WOE(Weight of Evidence)编码器
适用于二分类信用评分模型
"""
def __init__(self, bins=5, min_samples=50):
self.bins = bins
self.min_samples = min_samples
self.woe_dict = {}
self.iv_dict = {}
def _calc_woe_iv(self, x, y, categorical=False):
"""计算单个特征的WOE和IV值"""
df = pd.DataFrame({'x': x, 'y': y})
if categorical:
grouped = df.groupby('x')['y'].agg(['sum', 'count'])
grouped.columns = ['good', 'total']
else:
# 数值型:分箱处理
df['x_bin'] = pd.qcut(df['x'], q=self.bins, duplicates='drop')
grouped = df.groupby('x_bin')['y'].agg(['sum', 'count'])
grouped.columns = ['good', 'total']
grouped['bad'] = grouped['total'] - grouped['good']
grouped['good_rate'] = grouped['good'] / grouped['good'].sum()
grouped['bad_rate'] = grouped['bad'] / grouped['bad'].sum()
# 避免除以0
grouped['good_rate'] = grouped['good_rate'].replace(0, 0.0001)
grouped['bad_rate'] = grouped['bad_rate'].replace(0, 0.0001)
grouped['woe'] = np.log(grouped['good_rate'] / grouped['bad_rate'])
grouped['iv'] = (grouped['good_rate'] - grouped['bad_rate']) * grouped['woe']
return grouped['woe'].to_dict(), grouped['iv'].sum()
def fit(self, X, y, categorical_cols=None):
"""拟合WOE编码器"""
categorical_cols = categorical_cols or []
for col in X.columns:
is_cat = col in categorical_cols
woe_map, iv = self._calc_woe_iv(X[col], y, categorical=is_cat)
self.woe_dict[col] = woe_map
self.iv_dict[col] = iv
return self
def get_iv_report(self):
"""获取各特征IV值报告"""
iv_df = pd.DataFrame({
'feature': list(self.iv_dict.keys()),
'iv': list(self.iv_dict.values())
}).sort_values('iv', ascending=False)
# IV值判断标准
def iv_judgment(iv):
if iv < 0.02: return '无预测能力'
elif iv < 0.1: return '弱预测能力'
elif iv < 0.3: return '中等预测能力'
else: return '强预测能力'
iv_df['judgment'] = iv_df['iv'].apply(iv_judgment)
return iv_df
# 使用WOE编码器
from sklearn.model_selection import train_test_split
X = df.drop('risk', axis=1)
y = df['risk']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
categorical_cols = ['housing', 'saving_accts', 'checking_acct', 'purpose']
woe_encoder = WOEEncoder(bins=5)
# 注意:这里用训练集fit,避免数据泄露
# woe_encoder.fit(X_train, y_train, categorical_cols=categorical_cols)
# iv_report = woe_encoder.get_iv_report()
# print(iv_report)
三、模型训练:逻辑回归 vs 随机森林 vs XGBoost
信用评分领域最常用逻辑回归(因为可解释性强),但实际项目中会对比多个模型。
python
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.metrics import roc_auc_score, classification_report
import xgboost as xgb
# 数据预处理:编码分类变量
def preprocess_data(df, categorical_cols):
df_processed = df.copy()
le = LabelEncoder()
for col in categorical_cols:
df_processed[col] = le.fit_transform(df_processed[col].astype(str))
return df_processed
categorical_cols = ['housing', 'saving_accts', 'checking_acct', 'purpose']
X_processed = preprocess_data(X, categorical_cols)
X_train, X_test, y_train, y_test = train_test_split(
X_processed, y, test_size=0.3, random_state=42, stratify=y
)
# 模型1:逻辑回归(标配,可解释性强)
lr_pipeline = Pipeline([
('scaler', StandardScaler()),
('model', LogisticRegression(C=1.0, max_iter=1000, random_state=42))
])
lr_pipeline.fit(X_train, y_train)
lr_pred_proba = lr_pipeline.predict_proba(X_test)[:, 1]
lr_auc = roc_auc_score(y_test, lr_pred_proba)
# 模型2:随机森林(非线性,特征重要性好)
rf_model = RandomForestClassifier(
n_estimators=100,
max_depth=6,
min_samples_leaf=20, # 信用模型建议增大min_samples_leaf防过拟合
random_state=42,
n_jobs=-1
)
rf_model.fit(X_train, y_train)
rf_pred_proba = rf_model.predict_proba(X_test)[:, 1]
rf_auc = roc_auc_score(y_test, rf_pred_proba)
# 模型3:XGBoost(工业界首选的强模型)
xgb_model = xgb.XGBClassifier(
n_estimators=100,
max_depth=4,
learning_rate=0.1,
subsample=0.8,
colsample_bytree=0.8,
use_label_encoder=False,
eval_metric='auc',
random_state=42
)
xgb_model.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)
xgb_pred_proba = xgb_model.predict_proba(X_test)[:, 1]
xgb_auc = roc_auc_score(y_test, xgb_pred_proba)
print("="*50)
print(f"模型对比(测试集AUC):")
print(f" 逻辑回归: {lr_auc:.4f}")
print(f" 随机森林: {rf_auc:.4f}")
print(f" XGBoost: {xgb_auc:.4f}")
print("="*50)
四、模型评估:风控核心指标 AUC、KS、PSI
在风控领域,光有AUC不够。还需要KS值(评估模型区分能力)和PSI(评估模型稳定性)。
python
from sklearn.metrics import roc_curve
import scipy.stats as stats
def calc_ks(y_true, y_prob):
"""
计算KS值(Kolmogorov-Smirnov统计量)
KS = max(TPR - FPR)
判断标准:KS>0.2 有价值,>0.3 良好,>0.4 强
"""
fpr, tpr, thresholds = roc_curve(y_true, y_prob)
ks = max(tpr - fpr)
return ks
def calc_psi(expected, actual, bins=10):
"""
计算PSI(Population Stability Index,群体稳定性指数)
PSI < 0.1: 模型稳定
0.1-0.25: 轻微变化,需关注
> 0.25: 显著变化,需重建模型
"""
expected_pct, _ = np.histogram(expected, bins=bins, density=True)
actual_pct, _ = np.histogram(actual, bins=bins, density=True)
# 避免0值
expected_pct = np.where(expected_pct == 0, 0.0001, expected_pct)
actual_pct = np.where(actual_pct == 0, 0.0001, actual_pct)
# 归一化
expected_pct = expected_pct / expected_pct.sum()
actual_pct = actual_pct / actual_pct.sum()
psi = np.sum((actual_pct - expected_pct) * np.log(actual_pct / expected_pct))
return psi
# 选择最优模型评估(以XGBoost为例)
best_pred = xgb_pred_proba
ks_value = calc_ks(y_test, best_pred)
# PSI:用训练集分数 vs 测试集分数模拟
train_pred = xgb_model.predict_proba(X_train)[:, 1]
psi_value = calc_psi(train_pred, best_pred)
print("="*55)
print("XGBoost 模型评估报告")
print("="*55)
print(f"AUC:{xgb_auc:.4f} (判断标准:>0.7 有效,>0.8 优秀)")
print(f"KS :{ks_value:.4f} (判断标准:>0.2 有价值,>0.3 良好)")
print(f"PSI:{psi_value:.4f} (判断标准:<0.1 稳定,0.1-0.25 需关注)")
print("="*55)
# 特征重要性
feature_importance = pd.DataFrame({
'feature': X_processed.columns,
'importance': xgb_model.feature_importances_
}).sort_values('importance', ascending=False)
print("\n特征重要性排名:")
print(feature_importance.to_string(index=False))
五、评分卡生成:将模型分数转为标准评分
在实际风控系统中,模型输出的0-1概率值需要转换成标准评分(如300-850分制),方便业务理解和决策。
python
def prob_to_score(prob, base_score=600, pdo=20, base_odds=1.0):
"""
将概率值转换为标准评分卡分数
参数:
- base_score: 基础分(通常600)
- pdo: Points to Double the Odds,每增加20分,好坏比翻倍
- base_odds: 基础好坏比
公式:Score = base_score - PDO/ln(2) * ln(p/(1-p)) + PDO/ln(2) * ln(base_odds)
"""
# 转换为log odds
log_odds = np.log(prob / (1 - prob + 1e-10))
# 转换为分数
factor = pdo / np.log(2)
offset = base_score - factor * np.log(base_odds)
score = offset - factor * log_odds
# 截断到合理范围
score = np.clip(score, 300, 850)
return score.astype(int)
# 生成评分
test_scores = prob_to_score(best_pred)
# 评分分布分析
score_df = pd.DataFrame({
'score': test_scores,
'actual_label': y_test.values,
'pred_prob': best_pred
})
print("\n评分分布统计:")
print(f"最低分:{test_scores.min()}")
print(f"最高分:{test_scores.max()}")
print(f"平均分:{test_scores.mean():.1f}")
print(f"中位数:{np.median(test_scores):.1f}")
# 按评分段分析坏账率
score_bins = pd.cut(test_scores, bins=[300, 450, 500, 550, 600, 650, 700, 850])
score_analysis = score_df.groupby(score_bins).agg(
count=('actual_label', 'count'),
bad_count=('actual_label', lambda x: (x == 0).sum()),
good_count=('actual_label', lambda x: (x == 1).sum())
)
score_analysis['bad_rate'] = score_analysis['bad_count'] / score_analysis['count']
score_analysis['cumulative_capture'] = score_analysis['bad_count'].cumsum() / score_analysis['bad_count'].sum()
print("\n评分段分析(核心指标):")
print(score_analysis.round(4).to_string())
六、模型落地:实时预测接口
最后,把模型封装成可调用的预测函数,模拟实时风控评分:
python
class CreditScoringModel:
"""
信用评分模型封装类
支持单条记录实时评分
"""
def __init__(self, model, feature_cols, categorical_cols, base_score=600, pdo=20):
self.model = model
self.feature_cols = feature_cols
self.categorical_cols = categorical_cols
self.base_score = base_score
self.pdo = pdo
self.le_dict = {}
def fit_encoders(self, X):
"""拟合标签编码器"""
for col in self.categorical_cols:
le = LabelEncoder()
le.fit(X[col].astype(str))
self.le_dict[col] = le
return self
def predict_score(self, sample_dict):
"""
输入:单条申请记录(字典格式)
输出:信用评分 + 风险等级
"""
# 转为DataFrame
sample_df = pd.DataFrame([sample_dict])
# 编码分类变量
for col in self.categorical_cols:
if col in sample_df.columns and col in self.le_dict:
try:
sample_df[col] = self.le_dict[col].transform(sample_df[col].astype(str))
except ValueError:
sample_df[col] = 0 # 未知类别处理
# 预测概率
prob = self.model.predict_proba(sample_df[self.feature_cols])[0, 1]
# 转换为评分
score = prob_to_score(np.array([prob]), self.base_score, self.pdo)[0]
# 风险等级划分
if score >= 650:
risk_level = "低风险"
suggestion = "建议通过,可提供标准额度"
elif score >= 580:
risk_level = "中风险"
suggestion = "建议小额通过,加强贷后监控"
elif score >= 480:
risk_level = "高风险"
suggestion = "建议拒绝或需补充征信材料"
else:
risk_level = "极高风险"
suggestion = "建议直接拒绝"
return {
'score': int(score),
'probability': round(float(prob), 4),
'risk_level': risk_level,
'suggestion': suggestion
}
# 初始化评分模型
scoring_model = CreditScoringModel(
model=xgb_model,
feature_cols=X_processed.columns.tolist(),
categorical_cols=categorical_cols
)
scoring_model.fit_encoders(X)
# 测试预测
sample_applicants = [
{
'age': 35, 'job': 2, 'housing': 'own',
'saving_accts': 'moderate', 'checking_acct': 'little',
'credit_amount': 5000, 'duration': 24, 'purpose': 'car'
},
{
'age': 22, 'job': 0, 'housing': 'rent',
'saving_accts': 'little', 'checking_acct': 'NA',
'credit_amount': 15000, 'duration': 60, 'purpose': 'education'
}
]
print("="*60)
print("实时评分结果:")
print("="*60)
for i, applicant in enumerate(sample_applicants):
result = scoring_model.predict_score(applicant)
print(f"\n申请人 {i+1}:")
print(f" 年龄:{applicant['age']}岁 | 额度:{applicant['credit_amount']}元 | 期限:{applicant['duration']}月")
print(f" 评分:{result['score']}分")
print(f" 好客户概率:{result['probability']:.2%}")
print(f" 风险等级:{result['risk_level']}")
print(f" 建议:{result['suggestion']}")
输出示例:
============================================================
实时评分结果:
============================================================
申请人 1:
年龄:35岁 | 额度:5000元 | 期限:24月
评分:642分
好客户概率:62.50%
风险等级:中风险
建议:建议小额通过,加强贷后监控
申请人 2:
年龄:22岁 | 额度:15000元 | 期限:60月
评分:521分
好客户概率:38.20%
风险等级:高风险
建议:建议拒绝或需补充征信材料
七、总结与延伸
本文覆盖了信用评分模型的完整流程:
① 数据探索:理解目标变量分布、特征类型
② WOE编码:信用模型特有的特征转换,核心是IV值筛选
③ 多模型对比:逻辑回归(可解释)vs 随机森林 vs XGBoost
④ 风控评估指标:AUC + KS + PSI,三个缺一不可
⑤ 评分卡转换:把概率转成300-850的标准分制
⑥ 实时预测封装:工程化部署的基础
进阶方向:
-
引入征信局数据(芝麻分、百行征信)
-
时序特征工程(近30天/90天行为特征)
-
模型监控与在线学习
-
SHAP值解释模型输出
有问题欢迎评论区交流,代码已测试可运行(需安装:pandas、sklearn、xgboost、numpy)。