The Complete Feature Engineering Handbook: Practical Python Techniques for 2025

Overview

Feature engineering is the make-or-break stage of a machine learning project: high-quality features can significantly improve model performance. This article gives a systematic tour of the latest feature engineering techniques and practices for 2025, covering the complete workflow from basic data preprocessing to advanced feature generation.

Feature Engineering Fundamentals

Feature Types and Processing Methods

| Feature type | Data form | Typical processing | Caveats |
| --- | --- | --- | --- |
| Numeric | Continuous values | Standardization, normalization, discretization | Watch outliers and distribution shape |
| Categorical | Discrete labels | One-hot, label encoding, target encoding | Handle high-cardinality and unknown categories |
| Temporal | Timestamps | Cyclical encoding, time deltas | Account for time zones and holiday effects |
| Text | Strings | TF-IDF, word embeddings, topic models | Handle multiple languages and special characters |
| Spatial | Coordinates | Geocoding, distance computation | Account for projections and precision |
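
Spatial features are the one row in this table not revisited later in the article. As a minimal sketch, assuming columns named lat/lon and an arbitrary reference point (both illustrative), the haversine formula converts raw coordinates into a distance feature:

python
import numpy as np
import pandas as pd

def haversine_km(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometres between two (lat, lon) points."""
    R = 6371.0  # mean Earth radius, km
    lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2])
    a = np.sin((lat2 - lat1) / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2
    return 2 * R * np.arcsin(np.sqrt(a))

# Hypothetical usage: distance from each record to a fixed reference point
geo_df = pd.DataFrame({'lat': [39.90, 31.23], 'lon': [116.40, 121.47]})
geo_df['dist_to_ref_km'] = haversine_km(geo_df['lat'], geo_df['lon'], 39.9042, 116.4074)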

Feature Engineering Workflow

python
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.feature_selection import SelectKBest, f_classif
import warnings
warnings.filterwarnings('ignore')

class FeatureEngineeringPipeline:
    """特征工程基础管道"""
    
    def __init__(self):
        self.numeric_scaler = StandardScaler()
        self.categorical_encoder = LabelEncoder()
        self.feature_selector = None
        
    def load_sample_data(self):
        """加载示例数据"""
        np.random.seed(42)
        n_samples = 1000
        
        data = {
            'age': np.random.normal(35, 10, n_samples),
            'income': np.random.lognormal(10, 1, n_samples),
            'education': np.random.choice(['HighSchool', 'Bachelor', 'Master', 'PhD'], n_samples),
            'city': np.random.choice(['Beijing', 'Shanghai', 'Guangzhou', 'Shenzhen', 'Other'], n_samples),
            'purchase_amount': np.random.exponential(100, n_samples),
            'target': np.random.choice([0, 1], n_samples, p=[0.7, 0.3])
        }
        
        return pd.DataFrame(data)
    
    def analyze_features(self, df):
        """特征分析报告"""
        print("=== 特征分析报告 ===")
        print(f"数据集形状: {df.shape}")
        print("\n数据类型分布:")
        print(df.dtypes.value_counts())
        
        print("\n缺失值统计:")
        missing_stats = df.isnull().sum()
        print(missing_stats[missing_stats > 0])
        
        print("\n数值特征统计:")
        numeric_cols = df.select_dtypes(include=[np.number]).columns
        if len(numeric_cols) > 0:
            print(df[numeric_cols].describe())
        
        return df

# Initialize the pipeline and analyze the sample data
pipeline = FeatureEngineeringPipeline()
sample_data = pipeline.load_sample_data()
pipeline.analyze_features(sample_data)

Numeric Feature Processing

Standardization and Normalization

python
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from scipy import stats

class NumericFeatureProcessor:
    """数值型特征处理器"""
    
    def __init__(self):
        self.scalers = {}
        self.transform_methods = {}
    
    def detect_outliers(self, series, method='iqr'):
        """异常值检测"""
        if method == 'iqr':
            Q1 = series.quantile(0.25)
            Q3 = series.quantile(0.75)
            IQR = Q3 - Q1
            lower_bound = Q1 - 1.5 * IQR
            upper_bound = Q3 + 1.5 * IQR
            outliers = (series < lower_bound) | (series > upper_bound)
        elif method == 'zscore':
            z_scores = np.abs(stats.zscore(series))
            outliers = z_scores > 3
        else:
            raise ValueError("不支持的异常值检测方法")
        
        return outliers
    
    def handle_outliers(self, series, method='clip'):
        """异常值处理"""
        outliers = self.detect_outliers(series)
        
        if method == 'clip':
            Q1 = series.quantile(0.25)
            Q3 = series.quantile(0.75)
            IQR = Q3 - Q1
            lower_bound = Q1 - 1.5 * IQR
            upper_bound = Q3 + 1.5 * IQR
            return series.clip(lower_bound, upper_bound)
        elif method == 'remove':
            return series[~outliers]
        elif method == 'transform':
            return np.log1p(series)
        else:
            return series
    
    def scale_features(self, df, columns, method='standard'):
        """特征缩放"""
        scaled_df = df.copy()
        
        for col in columns:
            if method == 'standard':
                scaler = StandardScaler()
            elif method == 'minmax':
                scaler = MinMaxScaler()
            elif method == 'robust':
                scaler = RobustScaler()
            else:
                raise ValueError("不支持的缩放方法")
            
            scaled_df[col] = scaler.fit_transform(df[[col]])
            self.scalers[col] = scaler
        
        return scaled_df
    
    def create_interaction_features(self, df, feature_pairs):
        """创建交互特征"""
        interaction_df = df.copy()
        
        for col1, col2 in feature_pairs:
            if col1 in df.columns and col2 in df.columns:
                # Multiplicative interaction
                interaction_df[f'{col1}_x_{col2}'] = df[col1] * df[col2]
                # Ratio interaction (guard against division by zero)
                interaction_df[f'{col1}_div_{col2}'] = np.where(
                    df[col2] != 0, df[col1] / df[col2], 0
                )
        
        return interaction_df
    
    def create_polynomial_features(self, df, columns, degree=2):
        """创建多项式特征"""
        from sklearn.preprocessing import PolynomialFeatures
        
        poly = PolynomialFeatures(degree=degree, include_bias=False)
        poly_features = poly.fit_transform(df[columns])
        
        # Build feature names and drop degree-1 terms that duplicate the input columns
        feature_names = poly.get_feature_names_out(columns)
        poly_df = pd.DataFrame(poly_features, columns=feature_names, index=df.index)
        poly_df = poly_df.drop(columns=columns)
        
        return pd.concat([df, poly_df], axis=1)

# Numeric feature processing example
numeric_processor = NumericFeatureProcessor()

# Handle outliers (clipped to the IQR bounds by default)
sample_data['income_cleaned'] = numeric_processor.handle_outliers(sample_data['income'])

# Scale features
scaled_data = numeric_processor.scale_features(sample_data, ['age', 'income_cleaned'])

print("数值特征处理完成")
print(f"处理前收入范围: {sample_data['income'].min():.2f} - {sample_data['income'].max():.2f}")
print(f"处理后收入范围: {scaled_data['income_cleaned'].min():.2f} - {scaled_data['income_cleaned'].max():.2f}")

Binning and Discretization

python
class BinningProcessor:
    """分箱处理器"""
    
    def equal_width_binning(self, series, n_bins=5):
        """等宽分箱"""
        bins = pd.cut(series, bins=n_bins, labels=False)
        return bins
    
    def equal_frequency_binning(self, series, n_bins=5):
        """等频分箱"""
        bins = pd.qcut(series, q=n_bins, labels=False, duplicates='drop')
        return bins
    
    def target_guided_binning(self, series, target, n_bins=5):
        """Target-guided binning."""
        # Bin by quantiles, then encode each bin with its target mean
        temp_df = pd.DataFrame({'feature': series, 'target': target})
        
        temp_df['bin'] = pd.qcut(temp_df['feature'], q=n_bins, labels=False, duplicates='drop')
        bin_stats = temp_df.groupby('bin')['target'].mean()
        
        # Mapping bins to their target means keeps rows aligned with the original index
        return temp_df['bin'].map(bin_stats).values
    
    def create_binning_features(self, df, numeric_columns, target_col=None, n_bins=5):
        """Create binned features."""
        binning_df = df.copy()
        
        for col in numeric_columns:
            if col in df.columns:
                # Equal-width binning
                binning_df[f'{col}_eq_width'] = self.equal_width_binning(df[col], n_bins)
                
                # Equal-frequency binning
                binning_df[f'{col}_eq_freq'] = self.equal_frequency_binning(df[col], n_bins)
                
                # Target-guided binning when a target column is available
                if target_col and target_col in df.columns:
                    binning_df[f'{col}_target_guide'] = self.target_guided_binning(
                        df[col], df[target_col], n_bins
                    )
        
        return binning_df

# Binning example
binning_processor = BinningProcessor()
binned_data = binning_processor.create_binning_features(
    sample_data, ['age', 'income'], 'target', n_bins=4
)

print("\n分箱特征创建完成")
print("年龄分箱分布:")
print(binned_data['age_eq_width'].value_counts().sort_index())
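
sklearn ships the same strategies in KBinsDiscretizer, whose fit/transform interface makes the learned bin edges reusable on new data. A short sketch (parameter choices are illustrative):

python
from sklearn.preprocessing import KBinsDiscretizer

# strategy='uniform' ~ equal-width, 'quantile' ~ equal-frequency, 'kmeans' = cluster-based bins
kbd = KBinsDiscretizer(n_bins=4, encode='ordinal', strategy='quantile')
sample_data['age_sklearn_bin'] = kbd.fit_transform(sample_data[['age']]).ravel()
print(sample_data['age_sklearn_bin'].value_counts().sort_index())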

Categorical Feature Encoding

Comparing Encoding Techniques

python
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from category_encoders import TargetEncoder

class CategoricalFeatureEncoder:
    """类别型特征编码器"""
    
    def __init__(self):
        self.encoders = {}
        self.encoding_methods = {}
    
    def one_hot_encoding(self, df, columns, max_categories=50):
        """One-Hot编码"""
        encoded_df = df.copy()
        
        for col in columns:
            if col in df.columns:
                # Skip high-cardinality columns
                unique_count = df[col].nunique()
                if unique_count > max_categories:
                    print(f"Warning: {col} has {unique_count} categories; skipping one-hot encoding")
                    continue
                
                encoder = OneHotEncoder(sparse_output=False, drop='first')
                encoded_features = encoder.fit_transform(df[[col]])
                
                # Build feature names (the first category is dropped)
                feature_names = [f"{col}_{cat}" for cat in encoder.categories_[0][1:]]
                encoded_features_df = pd.DataFrame(
                    encoded_features, columns=feature_names, index=df.index
                )
                
                encoded_df = pd.concat([encoded_df, encoded_features_df], axis=1)
                encoded_df.drop(col, axis=1, inplace=True)
                self.encoders[col] = encoder
        
        return encoded_df
    
    def target_encoding(self, df, columns, target_col, smoothing=1.0):
        """目标编码"""
        encoded_df = df.copy()
        
        for col in columns:
            if col in df.columns and target_col in df.columns:
                encoder = TargetEncoder(smoothing=smoothing)
                encoded_df[f'{col}_target_enc'] = encoder.fit_transform(
                    df[col], df[target_col]
                )
                self.encoders[f'{col}_target'] = encoder
        
        return encoded_df
    
    def frequency_encoding(self, df, columns):
        """频率编码"""
        encoded_df = df.copy()
        
        for col in columns:
            if col in df.columns:
                freq_encoding = df[col].value_counts(normalize=True)
                encoded_df[f'{col}_freq_enc'] = df[col].map(freq_encoding)
        
        return encoded_df
    
    def label_encoding(self, df, columns):
        """标签编码"""
        encoded_df = df.copy()
        
        for col in columns:
            if col in df.columns:
                encoder = LabelEncoder()
                encoded_df[f'{col}_label_enc'] = encoder.fit_transform(df[col])
                self.encoders[f'{col}_label'] = encoder
        
        return encoded_df
    
    def compare_encoding_methods(self, df, feature_col, target_col):
        """比较不同编码方法"""
        results = {}
        
        # Plain label encoding
        le = LabelEncoder()
        le_encoded = le.fit_transform(df[feature_col])
        results['Label Encoding'] = le_encoded
        
        # Target encoding
        te = TargetEncoder()
        te_encoded = te.fit_transform(df[feature_col], df[target_col])
        results['Target Encoding'] = te_encoded
        
        # Frequency encoding
        freq_map = df[feature_col].value_counts(normalize=True)
        freq_encoded = df[feature_col].map(freq_map)
        results['Frequency Encoding'] = freq_encoded
        
        return results

# Categorical encoding example
categorical_encoder = CategoricalFeatureEncoder()

# Apply the different encoding methods
categorical_cols = ['education', 'city']

# One-hot encoding
onehot_encoded = categorical_encoder.one_hot_encoding(sample_data, categorical_cols)

# Target encoding
target_encoded = categorical_encoder.target_encoding(
    sample_data, categorical_cols, 'target'
)

print("\n类别特征编码完成")
print(f"One-Hot编码后特征数: {onehot_encoded.shape[1]}")
print(f"目标编码后特征数: {target_encoded.shape[1]}")

Time Feature Engineering

Time Feature Extraction

python

class TimeFeatureEngineer:
    """时间特征工程师"""
    
    def create_time_features(self, df, time_column):
        """创建时间特征"""
        time_df = df.copy()
        
        if time_column in df.columns:
            # Make sure the time column is datetime
            time_df[time_column] = pd.to_datetime(time_df[time_column])
            
            # Basic calendar features
            time_df[f'{time_column}_year'] = time_df[time_column].dt.year
            time_df[f'{time_column}_month'] = time_df[time_column].dt.month
            time_df[f'{time_column}_day'] = time_df[time_column].dt.day
            time_df[f'{time_column}_hour'] = time_df[time_column].dt.hour
            time_df[f'{time_column}_dayofweek'] = time_df[time_column].dt.dayofweek
            time_df[f'{time_column}_quarter'] = time_df[time_column].dt.quarter
            time_df[f'{time_column}_is_weekend'] = time_df[time_column].dt.dayofweek.isin([5, 6]).astype(int)
            
            # Cyclical encoding (sin/cos preserves periodicity)
            time_df[f'{time_column}_month_sin'] = np.sin(2 * np.pi * time_df[time_column].dt.month / 12)
            time_df[f'{time_column}_month_cos'] = np.cos(2 * np.pi * time_df[time_column].dt.month / 12)
            time_df[f'{time_column}_day_sin'] = np.sin(2 * np.pi * time_df[time_column].dt.day / 31)
            time_df[f'{time_column}_day_cos'] = np.cos(2 * np.pi * time_df[time_column].dt.day / 31)
        
        return time_df
    
    def create_time_based_features(self, df, time_column, value_columns):
        """创建基于时间的聚合特征"""
        time_df = df.copy()
        time_df[time_column] = pd.to_datetime(time_df[time_column])
        time_df = time_df.sort_values(time_column)
        
        # Rolling statistics
        for value_col in value_columns:
            if value_col in df.columns:
                # Rolling mean
                time_df[f'{value_col}_rolling_mean_7'] = time_df[value_col].rolling(
                    window=7, min_periods=1
                ).mean()
                
                # Rolling standard deviation
                time_df[f'{value_col}_rolling_std_7'] = time_df[value_col].rolling(
                    window=7, min_periods=1
                ).std()
                
                # Lag features
                time_df[f'{value_col}_lag_1'] = time_df[value_col].shift(1)
                time_df[f'{value_col}_lag_7'] = time_df[value_col].shift(7)
        
        return time_df
    
    def create_seasonal_features(self, df, time_column):
        """创建季节性特征"""
        time_df = df.copy()
        time_df[time_column] = pd.to_datetime(time_df[time_column])
        
        # Season number (Dec-Feb=1, Mar-May=2, Jun-Aug=3, Sep-Nov=4)
        time_df['season'] = (time_df[time_column].dt.month % 12 + 3) // 3
        
        # Holiday flag (simplified placeholder)
        time_df['is_holiday'] = 0  # real holiday-calendar data is needed in practice; see the sketch after this section
        
        # Business-cycle boundary flags
        time_df['is_month_start'] = time_df[time_column].dt.is_month_start.astype(int)
        time_df['is_month_end'] = time_df[time_column].dt.is_month_end.astype(int)
        time_df['is_quarter_start'] = time_df[time_column].dt.is_quarter_start.astype(int)
        time_df['is_quarter_end'] = time_df[time_column].dt.is_quarter_end.astype(int)
        time_df['is_year_start'] = time_df[time_column].dt.is_year_start.astype(int)
        time_df['is_year_end'] = time_df[time_column].dt.is_year_end.astype(int)
        
        return time_df

# Create sample time-series data
def create_time_series_data():
    """创建时间序列示例数据"""
    dates = pd.date_range('2023-01-01', '2024-01-01', freq='D')
    n_samples = len(dates)
    
    data = {
        'date': dates,
        'value': np.random.normal(100, 20, n_samples) + 
                10 * np.sin(2 * np.pi * np.arange(n_samples) / 30),  # monthly cycle
        'category': np.random.choice(['A', 'B', 'C'], n_samples)
    }
    
    return pd.DataFrame(data)

# Time feature engineering example
time_engineer = TimeFeatureEngineer()
time_data = create_time_series_data()

# Create time features
time_features = time_engineer.create_time_features(time_data, 'date')
time_features = time_engineer.create_seasonal_features(time_features, 'date')

print("\n时间特征工程完成")
print(f"时间特征数量: {time_features.shape[1]}")
print("时间特征示例:")
print(time_features[['date_year', 'date_month', 'season', 'is_weekend']].head())
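
The is_holiday placeholder above needs a real calendar to be useful. One option, assuming the third-party holidays package is installed (pip install holidays), is to flag dates against an official country calendar:

python
import holidays

# Chinese public holidays; swap the country code for other markets
cn_holidays = holidays.country_holidays('CN')
time_features['is_holiday'] = time_features['date'].dt.date.map(lambda d: int(d in cn_holidays))
print(f"Holiday rows flagged: {time_features['is_holiday'].sum()}")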

Text Feature Engineering

Text Feature Extraction Techniques

python
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import re

class TextFeatureEngineer:
    """文本特征工程师"""
    
    def __init__(self):
        self.vectorizers = {}
        self.text_stats = {}
    
    def clean_text(self, text_series):
        """文本清洗"""
        cleaned_text = text_series.str.lower()
        cleaned_text = cleaned_text.str.replace(r'[^\w\s]', '', regex=True)
        cleaned_text = cleaned_text.str.replace(r'\d+', '', regex=True)
        cleaned_text = cleaned_text.str.strip()
        return cleaned_text
    
    def create_basic_text_features(self, text_series):
        """Create basic text features."""
        features = {}
        
        # Length features
        features['text_length'] = text_series.str.len()
        features['word_count'] = text_series.str.split().str.len()
        features['char_count'] = text_series.str.replace(' ', '').str.len()
        # Guard against empty strings (zero words)
        features['avg_word_length'] = features['char_count'] / features['word_count'].replace(0, np.nan)
        
        # Character-class counts (meaningful only on raw, uncleaned text)
        features['digit_count'] = text_series.str.count(r'\d')
        features['uppercase_count'] = text_series.str.count(r'[A-Z]')
        features['special_char_count'] = text_series.str.count(r'[^\w\s]')
        
        return pd.DataFrame(features)
    
    def tfidf_vectorization(self, text_series, max_features=100):
        """TF-IDF向量化"""
        tfidf = TfidfVectorizer(
            max_features=max_features,
            stop_words='english',
            ngram_range=(1, 2)
        )
        
        tfidf_matrix = tfidf.fit_transform(text_series)
        feature_names = tfidf.get_feature_names_out()
        
        tfidf_df = pd.DataFrame(
            tfidf_matrix.toarray(),
            columns=[f'tfidf_{name}' for name in feature_names],
            index=text_series.index
        )
        
        self.vectorizers['tfidf'] = tfidf
        return tfidf_df
    
    def count_vectorization(self, text_series, max_features=50):
        """计数向量化"""
        count_vec = CountVectorizer(
            max_features=max_features,
            stop_words='english',
            ngram_range=(1, 1)
        )
        
        count_matrix = count_vec.fit_transform(text_series)
        feature_names = count_vec.get_feature_names_out()
        
        count_df = pd.DataFrame(
            count_matrix.toarray(),
            columns=[f'count_{name}' for name in feature_names],
            index=text_series.index
        )
        
        self.vectorizers['count'] = count_vec
        return count_df
    
    def create_advanced_text_features(self, text_series):
        """创建高级文本特征"""
        features = {}
        
        # Lexical richness (type-token ratio)
        def lexical_richness(text):
            words = text.split()
            if len(words) == 0:
                return 0
            return len(set(words)) / len(words)
        
        features['lexical_richness'] = text_series.apply(lexical_richness)
        
        # Sentence complexity (average words per sentence)
        def sentence_complexity(text):
            sentences = re.split(r'[.!?]+', text)
            sentences = [s.strip() for s in sentences if s.strip()]
            if len(sentences) == 0:
                return 0
            avg_words = sum(len(s.split()) for s in sentences) / len(sentences)
            return avg_words
        
        features['sentence_complexity'] = text_series.apply(sentence_complexity)
        
        return pd.DataFrame(features)

# Create sample text data
def create_text_data():
    """Create sample text data."""
    texts = [
        "This is a great product with amazing features",
        "I love using this software for my daily work",
        "The user interface is very intuitive and easy to use",
        "This application has some performance issues",
        "Excellent customer support and quick response",
        "Needs improvement in documentation and tutorials"
    ]
    
    return pd.DataFrame({'text': texts * 50})  # repeat the samples to enlarge the dataset

# Text feature engineering example
text_engineer = TextFeatureEngineer()
text_data = create_text_data()

# Clean the text
cleaned_text = text_engineer.clean_text(text_data['text'])

# Compute basic features on the raw text so case and digit counts are meaningful
basic_features = text_engineer.create_basic_text_features(text_data['text'])

# TF-IDF features
tfidf_features = text_engineer.tfidf_vectorization(cleaned_text, max_features=20)

# Combine all features
all_text_features = pd.concat([basic_features, tfidf_features], axis=1)

print("\n文本特征工程完成")
print(f"文本特征数量: {all_text_features.shape[1]}")
print("文本特征示例:")
print(all_text_features.iloc[:, :5].head())
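
The feature-type table mentions topic models; a lightweight stand-in is latent semantic analysis, i.e. truncated SVD on the TF-IDF matrix. A minimal sketch reusing the vectorizer fitted above:

python
from sklearn.decomposition import TruncatedSVD

# Compress the sparse TF-IDF matrix into a few dense latent "topic" dimensions
tfidf_matrix = text_engineer.vectorizers['tfidf'].transform(cleaned_text)
svd = TruncatedSVD(n_components=5, random_state=42)
topic_features = pd.DataFrame(
    svd.fit_transform(tfidf_matrix),
    columns=[f'lsa_{i}' for i in range(5)],
    index=cleaned_text.index
)
print(f"Variance explained by 5 components: {svd.explained_variance_ratio_.sum():.2f}")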

Feature Selection Techniques

Comparing Feature Selection Methods

python
from sklearn.feature_selection import RFE, SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Lasso
from sklearn.feature_selection import mutual_info_classif

class FeatureSelector:
    """特征选择器"""
    
    def __init__(self):
        self.selector_info = {}
    
    def correlation_analysis(self, df, target_col, threshold=0.8):
        """相关性分析"""
        # 计算特征相关性
        corr_matrix = df.corr()
        
        # Find highly correlated feature pairs
        high_corr_pairs = []
        for i in range(len(corr_matrix.columns)):
            for j in range(i+1, len(corr_matrix.columns)):
                if abs(corr_matrix.iloc[i, j]) > threshold:
                    high_corr_pairs.append((
                        corr_matrix.columns[i], 
                        corr_matrix.columns[j],
                        corr_matrix.iloc[i, j]
                    ))
        
        # Correlation with the target
        target_corr = corr_matrix[target_col].abs().sort_values(ascending=False)
        
        return {
            'high_correlation_pairs': high_corr_pairs,
            'target_correlation': target_corr
        }
    
    def univariate_selection(self, X, y, k=10, method='f_classif'):
        """Univariate feature selection."""
        if method == 'f_classif':
            selector = SelectKBest(score_func=f_classif, k=k)
        elif method == 'mutual_info':
            selector = SelectKBest(score_func=mutual_info_classif, k=k)
        else:
            raise ValueError("Unsupported selection method")
        
        X_selected = selector.fit_transform(X, y)
        selected_features = X.columns[selector.get_support()]
        # Keep only the selected features' scores so names and scores stay aligned
        feature_scores = selector.scores_[selector.get_support()]
        
        self.selector_info['univariate'] = selector
        return X_selected, selected_features, feature_scores
    
    def recursive_elimination(self, X, y, n_features=10):
        """递归特征消除"""
        estimator = RandomForestClassifier(n_estimators=100, random_state=42)
        selector = RFE(estimator=estimator, n_features_to_select=n_features)
        
        X_selected = selector.fit_transform(X, y)
        selected_features = X.columns[selector.get_support()]
        feature_ranking = selector.ranking_
        
        self.selector_info['rfe'] = selector
        return X_selected, selected_features, feature_ranking
    
    def model_based_selection(self, X, y, threshold='mean'):
        """Model-based feature selection."""
        # Use Lasso coefficients to select features
        lasso = Lasso(alpha=0.01, random_state=42)
        selector = SelectFromModel(lasso, threshold=threshold)
        
        X_selected = selector.fit_transform(X, y)
        selected_features = X.columns[selector.get_support()]
        feature_importance = selector.estimator_.coef_
        
        self.selector_info['lasso'] = selector
        return X_selected, selected_features, feature_importance
    
    def compare_selection_methods(self, X, y):
        """Compare feature selection methods."""
        results = {}
        
        # Univariate selection
        _, uni_features, uni_scores = self.univariate_selection(X, y, k=5)
        results['Univariate'] = {
            'features': uni_features,
            'scores': dict(zip(uni_features, uni_scores))
        }
        
        # Recursive elimination
        _, rfe_features, rfe_ranking = self.recursive_elimination(X, y, n_features=5)
        results['RFE'] = {
            'features': rfe_features,
            'ranking': dict(zip(X.columns, rfe_ranking))
        }
        
        # Model-based selection
        _, model_features, model_importance = self.model_based_selection(X, y)
        results['Model-Based'] = {
            'features': model_features,
            'importance': dict(zip(X.columns, model_importance))
        }
        
        return results

# Feature selection example
feature_selector = FeatureSelector()

# Prepare data (reusing the text features built above)
X = all_text_features.fillna(0)
y = np.random.choice([0, 1], len(X))  # random target for demonstration

# Correlation analysis
corr_results = feature_selector.correlation_analysis(
    pd.concat([X, pd.Series(y, name='target')], axis=1), 
    'target'
)

# Compare selection methods
selection_comparison = feature_selector.compare_selection_methods(X, y)

print("\n特征选择完成")
print("不同方法选择的特征:")
for method, result in selection_comparison.items():
    print(f"{method}: {list(result['features'])[:5]}")

Automated Feature Engineering

Automated Feature Generation

python
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer

class AutomatedFeatureEngineering:
    """自动化特征工程"""
    
    def __init__(self):
        self.preprocessor = None
        self.feature_names = []
    
    def create_preprocessing_pipeline(self, numeric_features, categorical_features):
        """创建预处理管道"""
        numeric_transformer = Pipeline(steps=[
            ('imputer', SimpleImputer(strategy='median')),
            ('scaler', StandardScaler())
        ])
        
        categorical_transformer = Pipeline(steps=[
            ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
            ('onehot', OneHotEncoder(handle_unknown='ignore', sparse_output=False))
        ])
        
        self.preprocessor = ColumnTransformer(
            transformers=[
                ('num', numeric_transformer, numeric_features),
                ('cat', categorical_transformer, categorical_features)
            ]
        )
        
        return self.preprocessor
    
    def auto_generate_features(self, df, target_col=None):
        """自动生成特征"""
        generated_df = df.copy()
        
        # Auto-detect feature types
        numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
        categorical_cols = df.select_dtypes(include=['object']).columns.tolist()
        
        if target_col and target_col in numeric_cols:
            numeric_cols.remove(target_col)
        
        # Numeric feature transforms
        numeric_processor = NumericFeatureProcessor()
        if numeric_cols:
            # Interaction features
            if len(numeric_cols) >= 2:
                feature_pairs = [(numeric_cols[0], numeric_cols[1])]
                generated_df = numeric_processor.create_interaction_features(
                    generated_df, feature_pairs
                )
            
            # Polynomial features
            if len(numeric_cols) >= 2:
                generated_df = numeric_processor.create_polynomial_features(
                    generated_df, numeric_cols[:2], degree=2
                )
        
        # Categorical feature encoding
        categorical_encoder = CategoricalFeatureEncoder()
        if categorical_cols:
            generated_df = categorical_encoder.frequency_encoding(
                generated_df, categorical_cols
            )
        
        self.feature_names = generated_df.columns.tolist()
        return generated_df
    
    def evaluate_feature_importance(self, X, y, top_k=10):
        """评估特征重要性"""
        from sklearn.ensemble import RandomForestRegressor
        
        model = RandomForestRegressor(n_estimators=100, random_state=42)
        model.fit(X, y)
        
        feature_importance = pd.DataFrame({
            'feature': X.columns,
            'importance': model.feature_importances_
        }).sort_values('importance', ascending=False)
        
        return feature_importance.head(top_k)

# Automated feature engineering example
auto_fe = AutomatedFeatureEngineering()

# Prepare sample data
sample_df = pipeline.load_sample_data()

# Auto-generate features
auto_features = auto_fe.auto_generate_features(sample_df, 'target')

print("\n自动化特征工程完成")
print(f"原始特征数: {sample_df.shape[1]}")
print(f"生成特征数: {auto_features.shape[1]}")
print(f"新增特征示例: {[col for col in auto_features.columns if col not in sample_df.columns][:5]}")

Feature Engineering Best Practices

Performance Optimization Checklist

python
class FeatureEngineeringBestPractices:
    """特征工程最佳实践"""
    
    def __init__(self):
        self.checklist = {
            'Data quality': [
                'Handle missing values',
                'Handle outliers',
                'Validate data distributions',
                'Check data consistency'
            ],
            'Feature generation': [
                'Create domain-specific features',
                'Generate interaction features',
                'Consider time-series features',
                'Encode categorical features'
            ],
            'Feature selection': [
                'Remove low-variance features',
                'Handle multicollinearity',
                'Select features by importance',
                'Validate feature stability'
            ],
            'Performance optimization': [
                'Standardize features',
                'Handle data skew',
                'Optimize memory usage',
                'Build feature pipelines'
            ]
        }
    
    def validate_features(self, X, y):
        """特征验证"""
        validation_results = {}
        
        # 检查缺失值
        missing_ratio = X.isnull().sum() / len(X)
        validation_results['missing_values'] = missing_ratio[missing_ratio > 0.05]
        
        # 检查低方差特征
        from sklearn.feature_selection import VarianceThreshold
        selector = VarianceThreshold(threshold=0.01)
        try:
            selector.fit(X)
            low_variance_mask = ~selector.get_support()
            validation_results['low_variance_features'] = X.columns[low_variance_mask].tolist()
        except:
            validation_results['low_variance_features'] = []
        
        # Check feature-target correlations
        if y is not None:
            correlations = []
            for col in X.select_dtypes(include=[np.number]).columns:
                try:
                    corr = np.corrcoef(X[col], y)[0, 1]
                    correlations.append((col, abs(corr)))
                except (ValueError, TypeError):
                    continue
            
            validation_results['target_correlations'] = sorted(
                correlations, key=lambda x: x[1], reverse=True
            )[:10]
        
        return validation_results
    
    def create_feature_documentation(self, feature_df):
        """创建特征文档"""
        documentation = {}
        
        for col in feature_df.columns:
            doc = {
                'data_type': str(feature_df[col].dtype),
                'missing_count': feature_df[col].isnull().sum(),
                'unique_count': feature_df[col].nunique(),
                'description': f'Auto-generated feature: {col}'
            }
            
            if pd.api.types.is_numeric_dtype(feature_df[col]):
                doc.update({
                    'min_value': feature_df[col].min(),
                    'max_value': feature_df[col].max(),
                    'mean_value': feature_df[col].mean(),
                    'std_value': feature_df[col].std()
                })
            
            documentation[col] = doc
        
        return documentation

# Apply the best practices
best_practices = FeatureEngineeringBestPractices()

# Validate features
validation_results = best_practices.validate_features(auto_features, sample_data['target'])

# Create feature documentation
feature_docs = best_practices.create_feature_documentation(auto_features)

print("\n特征工程最佳实践检查")
print("验证结果:")
for check_type, results in validation_results.items():
    if len(results) > 0:
        print(f"{check_type}: {len(results)}个问题")
    else:
        print(f"{check_type}: 通过")

print(f"\n特征文档已创建,包含{len(feature_docs)}个特征的详细信息")

Summary and Outlook

Feature Engineering Techniques Compared

| Technique category | Main methods | Typical use | Trade-offs |
| --- | --- | --- | --- |
| Numeric processing | Standardization, binning, polynomials | Continuous numeric data | Improves model stability; may lose information |
| Categorical encoding | One-hot, target encoding, frequency encoding | Categorical variables | Captures category relationships; may inflate dimensionality |
| Time features | Cyclical encoding, lag features, seasonal decomposition | Time-series data | Captures temporal patterns; requires time alignment |
| Text features | TF-IDF, word embeddings, n-grams | Natural-language data | Extracts semantic information; computationally expensive |
| Feature selection | Correlation, model importance, recursive elimination | High-dimensional data | Reduces overfitting; may drop important features |

Trends for 2025

  1. Automated feature engineering: AutoML-driven intelligent feature generation
  2. Deep-learning features: neural networks learning feature representations automatically
  3. Explainable features: keeping feature engineering transparent and interpretable
  4. Real-time feature engineering: streaming pipelines and real-time feature updates
  5. Cross-domain feature learning: applying transfer learning to feature engineering

Practical Recommendations

  1. Understand the business context: feature engineering must build on domain knowledge
  2. Iterate: keep validating and improving features through experiments
  3. Monitor feature stability: make sure features remain stable over time
  4. Document the process: record feature-generation logic and assumptions in detail
  5. Mind computational cost: balance effectiveness against efficiency

Through a systematic feature engineering workflow, combined with domain knowledge and a solid understanding of the data, you can substantially improve the performance and robustness of machine learning models. Remember: good feature engineering often delivers more real-world gains than a more complex model.
