lightgbm做分类

python 复制代码
```python
import pandas as pd#导入csv文件的库
import numpy as np#进行矩阵运算的库
import json#用于读取和写入json数据格式

#model lgb分类模型,日志评估,早停防止过拟合
from  lightgbm import LGBMClassifier,log_evaluation,early_stopping
#metric
from sklearn.metrics import roc_auc_score#导入roc_auc曲线
#KFold是直接分成k折,StratifiedKFold还要考虑每种类别的占比
from sklearn.model_selection import StratifiedKFold

#config
class Config():
    """Experiment-wide constants shared by the whole script."""
    # Global RNG seed so every run is reproducible.
    seed=2024
    # Number of folds for stratified K-fold cross-validation.
    num_folds=10
    # Name of the label column in the training DataFrame.
    TARGET_NAME ='label'
import random  # stdlib RNG; seeded below alongside NumPy's


def seed_everything(seed):
    """Seed every RNG in use (Python's random and NumPy) for reproducibility."""
    random.seed(seed)     # Python built-in RNG
    np.random.seed(seed)  # NumPy global RNG


seed_everything(Config.seed)


# Base directory of the Kaggle competition inputs.
path='/kaggle/input/'
data_dir = path + "whoiswho-ind-kdd-2024/IND-WhoIsWho/"

# Explicit UTF-8: these JSON files contain non-ASCII author names, and the
# platform default encoding (e.g. cp1252 on Windows) would break the load.
# train_author sample: author_id -> dict_keys(['name', 'normal_data', 'outliers'])
with open(data_dir + "train_author.json", encoding='utf-8') as f:
    train_author=json.load(f)
# pid_to_info sample: paper_id -> dict_keys(['id', 'title', 'authors', 'abstract', 'keywords', 'venue', 'year'])
with open(data_dir + "pid_to_info_all.json", encoding='utf-8') as f:
    pid_to_info=json.load(f)
# valid_author sample: author_id -> dict_keys(['name', 'papers'])
with open(data_dir + "ind_valid_author.json", encoding='utf-8') as f:
    valid_author=json.load(f)
# Submission skeleton: author_id -> {paper_id: placeholder} to be filled with scores.
with open(data_dir + "ind_valid_author_submit.json", encoding='utf-8') as f:
    submission=json.load(f)

def _paper_to_feats(feat):
    """Turn one paper record into a fixed-length numeric feature vector.

    Features: lengths of title/abstract/keywords/authors, the keywords
    length repeated (NOTE(review): the repeat likely was meant to be
    len(feat['venue']) — kept as-is to preserve the trained feature set),
    and the publication year, falling back to 2000 when missing/invalid.
    """
    lens = [len(feat['title']), len(feat['abstract']), len(feat['keywords']),
            len(feat['authors']), len(feat['keywords'])]
    try:
        year = int(feat['year'])
    except (KeyError, TypeError, ValueError):  # missing, None, or non-numeric year
        year = 2000
    return lens + [year]


train_feats=[]
labels=[]
for author_id, person_info in train_author.items():
    # Positive samples: papers confirmed to belong to this author.
    for text_id in person_info['normal_data']:
        train_feats.append(_paper_to_feats(pid_to_info[text_id]))
        labels.append(1)
    # Negative samples: papers wrongly attributed to this author.
    for text_id in person_info['outliers']:
        train_feats.append(_paper_to_feats(pid_to_info[text_id]))
        labels.append(0)
train_feats=np.array(train_feats)
labels=np.array(labels)
print(f"train_feats.shape:{train_feats.shape},labels.shape:{labels.shape}")
print(f"np.mean(labels):{np.mean(labels)}")
train_feats=pd.DataFrame(train_feats)
train_feats['label']=labels
train_feats.head()

valid_feats=[]
for author_id, person_info in valid_author.items():
    for text_id in person_info['papers']:
        feat=pid_to_info[text_id]
        # Same feature recipe as training: lengths of title/abstract/
        # keywords/authors, the keywords length repeated (kept to match
        # the training features), plus the publication year.
        lens = [len(feat['title']), len(feat['abstract']), len(feat['keywords']),
                len(feat['authors']), len(feat['keywords'])]
        try:
            year = int(feat['year'])
        except (KeyError, TypeError, ValueError):  # missing, None, or non-numeric year
            year = 2000
        valid_feats.append(lens + [year])
valid_feats=np.array(valid_feats)
print(f"valid_feats.shape:{valid_feats.shape}")
valid_feats=pd.DataFrame(valid_feats)
valid_feats.head()

# Feature columns: every column of the validation frame (the training
# frame has these same columns plus the label).
choose_cols = list(valid_feats.columns)


def fit_and_predict(model, train_feats=train_feats, test_feats=valid_feats, name=0):
    """Train `model` with stratified K-fold CV and predict on `test_feats`.

    NOTE: the DataFrame defaults are bound at definition time; that is
    intentional here — both globals exist before this `def` runs.

    Returns:
        oof_pred_pro: (n_train, 2) out-of-fold class probabilities.
        test_pred_pro: (num_folds, n_test, 2) per-fold test probabilities.
    """
    X = train_feats[choose_cols].copy()
    y = train_feats[Config.TARGET_NAME].copy()
    test_X = test_feats[choose_cols].copy()
    oof_pred_pro = np.zeros((len(X), 2))
    test_pred_pro = np.zeros((Config.num_folds, len(test_X), 2))

    # Stratified splitting preserves the class ratio in every fold.
    skf = StratifiedKFold(n_splits=Config.num_folds, random_state=Config.seed, shuffle=True)

    for fold, (train_index, valid_index) in enumerate(skf.split(X, y.astype(str))):
        print(f"name:{name},fold:{fold}")

        X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]
        y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]

        # Log every 100 rounds; stop if the fold's eval metric stalls for 100.
        model.fit(X_train, y_train, eval_set=[(X_valid, y_valid)],
                  callbacks=[log_evaluation(100), early_stopping(100)]
                  )

        oof_pred_pro[valid_index] = model.predict_proba(X_valid)
        # Predict the whole test set with this fold's model; averaged later.
        test_pred_pro[fold] = model.predict_proba(test_X)
    print(f"roc_auc:{roc_auc_score(y.values,oof_pred_pro[:,1])}")

    return oof_pred_pro, test_pred_pro
# Hyper-parameters adapted from: https://www.kaggle.com/code/daviddirethucus/home-credit-risk-lightgbm
lgb_params={
    "boosting_type": "gbdt",
    "objective": "binary",      # binary classification
    "metric": "auc",            # evaluate with AUC (matches early stopping)
    "max_depth": 12,
    "learning_rate": 0.05,
    "n_estimators": 3072,       # upper bound; early stopping trims it
    "colsample_bytree": 0.9,
    "colsample_bynode": 0.9,
    "random_state": Config.seed,
    "reg_alpha": 0.1,           # L1 regularization
    "reg_lambda": 10,           # L2 regularization
    "extra_trees": True,
    "num_leaves": 64,
    "verbose": -1,              # silence LightGBM logs (key was duplicated in the original)
    "max_bin": 255,
    }


# Train the LightGBM classifier with K-fold CV and collect predictions.
lgb_oof_pred_pro,lgb_test_pred_pro=fit_and_predict(model= LGBMClassifier(**lgb_params),name='lgb'
                                                  )
# Average the per-fold test probabilities and keep P(label=1) per paper.
test_preds=lgb_test_pred_pro.mean(axis=0)[:,1]


# Fill the submission skeleton with predicted probabilities. The papers
# were featurized in this same iteration order, so a running index lines
# the predictions up with the skeleton entries.
next_idx = 0
for author_id, papers in submission.items():
    for paper_id in papers:
        submission[author_id][paper_id] = test_preds[next_idx]
        next_idx += 1
with open('baseline.json', 'w', encoding='utf-8') as f:
    json.dump(submission, f, ensure_ascii=False, indent=4)
复制代码
相关推荐
武乐乐~几秒前
论文精读:YOLO-UniOW: Efficient Universal Open-World Object Detection
人工智能·yolo·目标检测
Leinwin1 分钟前
GPT-4.1和GPT-4.1-mini系列模型支持微调功能,助力企业级智能应用深度契合业务需求
人工智能
唐兴通个人2 分钟前
知名人工智能AI培训公开课内训课程培训师培训老师专家咨询顾问唐兴通AI在金融零售制造业医药服务业创新实践应用
人工智能
MVP-curry-萌神19 分钟前
FPGA图像处理(六)------ 图像腐蚀and图像膨胀
图像处理·人工智能·fpga开发
struggle202535 分钟前
ebook2audiobook开源程序使用动态 AI 模型和语音克隆将电子书转换为带有章节和元数据的有声读物。支持 1,107+ 种语言
人工智能·开源·自动化
深空数字孪生38 分钟前
AI+可视化:数据呈现的未来形态
人工智能·信息可视化
标贝科技1 小时前
标贝科技:大模型领域数据标注的重要性与标注类型分享
数据库·人工智能
aminghhhh1 小时前
多模态融合【十九】——MRFS: Mutually Reinforcing Image Fusion and Segmentation
人工智能·深度学习·学习·计算机视觉·多模态
格林威1 小时前
Baumer工业相机堡盟工业相机的工业视觉是否可以在室外可以做视觉检测项目
c++·人工智能·数码相机·计算机视觉·视觉检测
zeroporn1 小时前
在Mac M1/M2上使用Hugging Face Transformers进行中文文本分类(完整指南)
macos·分类·数据挖掘·nlp·transformer·预训练模型·文本分类