joblib model loading fails with KeyError: 0

Project scenario:

A model is loaded through the joblib library, and the following line raises an error:

classifier = joblib.load(classifier_path)

Problem description:

The full error output:

D:\ANACONDA\envs\pytorch-gpu\python.exe "D:/GUI (5)/GUI/test.py"
Traceback (most recent call last):
  File "D:/GUI (5)/GUI/test.py", line 4, in <module>
    x = joblib.load(code_path + "/Model_Weight/Bag_of_Features/details_files")
  File "D:\ANACONDA\envs\pytorch-gpu\lib\site-packages\joblib\numpy_pickle.py", line 587, in load
    obj = _unpickle(fobj, filename, mmap_mode)
  File "D:\ANACONDA\envs\pytorch-gpu\lib\site-packages\joblib\numpy_pickle.py", line 506, in _unpickle
    obj = unpickler.load()
  File "D:\ANACONDA\envs\pytorch-gpu\lib\pickle.py", line 1088, in load
    dispatch[key[0]](self)
KeyError: 0

Cause analysis:

Updating the joblib version fixes it. A silly mistake! The KeyError: 0 is raised from pickle's opcode dispatch table: the unpickler read a byte (0) that is not a valid pickle opcode. Most likely the model file was saved with a newer joblib whose on-disk format the older installed version could not parse. This was my previous version:

[screenshot: the old joblib version installed in the environment]
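
Before and after upgrading, you can confirm which version is actually installed in the environment. This is just the standard version attribute, nothing project-specific:

import joblib
print(joblib.__version__)  # should be at least as new as the joblib that saved the model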


Solution:

Upgrading to 1.3.1 fixed it. Silly!

[screenshot: joblib upgraded to 1.3.1 in the environment]
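
For reference, the upgrade is a single pip command, run inside the same conda environment (pytorch-gpu here) that produced the traceback:

pip install --upgrade joblib==1.3.1

To get a readable error instead of a bare KeyError the next time versions drift apart, a small guard around the load can help. This is only a sketch under my assumptions: classifier_path is the path from the scenario above, and (1, 3) is an assumed minimum version able to read the file.

import joblib

MIN_JOBLIB = (1, 3)  # assumption: the model was saved with joblib >= 1.3

def load_model(path):
    # Compare the installed major/minor version against the assumed minimum.
    installed = tuple(int(p) for p in joblib.__version__.split(".")[:2])
    if installed < MIN_JOBLIB:
        raise RuntimeError(
            f"joblib {joblib.__version__} is older than the version assumed "
            "to have saved this model; run: pip install --upgrade joblib"
        )
    return joblib.load(path)

classifier = load_model(classifier_path)  # classifier_path as in the scenario above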
