1473D. Program (preprocessing ^_^)

This post describes an efficient approach: by preprocessing the prefix values and the extrema of suffix partial sums, the minimum and maximum value reached after deleting an interval can be found in $O(1)$ per query, after $O(n)$ preprocessing.

Problem link

An obvious observation: as a contiguous run of $+$/$-$ instructions is executed, the value changes by $\pm 1$ at each step, so the set of values it passes through is a contiguous range of integers.

This means the whole process attains a minimum and a maximum, and every value in between is attained as well. For example, executing ++- from $0$ gives the values $0, 1, 2, 1$, which is exactly the range $[0, 2]$.

So the goal is to find the minimum and maximum value attained after deleting $[l, r]$.

Consider the two pieces separately: deleting $[l, r]$ splits the program into $[1, l-1]$ and $[r+1, n]$.

For $[1, l-1]$: precompute the running value $zhi[i]$ after each prefix, together with its prefix maximum $mx[i]$ and prefix minimum $mi[i]$; the extrema over this piece are then $mx[l-1]$ and $mi[l-1]$.

After the first piece, the current value is $zhi[l-1]$, so it remains to account for the effect of the segment $[r+1, n]$.

That is, we also need to precompute, for each start position $r+1$: the maximum partial sum $fmx[r+1]$ over all prefixes of the suffix (the most $+$-heavy prefix), and the minimum partial sum $fmi[r+1]$ (the most $-$-heavy prefix).

A single right-to-left scan, in the style of maximum/minimum subsegment sums anchored at the left endpoint, preprocesses both arrays in $O(n)$.
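Spelled out (notation mine, matching the arrays in the code below): let $d_i = +1$ if $a_i$ is + and $d_i = -1$ otherwise. The right-to-left scan is then

$$fmx[i] = \max(d_i,\ d_i + fmx[i+1]), \qquad fmi[i] = \min(d_i,\ d_i + fmi[i+1]), \qquad fmx[n+1] = fmi[n+1] = 0,$$

so $fmx[i]$ is the maximum of $d_i + \dots + d_k$ over all $k \ge i$: the highest the value can climb while executing $a[i..n]$, relative to its starting value (and $fmi[i]$ the lowest it can drop). The empty prefix, i.e. never moving away from $zhi[l-1]$, need not be covered here, because $zhi[l-1]$ is already accounted for by $mi[l-1]$ and $mx[l-1]$.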

The final minimum is

$$ansmi = \min\bigl(\,mi[l-1],\ zhi[l-1] + fmi[r+1]\,\bigr)$$

and the final maximum is

$$ansmx = \max\bigl(\,mx[l-1],\ zhi[l-1] + fmx[r+1]\,\bigr)$$

So each query is answered in $O(1)$. Since every value between $ansmi$ and $ansmx$ is attained, the count of distinct values is $ansmx - ansmi + 1$, which is what the code below prints.
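A small hand-checked example (mine, not from the original post): take $a = {}$-+-- ($n = 4$) and the query $(l, r) = (2, 3)$. The prefix pass gives $zhi = [0, -1, 0, -1, -2]$ (indices $0..4$), so $mi[1] = -1$, $mx[1] = 0$; the suffix pass gives $fmi[4] = fmx[4] = -1$. Hence $ansmi = \min(-1,\ -1 + (-1)) = -2$ and $ansmx = \max(0,\ -1 + (-1)) = 0$, and the answer is $0 - (-2) + 1 = 3$: the remaining program -- indeed passes through $0, -1, -2$.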

#include <bits/stdc++.h>
using namespace std;
const int maxn = 2e5+10;
int n, m, mi[maxn], mx[maxn], zhi[maxn], fmi[maxn], fmx[maxn];
char a[maxn];
int main()
{
	int t; scanf("%d", &t);
	while( t-- )
	{
		scanf("%d %d", &n, &m);
		scanf("%s", a+1);
		// Prefix pass: zhi[i] is the value after executing a[1..i];
		// mi[i]/mx[i] are the prefix minimum/maximum of zhi
		// (the zero-initialized globals give zhi[0] = mi[0] = mx[0] = 0).
		for(int i=1;i<=n;i++)
		{
			if( a[i]=='-' )	zhi[i] = zhi[i-1]-1;
			else	zhi[i] = zhi[i-1]+1;
			mi[i] = min( mi[i-1], zhi[i] );
			mx[i] = max( mx[i-1], zhi[i] );
		}
		// Suffix pass: fmi[i]/fmx[i] are the minimum/maximum partial sums
		// over all non-empty prefixes of a[i..n], relative to the start value.
		fmi[n+1] = fmx[n+1] = 0;
		for(int i=n;i>=1;i--)
		{
			int d = ( a[i]=='-' ? -1 : 1 );
			fmi[i] = min( d, fmi[i+1]+d );
			fmx[i] = max( d, fmx[i+1]+d );
		}
		// Each query in O(1): combine the prefix extrema of [1,l-1] with the
		// suffix extrema of [r+1,n], shifted by the current value zhi[l-1].
		for(int i=1;i<=m;i++)
		{
			int l, r; scanf("%d %d", &l, &r);
			int ansmi = min( zhi[l-1]+fmi[r+1], mi[l-1] );
			int ansmx = max( zhi[l-1]+fmx[r+1], mx[l-1] );
			// Every integer in [ansmi, ansmx] is attained, so count them.
			printf("%d\n", ansmx-ansmi+1);
		}
	}
}
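Using the hand-checked example above as a quick smoke test of the code (it reads $t$, then $n\ m$, the program string, and $m$ query lines):

Input:
1
4 1
-+--
2 3

Output:
3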