Python For Data Science
Scikit-Learn Cheat Sheet
Learn Scikit-Learn online at [Link]

> Scikit-learn

Scikit-learn is an open source Python library that implements a range of
machine learning, preprocessing, cross-validation and visualization
algorithms using a unified interface.

A Basic Example
>>> from sklearn import neighbors, datasets, preprocessing
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.metrics import accuracy_score
>>> iris = datasets.load_iris()
>>> X, y = iris.data[:, :2], iris.target
>>> X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=33)
>>> scaler = preprocessing.StandardScaler().fit(X_train)
>>> X_train = scaler.transform(X_train)
>>> X_test = scaler.transform(X_test)
>>> knn = neighbors.KNeighborsClassifier(n_neighbors=5)
>>> knn.fit(X_train, y_train)
>>> y_pred = knn.predict(X_test)
>>> accuracy_score(y_test, y_pred)
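The fitted classifier can then score a brand-new measurement; a minimal
follow-on sketch, assuming the example above has run (the two sepal values
below are made up for illustration), remembering to push new samples
through the same scaler:
>>> new_sample = [[5.0, 3.6]]                    # hypothetical sepal length/width
>>> knn.predict(scaler.transform(new_sample))    # predicted class label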
> Loading The Data
Also see NumPy & Pandas

Your data needs to be numeric and stored as NumPy arrays or SciPy sparse
matrices. Other types that are convertible to numeric arrays, such as
Pandas DataFrames, are also acceptable.
>>> import numpy as np
>>> X = np.random.random((10,5))
>>> y = np.array(['M','M','F','F','M','F','M','M','F','F','F'])
>>> X[X < 0.7] = 0

> Training And Test Data
>>> from sklearn.model_selection import train_test_split
>>> X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
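Pandas objects work the same way; a minimal sketch, assuming pandas is
installed (the column names are made up):
>>> import pandas as pd
>>> df = pd.DataFrame(X, columns=['f1','f2','f3','f4','f5'])
>>> X = df.to_numpy()    # back to a plain NumPy array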
> Preprocessing The Data

Standardization
>>> from sklearn.preprocessing import StandardScaler
>>> scaler = StandardScaler().fit(X_train)
>>> standardized_X = scaler.transform(X_train)
>>> standardized_X_test = scaler.transform(X_test)

Normalization
>>> from sklearn.preprocessing import Normalizer
>>> scaler = Normalizer().fit(X_train)
>>> normalized_X = scaler.transform(X_train)
>>> normalized_X_test = scaler.transform(X_test)

Binarization
>>> from sklearn.preprocessing import Binarizer
>>> binarizer = Binarizer(threshold=0.0).fit(X)
>>> binary_X = binarizer.transform(X)
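As a quick sanity check, assuming the standardization snippet above has
run, each feature of the standardized training set should end up with
roughly zero mean and unit variance:
>>> import numpy as np
>>> np.allclose(standardized_X.mean(axis=0), 0)    # True
>>> standardized_X.std(axis=0)                     # approx. 1 per column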
Encoding Categorical Features
>>> from sklearn.preprocessing import LabelEncoder
>>> enc = LabelEncoder()
>>> y = enc.fit_transform(y)

Imputing Missing Values
>>> from sklearn.impute import SimpleImputer
>>> imp = SimpleImputer(missing_values=0, strategy='mean')
>>> imp.fit_transform(X_train)

Generating Polynomial Features
>>> from sklearn.preprocessing import PolynomialFeatures
>>> poly = PolynomialFeatures(5)
>>> poly.fit_transform(X)
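Preprocessing steps can also be chained with an estimator so that a single
fit call imputes, scales and trains; a minimal sketch using
sklearn.pipeline and the classes imported above:
>>> from sklearn.pipeline import make_pipeline
>>> pipe = make_pipeline(SimpleImputer(missing_values=0, strategy='mean'),
                         StandardScaler(),
                         neighbors.KNeighborsClassifier(n_neighbors=5))
>>> pipe.fit(X_train, y_train)    # imputes, scales, then fits in one call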
> Create Your Model

Supervised Learning Estimators

Linear Regression
>>> from sklearn.linear_model import LinearRegression
>>> lr = LinearRegression()

Support Vector Machines (SVM)
>>> from sklearn.svm import SVC
>>> svc = SVC(kernel='linear')

Naive Bayes
>>> from sklearn.naive_bayes import GaussianNB
>>> gnb = GaussianNB()

KNN
>>> from sklearn import neighbors
>>> knn = neighbors.KNeighborsClassifier(n_neighbors=5)
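Every estimator exposes the same interface, so hyperparameters can be
inspected or changed uniformly; a quick sketch using the SVC above:
>>> svc.get_params()          # dict of current hyperparameters
>>> svc.set_params(C=10.0)    # update a hyperparameter in place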
Unsupervised Learning Estimators

Principal Component Analysis (PCA)
>>> from sklearn.decomposition import PCA
>>> pca = PCA(n_components=0.95)

K Means
>>> from sklearn.cluster import KMeans
>>> k_means = KMeans(n_clusters=3, random_state=0)
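With n_components=0.95, PCA keeps however many components are needed to
explain 95% of the variance; a quick sketch to verify this after fitting,
assuming X_train from above:
>>> pca.fit(X_train)
>>> pca.explained_variance_ratio_.sum()    # >= 0.95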
> Model Fitting

Supervised Learning
>>> lr.fit(X, y)                 # Fit the model to the data
>>> knn.fit(X_train, y_train)
>>> svc.fit(X_train, y_train)

Unsupervised Learning
>>> k_means.fit(X_train)                     # Fit the model to the data
>>> pca_model = pca.fit_transform(X_train)   # Fit to data, then transform it
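By scikit-learn convention, anything learned during fit is stored on the
estimator with a trailing underscore; a quick sketch, assuming the fits
above have run:
>>> lr.coef_                    # learned regression weights
>>> k_means.cluster_centers_    # one centroid per cluster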
> Prediction

Supervised Estimators
>>> y_pred = svc.predict(np.random.random((2,5)))   # Predict labels
>>> y_pred = lr.predict(X_test)                     # Predict labels
>>> y_pred = knn.predict_proba(X_test)              # Estimate probability of a label

Unsupervised Estimators
>>> y_pred = k_means.predict(X_test)                # Predict labels in clustering algos
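predict_proba returns one column per class, with each row summing to 1; a
quick sketch, assuming the KNN fit above:
>>> proba = knn.predict_proba(X_test)
>>> proba.shape          # (n_samples, n_classes)
>>> proba.sum(axis=1)    # each row sums to 1.0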
> Evaluate Your Model’s Performance

Classification Metrics

Accuracy Score
>>> knn.score(X_test, y_test)    # Estimator score method
>>> from sklearn.metrics import accuracy_score    # Metric scoring functions
>>> accuracy_score(y_test, y_pred)

Classification Report
>>> from sklearn.metrics import classification_report    # Precision, recall, f1-score and support
>>> print(classification_report(y_test, y_pred))

Confusion Matrix
>>> from sklearn.metrics import confusion_matrix
>>> print(confusion_matrix(y_test, y_pred))

Regression Metrics

Mean Absolute Error
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2]
>>> mean_absolute_error(y_true, y_pred)

Mean Squared Error
>>> from sklearn.metrics import mean_squared_error
>>> mean_squared_error(y_test, y_pred)

R² Score
>>> from sklearn.metrics import r2_score
>>> r2_score(y_true, y_pred)

Clustering Metrics

Adjusted Rand Index
>>> from sklearn.metrics import adjusted_rand_score
>>> adjusted_rand_score(y_true, y_pred)

Homogeneity
>>> from sklearn.metrics import homogeneity_score
>>> homogeneity_score(y_true, y_pred)

V-measure
>>> from sklearn.metrics import v_measure_score
>>> v_measure_score(y_true, y_pred)

Cross-Validation
>>> from sklearn.model_selection import cross_val_score
>>> print(cross_val_score(knn, X_train, y_train, cv=4))
>>> print(cross_val_score(lr, X, y, cv=2))
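cross_val_score defaults to each estimator’s own score method; other
metrics can be requested through the scoring parameter, for example:
>>> print(cross_val_score(knn, X_train, y_train, cv=4, scoring='f1_macro'))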
> Tune Your Model

Grid Search
>>> from sklearn.model_selection import GridSearchCV
>>> params = {"n_neighbors": np.arange(1,3),
              "metric": ["euclidean", "cityblock"]}
>>> grid = GridSearchCV(estimator=knn,
                        param_grid=params)
>>> grid.fit(X_train, y_train)
>>> print(grid.best_score_)
>>> print(grid.best_estimator_.n_neighbors)

Randomized Parameter Optimization
>>> from sklearn.model_selection import RandomizedSearchCV
>>> params = {"n_neighbors": range(1,5),
              "weights": ["uniform", "distance"]}
>>> rsearch = RandomizedSearchCV(estimator=knn,
                                 param_distributions=params,
                                 cv=4,
                                 n_iter=8,
                                 random_state=5)
>>> rsearch.fit(X_train, y_train)
>>> print(rsearch.best_score_)
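With the default refit=True, the best model is refit on the full training
set and available directly on the search object; a quick sketch:
>>> grid.best_params_       # winning hyperparameter combination
>>> grid.predict(X_test)    # delegates to the refit best estimator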
Learn Data Skills Online at [Link]