March 22, 2022

Python scikit-learn (sklearn) and skimage packages

print("scikit-learn version: {}". format(sklearn.__version__))

The scikit-learn package in Python ships with many built-in data sets, such as iris, the digits recognition data set, diabetes, breast cancer, etc.

from sklearn import datasets
iris = datasets.load_iris()
type(iris)  # sklearn.utils.Bunch
>>> X, y = iris.data, iris.target
digits = datasets.load_digits()
>>> print(digits.data)  
print(digits.DESCR)
X = digits.images.reshape((len(digits.images), -1))
from sklearn.datasets import load_diabetes
load_diabetes()
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
breast_cancer = sklearn.datasets.load_breast_cancer()
from sklearn.datasets import make_blobs  # older releases: from sklearn.datasets.samples_generator import make_blobs
X, y = make_blobs(n_samples=5000, centers=[[4,4], [-2, -1], [2, -3], [1, 1]], cluster_std=0.9)
data, labels = make_blobs(n_samples=1000, centers=4, n_features=2, random_state=0)
from sklearn.datasets import make_friedman1
X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
X, y = datasets.make_classification(n_samples=100000, n_features=20, n_informative=2, n_redundant=2)
from sklearn.datasets import fetch_mldata  # legacy loader, removed in newer releases; use fetch_openml (below) instead
mnist = fetch_mldata('MNIST original')
from sklearn.datasets import make_regression
X, y = make_regression(n_samples=100, n_features=2, noise=0.1)
from sklearn.datasets import fetch_lfw_people
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
from sklearn.datasets import fetch_20newsgroups
twenty_train = fetch_20newsgroups(subset='train', shuffle=True)
from sklearn.datasets import make_classification, make_blobs, make_moons
X, y = make_moons(n_samples=200)
from sklearn.datasets import fetch_openml
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
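
Putting a few of these loaders together, here is a minimal self-contained sketch (using the bundled iris data) that shows what a loaded Bunch contains:
from sklearn.datasets import load_iris

iris = load_iris()                  # returns a Bunch object
X, y = iris.data, iris.target       # features (150 x 4) and labels (150,)
print(X.shape, y.shape)
print(iris.feature_names)           # names of the four feature columns
print(iris.target_names)            # ['setosa' 'versicolor' 'virginica']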

train_test_split function in sklearn package in Python.
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=123)
X_trainset, X_testset, y_trainset, y_testset = train_test_split(X, y, test_size=0.33, stratify=y)
train_test_split(x, y, test_size=0.3, random_state=2019, stratify=y)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, train_size=0.75, test_size=0.25, random_state=101)
>>> train_test_split(y, shuffle=False)
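
A minimal sketch of a stratified split on the bundled iris data (the 25% test size and random_state are illustrative choices):
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
# hold out 25% for testing; stratify keeps the class proportions the same in both sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42, stratify=y)
print(X_train.shape, X_test.shape)    # (112, 4) (38, 4)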

Previously (before scikit-learn version 0.18), train_test_split was located in the cross_validation module of the scikit-learn package.
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, train_size=0.75, random_state=101)

Linear Regression ML models using Python sklearn package.
from sklearn import datasets, linear_model
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
reg = LinearRegression()
LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False)
ln = LinearRegression(normalize=True)

regr.fit(diabetes_X_train, diabetes_y_train)
reg = reg.fit(X, Y)
Y_pred = reg.predict(X_test)
diabetes_y_pred = regr.predict(diabetes_X_test)
rmse = np.sqrt(mean_squared_error(Y, Y_pred))
print("Mean squared error: %.2f" % mean_squared_error(diabetes_y_test, diabetes_y_pred))
r2 = reg.score(X, Y)
print('Variance score: %.2f' % r2_score(diabetes_y_test, diabetes_y_pred))
print('Coefficients: \n', regr.coef_)
print('Intercept: \n', regr.intercept_)
list(zip(feature_cols, lm2.coef_))

lm1 = smf.ols(formula='Sales ~ TV', data=data).fit()
lm1 = smf.ols(formula='Sales ~ TV + Radio + Newspaper', data=data).fit()
lm1.params
lm1.predict(X_new)
lm1.rsquared
lm1.summary()

sqft_model = graphlab.linear_regression.create(train_data, target='price', features=['sqft_living'], validation_set=None)
features_model = graphlab.linear_regression.create(train_data, target='price', features=['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode'], validation_set=None)
print(sqft_model.evaluate(test_data))
print(sqft_model.predict(house1))
print(model.predict(graphlab.SFrame(houses_dict)))
sqft_model.get('coefficients')
plt.plot(test_data['sqft_living'], test_data['price'], '.', test_data['sqft_living'], sqft_model.predict(test_data), '-')

plt.scatter(X_testset , y_testset, color="black")
plt.plot(X_testset, LinReg.predict(X_testset), linewidth=3, color="blue" )
plt.show()
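
For a complete, runnable linear-regression example, here is a small sketch on the bundled diabetes data (the variable names and split are illustrative, not from any specific project above):
from sklearn.datasets import load_diabetes
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split

X, y = load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
reg = LinearRegression().fit(X_train, y_train)    # least-squares fit
y_pred = reg.predict(X_test)
print("Mean squared error: %.2f" % mean_squared_error(y_test, y_pred))
print("R^2 score: %.2f" % r2_score(y_test, y_pred))
print("Coefficients:", reg.coef_, "Intercept:", reg.intercept_)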

Logistic Regression Machine Learning models using sklearn package in Python.
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
logsk = LogisticRegression(C=1e9)
clf = linear_model.LogisticRegression(C=1e5, solver='lbfgs')
logreg = LogisticRegression(C=1e5, solver='lbfgs', multi_class='multinomial')
clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X, y)
clf_log = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X_train_tfidf, target)
log_cv = LogisticRegression(C=1.0, n_jobs=-1, multi_class ='auto', penalty= 'l2',solver='newton-cg', tol= 10.0, class_weight='balanced', random_state=40)
clf_log = LogisticRegression(C=4, dual=True).fit(X_train_tfidf, target)  # dual=True requires the liblinear solver
clf = LogisticRegression(C=1e20, penalty='l1')  # the l1 penalty requires solver='liblinear' or 'saga'
lr = LogisticRegression(C=1e30, penalty='l2', tol=0.001, fit_intercept=True, intercept_scaling=1e30)
lr = LogisticRegression(C=10,tol=0.0001, random_state=51, n_jobs=-1, solver='liblinear', class_weight='balanced')

logreg.fit(X, Y)
logreg.fit(X_train[col], y_train)
y_pred = logreg.predict(x_test)
clf.predict(X[:2, :])
y_pred = logreg.predict_proba(X_test[col])
clf.predict_proba(X[:2, :])
prob_pos = clf.predict_proba(X_test)[:, 1]
y_pred = logreg.predict_log_proba(X_test)
pd.DataFrame(list(zip(X.columns, np.transpose(model.coef_))))

for c in [0.01, 0.05, 0.25, 0.5, 1]:
    lr = LogisticRegression(C=c)
    lr.fit(X_train, y_train)
    print ("Accuracy for C=%s: %s" % (c, accuracy_score(y_val, lr.predict(X_val))))

clf.score(X, y)
print(metrics.accuracy_score(y_test, predicted))
print(metrics.roc_auc_score(y_test, probs[:, 1]))
logit_roc_auc = roc_auc_score(y_test, logreg.predict(X_test))
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1])
fpr, tpr, _ = metrics.roc_curve(y_val, y_pred_binary)
plt.plot(fpr, tpr, label="data 1, auc="+str(metrics.roc_auc_score(y_test, y_pred_proba)))
cm = metrics.confusion_matrix(y_test, predictions)
sns.heatmap(pd.DataFrame(metrics.confusion_matrix(y_test, y_pred)), annot=True, cmap="YlGnBu", fmt='g')
print(metrics.classification_report(y_test, predicted))
scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)

prob_pos = clf.decision_function(X_test)
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))

grid={"C":np.logspace(-3,3,7), "penalty":["l2"],"tol":np.logspace(-3,3,7), "solver":["newton-cg", "lbfgs", "liblinear", "sag", "saga"],'multi_class':["auto"],'max_iter':[1000]} # l1 lasso l2 ridge
logreg_cv=GridSearchCV(LogisticRegression(), grid, cv=10)
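
A minimal end-to-end logistic-regression sketch on the bundled breast-cancer data (solver and C are illustrative choices):
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, stratify=y)
logreg = LogisticRegression(C=1.0, solver='liblinear').fit(X_train, y_train)
y_pred = logreg.predict(X_test)
y_proba = logreg.predict_proba(X_test)[:, 1]      # probability of the positive class
print("Accuracy:", accuracy_score(y_test, y_pred))
print("ROC AUC :", roc_auc_score(y_test, y_proba))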

>>> from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(loss='hinge', penalty='l2',   alpha=1e-3, random_state=42,  max_iter=5, tol=None)
clf = linear_model.SGDClassifier(loss='hinge')
alpha_optimal = linear_model.SGDClassifier(alpha=0.00001, loss='hinge', penalty='l1')

Recursive Feature Elimination (RFE), used as a feature-selection mechanism for machine learning in Python.
from sklearn.feature_selection import RFE
rfe = RFE(logreg, n_features_to_select=13)
selector = RFE(estimator, n_features_to_select=5, step=1)
rfe = RFE(estimator=svc, n_features_to_select=1, step=1)
rfe = rfe.fit(X, y)
rfe = rfe.fit(data_clean.iloc[:,:-1].values, data_clean.iloc[:,-1].values)
print(rfe.support_)           # Printing the boolean results
X_train.columns[rfe.support_]
print(rfe.ranking_)           # Printing the ranking
ranking = rfe.ranking_.reshape(digits.images[0].shape)
plt.matshow(ranking, cmap=plt.cm.Blues)
from sklearn.feature_selection import RFECV
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2), scoring='accuracy')
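
A small RFE sketch, selecting two of the four iris features with a logistic-regression estimator (all choices illustrative):
from sklearn.datasets import load_iris
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)
rfe = RFE(estimator=LogisticRegression(max_iter=1000), n_features_to_select=2, step=1).fit(X, y)
print(rfe.support_)    # boolean mask of the selected features
print(rfe.ranking_)    # rank 1 = selected; higher ranks were eliminated earlier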

from sklearn import metrics
print("Accuracy: "), metrics.accuracy_score(y_testset, pred)
from sklearn.metrics import homogeneity_score, completeness_score, v_measure_score, adjusted_rand_score, adjusted_mutual_info_score, silhouette_score
from sklearn.metrics import accuracy_score
metrics.accuracy_score( y_pred_final.Churn, y_pred_final.predicted)
accuracy = accuracy_score(Y_test, Y_pred_test)

Generating confusion matrix using scikit-learn package in Python.
from sklearn.metrics import confusion_matrix
confusion_matrix(y_true, y_pred)
confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
confusion = metrics.confusion_matrix( y_pred_final.Churn, y_pred_final.predicted )
TN = confusion[0, 0]  # true negatives (rows = actual class, columns = predicted class, positive class = 1)
FP = confusion[0, 1]  # false positives
FN = confusion[1, 0]  # false negatives
TP = confusion[1, 1]  # true positives
accuracy = (TP + TN) / float(confusion.sum())
sensitivity = TP / float(TP + FN)
specificity = TN / float(TN + FP)
fpr = FP / float(TN + FP)
fnr = FN / float(TP + FN)
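
A quick worked example with toy labels, confirming scikit-learn's convention that rows are actual classes, columns are predicted classes, and class 1 is treated as positive:
from sklearn.metrics import confusion_matrix

y_true = [0, 0, 1, 1, 1, 0, 1, 0]
y_pred = [0, 1, 1, 1, 0, 0, 1, 0]
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print(tn, fp, fn, tp)               # 3 1 1 3
sensitivity = tp / float(tp + fn)   # recall of the positive class
specificity = tn / float(tn + fp)
print(sensitivity, specificity)     # 0.75 0.75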

Finding the ROC (Receiver Operating Characteristic) curve and the AUC (Area Under the Curve).
fpr, tpr, thresholds = metrics.roc_curve( actual, probs, drop_intermediate = False )
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred_proba[:,1])
roc_auc = auc(false_positive_rate, true_positive_rate)
auc_score = metrics.roc_auc_score( actual, probs )
print("PRECISION SCORE :",metrics.precision_score(y_test, y_pred_class))
print("RECALL SCORE :", metrics.recall_score(y_test, y_pred_class))
print("F1 SCORE :",metrics.f1_score(y_test, y_pred_class))
from sklearn.metrics import f1_score
F1 = 2 * (precision * recall) / (precision + recall)
f1_score(y_true, y_pred)
f1_score(y_true, y_pred, average='macro') 
f1_score(y_true, y_pred, average='micro') 
f1_score(y_true, y_pred, average='weighted')
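
A small sketch computing an ROC curve and its AUC from toy predicted probabilities (numbers purely illustrative):
from sklearn.metrics import auc, roc_auc_score, roc_curve

y_true = [0, 0, 1, 1]
y_score = [0.1, 0.4, 0.35, 0.8]            # predicted probabilities of class 1
fpr, tpr, thresholds = roc_curve(y_true, y_score)
print(auc(fpr, tpr))                       # 0.75
print(roc_auc_score(y_true, y_score))      # same value, computed directly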

from sklearn.metrics import classification_report
print("KNN", classification_report(y_test, knn_pred, target_names=None))

from sklearn.model_selection import KFold, validation_curve, cross_val_score
kf = KFold(n_splits=5)
kf = KFold(n_splits=4, shuffle=True)
folds = KFold(n_splits = 6, shuffle = True, random_state = 4)
kf = KFold(891, n_folds=10)  # pre-0.18 API from sklearn.cross_validation; newer versions use KFold(n_splits=10)

cv_results = cross_val_score(model, X_train, y_train, cv = folds, scoring = 'accuracy')
cv_score = np.sqrt(-cross_val_score(estimator=rf, X=x_train, y=y_train, cv=3, scoring = make_scorer(mean_squared_error, False)))

from sklearn.model_selection import StratifiedKFold
fold = StratifiedKFold(n_splits=20, shuffle=True, random_state=2019)
skf = StratifiedKFold(n_splits=5, random_state=2018, shuffle=True)

from sklearn.model_selection import ShuffleSplit
ss = ShuffleSplit(n_splits=10, test_size=0.2)
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)

from sklearn.model_selection import GridSearchCV
grid_search_ABC = GridSearchCV(ABC, cv = 3, param_grid={"base_estimator__max_depth" : [2, 5], "n_estimators": [200, 400, 600]}, scoring = 'roc_auc', return_train_score=True, verbose = 1)
model_cv = GridSearchCV(estimator = Ridge(), param_grid = params, scoring= 'neg_mean_absolute_error', cv = folds, return_train_score=True, verbose = 1)
model_cv = GridSearchCV(estimator = SVC(), param_grid = params, scoring= 'accuracy', cv = folds, verbose = 1, return_train_score=True)
tree = GridSearchCV(DecisionTreeClassifier(criterion = "gini", random_state = 100), {'max_depth': range(1, 40)}, cv=5, scoring="accuracy")
rf = GridSearchCV(RandomForestClassifier(), {'max_depth': range(2, 20, 5)}, cv=5,  scoring="accuracy")
CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5)

grid_obj.fit(X_train, y_train)
predicted = clf.predict(x_val)

grid_search_GBC.cv_results_
sorted(clf.cv_results_.keys())
grid.cv_results_['mean_test_score']
print(CV_rfc.best_estimator_)
print(CV_rfc.best_params_)
print(grid.best_score_)

from sklearn.model_selection import RandomizedSearchCV  # older releases: from sklearn.grid_search import RandomizedSearchCV
rand = RandomizedSearchCV(knn, param_dist, cv=10, scoring='accuracy', n_iter=10, random_state=5)
rand = RandomizedSearchCV(rf, param_dist, cv=10, scoring='accuracy', n_iter=len(maxFeatures), random_state=10)
print(rand.best_estimator_)
rand.grid_scores_  # older API; newer versions expose rand.cv_results_ instead
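
A compact cross-validation and grid-search sketch, here with a KNN classifier on iris and an illustrative parameter grid:
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV, KFold, cross_val_score
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
folds = KFold(n_splits=5, shuffle=True, random_state=4)
print(cross_val_score(KNeighborsClassifier(), X, y, cv=folds, scoring='accuracy').mean())
grid = GridSearchCV(KNeighborsClassifier(), {'n_neighbors': list(range(1, 11))}, cv=folds, scoring='accuracy')
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)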

SVM (Support Vector Machine) models using sklearn package in Python.
from sklearn import svm
SVC (Support Vector Classifier) Machine Learning models using Python sklearn package.
from sklearn.svm import SVC
mdl = SVC()
model = SVC(C = 1)
model = svm.SVC(kernel='linear', C=1, gamma=1)
clf = svm.SVC(gamma=0.001, C=100)
sklearn.svm.SVC(C=1.0, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, random_state=None)
svc = svm.SVC(kernel='linear', C=1,gamma=0).fit(X, y)
poly_svm = SVC(kernel='poly', C=C, degree=3).fit(X_tr, Y_tr)
svc = SVC(kernel="rbf", C=1)
>>> clf = svm.SVC(gamma='scale')
svm_clf = SVC(gamma=0.1, C=0.01, kernel="poly", degree=3, coef0=10.0, probability=True)
model = SVC(kernel='sigmoid', C=1, gamma=1)

model.fit(X, y)
mdl.fit(ds.data, ds.target)
>>> clf.set_params(kernel='linear').fit(X, y)  
>>> clf.set_params(kernel='rbf', gamma='scale').fit(X, y)  
predicted= model.predict(x_test)
predicted = mdl.predict(ds.data)

model.score(X, y)
clf.score(X_test, y_test)
print(metrics.classification_report(ds.target, predicted))
confusion_matrix(y_true=y_test, y_pred=y_pred)
pd.crosstab(y_test, y_pred)
print("accuracy", metrics.accuracy_score(y_test, y_pred)) # accuracy
print("precision", metrics.precision_score(y_test, y_pred)) # precision
print("recall", metrics.recall_score(y_test, y_pred)) # recall/sensitivity

from sklearn.svm import LinearSVC
clf = LinearSVC()
classifier_liblinear = svm.LinearSVC()
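
A minimal SVC sketch on the bundled digits data (the kernel parameters gamma=0.001 and C=100 are illustrative):
from sklearn import svm
from sklearn.datasets import load_digits
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(gamma=0.001, C=100).fit(X_train, y_train)
print(clf.score(X_test, y_test))                           # mean accuracy on the test set
print(classification_report(y_test, clf.predict(X_test)))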

SVR (Support Vector Regression) models using sklearn package in Python.
from sklearn.svm import SVR
svr_rbf = SVR(kernel='rbf', C=1e3)
estimator = SVR(kernel="linear")
svr_linear = SVR(kernel='linear', C=1e3)
svr_sigmoid = SVR(kernel='sigmoid', C=1e3)

clf = svm.SVR(kernel='rbf', C=100, gamma=0.001)
svr_rbf.fit(X, y)
y_pred_sigmoid = svr_sigmoid.predict(X)

from sklearn.linear_model import Perceptron
p = Perceptron(random_state=42, max_iter=10, tol=0.001)
p.fit(X, y)
pred = p.predict([value])

from sklearn.neural_network import MLPClassifier
MLP (Multi-Layer Perceptron) is a feedforward artificial neural network (ANN).
clf_NN = MLPClassifier(random_state=2019)
ml = MLPClassifier(hidden_layer_sizes=(100, 100, 100,))
clf_NN = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(10, 10, 10), max_iter=1000)
>>> clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(15,), random_state=1)
>>> clf = MLPClassifier(hidden_layer_sizes=(15,), random_state=1, max_iter=1, warm_start=True)
classifier = MLPClassifier(hidden_layer_sizes=(150,100,50), max_iter=300, activation = 'relu',solver='adam',random_state=1)
clf = MLPClassifier(hidden_layer_sizes=(10, 10), activation='logistic', solver='lbfgs', early_stopping=True)  # layer sizes must be positive; the parameter is 'solver', not 'algorithm'
mlp = MLPClassifier(solver='adam', activation='relu', alpha=1e-4, hidden_layer_sizes=(50,50,50), random_state=1, max_iter=11, verbose=10, learning_rate_init=0.1)
clf = MLPClassifier(hidden_layer_sizes=(100,100,100), max_iter=500, alpha=0.0001,  solver='sgd', verbose=10,  random_state=21,tol=0.000000001)
clf_NN.fit(x_train, y_train)
predict_NN = clf_NN.predict(x_val)
predictproba_NN = clf_NN.predict_proba(x_val)[:,1]
print (mlp.score(x_test,y_test))
print (mlp.n_layers_)
print (mlp.n_iter_)
print (mlp.loss_)
[coef.shape for coef in clf.coefs_]
print("weights between input and first hidden layer:", clf.coefs_[0])
print("\nweights between first hidden and second hidden layer:", clf.coefs_[1])

parameter_space = {
    'hidden_layer_sizes': [(50,50,50), (50,100,50), (100,)],
    'activation': ['tanh', 'relu'],
    'solver': ['sgd', 'adam'],
    'alpha': [0.0001, 0.05],
    'learning_rate': ['constant','adaptive'],
}
clf = GridSearchCV(mlp, parameter_space, n_jobs=-1, cv=3)
print('Best parameters found:\n', clf.best_params_)
print(clf.cv_results_['mean_test_score'])
print(clf.cv_results_['std_test_score'])

>>> from sklearn.multiclass import OneVsRestClassifier
>>> classif = OneVsRestClassifier(estimator=SVC(gamma='scale', random_state=0))
m = OneVsRestClassifier(LogisticRegression(class_weight='balanced',random_state=1994))
LR = OneVsRestClassifier(SGDClassifier(loss='log', alpha=0.00001, penalty='l1'))

from sklearn.preprocessing import Binarizer
bn = Binarizer(threshold=0.9)
popsong_df['pd_watched'] = bn.transform([popsong_df['listen_count']])[0]
>>> transformer = Binarizer().fit(X)
>>> transformer.transform(X)
onehot  = Binarizer()
vectors = onehot.fit_transform(vectors.toarray())
binariser = Binarizer(copy=True)
scores = Binarizer(threshold=1 / 3).transform(scores)
binarizer = Binarizer(threshold=0, copy=True)
binarizer.fit_transform(X.f3.values.reshape(-1, 1))
train_binarised = pd.DataFrame(preprocessing.Binarizer().fit_transform(data), columns=data.columns)
>>> from sklearn.preprocessing import LabelBinarizer
>>> y = LabelBinarizer().fit_transform(y)
LabelBinarizer().fit_transform(X)
>>> from sklearn.preprocessing import MultiLabelBinarizer
>>> y = MultiLabelBinarizer().fit_transform(y)

from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
LabelEncoder().fit_transform(X)
le = preprocessing.LabelEncoder()
df_categorical = df_categorical.apply(le.fit_transform)
item_le.fit(test_items)
item_dm = dict(zip(test_items, item_le.transform(test_items)))
encoded_values = encoder.fit_transform(data_frame[target_feature].values)
decoded = encoder.inverse_transform(data_frame[target_feature].values)
from sklearn.preprocessing import OneHotEncoder
onehot_encoder = OneHotEncoder(sparse=False)
onehot = OneHotEncoder(dtype=np.int, sparse=True)
OneHotEncoder().fit_transform(X).toarray()
y_OH_train = enc.fit_transform(np.expand_dims(Y_train, 1)).toarray()
enc.fit(train["sentiment"].values.reshape(-1, 1))

from sklearn.preprocessing import OrdinalEncoder
encoder = OrdinalEncoder()
encoder = OrdinalEncoder(categories=[['low', 'medium', 'high']])  # categories is a list of lists, one per feature
X.edu_level = encoder.fit_transform(X.edu_level.values.reshape(-1, 1))

from sklearn.preprocessing import KBinsDiscretizer
disc = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')  # valid encodings: 'onehot', 'onehot-dense', 'ordinal'
disc.fit_transform(X)

from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit_transform(X)
data_with_imputed_values = my_imputer.fit_transform(original_data)

from sklearn.preprocessing import Imputer  # legacy API, removed in newer releases in favour of SimpleImputer (above)
imr = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp = Imputer(strategy='mean')

from sklearn.impute import MissingIndicator
indicator = MissingIndicator(missing_values=np.NaN)
indicator = indicator.fit_transform(X)

from imblearn.under_sampling import RandomUnderSampler  # from the imbalanced-learn package
random_undersampler = RandomUnderSampler()
X_res, y_res = random_undersampler.fit_sample(X_train, y_train)  # newer imblearn releases: fit_resample

Decision Tree models using Python scikit-learn package.
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
dt_clf_4 = DecisionTreeClassifier(max_depth=4)
skullsTree = DecisionTreeClassifier(criterion="entropy")
shallow_tree = DecisionTreeClassifier(max_depth=2, random_state = 100)
clf_gini = DecisionTreeClassifier(criterion = "gini", random_state = 100, max_depth=10, min_samples_leaf=50, min_samples_split=50)

dt_clf_4.fit(X_train, y_train)
y_pred_4 = dt_clf_4.predict(X_test)
accuracy = float(np.sum(y_pred_4==y_test))/y_test.shape[0]
print("DecisionTrees's Accuracy: "), metrics.accuracy_score(y_testset, predTree)
print("best accuracy", grid_search.best_score_)
print(grid_search.best_estimator_)

To visualize the decision tree:
from io import StringIO  # older releases: from sklearn.externals.six import StringIO
import pydotplus
import matplotlib.image as mpimg
dot_data = StringIO()
filename = "skulltree.png"
out = tree.export_graphviz(skullsTree, feature_names=featureNames,   out_file=dot_data, class_names= np.unique(y_trainset), filled=True,  special_characters=True, rotate=False)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())  
graph.write_png(filename)
img = mpimg.imread(filename)
plt.figure(figsize=(100, 200))
plt.imshow(img, interpolation='nearest')

from sklearn.tree import export_graphviz
export_graphviz(dt_default, out_file=dot_data, feature_names=features, filled=True, rounded=True)
import pydot, graphviz
graph = pydot.graph_from_dot_data(dot_data.getvalue())

Random Forest Machine Learning models using scikit-learn package in Python.
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
forest=RandomForestClassifier(n_estimators=10, criterion="entropy")
rf = RandomForestClassifier(max_depth=4)
rfc = RandomForestClassifier(bootstrap=True, max_depth=10, min_samples_leaf=100, min_samples_split=200, max_features=10, n_estimators=100)
rf = RandomForestClassifier(n_estimators=800, random_state=2019, min_samples_split=2, min_samples_leaf=2, criterion="entropy", bootstrap=False)
rfc = RandomForestClassifier(n_jobs=-1, max_features= 'sqrt' , n_estimators=50, oob_score = True) 
m = RandomForestClassifier(class_weight='balanced', random_state=1994, max_depth=17, max_features=50000)
rf = RandomForestClassifier( n_estimators=500, criterion='gini', max_depth=100, min_samples_split=40, min_samples_leaf=10, min_weight_fraction_leaf=0.2, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, random_state=0, verbose=0, warm_start=False, class_weight='balanced' )
rf_clf = RandomForestClassifier(n_estimators=500, max_features=0.25, criterion="entropy", class_weight="balanced")
rf_clf = RandomForestClassifier(n_estimators=300, max_features="sqrt", criterion="gini", min_samples_leaf=5, class_weight="balanced")

forest.fit(X_trainset, y_trainset)
predForest = forest.predict(X_testset)
print("RandomForests's Accuracy: "), metrics.accuracy_score(y_testset, predForest)
print(forest.estimators_)
print(rf.n_classes_)
print(rf.n_features_)
print(rf.n_outputs_)
print(clf.feature_importances_)
indices = np.argsort(rf_clf.feature_importances_)[::-1]
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test, predictions))
scores = rf.cv_results_
plt.plot(scores["param_max_depth"], scores["mean_train_score"], label="training accuracy")
plt.plot(scores["param_max_depth"], scores["mean_test_score"], label="test accuracy")
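
A short random-forest sketch with feature importances, using the bundled breast-cancer data and illustrative hyperparameters:
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

data = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, random_state=0)
forest = RandomForestClassifier(n_estimators=100, random_state=0).fit(X_train, y_train)
print(forest.score(X_test, y_test))                         # accuracy on the held-out set
indices = np.argsort(forest.feature_importances_)[::-1]     # most important features first
print([data.feature_names[i] for i in indices[:5]])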

To visualize a single decision tree in a forest:
from IPython.display import Image  
from io import StringIO  # older releases: from sklearn.externals.six import StringIO
import pydot
dot_data = StringIO()
#Replace the '&' below with the tree number
tree.export_graphviz(skullsForest[&], out_file=dot_data, feature_names=featureNames, class_names=targetNames, filled=True, rounded=True, special_characters=True, leaves_parallel=True)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())

from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators = 1000, random_state = 42)
rf_small = RandomForestRegressor(n_estimators=10, max_depth = 3)
rf = RandomForestRegressor(n_estimators=3500, criterion='mse', max_leaf_nodes=3000, max_features='auto', oob_score=True)
rf = RandomForestRegressor(n_jobs=-1, random_state=2019, n_estimators=160, oob_score=True, max_features=0.5, 
                          max_depth=None, min_samples_leaf=2, max_leaf_nodes=250, min_impurity_decrease=0.00001, min_impurity_split=None)
rf.fit(train_features, train_labels)
predictions = rf.predict(test_features)
rf.score(x_train, y_train)

gridCV_RF = GridSearchCV(estimator = PipeRF, 
                         param_grid = param_grid_RF,
                         scoring = 'neg_mean_squared_error',
                         cv = 3)
print(gridCV_RF.best_params_)
print('Score: ', (gridCV_RF.best_score_ * -1) ** 0.5)
model_RF = gridCV_RF.best_estimator_.fit(train_features_full.fillna(-1), train_labels_full)
result_RF = model_RF.predict(train_features_full.fillna(-1))

from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier()
extra = make_pipeline(StandardScaler(), ExtraTreesClassifier())
extra = ExtraTreesClassifier(n_jobs=10)

from sklearn.ensemble import VotingClassifier
vc = VotingClassifier(estimators=models)
clf = VotingClassifier([ ('svm',svm) , ('ovr' , ovr) ] , voting='hard')
voting_clf = VotingClassifier([("xgb", xgb_clf), ("svm", svm_clf), ("rf", rf_clf)], voting="soft", flatten_transform=True)
votingC = VotingClassifier(estimators=[('rfc', RFC_best), ('svc', SVMC_best), ('gbc',GBC_best)], voting='soft', n_jobs=4)
votingC = votingC.fit(X_train, Y_train)
voting_clf.fit(X_res, y_res)
xgb_model, svm_model, rf_model = voting_clf.estimators_

K-nearest neighbors (KNN) Machine Learning models using sklearn package in Python.
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=6)
neigh = KNeighborsClassifier(n_neighbors=1)
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=90, p=2, weights='uniform')
clf_knn = KNeighborsClassifier(n_neighbors=5, leaf_size=15, weights='distance').fit(X_train_tfidf, y_train)

knn.fit(iris['data'], iris['target'])
knn.predict(X_new)
labels = model.fit_predict(samples)  # fit_predict belongs to clustering estimators such as KMeans, not KNeighborsClassifier
knn.score(X_test, y_test)

from mlxtend.classifier import StackingCVClassifier
stack_gen_6 = StackingCVClassifier(classifiers=(xgboost, lightgbm, gbr, rf), meta_classifier=gbr, use_features_in_secondary=True)

Creating unsupervised K-Means clustering models using the sklearn library.
from sklearn import cluster
from sklearn.cluster import KMeans
model = KMeans(n_clusters=3)
clf = cluster.KMeans(init='k-means++', n_clusters=10, random_state=42)
k_means = KMeans(init = "k-means++", n_clusters = 4, n_init = 12)
classmodel_clus = KMeans(n_clusters=3, init='random', n_init=10, max_iter=50).fit(RFM_norm1)
model_clus = KMeans(n_clusters = num_clusters, max_iter=50)

k_means.fit(X)
k_means.labels_
k_means.cluster_centers_
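
A minimal K-Means sketch on synthetic blobs (cluster count and other parameters illustrative):
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=4, cluster_std=0.9, random_state=0)
k_means = KMeans(n_clusters=4, init='k-means++', n_init=10, random_state=0).fit(X)
print(k_means.labels_[:10])        # cluster index assigned to the first ten samples
print(k_means.cluster_centers_)    # one centroid per cluster
print(k_means.inertia_)            # within-cluster sum of squared distances
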
from scipy.cluster.hierarchy import linkage, dendrogram, cut_tree
mergings = linkage(RFM_norm1, method = "single", metric='euclidean')
dendrogram(mergings)
from sklearn.manifold import Isomap
X_iso = Isomap(n_neighbors=10).fit_transform(X_train)
from sklearn.cluster import AgglomerativeClustering
agglom = AgglomerativeClustering(n_clusters = 4, linkage = 'average')
agglom.fit(X2, y2)
from sklearn.cluster import DBSCAN
db = DBSCAN(eps=epsilon, min_samples=minimumSamples).fit(X)

from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures()
poly = PolynomialFeatures(2)
poly = PolynomialFeatures(interaction_only=True)
poly = PolynomialFeatures(degree=3, interaction_only=True)
pf = PolynomialFeatures(degree=2, interaction_only=False, include_bias=False)
poly = PolynomialFeatures(degree=3, include_bias=False)
pl = PolynomialFeatures(degree=2, include_bias=True, interaction_only=False, order='C')
pipeline = Pipeline([('poly_features', PolynomialFeatures(degree=degree)), ('model', LinearRegression())])
poly.fit_transform(Xtrain)
poly.transform(Xtest)
a_poly = poly.fit_transform(a)

a_poly = a_poly.flatten()
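
A tiny worked example of what PolynomialFeatures actually generates for a single two-feature row:
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

a = np.array([[2, 3]])
print(PolynomialFeatures(degree=2).fit_transform(a))                          # [[1. 2. 3. 4. 6. 9.]] -> 1, x1, x2, x1^2, x1*x2, x2^2
print(PolynomialFeatures(degree=2, interaction_only=True).fit_transform(a))   # [[1. 2. 3. 6.]]       -> drops the pure powers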

PCA (Principal Component Analysis) Machine Learning models using sklearn package.
from sklearn import decomposition
from sklearn.decomposition import PCA
pca = decomposition.PCA(n_components=2)

pca = PCA(n_components=50)
pca_again = PCA(0.90)
X_pca = PCA(n_components=2).fit_transform(X_train)
>>> pca = PCA(n_components=2, svd_solver='full')
>>> pca = PCA(n_components=1, svd_solver='arpack')

pca.fit(X_new)
PCA_X = pca.transform(X_new)
projected = pca.fit_transform(digits.data)
X_new = pca.inverse_transform(X_pca)
pca.components_
pca.explained_variance_
pca.explained_variance_ratio_
pcs_df = pd.DataFrame({'PC1':pca.components_[0],'PC2':pca.components_[1], 'Feature':colnames})
plt.scatter(pcs_df.PC1, pcs_df.PC2)
principalDf = pd.DataFrame(data = projected, columns = ['principal component 1', 'principal component 2'])
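
A small PCA sketch on the iris data, showing the explained-variance ratios of the first two components:
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA

X, _ = load_iris(return_X_y=True)
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)              # project onto the first two principal components
print(X_pca.shape)                        # (150, 2)
print(pca.explained_variance_ratio_)      # roughly [0.92, 0.05]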

from sklearn.decomposition import IncrementalPCA
pca_final = IncrementalPCA(n_components=16)
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
corrmat = np.corrcoef(df_train_pca.transpose())
print("max corr:", corrmat_nodiag.max(), ", min corr: ", corrmat_nodiag.min(),)

from sklearn.decomposition import RandomizedPCA  # legacy API; newer releases use PCA(svd_solver='randomized')
pca = RandomizedPCA(150)
pca = RandomizedPCA(150).fit(faces.data)
pca.fit(faces.data)
components = pca.transform(faces.data)
projected = pca.inverse_transform(components)

from sklearn.decomposition import TruncatedSVD
lsa = TruncatedSVD(n_components=2)
lsa_scores = lsa.fit_transform(test_data)

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
lda = LinearDiscriminantAnalysis()

Standardisation: rescaling the values of a feature so that they have a mean of 0 and a standard deviation of 1, i.e. z = (x - mean) / std. sklearn.preprocessing.StandardScaler can be used to perform standardisation.
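
A quick sanity check (with made-up numbers) that StandardScaler applies z = (x - mean) / std column by column:
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array([[1.0], [2.0], [3.0], [4.0]])
scaler = StandardScaler().fit(X)
print(scaler.mean_, scaler.scale_)                      # [2.5] [1.118...]
print(scaler.transform(X).ravel())                      # [-1.342 -0.447  0.447  1.342]
print(((X - X.mean(axis=0)) / X.std(axis=0)).ravel())   # same values computed by hand
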
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
StandardScaler(copy=True, with_mean=True, with_std=True)
scaler.mean_
scaler.transform(samples)
StandardScaler().fit_transform(X)
scaler.fit_transform(X.f3.values.reshape(-1, 1))
scaled_train = pd.DataFrame(preprocessing.StandardScaler().fit_transform(train_new), columns=train_new.columns)
saleprice_scaled = StandardScaler().fit_transform(train['SalePrice'][:, np.newaxis])
scaler.fit(train.item_price.values.reshape(-1, 1))  # .as_matrix() was removed in newer pandas; use .values
X_scaled_train = scaler.fit_transform(X_train)
X_scaled_test = scaler.transform(X_test)
train.item_price = scaler.transform(train.item_price.values.reshape(-1, 1))
predict_test = scaler.inverse_transform(predict_test)
cov_matrix = np.cov(X_scaled_train.T)
print('Covariance Matrix \n%s', cov_matrix)
eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)
print('Eigen Vectors \n%s', eigenvectors)
print('\n Eigen Values \n%s', eigenvalues)

Normalisation (min-max scaling): rescaling all values of a feature to a fixed range, typically 0 to 1. sklearn.preprocessing.MinMaxScaler can be used to perform this.
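
A quick check (again with made-up numbers) that MinMaxScaler applies (x - min) / (max - min) per column:
import numpy as np
from sklearn.preprocessing import MinMaxScaler

X = np.array([[1.0], [2.0], [4.0]])
print(MinMaxScaler(feature_range=(0, 1)).fit_transform(X).ravel())       # [0.    0.333 1.   ]
print(((X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))).ravel())   # same values by hand
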
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(-3,3))
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit_transform(X.f3.values.reshape(-1, 1))
Y_scaled_test = minmax_scaler.transform(Y_test.reshape(-1, 1))  # reuse the scaler fitted on the training data below
rescaledX = scaler.fit_transform(X)
scaled_df = pd.DataFrame(preprocessing.MinMaxScaler().fit_transform(data), columns=data.columns)
Y_scaled_train = minmax_scaler.fit_transform(Y_train.reshape(-1, 1))
from sklearn.preprocessing import MaxAbsScaler
scaler = MaxAbsScaler()
scaler.fit_transform(X.f3.values.reshape(-1, 1))
scaled_df = pd.DataFrame(preprocessing.MaxAbsScaler().fit_transform(data), columns=data.columns)
from sklearn.preprocessing import RobustScaler
robust = RobustScaler()
robust = RobustScaler(quantile_range = (0.1,0.9))
robust.fit_transform(X.f3.values.reshape(-1, 1))
from sklearn.preprocessing import QuantileTransformer
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X)
scaled_train_df = pd.DataFrame(preprocessing.QuantileTransformer().fit_transform(train_new), columns=train_new.columns)
from sklearn.preprocessing import PowerTransformer
>>> pt = PowerTransformer()
scaled_df = pd.DataFrame(preprocessing.PowerTransformer().fit_transform(data), columns=data.columns)
from sklearn.preprocessing import FunctionTransformer
transformer = FunctionTransformer(np.log1p, validate=True)
transformer.fit_transform(X.f2.values.reshape(-1, 1))
X.f2.apply(lambda x : np.log1p(x))
from sklearn.preprocessing import Normalizer
normalizer = Normalizer()
scaler = Normalizer().fit(X)
normalizedX = scaler.transform(X)
scaled_df = pd.DataFrame(preprocessing.Normalizer().fit_transform(data), columns=data.columns)
X_scaled_train = scaler.fit_transform(X_train)
df_cont[col] = Normalizer().fit_transform(df_cont[col].values.reshape(1, -1))[0]

from sklearn.feature_selection import VarianceThreshold
sel=VarianceThreshold()
sel.fit_transform(dataset)
sel60 = VarianceThreshold(threshold=(0.6 * (1 - 0.6)))
from sklearn.feature_selection import SelectKBest, chi2
X_new = SelectKBest(chi2, k=3).fit_transform(X, y)
test = SelectKBest(score_func=chi2, k=4)
features_chi2 = chi2(features, labels == category_id)

from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer()
vec = DictVectorizer(sparse=False, dtype=int)
vec = DictVectorizer(sparse=True, dtype=int)
vec.fit_transform(dataset).toarray()
pos_vectorized = vec.fit_transform(pos_window)
vec.get_feature_names()

from sklearn.feature_extraction.text import TfidfVectorizer
TF stands for Term Frequency, and IDF stands for Inverse Document Frequency
TF  = (Frequency of a word in the document)/(Total words in the document)
IDF = Log((Total number of docs)/(Number of docs containing the word))
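
A small TfidfVectorizer sketch on a toy corpus; note that scikit-learn uses a smoothed IDF, ln((1 + n) / (1 + df)) + 1, and L2-normalises each row by default, so its numbers differ slightly from the textbook formulas above:
from sklearn.feature_extraction.text import TfidfVectorizer

corpus = ["the cat sat", "the dog sat", "the dog barked"]
vectorizer = TfidfVectorizer()
tfidf = vectorizer.fit_transform(corpus)   # sparse matrix, one row per document
print(vectorizer.vocabulary_)              # term -> column index
print(vectorizer.idf_)                     # learned (smoothed) inverse document frequencies
print(tfidf.toarray().round(2))            # dense TF-IDF matrix, each row L2-normalised
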
>>> vectorizer = TfidfVectorizer()
TfidfVec = TfidfVectorizer(binary=True, min_df=2)
vect = TfidfVectorizer(stop_words='english')
TfidfVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english' , min_df=2, max_df=0.75, sublinear_tf=True)
tfidf_vect_ngram_chars = TfidfVectorizer(analyzer='char', token_pattern=r'\w{1,}', ngram_range=(2,3), max_features=5000)
tfidf_vect_ngram = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', ngram_range=(2,3), max_features=5000)
tfidf = TfidfVectorizer(stop_words='english', ngram_range=(2,3), norm='l2', min_df=5, sublinear_tf=True)
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=2, max_df=1.0, binary=True, stop_words='english')
word_vectorizer = TfidfVectorizer(stop_words='english', sublinear_tf=True, strip_accents='unicode', analyzer='word', token_pattern=r'\w{2,}', ngram_range=(1, 1), max_features=30000)
vectorizer = TfidfVectorizer(min_df=5, max_df = 0.8, sublinear_tf=True, use_idf=True)
vc_name = TfidfVectorizer(ngram_range=(1,7),stop_words="english", analyzer='char',max_features=50000)
tfidf = TfidfVec.fit_transform(textlist)
(tfidf * tfidf.T).toarray()
TfidfVec.vocabulary_  # vocabulary_ lives on the vectorizer, not on the transformed matrix
features = vect.get_feature_names()
idf = tfidf.idf_
from sklearn.feature_extraction.text import TfidfTransformer
tfidfconverter = TfidfTransformer()
>>> transformer = TfidfTransformer(smooth_idf=False)
tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)
X_train_tfidf = TfidfTransformer().fit_transform(X_train_counts)
X = tfidfconverter.fit_transform(X).toarray()

from sklearn.feature_extraction.text import CountVectorizer 
vec = CountVectorizer( )
vec = CountVectorizer(stop_words='english' )
cvec = CountVectorizer(lowercase=False)
vect = CountVectorizer(max_features=1000, binary=True)
custom_vec = CountVectorizer(preprocessor=my_preprocessor, tokenizer=my_tokenizer, ngram_range=(1,2), stop_words='english')
custom_vec = CountVectorizer(analyzer=analyzer, ngram_range=(1,2), stop_words='english')
cv = sklearn.feature_extraction.text.CountVectorizer(vocabulary=['hot', 'cold', 'old'])
bigram_vectorizer = CountVectorizer(ngram_range=(1, 2), token_pattern=r'\b\w+\b', min_df=1)
ngram_vectorizer = CountVectorizer(analyzer='char_wb', ngram_range=(5, 5))
vec = CountVectorizer(analyzer=process_text)
vect = CountVectorizer(stop_words='english', max_features=100)

vec.fit(X)
vec.vocabulary_
print(len(vec.get_feature_names()))
vect.vocabulary_.get(u'algorithm')
X_transformed = vec.transform(X)
X = X_transformed.toarray()
X_train_counts = count_vect.fit_transform(twenty_train.data)
pd.DataFrame(X_train_counts.toarray())
X_train_counts.shape
vec_text_df = pd.DataFrame(vec_text.todense())
test_vec_text_df = pd.DataFrame(vec.fit_transform(test['text_new']).todense())
X_train_cv = pd.SparseDataFrame(X_train_cv, default_fill_value=0)
print (vect.get_feature_names()[-50:])
print(vect.get_stop_words())

>>> from sklearn.feature_extraction.text import HashingVectorizer
>>> hv = HashingVectorizer()
>>> hv = HashingVectorizer(n_features=10)
>>> hv.transform(corpus)

from sklearn.feature_extraction import text 
stop_words = text.ENGLISH_STOP_WORDS.union(['abcd'])

from sklearn.pipeline import make_pipeline
pl = make_pipeline(scaler, kmeans)
from sklearn.pipeline import Pipeline
PipeRF = Pipeline([
        ('std', MinMaxScaler()),
        ('RF', rf)
    ])
>>> text_clf = Pipeline([('vect', CountVectorizer()),
...                      ('tfidf', TfidfTransformer()),
...                      ('clf', MultinomialNB()),
... ])
text_clf = text_clf.fit(twenty_train.data, twenty_train.target)

>>> import joblib  # older releases: from sklearn.externals import joblib
>>> joblib.dump(clf, 'filename.pkl') 
>>> clf = joblib.load('filename.pkl') 
>>> from sklearn import random_projection
>>> transformer = random_projection.GaussianRandomProjection()

from sklearn.naive_bayes import MultinomialNB
mnb=MultinomialNB()
clf = MultinomialNB().fit(X_train_tfidf, y_train)
m=MultinomialNB(alpha=0.00000000001)
mnb.fit(X,Y)
nb.fit(X_train_res, y_train_res)
mnb.predict_proba(X_test)
nb.score(X_train_res, y_train_res)
from sklearn.naive_bayes import BernoulliNB
bnb=BernoulliNB()
bnb.fit(X,Y)
bnb.predict_proba(X_test)
proba_bnb=bnb.predict_proba(X_test)

AdaBoost (Adaptive Boosting) models using Python scikit-learn package.
from sklearn.ensemble import AdaBoostClassifier
ABC = AdaBoostClassifier(base_estimator=shallow_tree, n_estimators = 6)
adaboost_model = AdaBoostClassifier(base_estimator=tree, n_estimators=600, learning_rate=1.5, algorithm="SAMME")
clf = RandomForestClassifier(n_estimators=3, max_depth=10, min_samples_split=10)

bclf = AdaBoostClassifier(base_estimator=clf, n_estimators=clf.n_estimators)
ABC.fit(X_train, y_train)
y_pred = ABC.predict(X_test)
predictions = adaboost_model.predict_proba(X_test)
score = metrics.accuracy_score(y_test, y_pred)
abc_scores.append(score)
metrics.roc_auc_score(y_test, predictions[:,1])

Gradient Boosting ML models using Python sklearn package.
from sklearn.ensemble import GradientBoostingClassifier
gbm0 = GradientBoostingClassifier(random_state=10) 
GBC = GradientBoostingClassifier(max_depth=2, n_estimators=200)
A model with a higher learning_rate learns faster but is more prone to overfitting; one with a lower learning rate learns more slowly but is less prone to overfitting.
clf_gbc = GradientBoostingClassifier(n_estimators=300,max_depth=5,random_state=2019).fit(X_train_tfidf, y_train)
gbr = GradientBoostingClassifier(criterion='friedman_mse', init=None, learning_rate=0.1, loss='deviance', max_depth=3, max_features=None, max_leaf_nodes=None, min_impurity_split=1e-07, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=50, presort='auto', random_state=None, subsample=1.0, verbose=0, warm_start=False)
gbrt = GradientBoostingClassifier(loss="deviance", learning_rate=0.1, n_estimators=100, max_depth=3, random_state=123)
gbrt.fit(X_res, y_res)

from sklearn.linear_model import RidgeClassifier
clf_rdg = RidgeClassifier()
clf_rdg = RidgeClassifier(tol=1e-5, solver="sag").fit(X_train_tfidf, y_train)

from sklearn.linear_model import PassiveAggressiveClassifier
clf_prc = PassiveAggressiveClassifier().fit(X_train_tfidf, y_train)

from sklearn.gaussian_process import GaussianProcessClassifier

from sklearn.utils import shuffle

from sklearn.linear_model import RidgeCV, LassoCV, ElasticNetCV
ridge = RidgeCV(alphas = [0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1, 3, 6, 10, 30, 60])
ridge.fit(X_train,y_train)
y_train_rdg = ridge.predict(X_train)
alpha = ridge.alpha_
print("Ridge RMSE on Training set :", rmse_CV_train(ridge).mean())
print("Ridge RMSE on Test set :", rmse_CV_test(ridge).mean())

lasso = LassoCV(alphas = [0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1,   0.3, 0.6, 1],  max_iter = 50000, cv = 10)
model_lasso = LassoCV(alphas = [1, 0.1, 0.001, 0.0005]).fit(X_train, y)
lasso.fit(X_train, y_train)
alpha = lasso.alpha_
y_test_las = lasso.predict(X_test)

elasticNet = ElasticNetCV(l1_ratio = [0.1, 0.3, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 0.95, 1],
                          alphas = [0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006,  0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1, 3, 6], 
                          max_iter = 50000, cv = 10)
elasticNet.fit(X_train, y_train)
alpha = elasticNet.alpha_
ratio = elasticNet.l1_ratio_

from sklearn.ensemble import BaggingClassifier
model = BaggingClassifier(base_estimator=randomForest, n_estimators=num_trees, random_state=seed)
model = BaggingClassifier(base_estimator=clf_LR, random_state=seed)
results = model_selection.cross_val_score(model, data_clean.iloc[:,:-1].values, data_clean.iloc[:,-1].values, cv=kfold)

from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone


skimage package in Python
from skimage import data, filters, io
image = data.coins()
edges = filters.sobel(image)
io.imshow(edges)
ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
io.show()
from skimage import transform as tf
from skimage.color import rgb2gray
from skimage.feature import CENSURE
img_orig = rgb2gray(data.astronaut())
img_warp = tf.warp(img_orig, tform)
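
Putting the skimage pieces together, a minimal runnable sketch (displaying with matplotlib rather than skimage.io is an illustrative choice):
import matplotlib.pyplot as plt
from skimage import data, filters

image = data.coins()            # built-in grayscale sample image of coins
edges = filters.sobel(image)    # Sobel edge-magnitude filter
plt.imshow(edges, cmap=plt.cm.gray)
plt.show()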

Related Python Articles:  pandas package in Python    Comprehensions in Python
