LightGBM Binary Classification, Multi-Class Classification, and Regression using Python

[Figure: LightGBM's leaf-wise tree growth vs. the level-wise growth used by other boosting algorithms]
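Because the trees grow leaf-wise, model complexity in LightGBM is controlled mainly through num_leaves rather than depth alone. A minimal sketch of the relevant knobs (the values below are illustrative defaults, not tuned settings):

#num_leaves caps how many leaves each leaf-wise tree may grow;
#keeping it well below 2**max_depth is a common guard against overfitting
growth_params={'num_leaves':31,        #LightGBM's default
               'max_depth':10,         #depth cap as a safety net
               'min_data_in_leaf':20}  #default minimum samples per leaf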
#importing libraries
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.datasets import load_breast_cancer,load_boston,load_wine
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error,roc_auc_score,precision_score
#note: load_boston was removed in scikit-learn 1.2, so this import needs scikit-learn < 1.2
pd.options.display.max_columns = 999
#loading the breast cancer dataset
X=load_breast_cancer()
df=pd.DataFrame(X.data,columns=X.feature_names)
Y=X.target
#scaling the features using Standard Scaler
sc=StandardScaler()
#fit_transform fits the scaler and transforms the data in one step
X=pd.DataFrame(sc.fit_transform(df),columns=df.columns)
#train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.3,random_state=0)
#converting the dataset into proper LGB format
d_train=lgb.Dataset(X_train, label=y_train)
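lgb.Dataset wraps the data in LightGBM's optimized internal format. As a sketch, it can also carry the original feature names so that feature-importance output stays readable (assuming the scaled DataFrame kept the column names, as above):

#optional sketch: pass the original column names to the Dataset explicitly
d_train=lgb.Dataset(X_train, label=y_train,
                    feature_name=list(df.columns))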
#Specifying the parameter
params={}
params['learning_rate']=0.03
params['boosting_type']='gbdt' #GradientBoostingDecisionTree
params['objective']='binary' #Binary target feature
params['metric']='binary_logloss' #metric for binary classification
params['max_depth']=10
#train the model
clf=lgb.train(params,d_train,100) #train for 100 boosting rounds (the third argument is num_boost_round)
#prediction on the test set
y_pred=clf.predict(X_test)
#predict() returns probabilities, so threshold at 0.5: >=0.5 becomes 1, otherwise 0
y_pred=y_pred.round(0)
#converting from float to integer
y_pred=y_pred.astype(int)
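Equivalently, the threshold can be applied in a single vectorized step, a sketch of which is:

#one-step vectorized alternative to round + astype
y_pred=(clf.predict(X_test)>=0.5).astype(int)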
#roc_auc_score metric (scikit-learn metrics expect y_true first)
roc_auc_score(y_test,y_pred)
#0.9672167056074766
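The same booster is also available through LightGBM's scikit-learn wrapper, which thresholds for you and plugs into sklearn tooling. A minimal sketch (n_estimators plays the role of the boosting-round count above):

from lightgbm import LGBMClassifier
#sketch: scikit-learn-style interface to the same gbdt booster
sk_clf=LGBMClassifier(learning_rate=0.03,max_depth=10,n_estimators=100)
sk_clf.fit(X_train,y_train)
sk_labels=sk_clf.predict(X_test)            #already 0/1 labels
sk_proba=sk_clf.predict_proba(X_test)[:,1]  #probabilities if needed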
#loading the dataset
X1=load_wine()
df_1=pd.DataFrame(X1.data,columns=X1.feature_names)
Y_1=X1.target
#Scaling using the Standard Scaler
sc_1=StandardScaler()
#fit_transform fits and transforms in one step, so no separate fit() call is needed
X_1=pd.DataFrame(sc_1.fit_transform(df_1),columns=df_1.columns)
#train-test-split
X_train,X_test,y_train,y_test=train_test_split(X_1,Y_1,test_size=0.3,random_state=0)
#Converting the dataset in proper LGB format
d_train=lgb.Dataset(X_train, label=y_train)
#setting up the parameters
params={}
params['learning_rate']=0.03
params['boosting_type']='gbdt' #GradientBoostingDecisionTree
params['objective']='multiclass' #Multi-class target feature
params['metric']='multi_logloss' #metric for multi-class
params['max_depth']=10
params['num_class']=3 #number of distinct classes in the target
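Rather than hard-coding the class count, it can be derived from the labels, as in this sketch:

#sketch: derive the number of classes from the training labels
params['num_class']=len(np.unique(y_train))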
#training the model
clf=lgb.train(params,d_train,100) #training for 100 boosting rounds
#prediction on the test dataset
y_pred_1=clf.predict(X_test)
#printing the predictions: one row per sample, one probability per class
y_pred_1
#array([[0.95819859, 0.02205037, 0.01975104],
#       [0.05465546, 0.09575231, 0.84959223],
#       [0.20955298, 0.69498737, 0.09545964],
#       [0.95852959, 0.02096561, 0.02050481],
#       [0.04243184, 0.92053949, 0.03702867], ...
#argmax() picks the class with the highest predicted probability in each row
y_pred_1 = [np.argmax(line) for line in y_pred_1]
#printing the predictions
#[0, 2, 1, 0, 1, 0, 0, 2, ...]
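The list comprehension works row by row; NumPy can do the same selection in one vectorized call, as a sketch:

#vectorized equivalent of the per-row argmax above
y_pred_1=np.argmax(clf.predict(X_test),axis=1)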
#using precision score as the error metric (y_true first, then the predictions)
precision_score(y_test,y_pred_1,average=None).mean()
# 0.9545454545454546
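Averaging the per-class precisions compresses the picture; a sketch of how to inspect each class separately with scikit-learn's classification report:

from sklearn.metrics import classification_report
#per-class precision, recall, and F1 in one table
print(classification_report(y_test,y_pred_1))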
#loading the Boston Dataset
X=load_boston()
df=pd.DataFrame(X.data,columns=X.feature_names)
Y=X.target
#Scaling using the Standard Scaler
sc=StandardScaler()
X=pd.DataFrame(sc.fit_transform(df),columns=df.columns)
#train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.3,random_state=0)
#Converting the data into proper LGB Dataset Format
d_train=lgb.Dataset(X_train, label=y_train)
#Declaring the parameters
params={}
params['learning_rate']=0.03
params['boosting_type']='gbdt' #GradientBoostingDecisionTree
params['objective']='regression' #regression task
params['max_depth']=10
#the number of boosting rounds is passed directly to lgb.train below,
#so a separate n_estimators entry in params would be redundant
#model creation and training
clf=lgb.train(params,d_train,100)
#model prediction on X_test
y_pred=clf.predict(X_test)
#using the RMSE error metric (mean_squared_error returns MSE, so take the square root)
np.sqrt(mean_squared_error(y_test,y_pred))
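In practice the number of boosting rounds is usually chosen against a held-out validation set with early stopping. A minimal sketch, assuming a recent LightGBM version where early stopping is configured through callbacks (the round counts below are illustrative):

#sketch: stop boosting when the validation metric stops improving
d_valid=lgb.Dataset(X_test,label=y_test,reference=d_train)
clf_es=lgb.train(params,d_train,num_boost_round=500,
                 valid_sets=[d_valid],
                 callbacks=[lgb.early_stopping(stopping_rounds=20)])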
