Linear Regression Using Gradient Descent from Scratch in Python: MNIST Dataset

Linear regression trained with batch gradient descent, applied to the MNIST dataset that ships with Keras (TensorFlow).
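Concretely, the steps below implement the standard linear model with a mean-squared-error cost, minimized by batch gradient descent. With m training examples and learning rate α (the code's learning_rate), the cost and parameter updates are:

        \hat{y} = Xw + b, \qquad J(w, b) = \frac{1}{2m} \sum_{i=1}^{m} (\hat{y}_i - y_i)^2

        w \leftarrow w - \alpha \cdot \frac{1}{m} X^\top (\hat{y} - y), \qquad b \leftarrow b - \alpha \cdot \frac{1}{m} \sum_{i=1}^{m} (\hat{y}_i - y_i)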

Follow these steps:

1. Download the MNIST dataset:

        from keras.datasets import mnist
        (train_X, train_y), (test_X, test_y) = mnist.load_data()
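A quick optional sanity check on what load_data returns (the standard Keras MNIST split: 28x28 grayscale images with integer labels 0-9):

        print(train_X.shape, train_y.shape)  # (60000, 28, 28) (60000,)
        print(test_X.shape, test_y.shape)    # (10000, 28, 28) (10000,)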

2. Normalize and flatten the training data:

        train_X = train_X / 255.0
        train_X = train_X.reshape(60000, 784)

3. Normalize and flatten the test data:

        test_X = test_X / 255.0
        test_X = test_X.reshape(10000, 784)
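An equivalent alternative for steps 2 and 3, if you prefer not to hard-code the row counts, is to let NumPy infer the first dimension (a minor convenience, not a required change):

        train_X = (train_X / 255.0).reshape(-1, 784)
        test_X = (test_X / 255.0).reshape(-1, 784)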

4. Linear regression code:

import numpy as np
import matplotlib.pyplot as plt

class LinearReg:
    
    def __init__(self,learning_rate=0.0001,epochs=10000):
        self.epochs = epochs
        self.lr = learning_rate
        self.w = None
        self.b = None
        self.cost_list = []
        
    def __initial_params(self,shape):
        #initialize weight and bias to zero
        self.w = np.zeros(shape)
        self.b = 0
        return True
        
    def __predictions(self,X):
        return np.dot(X, self.w) + self.b
    
    def __calculate_cost(self,error):
        return (1/(2*error.size)) * np.dot(error.T,error)
    
    def __gradient_descent(self,X,y,y_pred):
        #difference between prediction and actual
        error = y_pred - y
        #calculate cost and append them to list
        cost = self.__calculate_cost(error)
        self.cost_list.append(cost)
        #gradients
        dw = (1 / X.shape[0]) * np.dot(X.T,error)
        db = (1 / X.shape[0]) * np.sum(error)
        return dw, db
    
    def __update_parameters(self,dw,db):
        #update weight and bias with gradients
        self.w -= self.lr * dw
        self.b -= self.lr * db
        return True
    
    def fit(self,X,y):
        """fits the model"""
        self.__initial_params(X.shape[1])
        for epoch in range(self.epochs):
            y_pred = self.__predictions(X)
            dw, db = self.__gradient_descent(X, y, y_pred)
            self.__update_parameters(dw, db)
            #log progress every 1000 epochs instead of flooding the console
            if (epoch + 1) % 1000 == 0:
                print("Epoch", epoch + 1)
        return True
    
    def predict(self,X):
        return self.__predictions(X)
    
    def calculate_rmse(self,y_real,y_pred):
        """returns root mean square error"""
        return np.sqrt(np.mean((y_pred-y_real)**2))
    
    def plot_cost(self):
        """plots the progress of cost on each iteration"""
        plt.title('Cost Function J')
        plt.xlabel('No. of iterations')
        plt.ylabel('Cost')
        plt.plot(self.cost_list)
        plt.show()
        
    def calculate_r2(self,X,y):
        """returns r2"""
        #vectorized: one prediction pass instead of a per-row loop
        y_pred = self.__predictions(X)
        y_mean = np.mean(y)
        sum_squares = np.sum((y - y_mean) ** 2)
        sum_residuals = np.sum((y - y_pred) ** 2)
        return 1 - (sum_residuals / sum_squares)

5. Train the model:

        model = LinearReg()
        model.fit(train_X, train_y)
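With the defaults (learning_rate=0.0001, epochs=10000), fit runs full-batch gradient descent over all 60,000 rows on every iteration, which can take a while. A sketch with assumed smaller settings, usually enough to watch the cost curve flatten:

        # hypothetical shorter run; values chosen for speed, not accuracy
        model = LinearReg(learning_rate=0.0001, epochs=1000)
        model.fit(train_X, train_y)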

6. Plot the graph (cost vs. iterations):

        model.plot_cost()

7. Calculate the test RMSE:

        model.calculate_rmse(test_y, model.predict(test_X))
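The class also defines calculate_r2, which the steps above never call; evaluating it on the test split gives a complementary goodness-of-fit score:

        print(model.calculate_r2(test_X, test_y))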

8. Predict on test data:

        for i in range(20):
            print(round(model.predict(test_X[i])), test_y[i])
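Because the labels are digits 0-9 while a linear model's output is unbounded, an optional refinement (my addition, not part of the original steps) is to round and clip the predictions into the label range before comparing:

        # optional: force predictions into the valid label range 0-9
        preds = np.clip(np.round(model.predict(test_X[:20])), 0, 9).astype(int)
        for p, actual in zip(preds, test_y[:20]):
            print(p, actual)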

Thank you for reading this article.

