import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the dataset - User_Data
dataset = pd.read_csv('...\\User_Data.csv')
# To predict whether a user will purchase the product, we need the
# relationship between Age and Estimated Salary; User ID and Gender are
# not useful predictors here.
# Input features
x = dataset.iloc[:, [2, 3]].values
# Output label
y = dataset.iloc[:, 4].values
# Split the dataset into train and test sets: 75% of the data is used to
# train the model and 25% to test its performance.
# (sklearn.cross_validation was removed in scikit-learn 0.20; the module
# is now sklearn.model_selection.)
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(
    x, y, test_size=0.25, random_state=0)
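# Optional sanity check (not in the original gist): confirm the 75/25 split
# by printing the shapes of the resulting arrays.
print("Train/test shapes:", xtrain.shape, xtest.shape)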
# Feature scaling matters here because Age and Estimated Salary lie in very
# different ranges; without scaling, the Estimated Salary feature would
# dominate the Age feature in the fitted model.
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
xtrain = sc_x.fit_transform(xtrain)
xtest = sc_x.transform(xtest)
print(xtrain[0:10, :])
# Note that the Age and Estimated Salary values are now scaled and lie
# roughly between -1 and 1, so each feature contributes comparably to the
# decision making, i.e. finalizing the hypothesis.
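# Optional check (added for illustration): StandardScaler applies
# z = (x - mean) / std column-wise, so each scaled training column should
# have mean ~0 and standard deviation ~1.
print("Column means :", xtrain.mean(axis=0))
print("Column stdevs:", xtrain.std(axis=0))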
# Finally, train the Logistic Regression model.
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(xtrain, ytrain)
# After training the model, it is time to use it to make predictions on the
# test data.
y_pred = classifier.predict(xtest)
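# Optional (added for illustration): predict_proba exposes the class
# probabilities behind each prediction; the second column is the estimated
# probability of a purchase (class 1).
y_prob = classifier.predict_proba(xtest)[:, 1]
print("First 5 purchase probabilities:", y_prob[:5])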
# Test the performance of our model: confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(ytest, y_pred)
print("Confusion Matrix : \n", cm)
# Performance measure: accuracy
from sklearn.metrics import accuracy_score
print("Accuracy : ", accuracy_score(ytest, y_pred))
# Visualizing the performance of our model on the test set.
from matplotlib.colors import ListedColormap
X_set, y_set = xtest, ytest
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1,
                               stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1,
                               stop=X_set[:, 1].max() + 1, step=0.01))
plt.contourf(X1, X2, classifier.predict(
    np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
    alpha=0.75, cmap=ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i), label=j)
plt.title('Classifier (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
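# Optional usage sketch (not in the original gist): score a single new user.
# The Age / Estimated Salary values below are made up for illustration and
# must go through the same fitted scaler as the training data.
new_user = sc_x.transform([[30, 87000]])
print("Will purchase?", classifier.predict(new_user)[0])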