
{

"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "vdTVSEFWWWB9"
},
"outputs": [],
"source": [
"import numpy as np #for linear algebra\n",
"import pandas as pd #for chopping, processing\n",
"import csv #for opening csv files\n",
"#%matplotlib inline \n",
"#import matplotlib.pyplot as plt #for plotting the graphs\n",
"from sklearn.linear_model import LogisticRegression #for logistic regression\n",
"from sklearn.pipeline import Pipeline #to assemble steps for cross validation\n",
"from sklearn.preprocessing import PolynomialFeatures #for all the polynomial features\n",
"from sklearn import svm #for Support Vector Machines\n",
"from sklearn.neighbors import NearestNeighbors #for nearest neighbor classifier\n",
"from sklearn import tree\n",
"from sklearn.tree import DecisionTreeClassifier #for decision tree classifier\n",
"from sklearn.naive_bayes import GaussianNB #for naive bayes classifier\n",
"from scipy import stats #for statistical info\n",
"from sklearn.model_selection import train_test_split # to split the data in train and test\n",
"from sklearn.model_selection import KFold # for cross validation\n",
"#from sklearn.grid_search import GridSearchCV # for tuning parameters\n",
"from sklearn.model_selection import learning_curve, GridSearchCV\n",
"from sklearn.neighbors import KNeighborsClassifier #for k-neighbor classifier\n",
"from sklearn import metrics # for checking the accuracy \n",
"from time import time\n",
"from azureml.core.run import Run"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"path_url='https://raw.githubusercontent.com/AarthiAlagammai/project3/main/dataR2.csv'"
]
},
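{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional sanity check, added here as a hedged sketch rather than part of the original training flow: read the CSV straight from the raw GitHub URL with pandas to confirm the link resolves and to inspect the columns, including the `Classification` target that `clean_data` separates out below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd #pandas can read a raw GitHub URL directly\n",
"\n",
"preview = pd.read_csv(path_url) #quick local preview, independent of Azure ML\n",
"print(preview.shape)\n",
"print(preview.columns.tolist())\n",
"preview.head()"
]
},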
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from azureml.data.dataset_factory import TabularDatasetFactory\n",
"\n",
"from azureml.core import Workspace, Dataset, Experiment\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"run=Run.get_context()"
]
},
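{
"cell_type": "markdown",
"metadata": {},
"source": [
"When these cells are executed outside a submitted Azure ML job, `Run.get_context()` is expected to return an offline run object whose `log()` calls simply print locally, so the script can still be smoke-tested in the notebook. The check below only inspects the run type; it assumes nothing beyond that."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Show whether we got an offline run or a real submitted-run context.\n",
"print(type(run).__name__)"
]
},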
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds = Dataset.Tabular.from_delimited_files(path =path_url)"
]
},
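{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick look at the `TabularDataset` before cleaning; `take()` and `to_pandas_dataframe()` are standard `TabularDataset` methods, so this preview only materializes a handful of rows."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Materialize just the first few rows to keep the preview cheap.\n",
"ds.take(5).to_pandas_dataframe()"
]
},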
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def clean_data(data):\n",
" df=data.to_pandas_dataframe()\n",
" print(df.head())\n",
" print(\"length_of_dataframe\",len(df))\n",
" # Number of malignant cases\n",
" malignant = len(df[df['Classification']==2])\n",
" print(\"Number of malignant cases\",malignant)\n",
" #Number of benign cases\n",
" benign = len(df[df['Classification']==1])\n",
" print(\"Number of benign cases\",benign)\n",
" #Rate of malignant tumors over all cases\n",
" rate = (float(malignant)/(len(df)))*100\n",
" print(\"Rate of malignant tumors over all cases\",rate)\n",
" x_df=df.drop('Classification',axis=1)\n",
" \n",
" y_df=df['Classification']\n",
" return x_df,y_df"
]
},
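{
"cell_type": "markdown",
"metadata": {},
"source": [
"Illustrative call of `clean_data` outside `main()`, just to show the returned shapes and the class balance. The names `x_all` and `y_all` are local to this sketch and are not used by the training code."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# clean_data prints basic counts and returns (features, target).\n",
"x_all, y_all = clean_data(ds)\n",
"print(x_all.shape, y_all.shape)\n",
"print(y_all.value_counts())"
]
},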
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def train_classifier(clf, X_train, Y_train):\n",
" start = time()\n",
" model=clf.fit(X_train, Y_train)\n",
" end = time()\n",
" print (\"Trained model in {:.4f} seconds\".format(end - start))\n",
" return model"
]
},
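{
"cell_type": "markdown",
"metadata": {},
"source": [
"A throwaway example of the timing helper on synthetic data, only to show the call signature; the real call happens inside `main()` with the SVC configured there. `make_classification` and `GaussianNB` come straight from scikit-learn, and the demo variables are hypothetical."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.datasets import make_classification #synthetic data for the demo\n",
"from sklearn.naive_bayes import GaussianNB\n",
"\n",
"X_demo, y_demo = make_classification(n_samples=200, n_features=9, random_state=0)\n",
"demo_model = train_classifier(GaussianNB(), X_demo, y_demo)\n",
"print(demo_model)"
]
},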
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Ig_HF8RKWyWj"
},
"outputs": [],
"source": [
"from sklearn.ensemble import RandomForestClassifier\n",
"from sklearn import svm\n",
"from sklearn.svm import SVC\n",
"import argparse\n",
"import os #for creating the outputs directory in main()\n",
"import joblib"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "S7afCTAqWyk1"
},
"outputs": [],
"source": [
"def main():\n",
" # Add arguments to script\n",
" parser = argparse.ArgumentParser()\n",
"\n",
" parser.add_argument('--C', type=float, default=1.0, help=\"Inverse of regularization strength. Smaller values cause stronger regularization\")\n",
" #parser.add_argument('--gamma', type=int, default=1, help=\"Kernel coefficient for the 'rbf', 'poly' and 'sigmoid' kernels\")\n",
" #parser.add_argument('--kernel', type=str, default='sigmoid', help=\"Specifies the kernel type to be used in the algorithm\")\n",
" parser.add_argument('--coef0', type=int, default=0, help=\"Independent term in kernel function. It is only significant in ‘poly’ and ‘sigmoid’\")\n",
"\n",
" args = parser.parse_args()\n",
"\n",
" run.log(\"Regularization Strength:\", float(args.C))\n",
" #run.log(\"Kernel:\", str(args.kernel))\n",
" run.log(\"coef0:\", int(args.coef0))\n",
"\n",
" x, y = clean_data(ds)\n",
" # Split data into train and test sets.\n",
" x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=403,shuffle=True)\n",
" clf=SVC(C=args.C,coef0=args.coef0)\n",
" #clf=SVC(C=0.1,kernel='sigmoid',coef0=1)\n",
" model=train_classifier(clf, x_train, y_train)\n",
" \n",
" os.makedirs('outputs',exist_ok=True)\n",
" joblib.dump(model,'outputs/model.joblib')\n",
" accuracy = model.score(x_test, y_test)\n",
" run.log(\"Accuracy\", float(accuracy))\n",
"\n",
"if __name__ == '__main__':\n",
" main()"
]
},
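{
"cell_type": "markdown",
"metadata": {},
"source": [
"Calling `main()` inside a notebook usually fails because `argparse` also sees Jupyter's own command-line flags and `parse_args()` exits; the `%tb` cell below prints the most recent traceback, which is handy in that situation. As a hedged in-notebook workaround, the arguments can be injected explicitly before calling `main()`; the script name and the values `0.5` and `1` below are purely illustrative. When the exported script is submitted to Azure ML, the same arguments would instead arrive on the command line (e.g. `--C 0.5 --coef0 1`)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"\n",
"# Override the notebook's argv so parse_args() only sees our (hypothetical) values.\n",
"sys.argv = ['train.py', '--C', '0.5', '--coef0', '1']\n",
"main()"
]
},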
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%tb"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"jupytext": {
"encoding": "# -*- coding: utf-8 -*-",
"text_representation": {
"extension": ".py",
"format_name": "light",
"format_version": "1.5",
"jupytext_version": "1.6.0"
}
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
