Looking at random graphs
Imports
# python standard library
import os
import pickle

# from pypi
import networkx
import numpy
import pandas
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import (
    ExtraTreesClassifier,
    RandomForestClassifier,
)
from sklearn.feature_selection import (
    RFECV,
    SelectFromModel,
)
from sklearn.model_selection import (
    GridSearchCV,
    StratifiedKFold,
    train_test_split,
)
%matplotlib inline
Part 1 - Random Graph Identification
For the first part of this assignment you will analyze randomly generated graphs and determine which algorithm created them.
Load the data
with open("A4_graphs", "rb") as reader:
    part_one_graphs = pickle.load(reader)
print(len(part_one_graphs))
print(type(part_one_graphs[0]))
`part_one_graphs` is a list containing 5 networkx graphs. Each of these graphs was generated by one of three possible algorithms:
- Preferential Attachment (`'PA'`)
- Small World with low probability of rewiring (`'SWL'`)
- Small World with high probability of rewiring (`'SWH'`)
Analyze each of the 5 graphs and determine which of the three algorithms generated the graph.
The `graph_identification` function should return a list of length 5 where each element in the list is either `'PA'`, `'SWL'`, or `'SWH'`.
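For reference, graphs like these can be produced with networkx's built-in generators. This is only a sketch to show what the three labels mean; the sizes and parameters here are assumptions, not the values the assignment actually used.

# hypothetical parameters - the assignment's real values are unknown
pa = networkx.barabasi_albert_graph(n=1000, m=2)             # 'PA'
sw_low = networkx.watts_strogatz_graph(n=1000, k=6, p=0.01)  # 'SWL'
sw_high = networkx.watts_strogatz_graph(n=1000, k=6, p=0.5)  # 'SWH'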
Graph Identification
def graph_identification():
    """Identifies the algorithm that generated each graph

    Returns:
        list: string identifiers ('PA', 'SWL', or 'SWH') for each graph
    """
    graph_types = []
    for graph in part_one_graphs:
        path = networkx.average_shortest_path_length(graph)
        coefficient = networkx.average_clustering(graph)
        if path > 6:
            # long average paths: small-world with little rewiring
            if coefficient < 0.5:
                graph_types.append("SWL")
            else:
                raise Exception("unexpected type")
        else:
            # short paths: preferential attachment if clustering is low,
            # otherwise small-world with heavy rewiring
            if coefficient < 0.5:
                graph_types.append("PA")
            else:
                graph_types.append("SWH")
    return graph_types
This was marked wrong by the grader. One likely culprit is a label mismatch: the version I submitted returned "SW_L" and "SW_H" where the spec asks for 'SWL' and 'SWH'. The path-length threshold may also misclassify some of the graphs.
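A heuristic that leans on the degree distribution might be more robust, since preferential attachment produces hubs with degrees far above the mean, while small-world graphs keep every node's degree close to the mean. This is only a sketch; the thresholds are assumptions I haven't verified against the grader.

def graph_identification_by_degree():
    """Alternative heuristic: look for hubs first, then clustering

    Returns:
        list: 'PA', 'SWL', or 'SWH' for each graph
    """
    graph_types = []
    for graph in part_one_graphs:
        degrees = list(dict(graph.degree()).values())
        mean_degree = sum(degrees) / len(degrees)
        clustering = networkx.average_clustering(graph)
        if max(degrees) > 3 * mean_degree:
            # heavy-tailed degree distribution: preferential attachment
            graph_types.append("PA")
        elif clustering > 0.1:
            # clustering survives when little rewiring has happened
            graph_types.append("SWL")
        else:
            graph_types.append("SWH")
    return graph_types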
Part 2 - Company Emails
For the second part of this assignment you will be working with a company's email network, where each node corresponds to a person at the company and each edge indicates that at least one email has been sent between two people.
The network also contains the node attributes `Department` and `ManagementSalary`.
`Department` indicates the department to which the person belongs, and `ManagementSalary` indicates whether that person is receiving a management-position salary.
email = networkx.read_gpickle('email_prediction.txt')
print(networkx.info(email))
Part 2A - Salary Prediction
Using network `email`, identify the people in the network with missing values for the node attribute `ManagementSalary` and predict whether or not these individuals are receiving a management-position salary.
To accomplish this, you will need to create a matrix of node features using networkx, train a sklearn classifier on nodes that have `ManagementSalary` data, and predict the probability of receiving a management salary for nodes where `ManagementSalary` is missing.
Your predictions will need to be given as the probability that the corresponding employee is receiving a management-position salary.
The evaluation metric for this assignment is the Area Under the ROC Curve (AUC).
Your grade will be based on the AUC score computed for your classifier. A model with an AUC of 0.75 or higher will receive full points.
Using your trained classifier, return a series of length 252 with the data being the probability of receiving a management salary, and the index being the node id.
1       1.0
2       0.0
5       0.8
8       1.0
...
996     0.7
1000    0.5
1001    0.0
Length: 252, dtype: float64
The Data Frame
if not os.path.isfile("email_data.h5"):
    data = pandas.DataFrame(index=email.nodes())
    data["department"] = pandas.Series(networkx.get_node_attributes(email, "Department"))
    data["management"] = pandas.Series(networkx.get_node_attributes(email, "ManagementSalary"))
    data["clustering"] = pandas.Series(networkx.clustering(email))
    data["degree"] = pandas.Series(email.degree())
    data["degree_centrality"] = pandas.Series(networkx.degree_centrality(email))
    data["closeness_centrality"] = pandas.Series(networkx.closeness_centrality(email))
    data["betweenness_centrality"] = pandas.Series(networkx.betweenness_centrality(email))
    data["pagerank"] = pandas.Series(networkx.pagerank(email))
    _, authority = networkx.hits(email)
    data["authority"] = pandas.Series(authority)
    data.to_hdf("email_data.h5", "df")
else:
    data = pandas.read_hdf("email_data.h5", "df")
print(data.head())
print(data.management.unique())
print(data.department.unique())
Department Dummy Variables
Even though I don't think it will prove useful, the department feature is actually categorical despite being stored as integers, so we'll use one-hot encoding to add dummy variables for it.
dummies_data = pandas.get_dummies(data, columns=["department"])
print(dummies_data.head(1))
Separating the Training and Prediction Sets
We're going to use the model to predict the missing management values, so I'm going to separate the missing and non-missing sets.
training_data = dummies_data[pandas.notnull(dummies_data.management)]
prediction_data = dummies_data[pandas.isnull(dummies_data.management)]
print(training_data.shape)
print(prediction_data.shape)
The problem description tells us that the answer should have 252 entries so this is a safe assertion.
assert len(prediction_data) == 252
Training and Target Data
To train the model we'll need to separate out the management
column (and remove it entirely from the prediction
set).
non_management = [column for column in training_data.columns if column != "management"]
y_train = training_data.management
x_train = training_data[non_management]
x_predict = prediction_data[non_management]
Scaling
I don't think the Random Forest model that I'm going to use needs it, but I'm going to standardize the data.
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_predict = pandas.DataFrame(scaler.transform(x_predict), index=x_predict.index)
Feature Selection
Since we now have so many features, I'm going to do some feature selection.
print(x_train.shape)
print(x_predict.shape)
trees = ExtraTreesClassifier(n_estimators=10)
eliminator = RFECV(estimator=trees, cv=StratifiedKFold(10), scoring="roc_auc")
eliminator.fit(x_train, y_train)
x_train_reduced = eliminator.transform(x_train)
x_predict_reduced = pandas.DataFrame(eliminator.transform(x_predict), index=x_predict.index)
print(x_train_reduced.shape)
print(x_predict_reduced.shape)
When I trained on a train-test split, the feature elimination left 17 columns. I wonder whether fitting on the whole training set changes the outcome.
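As a quick sanity check (a sketch re-using the `train_test_split` import from above; `random_state=0` is an arbitrary choice), one could re-run the elimination on a split and compare the number of surviving features:

x_sub, x_hold, y_sub, y_hold = train_test_split(x_train, y_train,
                                                random_state=0)
check = RFECV(estimator=ExtraTreesClassifier(n_estimators=10),
              cv=StratifiedKFold(10), scoring="roc_auc")
check.fit(x_sub, y_sub)
# compare against the 17 columns seen with the earlier split
print(check.n_features_)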
Logistic Regression
model = LogisticRegressionCV(penalty="l1", scoring="roc_auc",
                             solver="liblinear", cv=StratifiedKFold(10))
model.fit(x_train_reduced, y_train)
print(model.scores_[1.0].mean())
print(model.scores_[1.0].std())
It seems to be doing much worse than when I used the train-test split.
Random Forests
parameter_grid = dict(n_estimators=range(10, 100, 10))
search = GridSearchCV(RandomForestClassifier(), parameter_grid,
                      cv=StratifiedKFold(10), scoring="roc_auc")
search.fit(x_train_reduced, y_train)
print(search.best_score_)
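It's also worth looking at which `n_estimators` value won; `best_params_` is the standard GridSearchCV attribute for this.

print(search.best_params_)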
class RandomForest(object):
    """builds the random forest

    Args:
     x_train (array): data to train on
     y_train (array): targets for training
     start (int): start value for number of estimators
     stop (int): upper value for range of estimators
     step (int): increment for range of estimators
     folds (int): K-folds for cross-validation
    """
    def __init__(self, x_train, y_train,
                 start=10, stop=100, step=10, folds=10):
        self.x_train = x_train
        self.y_train = y_train
        self.start = start
        self.stop = stop
        self.step = step
        self.folds = folds
        self._parameters = None
        self._search = None
        self._model = None
        return

    @property
    def parameters(self):
        """parameters for the grid-search"""
        if self._parameters is None:
            self._parameters = dict(n_estimators=range(self.start,
                                                       self.stop,
                                                       self.step))
        return self._parameters

    @property
    def search(self):
        """fitted grid search to find hyper-parameters"""
        if self._search is None:
            self._search = GridSearchCV(RandomForestClassifier(),
                                        self.parameters,
                                        cv=StratifiedKFold(self.folds),
                                        scoring="roc_auc")
            self._search.fit(self.x_train, self.y_train)
        return self._search

    @property
    def model(self):
        """best model found by the grid search"""
        if self._model is None:
            self._model = self.search.best_estimator_
        return self._model
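A minimal usage sketch, re-using the reduced training set from above (note that the grid search runs lazily on first access of `search`):

forest = RandomForest(x_train_reduced, y_train)
print(forest.search.best_score_)
print(forest.model)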
Data Loader
Since having all these org-babel blocks scattered around makes things hard to keep track of, I'm going to make a class that bundles everything together.
class DataLoader(object):
    """loads and transforms the data

    Args:
     estimators (int): number of trees to use for feature elimination
    """
    def __init__(self, estimators=10):
        self.estimators = estimators
        self._data = None
        self._dummies_data = None
        self._training_data = None
        self._prediction_data = None
        self._non_management = None
        self._y_train = None
        self._x_train = None
        self._x_predict = None
        self._scaler = None
        self._x_train_scaled = None
        self._x_predict_scaled = None
        self._eliminator = None
        self._x_train_reduced = None
        self._x_predict_reduced = None
        return

    @property
    def data(self):
        """The initial data"""
        if self._data is None:
            if not os.path.isfile("email_data.h5"):
                data = pandas.DataFrame(index=email.nodes())
                data["department"] = pandas.Series(
                    networkx.get_node_attributes(email, "Department"))
                data["management"] = pandas.Series(
                    networkx.get_node_attributes(email, "ManagementSalary"))
                data["clustering"] = pandas.Series(networkx.clustering(email))
                data["degree"] = pandas.Series(email.degree())
                data["degree_centrality"] = pandas.Series(
                    networkx.degree_centrality(email))
                data["closeness_centrality"] = pandas.Series(
                    networkx.closeness_centrality(email))
                data["betweenness_centrality"] = pandas.Series(
                    networkx.betweenness_centrality(email))
                data["pagerank"] = pandas.Series(networkx.pagerank(email))
                _, authority = networkx.hits(email)
                data["authority"] = pandas.Series(authority)
                data.to_hdf("email_data.h5", "df")
                self._data = data
            else:
                self._data = pandas.read_hdf("email_data.h5", "df")
        return self._data

    @property
    def dummies_data(self):
        """one-hot-encoded data"""
        if self._dummies_data is None:
            self._dummies_data = pandas.get_dummies(self.data,
                                                    columns=["department"])
        return self._dummies_data

    @property
    def training_data(self):
        """data with management information"""
        if self._training_data is None:
            self._training_data = self.dummies_data[pandas.notnull(
                self.dummies_data.management)]
        return self._training_data

    @property
    def prediction_data(self):
        """data missing management information"""
        if self._prediction_data is None:
            self._prediction_data = self.dummies_data[pandas.isnull(
                self.dummies_data.management)]
            assert len(self._prediction_data) == 252
        return self._prediction_data

    @property
    def non_management(self):
        """list of columns minus management"""
        if self._non_management is None:
            self._non_management = [
                column for column in self.training_data.columns
                if column != "management"]
        return self._non_management

    @property
    def y_train(self):
        """target-data for training"""
        if self._y_train is None:
            self._y_train = self.training_data.management
        return self._y_train

    @property
    def x_train(self):
        """data for training"""
        if self._x_train is None:
            self._x_train = self.training_data[self.non_management]
        return self._x_train

    @property
    def x_predict(self):
        """set to make predictions on"""
        if self._x_predict is None:
            self._x_predict = self.prediction_data[self.non_management]
        return self._x_predict

    @property
    def scaler(self):
        """standard scaler"""
        if self._scaler is None:
            self._scaler = StandardScaler()
        return self._scaler

    @property
    def x_train_scaled(self):
        """training data scaled to mean 0, standard deviation 1"""
        if self._x_train_scaled is None:
            self._x_train_scaled = self.scaler.fit_transform(self.x_train)
        return self._x_train_scaled

    @property
    def x_predict_scaled(self):
        """prediction data with mean 0, standard deviation 1

        The answer requires the index, so this is a DataFrame
        instead of an array

        Returns:
         pandas.DataFrame: scaled data with index preserved
        """
        if self._x_predict_scaled is None:
            self._x_predict_scaled = pandas.DataFrame(
                self.scaler.transform(self.x_predict),
                index=self.x_predict.index)
        return self._x_predict_scaled

    @property
    def eliminator(self):
        """recursive feature eliminator"""
        if self._eliminator is None:
            trees = ExtraTreesClassifier(n_estimators=self.estimators)
            self._eliminator = RFECV(estimator=trees,
                                     cv=StratifiedKFold(10),
                                     scoring="roc_auc")
            self._eliminator.fit(self.x_train_scaled, self.y_train)
        return self._eliminator

    @property
    def x_train_reduced(self):
        """training data with features eliminated"""
        if self._x_train_reduced is None:
            self._x_train_reduced = self.eliminator.transform(
                self.x_train_scaled)
        return self._x_train_reduced

    @property
    def x_predict_reduced(self):
        """prediction data with features eliminated"""
        if self._x_predict_reduced is None:
            self._x_predict_reduced = pandas.DataFrame(
                self.eliminator.transform(self.x_predict_scaled),
                index=self.x_predict_scaled.index)
        return self._x_predict_reduced
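A quick smoke-test of the loader (accessing `x_train_reduced` triggers the whole pipeline lazily):

loader = DataLoader()
print(loader.x_train_reduced.shape)
print(loader.x_predict_reduced.shape)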
Submission
def salary_predictions():
    """Predicts the probability that each unlabeled employee is management

    Returns:
        pandas.Series: probability of a management salary, indexed by node id
    """
    data = DataLoader()
    forest = RandomForest(data.x_train_reduced, data.y_train)
    # probabilities is an array with rows of
    # [<probability not management>, <probability management>]
    # see forest.model.classes_ for what each column represents
    probabilities = forest.model.predict_proba(data.x_predict_reduced)
    return pandas.Series(probabilities[:, 1],
                         index=data.x_predict_reduced.index)
output = salary_predictions()
print(output.head())
assert all(output.index == DataLoader().prediction_data.index)
assert len(output) == 252
Part 2B - New Connections Prediction
For the last part of this assignment, you will predict future connections between employees of the network. The future connections information has been loaded into the variable `future_connections`. The index is a tuple indicating a pair of nodes that currently do not have a connection, and the `Future Connection` column indicates if an edge between those two nodes will exist in the future, where a value of 1.0 indicates a future connection.
future_connections = pandas.read_csv('Future_Connections.csv', index_col=0, converters={0: eval})
print(future_connections.head(10))
print(future_connections['Future Connection'].value_counts())
Using the network `email` and `future_connections`, identify the edges in `future_connections` with missing values and predict whether or not these edges will have a future connection.
To accomplish this, you will need to create a matrix of features for the edges found in `future_connections` using networkx, train a sklearn classifier on those edges in `future_connections` that have `Future Connection` data, and predict the probability of a future connection for those edges in `future_connections` where `Future Connection` is missing.
Your predictions will need to be given as the probability of the corresponding edge being a future connection.
The evaluation metric for this assignment is the Area Under the ROC Curve (AUC).
Your grade will be based on the AUC score computed for your classifier. A model with an AUC of 0.75 or higher will receive full points.
Using your trained classifier, return a series of length 122112 with the data being the probability of the edge being a future connection, and the index being the edge as represented by a tuple of nodes.
(107, 348)    0.35
(542, 751)    0.40
(20, 426)     0.55
(50, 989)     0.35
...
(939, 940)    0.15
(555, 905)    0.35
(75, 101)     0.65
Length: 122112, dtype: float64
Add Network Features
class Futures(object):
    target = "Future Connection"
    data_file = "Future_Connections.csv"
    graph_file = "email_prediction.txt"
    networkx_data_index = 2
    folds = 10

class DataNames(object):
    resource_allocation = "resource_allocation"
    jaccard = "jaccard_coefficient"
    adamic = "adamic_adar"
    preferential = "preferential_attachment"
def add_networkx_data(adder, name, graph=email, frame=future_connections):
    """Adds networkx data to the frame

    The networkx link-prediction functions return generators of triples:
    (first-node, second-node, value)

    This will use the index of the frame that's passed in as the source of
    node-pairs for the networkx function (called `ebunch` in the networkx
    documentation) and add only the value we want back to the frame

    Args:
     adder: networkx function to call to get the new data
     name: column-name to add to the frame
     graph: networkx graph to pass to the function
     frame (pandas.DataFrame): frame with node-pairs as index to add data to
    """
    frame[name] = [output[Futures.networkx_data_index]
                   for output in adder(graph, frame.index)]
    return frame
Adding a Resource Allocation Index
add_networkx_data(networkx.resource_allocation_index,
                  DataNames.resource_allocation)
print(future_connections.head(1))
Adding the Jaccard Coefficient
add_networkx_data(networkx.jaccard_coefficient, DataNames.jaccard)
print(future_connections.head(1))
Adamic Adar
add_networkx_data(networkx.adamic_adar_index, DataNames.adamic)
print(future_connections.head(1))
Preferential Attachment
add_networkx_data(networkx.preferential_attachment, DataNames.preferential)
print(future_connections.head(1))
Set Up the Training and Prediction Data
Separating the Edges Without 'Future Connection' Values
We are going to train on the rows that have `Future Connection` values and then make predictions for the rows that don't.
prediction_set = future_connections[future_connections[Futures.target].isnull()]
training_set = future_connections[future_connections[Futures.target].notnull()]
print(prediction_set.shape)
print(training_set.shape)
assert len(prediction_set) + len(training_set) == len(future_connections)
Separate the Target and Training Sets
non_target = [column for column in future_connections.columns
              if column != Futures.target]
x_train = training_set[non_target]
y_train = training_set[Futures.target]
x_predict = prediction_set[non_target]
assert all(x_train.columns == x_predict.columns)
assert len(x_train) == len(y_train)
Scaling the Data
To enable the use of linear models I'm going to scale the data so the mean is 0 and the variance is 1.
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_predict_scaled = scaler.transform(x_predict)
x_train_frame = pandas.DataFrame(x_train_scaled, columns=x_train.columns)
x_predict_frame = pandas.DataFrame(x_predict_scaled, columns=x_predict.columns)
print(x_train_frame.describe())
print(x_predict_frame.describe())
Feature Selection
To reduce the dimensionality I'm going to use model-based selection with Extra Trees.
estimator = ExtraTreesClassifier()
estimator.fit(x_train_scaled, y_train)
selector = SelectFromModel(estimator, prefit=True)
x_train_trees_sfm = selector.transform(x_train_scaled)
x_predict_sfm = selector.transform(x_predict_scaled)
print(estimator.feature_importances_)
print(x_train_trees_sfm.shape)
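To see which features survived by name (a sketch; `get_support` is sklearn's standard boolean mask of kept columns, and it works here because the selector was built with `prefit=True`):

kept = x_train.columns[selector.get_support()]
print(list(kept))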
Missing Future Connections
model = LogisticRegressionCV(n_jobs=-1, scoring="roc_auc", solver="liblinear",
                             cv=StratifiedKFold())
model.fit(x_train_trees_sfm, y_train)
for scores in model.scores_[1.0]:
    print(max(scores))
print(model.classes_)
def new_connections_predictions():
    """Returns probability of a future connection, indexed by edge"""
    probabilities = model.predict_proba(x_predict_sfm)
    return pandas.Series(probabilities[:, 1], index=prediction_set.index)
outcome = new_connections_predictions()
assert len(outcome) == 122112, len(outcome)
print(outcome.head())