Commit 14ac2467 authored by Chanelle Lee

Finished all testing. Passes everything. Complete.

parent 92fc635b
@@ -229,16 +229,16 @@ int check_neighbourhood_consistent(int num_options, int pool_size,
return 1;
}
double ret_prob_of_confusion(double options_diff, double lambda)
{
double denom = 1 - exp(-lambda);
if (denom != 0)
{
return 0.5 * ((exp((-lambda) * options_diff) - exp(-lambda)) / denom);
}
else
{
return 0.5;
}
}
// double ret_prob_of_confusion(double options_diff, double lambda)
// {
// double denom = 1 - exp(-lambda);
// if (denom != 0)
// {
// return 0.5 * ((exp((-lambda) * options_diff) - exp(-lambda)) / denom);
// }
// else
// {
// return 0.5;
// }
// }
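For reference, the commented-out function maps the quality difference between two options to a confusion probability: 0.5 at options_diff = 0 (indistinguishable options), 0 at options_diff = 1, decaying exponentially at rate lambda in between, and falling back to the constant 0.5 when lambda = 0 makes the denominator vanish. A minimal Python sketch of the same formula, not part of this commit:

import math

def ret_prob_of_confusion(options_diff, lam):
    # Probability of confusing two options whose qualities differ by
    # options_diff in [0, 1]; lam controls how fast confusion decays.
    denom = 1.0 - math.exp(-lam)
    if denom == 0.0:
        return 0.5  # lam == 0: maximal confusion regardless of difference
    return 0.5 * (math.exp(-lam * options_diff) - math.exp(-lam)) / denom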
@@ -3,6 +3,7 @@ from multiprocessing import Pool
import pandas as pd
import contextlib
import numpy as np
import os
def unwrap_self_runSingleSim(arg, **kwarg):
@@ -21,6 +22,7 @@ class Trial(object):
Simulation data is saved as pandas dataframes in hdf datastores
"""
def __init__(self, numSims, parameters, optionWeights, filePath, path):
"""
Initialising method for Trial class
@@ -30,16 +32,34 @@ class Trial(object):
numSims : int
Number of simulations that will be run
parameters : dict
Contains simulation parameters generated by runParams.py
Contains simulation parameters generated by parameters.py
{totalIts: int, popSize: int, distrust: float, poolSize: int,
evidenceRate: int, w: float}
evidenceRate: int, w: float, numOptions: int}
optionWeights : list
Weights to use when picking options
Attributes
----------
Same as Parameters
"""
if not isinstance(parameters, dict):
raise TypeError("Passed value must be a dictionary")
if not all(key in parameters for key in ('numOptions', 'totalIts',
'popSize', 'distrust',
'poolSize', 'evidenceRate',
'w')):
raise ValueError("Parameters dict must contain the following:"
"numOptions, totalIts, popSize, distrust, "
"poolSize, evidenceRate, w")
if np.array(optionWeights).shape != (parameters["numOptions"], ):
raise ValueError("Passed value is not correct length - must have "
"weight for every option.")
if not os.path.isdir(path):
raise ValueError("Path is invalid")
self._numSims = numSims
self._parameters = parameters
self._numOptions = parameters['numOptions']
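A call that satisfies the checks above might look like the following sketch (all values are hypothetical, and path must be an existing directory):

params = {'numOptions': 4, 'totalIts': 1000, 'popSize': 100,
          'distrust': 0.001, 'poolSize': 5, 'evidenceRate': 1,
          'w': 0.5}
trial = Trial(numSims=10, parameters=params,
              optionWeights=[0.25, 0.25, 0.25, 0.25],
              filePath='trial_0', path='temp_results/')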
@@ -52,29 +72,30 @@ class Trial(object):
def runSingleSim(self, i):
"""
Initialises the ith simulation with the parameters set when Trial
instance is initialised. Calls Simulation.run on the simulation and
then Simulation.save to save it to the trial's initialised file path.
Note
----
Simulations are saved to file and then deleted to free up memory;
lists of simulations grow too large to hold in memory at once.
instance is initialised. Runs the simulation, stores the results
before deleting the simulation to save memory. Then returns the
results.
Parameters
----------
i : int
Index for the pool mapping; also the simulation identifier to
add to file name for saving. Will always be between 0 and numSims
add to file name for saving. Will always be in [0, numSims)
Returns
-------
1 : int
Must return something to Pool.map, but simulations are too large and
so are saved separately. Thus 1 is returned so results can be counted
to ensure all simulations were run.
result : dict
sim : int
identifier of the simulation
beliefs : numpy array
beliefs of the population throughout the simulation
neededIterations : int
number of iterations needed by the simulation to reach
correct consensus or time out
"""
s = Simulation(self._parameters, self._optionWeights)
if not (0 <= i < self._numSims):
raise ValueError("Sim id out of range")
s = Simulation(optionWeights=self._optionWeights, **self._parameters)
neededIterations = s.run()
result = {'sim': i,
'beliefs': s.populationBeliefs,
@@ -82,6 +103,60 @@ class Trial(object):
del s
return result
def retBeliefResultsDF(self, beliefs):
"""
Collects the beliefs of every simulation's population into a
dataframe, labelling each belief with the simulation, iteration,
agent and option it belongs to
e.g. in simulation 0 at iteration 0 with n = 5 options, every agent
holds belief 0.2 in every option:
simID | iteration | agent | option | belief
0 | 0 | 0 | 0 | 0.2
...
0 | 0 | 0 | 4 | 0.2
...
0 | 0 | 3 | 2 | 0.2
"""
sims = np.arange(0, self._numSims)
iterations = np.arange(0, self._parameters['totalIts'])
agents = np.arange(0, self._parameters['popSize'])
options = np.arange(0, self._numOptions)
arrays = np.meshgrid(sims, iterations, agents, options)
data = np.stack([a.reshape(-1).T for a in arrays]).T
df = pd.DataFrame(data=data,
columns=['simID',
'iteration',
'agent',
'option'])
df.sort_values(by=['simID',
'iteration',
'agent',
'option'],
inplace=True)
df['belief'] = np.array(beliefs).flatten()
return df
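The meshgrid above enumerates every (simID, iteration, agent, option) combination, so the flattened belief array lines up with one row per combination. A tiny standalone check of the shapes involved, using illustrative sizes:

import numpy as np

sims, its, agents, opts = np.arange(2), np.arange(1), np.arange(2), np.arange(2)
arrays = np.meshgrid(sims, its, agents, opts)
data = np.stack([a.reshape(-1) for a in arrays]).T
print(data.shape)  # (8, 4): one row per (simID, iteration, agent, option)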
def retSimResultsDF(self, simResults):
"""
Collects, for each simulation in the trial, the number of iterations
needed before a correct consensus was found (or a timeout) into a dataframe
e.g.
simID | iterations
0 | 4
1 | 10
"""
df = pd.DataFrame([[d['sim'],
d['neededIterations']]
for d in simResults],
columns=['simID', 'iterations'])
return df
def runSimulations(self):
"""
Uses the multiprocessing Pool.map function to simultaneously run
@@ -106,61 +181,31 @@ class Trial(object):
range(0, self._numSims)))
assert (len(results) == self._numSims), "Not all simulations run!"
self.updateBeliefResults(results)
self.updateSimResults(results)
self.beliefResults = self.retBeliefResultsDF([d["beliefs"]
for d in results])
self.simResults = self.retSimResultsDF(results)
del results
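The module-level unwrap_self_runSingleSim helper exists because bound methods cannot be pickled for Pool.map on older Python versions; presumably it re-dispatches the (trial, simID) tuples produced by zip, along these lines:

def unwrap_self_runSingleSim(arg, **kwarg):
    # arg is a (Trial instance, sim id) tuple; calling through the class
    # keeps the Pool.map target picklable.
    return Trial.runSingleSim(*arg, **kwarg)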
def retConfigDF(self):
config = self._parameters.copy()
config['numOptions'] = self._numOptions
configDF = pd.DataFrame(data=config, index=[0])
configDF = pd.DataFrame(data=self._parameters, index=[0])
configDF['key'] = "{:<50}".format(self._filePath)
return configDF
def saveConfigDF(self):
configDF = self.retConfigDF()
configDF['key'] = "{:<50}".format(self._filePath)
configDF.to_hdf(self._path + 'Config', key='configs', append=True,
configDF.to_hdf(self._path + 'Config.h5', key='configs', append=True,
mode='a')
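Because each trial appends one row keyed by its fixed-width file path, the accumulated configs can later be read back in a single call; a hypothetical read-back (path and column selection assumed):

import pandas as pd

configs = pd.read_hdf('temp_results/Config.h5', key='configs')
print(configs[['key', 'popSize', 'distrust']])  # one row per saved trial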
def saveBeliefResults(self):
self.beliefResults.to_hdf(self._path + 'Beliefs', key=self._filePath,
mode='a', format='table', complevel=9)
def updateSimResults(self, results):
self.simResults = pd.DataFrame([[d['sim'],
d['neededIterations']]
for d in results],
columns=['simID', 'iterations'])
self.beliefResults.to_hdf(self._path + 'Beliefs.h5',
key=self._filePath, mode='a', format='table',
complevel=9)
def saveSimResults(self):
configDF = pd.concat([self.retConfigDF()] * self._numSims,
ignore_index=True)
df = pd.concat([configDF, self.simResults], axis=1)
df.to_hdf(self._path + 'AllTrialResults', append=True, key='results',
mode='a', complevel=9)
self.simResults.to_hdf(self._path + 'AllTrialResults.h5', append=True,
key=self._filePath, mode='a', complevel=9)
def saveTrial(self):
self.saveConfigDF()
self.saveBeliefResults()
self.saveSimResults()
def updateBeliefResults(self, results):
sims = np.arange(0, self._numSims)
iterations = np.arange(0, self._parameters['totalIts'])
agents = np.arange(0, self._parameters['popSize'])
options = np.arange(0, self._numOptions)
arrays = np.meshgrid(sims, iterations, agents, options)
data = np.stack([a.reshape(-1).T for a in arrays]).T
self.beliefResults = pd.DataFrame(data=data,
columns=[str('simID'),
str('iteration'),
str('agent'),
str('option')])
self.beliefResults.sort_values(by=['simID',
'iteration',
'agent',
'option'],
inplace=True)
self.beliefResults['belief'] = np.array([d['beliefs']
for d in results]).flatten()
temp_results/
@@ -2,9 +2,39 @@ import pytest
import numpy.testing
import numpy as np
from collections import Counter
from noComparisonSimulation import helperFunctions as hf
@pytest.mark.parametrize("num_options, weights, expected", [
(4, (1, 0, 0, 0), (100000, 0, 0, 0)),
(4, (0.25, 0.25, 0.25, 0.25), (25000, 25000, 25000, 25000)),
(4, (0, 0.5, 0.25, 0.25), (0, 50000, 25000, 25000))
])
def test_pick_option(num_options, weights, expected):
trials = [hf.pick_option(num_options, weights) for _ in range(100000)]
counter = Counter(trials)
for i in range(0, num_options):
assert (expected[i] * 0.9) <= counter[i] <= (expected[i] * 1.1)
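pick_option is tested statistically here, with a +/-10% tolerance around the expected counts over 100000 draws. An implementation consistent with these tests would be a weighted random draw, e.g. this hypothetical sketch:

import numpy as np

def pick_option(num_options, weights):
    # Draw one option index with probability proportional to its weight.
    return np.random.choice(num_options, p=weights)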
@pytest.mark.parametrize("evidenceID, distrust, belief, expected", [
(0, 0.001, np.array([1.0, 0.0]), np.array([1.0, 0.0])),
(1, 0.001, np.array([1.0, 0.0]), np.array([1.0, 0.0])),
(0, 0.001, np.array([0.8, 0.2]), np.array([0.9997, 0.0003])),
(1, 0.001, np.array([0.8, 0.2]), np.array([0.0040, 0.9960])),
(0, 0.2, np.array([0.8, 0.2]), np.array([0.9412, 0.0588])),
(0, 0.001, np.array([0.6, 0.3, 0.1]), np.array([0.9993, 0.0005, 0.0002])),
(2, 0.001, np.array([0.6, 0.3, 0.1]), np.array([0.0060, 0.0030, 0.9911]))
])
def test_update_on_evidence(evidenceID, distrust, belief, expected):
numpy.testing.assert_almost_equal(hf.update_on_evidence(evidenceID,
distrust,
belief),
expected,
decimal=4)
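The expected values above are all consistent with a multiplicative update: scale the evidenced option's belief by (1 - distrust) and every other option's belief by distrust, then renormalise. A sketch that reproduces the parametrised cases (the actual helperFunctions implementation may differ):

import numpy as np

def update_on_evidence(evidence_id, distrust, belief):
    # Weight the evidenced option by (1 - distrust), the rest by distrust.
    likelihood = np.full_like(belief, distrust)
    likelihood[evidence_id] = 1.0 - distrust
    posterior = belief * likelihood
    return posterior / posterior.sum()  # renormalise to a probability vector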
@pytest.mark.parametrize("num_options, pool_size, beliefs, expected", [
(2, 2, np.array([[0.6, 0.4], [0.6, 0.4]]), np.array([0.6923, 0.3077])),
(3, 2, np.array([[0.6, 0.3, 0.1], [0.0, 0.25, 0.75]]),
@@ -17,6 +47,18 @@ def test_aggregate_MProdOp(num_options, pool_size, beliefs, expected):
numpy.testing.assert_almost_equal(ret_belief, expected, decimal=4)
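The expected aggregates here match a normalised element-wise product of the pooled beliefs (a product-operator fusion); a hypothetical sketch:

import numpy as np

def aggregate_mprodop(beliefs):
    # Element-wise product across the pool, renormalised to sum to 1.
    pooled = np.prod(beliefs, axis=0)
    return pooled / pooled.sum()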
@pytest.mark.parametrize("belief1, belief2, expected", [
(np.array([1.0, 0.0]), np.array([1.0, 0.0]), True),
(np.array([0.6, 0.4]), np.array([0.4, 0.6]), True),
(np.array([1.0, 0.0]), np.array([0.0, 1.0]), False),
(np.array([0.6, 0.3, 0.1]), np.array([0.0, 0.25, 0.75]), True),
(np.array([1.0, 0.0, 0.0]), np.array([0.0, 0.0, 1.0]), False),
(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]), False)
])
def test_check_neighbour_consistent(belief1, belief2, expected):
assert hf.check_neighbour_consistent(belief1, belief2) == expected
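These cases are consistent with treating two beliefs as consistent whenever they assign positive probability to at least one common option, i.e. their dot product is non-zero; a hypothetical sketch:

import numpy as np

def check_neighbour_consistent(belief1, belief2):
    # Consistent iff some option has positive belief under both agents.
    return bool(np.dot(belief1, belief2) > 0)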
def test_check_neighbourhood_consistent():
beliefs = np.array([[0.6, 0.3, 0.1], [0.0, 0.25, 0.75]])
assert hf.check_neighbourhood_consistent(3, 2, beliefs) == 1