Commit 213c0da2 authored by Thom Badings's avatar Thom Badings

Complete remake of implementation

parent 1139e94d
import numpy as np
import pandas as pd
from functions import ticDiff, tocDiff
from parser import parser, parse_settings
from storm_interface import load_problem, sample_results
from compute_bounds import compute_avg_beta, compute_avg_beta_sttt, \
    compute_avg_eta_sttt
args = parser()
Z = parse_settings(args)
# Create buffer for output file
buffer = ['OUTPUT FOR MODEL: '+str(Z['model']),'\n']
# Load problem
parameters, model, properties = load_problem(Z['model'],
                                             Z['property'],
                                             Z['bisim'])
totalN = Z['iterations'] * max(Z['Nsamples'])
ticDiff()
# Sample solutions
solutions = sample_results(totalN, parameters, model, properties,
                           Z['model'], Z['verbose'])
storm_time = np.round(tocDiff(False) / totalN * 1000, 3)
buffer += ['\n','Run time in Storm per 1000 samples: '+str(storm_time),'\n']
beta_results = pd.Series()
eta_columns = [str(beta)+'-'+str(c) for beta in Z['beta']
               for c in Z['comperator']]
eta_index = Z['Nsamples']
eta_results = pd.DataFrame(0, index=eta_index, columns=eta_columns)
# For every number of samples
for n, N in enumerate(Z['Nsamples']):
    # For every comperator
    for c, thres in zip(Z['comperator'], Z['threshold']):
        if c != "leq" and c != "geq":
            raise RuntimeError("Invalid direction type, direction type should be 'leq' or 'geq'")
        num_iter = int(np.floor(totalN / N))
        violated = np.zeros(num_iter)
        # For every iteration
        for i in range(num_iter):
            start_idx = i * N
            end_idx = start_idx + N
            if c == "leq":
                violated[i] = np.sum(solutions[start_idx:end_idx] > thres)
            else:
                violated[i] = np.sum(solutions[start_idx:end_idx] < thres)
        # Compute bounds
        if Z['eta'] != 0:
            print('\nCOMPUTE BETA BASED ON ETA...')
            print('\n------------\nNEW THEOREM:')
            key = str(N)+'-'+str(c)
            beta_results[key] = np.round(compute_avg_beta_sttt(violated, N, Z['eta']), 5)
            buffer += ['\nConfidence probability for eta='+str(Z['eta'])+' is beta='+str(beta_results[key])]
        if Z['beta'] != 0:
            print('\nCOMPUTE ETA BASED ON BETA...')
            print('\n------------\nNEW THEOREM:')
            for i, bet in enumerate(Z['beta']):
                print('\nCompute eta for beta='+str(bet))
                key = str(bet)+'-'+str(c)
                eta_results.loc[N, key] = np.round(compute_avg_eta_sttt(violated, N, bet), 5)
                buffer += ['\nLower bound sat.prob. for beta='+str(bet)+' is eta='+str(eta_results.loc[N, key])]
beta_star = np.zeros(len(num_samples))
eta_star = np.zeros((len(num_samples), len(beta)))
storm_timePerIter = np.zeros(len(num_samples))
guarantee_time = np.zeros(len(num_samples))
for n, N in enumerate(num_samples):
    N = int(N)
    print('\n >> Start for N='+str(N))
    buffer += ['\n----------','\nStart for N='+str(N)]
    ticDiff()
    counter_array = run_sample(numiter, N, threshold, direction, parameters,
                               model, properties, model_file, verbose)
    print("printing specification violation array")
    print(counter_array)
    # this is violation probability
    storm_timePerIter[n] = np.round(tocDiff(False) / numiter, 3)
    buffer += ['\n','Run time per iteration (of N samples) in Storm: '+str(storm_timePerIter[n]),'\n']
    if eta != 0:
        print('\nCOMPUTE BETA BASED ON ETA...')
        print('\n------------\nOLD THEOREM:')
        compute_avg_beta(counter_array, N, eta)
        print('\n------------\nNEW THEOREM:')
        beta_star[n] = np.round(compute_avg_beta_sttt(counter_array, N, eta), 5)
        buffer += ['\nConfidence probability for eta='+str(eta)+' is beta='+str(beta_star[n])]
    if beta != 0:
        print('\nCOMPUTE ETA BASED ON BETA...')
        print('\n------------\nNEW THEOREM:')
        for i, bet in enumerate(beta):
            print('\nCompute eta for beta='+str(bet))
            eta_star[n,i] = np.round(compute_avg_eta_sttt(counter_array, N, bet), 5)
            buffer += ['\nLower bound sat.prob. for beta='+str(bet)+' is eta='+str(eta_star[n,i])]
    guarantee_time[n] = tocDiff(False)
    buffer += ['\nTime required to compute guarantees: '+str(guarantee_time[n])]
f = open('output/'+outfile+'.txt', "w")
f.writelines(buffer)
f.close()
import pandas as pd
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter('output/'+outfile+'.xlsx', engine='xlsxwriter')
if eta != 0:
    beta_df = pd.DataFrame(data=beta_star, index=num_samples, columns=[outfile])
    beta_df.to_excel(writer, sheet_name='Confidence (beta)')
if beta != 0:
    eta_df = pd.DataFrame(data=eta_star, index=num_samples, columns=beta)
    eta_df.to_excel(writer, sheet_name='Sat.probability (eta)')
time_df = pd.DataFrame(data=storm_timePerIter, index=num_samples, columns=[outfile])
time_df.to_excel(writer, sheet_name='Time in storm')
time2_df = pd.DataFrame(data=guarantee_time, index=num_samples, columns=[outfile])
time2_df.to_excel(writer, sheet_name='Time for bounds')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
print('\n\n---------------------\n\n')
\ No newline at end of file
@@ -2,6 +2,6 @@
# Run all benchmarks at once
bash run_all_brp.sh;
bash run_all_crowds.sh;
bash run_all_nand.sh;
bash run_all_consensus.sh;
bash run_all_drone.sh;
\ No newline at end of file
bash run_all_drone.sh;
bash run_all_nand.sh;
\ No newline at end of file
import numpy as np
from core.functions import compute_eta, compute_beta
# def compute_avg_beta(counterarray,N,eta):
#     '''
#     :param counterarray: approximate satisfaction probs
#     :param N: number of samples
#     :param eps:
#     :param direction: if True, then the spec is \geq, if False, then it is \leq
#     :return:
#     '''
#     #storing probabilities for each iteration
#     valuearray = np.zeros(len(counterarray))
#     for iters in range(len(counterarray)):
#         #compute number of constraints to remove in the LP
#         removeconstraints = int(N * (counterarray[iters]))
#         #approximately compute the confidence prob
#         val3 = BCDF(1-eta, N, removeconstraints)
#         valuearray[iters] = val3
#     print("average eta value of the array:")
#     print(np.mean(valuearray))
#     return np.mean(valuearray)
def compute_avg_beta_sttt(violated_array, N, eta, verbose=False):
    '''
    :param violated_array: number of violated samples in each iteration
    :param N: number of samples per iteration
    :param eta: lower bound on the satisfaction probability
    :param verbose: if True, print the bound computed for every iteration
    :return: average confidence probability (beta) over all iterations
    '''
    # Store the confidence probability for each iteration
    beta_array = np.zeros(len(violated_array))
    for i in range(len(violated_array)):
        # Number of constraints to remove in the LP
        removeconstraints = violated_array[i]
        # Approximately compute the confidence probability
        beta = compute_beta(N, removeconstraints, eta)
        if verbose:
            print('Beta for N',N,' k',removeconstraints,' eta',eta,' is',beta)
        beta_array[i] = beta
    beta_mean = np.mean(beta_array)
    print("Avg. confidence probability (beta) for eta="+str(eta)+" is:",beta_mean)
    return beta_mean
def compute_avg_eta_sttt(violated_array, N, beta, verbose=False):
    '''
    :param violated_array: number of violated samples in each iteration
    :param N: number of samples per iteration
    :param beta: confidence probability
    :param verbose: if True, print the bound computed for every iteration
    :return: average lower bound on the satisfaction probability (eta)
    '''
    # Store the satisfaction probability bound for each iteration
    eta_array = np.zeros(len(violated_array))
    for i in range(len(violated_array)):
        # Number of constraints to remove in the LP
        removeconstraints = violated_array[i]
        # Approximately compute the lower bound on the satisfaction probability
        eta = compute_eta(N, removeconstraints, beta)
        if verbose:
            print('Eta for N',N,' k',removeconstraints,' beta',beta,' is',eta)
        eta_array[i] = eta
    eta_mean = np.mean(eta_array)
    print("Avg. lower bound on the satisfaction probability (eta) for beta="+str(beta)+" is:",eta_mean)
    return eta_mean
\ No newline at end of file
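The two averaged bounds above can also be exercised in isolation. The following is a minimal sketch with synthetic violation counts; the array values, N, eta and beta are illustrative placeholders, and it assumes compute_beta and compute_eta from core.functions are importable as in the module above.

import numpy as np
from compute_bounds import compute_avg_beta_sttt, compute_avg_eta_sttt

# Hypothetical experiment: 5 iterations of N=1000 samples each, with the
# given number of violating samples per iteration (placeholder values)
violated = np.array([270, 281, 265, 290, 274])
N = 1000

# Fix eta and compute the average confidence probability beta
beta = compute_avg_beta_sttt(violated, N, eta=0.275)
# Fix beta and compute the average lower bound eta on the satisfaction probability
eta = compute_avg_eta_sttt(violated, N, beta=0.9)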
import argparse
from ast import literal_eval
def parser():
    # Define the parser
    parser = argparse.ArgumentParser(description='Sampler interface')
    # Declare the arguments; each value is stored in the corresponding
    # field of the returned namespace, using a default value if the
    # argument isn't given
    parser.add_argument('--model', action="store", dest='model', default=0)
    parser.add_argument('--property', action="store", dest='property', default=0)
    parser.add_argument('--bisimulation', action="store", dest='bisim', default=0)
    parser.add_argument('--threshold', action="store", dest='threshold', default=0)
    parser.add_argument('--num_samples', action="store", dest='num_samples', default=0)
    parser.add_argument('--num_iter', action="store", dest='num_iter', default=0)
    parser.add_argument('--comperator', action="store", dest='comperator', default=0)
    parser.add_argument('--eta', action="store", dest='eta', default=0)
    parser.add_argument('--beta', action="store", dest='beta', default=0)
    parser.add_argument('--outfile', action="store", dest='outfile', default='output.txt')
    parser.add_argument('--verbose', action="store", dest='verbose', default=0)
    # Weather conditions for drone
    parser.add_argument('--weather', action="store", dest='weather', default=0)
    # Parse the command line arguments and return the namespace
    args = parser.parse_args()
    return args
def parse_settings(args):
    beta = args.beta
    if beta != 0:
        try:
            beta = [float(args.beta)]
        except:
            beta = list(literal_eval(args.beta))
    try:
        num_samples = [int(args.num_samples)]
    except:
        num_samples = list(literal_eval(args.num_samples))
    eta = float(args.eta)
    try:
        threshold = [float(args.threshold)]
    except:
        threshold = list(literal_eval(args.threshold))
    try:
        comperator = literal_eval(args.comperator)
    except:
        comperator = [args.comperator]
    Z = {
        'model': args.model,
        'property': args.property,
        'bisim': args.bisim,
        #
        'iterations': int(args.num_iter),
        'Nsamples': num_samples,
        'eta': eta,
        'beta': beta,
        #
        'threshold': threshold,
        'comperator': comperator,
        #
        'outfile': str(args.outfile),
        'verbose': int(args.verbose),
        #
        'weather': args.weather
    }
    return Z
\ No newline at end of file
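For reference, a minimal sketch of how command-line settings end up in the dictionary Z used by the sampling script; the property file name and the numeric values below are hypothetical placeholders, only the brp model file name matches the example output at the end of this commit.

from argparse import Namespace
from parser import parse_settings

# Hypothetical argument values, as they would arrive from the command line
args = Namespace(model='brp/brp_rewards4_16_5.pm', property='brp.prctl',
                 bisim='weak', threshold='10', num_samples='[1000,2000]',
                 num_iter='2', comperator="['leq']", eta='0.275',
                 beta='[0.9,0.99]', outfile='brp_test', verbose='0', weather=0)

Z = parse_settings(args)
# Z['Nsamples'] == [1000, 2000], Z['beta'] == [0.9, 0.99], Z['eta'] == 0.275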
import numpy as np
from tqdm import tqdm
from drone import create_prism
import stormpy
import stormpy.core
import stormpy.logic
import stormpy.pars
import stormpy.examples
import stormpy.examples.files
def search(values, searchFor):
    for k in values:
        # for v in values[k]:
        if searchFor in k:
            return k
    return None
def load_problem(model_file, property_file, bisimulation_type):
    # Load and build model from file
    path = model_file
    prism_program = stormpy.parse_prism_program(path)
    print("\nBuilding model from {}".format(path))
    with open(property_file) as f:
        formula_str = " ".join([l.rstrip() for l in f])
    # Load properties
    properties = stormpy.parse_properties_for_prism_program(formula_str, prism_program)
    # Set model options
    options = stormpy.BuilderOptions([p.raw_formula for p in properties])
    options.set_add_out_of_bounds_state()
    model = stormpy.build_sparse_parametric_model_with_options(prism_program, options)
    print(model.model_type)
    print("Model supports parameters: {}".format(model.supports_parameters))
    parameters = model.collect_probability_parameters()
    numstate = model.nr_states
    print("Number of states before bisim:",numstate)
    print("Number of params before bisim:",len(parameters))
    # Set bisimulation for model
    if bisimulation_type == 'strong':
        print(" -- Perform strong bisimulation")
        model = stormpy.perform_bisimulation(model, properties, stormpy.BisimulationType.STRONG)
    elif bisimulation_type == 'weak':
        print(" -- Perform weak bisimulation")
        model = stormpy.perform_bisimulation(model, properties, stormpy.BisimulationType.WEAK)
    elif bisimulation_type == 'none':
        print(" -- Skip bisimulation")
    else:
        raise RuntimeError("Invalid bisimulation type; should be 'strong', 'weak', or 'none'.")
    parameters = model.collect_probability_parameters()
    parameters_rew = model.collect_reward_parameters()
    parameters.update(parameters_rew)
    if bisimulation_type != 'none':
        numstate = model.nr_states
        print("Number of states after bisim:",numstate)
        print("Number of params after bisim:",len(parameters))
    print('\nModel successfully loaded!\n')
    return parameters, model, properties
def sample_results(N, parameters, model, properties, model_file, verbose,
                   weather=None):
    '''
    Sample results for instantiated MDPs from Storm.
    '''
    drone = 'drone' in model_file
    # Evaluate model type
    if str(model.model_type) == 'ModelType.DTMC':
        instantiator = stormpy.pars.PDtmcInstantiator(model)
    elif str(model.model_type) == 'ModelType.MDP':
        instantiator = stormpy.pars.PMdpInstantiator(model)
    else:
        raise RuntimeError("Invalid model type, model should be pDTMC or pMDP.")
    # Initialize results array
    solutions = np.zeros(N)
    if drone:
        groups = create_prism.parameter_groups()
        # Instantiate parameters
        parameter_defs = create_prism.parameter_definitions(groups, False)
    # For every sample...
    for n in tqdm(range(N)):
        # Sample parameters according to the region defined by the
        # "Parameter synthesis for Markov models: Faster than ever" paper
        point = dict()
        # Switch between drone and other benchmarks
        if drone:
            parameter_instantiations = create_prism.parameter_dirichlet_instantiations(groups, weather)
            parameter_samples = create_prism.parameter_dirichlet_samples(parameter_instantiations)
            for x in parameters:
                # Instantiate parameters
                parameter_group = search(parameter_instantiations, str(x.name))
                element_int = 0
                for element in parameter_group:
                    parameter_sample_array = parameter_samples[tuple(parameter_group)]
                    #print(parameter_sample_array)
                    if str(element) == str(x.name):
                        point[x] = parameter_sample_array[0, element_int]
                        #print(point[x])
                    element_int = element_int + 1
        else:
            for x in parameters:
                if "coin" in model_file:
                    s = np.random.uniform(0.2, 0.8)
                else:
                    s = np.random.uniform(1e-5, 1-1e-5)
                point[x] = stormpy.RationalRF(s)
        # Assign parameter values to model
        rational_parameter_assignments = dict(
            [[x, stormpy.RationalRF(val)] for x, val in point.items()])
        # Instantiate model
        instantiated_model = instantiator.instantiate(rational_parameter_assignments)
        # Obtain solution and store
        sol = stormpy.model_checking(instantiated_model, properties[0]).at(instantiated_model.initial_states[0])
        solutions[n] = float(sol)
    return solutions
# def run_sample(numiter,numsample,thres,direction,parameters,model,properties,model_file,verbose):
#     '''
#     :param numiter: number of trials to compute the number of samples that satisfies the probability
#     :param numsample: number of sampled pMDPs/pMCs to check
#     :param thres: specification threshold
#     :param direction: if True, then the spec is \leq, if False, then it is \geq
#     :return:
#     '''
#     #storing the approximate satisfaction probability for each iteration
#     counterarray = [0 for _ in range(numiter)]
#     if str(model.model_type)=='ModelType.DTMC':
#         instantiator = stormpy.pars.PDtmcInstantiator(model)
#     elif str(model.model_type)=='ModelType.MDP':
#         instantiator = stormpy.pars.PMdpInstantiator(model)
#     else:
#         raise RuntimeError("Invalid model type, model should be pDTMC or pMDP.")
#     start3 = time.time()
#     #for each iteration
#     for iter in tqdm(range(numiter)):
#         counter = 0
#         # for each sample
#         for i in range(int(numsample)):
#             div = 10
#             if i % int(numsample/div) == 0:
#                 print('Run for sample',i)
#             #sample parameters according to the region defined by
#             # "Parameter synthesis for Markov models: Faster than ever" paper
#             point = dict()
#             #point2=dict()
#             for x in parameters:
#                 if "coin" in model_file:
#                     s = np.random.uniform(0.2, 0.8)
#                     #s= np.random.uniform(1e-5,1-1e-5)
#                 else:
#                     s = np.random.uniform(1e-5,1-1e-5)
#                 point[x] = stormpy.RationalRF(s)
#                 # point2[x]=s
#             #check result
#             # f = open('dump.txt', "a")
#             # f.write(",".join([f"{k.name}={str(v)}".format(k,v) for k,v in point.items()]))
#             # f.close()
#             rational_parameter_assignments = dict(
#                 [[x, stormpy.RationalRF(val)] for x, val in point.items()])
#             instantiated_model = instantiator.instantiate(rational_parameter_assignments)
#             result = stormpy.model_checking(instantiated_model, properties[0]).at(instantiated_model.initial_states[0])
#             if verbose and i % int(numsample/div) == 0:
#                 print(' -- Solution value:',float(result))
#                 #print(result)
#             #append the counter according to the spec
#             if direction==True:
#                 if float(result)>thres:
#                     counter=counter+1
#             else:
#                 if float(result)<thres:
#                     counter=counter+1
#         counterarray[iter] = counter / numsample
#         print('\nNumber of violated samples this iteration:',counter)
#         if iter>1:
#             #print(counterarray[0:iter])
#             print("\nAverage violation so far:",np.mean(counterarray[0:iter]))
#     #print(dict1)
#     t3 = time.time()
#     print("Average solver time :", (t3 - start3)/(numiter))
#     return counterarray
\ No newline at end of file
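Taken together, load_problem, sample_results and the bound helpers form a small pipeline: build a parametric model, sample solutions for random parameter instantiations, and compute scenario-based bounds. The sketch below shows one possible wiring with hypothetical file paths and settings; it assumes stormpy and the repository modules are importable, and it mirrors the violation-counting step from the main script.

import numpy as np
from storm_interface import load_problem, sample_results
from compute_bounds import compute_avg_eta_sttt

# Hypothetical paths and settings, for illustration only
model_file = 'brp/brp_rewards4_16_5.pm'
property_file = 'brp.prctl'      # placeholder property file
N, iterations, threshold = 1000, 2, 10.0

parameters, model, properties = load_problem(model_file, property_file, 'weak')
solutions = sample_results(N * iterations, parameters, model, properties,
                           model_file, verbose=0)

# Count the samples violating a 'leq' specification in each iteration,
# then compute the averaged lower bound on the satisfaction probability
violated = np.array([np.sum(solutions[i*N:(i+1)*N] > threshold)
                     for i in range(iterations)])
eta = compute_avg_eta_sttt(violated, N, beta=0.9)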
OUTPUT FOR MODEL: brp/brp_rewards4_16_5.pm
Run time in Storm per 1000 samples: 0.396
Confidence probability for eta=0.275 is beta=0.04327
Lower bound sat.prob. for beta=0.9 is eta=0.24609
Lower bound sat.prob. for beta=0.99 is eta=0.23887
Lower bound sat.prob. for beta=0.999 is eta=0.2325
Lower bound sat.prob. for beta=0.9999 is eta=0.22675