Commit 7fa852c8 authored by Thom Badings

New Theorem 1 implemented

parent cb688002

import numpy as np

from core.functions import compute_eta_satisfying, compute_beta_satisfying, \
    compute_eta_discard, compute_beta_discard
def compute_avg_beta(violated_array, N, eta, threshold,
                     verbose=False, precision=5):
    '''
    Compute the average confidence level (beta), given the violations for a
    number of iterations, and given a fixed satisfaction probability (eta)
    ...
    '''

    if type(threshold) == bool:
        beta = compute_beta_satisfying(N, eta)

        print("Low confidence probability (beta) for eta="+
              str(eta)+" is: ", str(beta)+' (obtained from Theorem 1)')

    else:
        # storing probabilities for each iteration
        beta_array = np.zeros(len(violated_array))

        for i in range(len(violated_array)):
            # compute number of constraints to remove in the LP
            removeconstraints = violated_array[i]

            # approximately compute the confidence prob
            beta = compute_beta_discard(N, removeconstraints, eta)
            if verbose:
                print('Beta for N', N, ' k', removeconstraints,
                      ' eta', eta, ' is', beta)
            beta_array[i] = beta

        beta = np.mean(beta_array)
        print("Avg. confidence probability (beta) for eta="+str(eta)+" is:",
              beta)

    return np.round(beta, precision)
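
For illustration, a minimal usage sketch of compute_avg_beta (the violation counts and parameter values below are made up for the example, not taken from this commit):

violations = np.array([2, 0, 3, 1])   # constraints discarded per iteration
# threshold=False skips the loop and returns the Theorem 1 bound directly
beta_thm1 = compute_avg_beta(violations, N=1000, eta=0.9, threshold=False)
# a numeric threshold triggers the discard-based bound, averaged over iterations
beta_avg = compute_avg_beta(violations, N=1000, eta=0.9, threshold=12.5)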

def compute_avg_eta(violated_array, N, beta, threshold,
                    verbose=False, precision=5):
    '''
    Compute the average satisfaction probability (eta), given the violations
    for a number of iterations, and given a fixed confidence level (beta)
    ...
    '''

    if type(threshold) == bool:
        eta = compute_eta_satisfying(N, beta)

        print("Low bound on the satisfaction probability (eta) for beta="+
              str(beta)+" is: "+str(eta)+' (obtained from Theorem 1)')

    else:
        # storing probabilities for each iteration
        eta_array = np.zeros(len(violated_array))

        for i in range(len(violated_array)):
            # compute number of constraints to remove in the LP
            removeconstraints = violated_array[i]

            # approximately compute the satisfaction prob
            eta = compute_eta_discard(N, removeconstraints, beta)
            if verbose:
                print('Eta for N', N, ' k', removeconstraints,
                      ' beta', beta, ' is', eta)
            eta_array[i] = eta

        eta = np.mean(eta_array)
        print("Avg. low bound on the satisfaction probability (eta) for beta="+
              str(beta)+" is:", eta)

    return np.round(eta, precision)
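
The eta direction works analogously; a matching sketch with the same illustrative, made-up values:

violations = np.array([2, 0, 3, 1])
eta_thm1 = compute_avg_eta(violations, N=1000, beta=0.99, threshold=False)
eta_avg = compute_avg_eta(violations, N=1000, beta=0.99, threshold=12.5)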
@@ -41,7 +41,15 @@ def computeBetaCDF(N, k, d, epsilon):
    return cum_prob

def compute_eta_satisfying(N, beta):
    return (1-beta)**(1/N)


def compute_beta_satisfying(N, eta_low):
    return 1 - eta_low ** N
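
These two closed-form bounds invert each other: beta = 1 - eta**N is equivalent to eta = (1-beta)**(1/N). A quick check, taking N=1000 (the new --num_samples default) and beta=0.99 as assumed example values:

eta = compute_eta_satisfying(1000, 0.99)   # (1-0.99)**(1/1000) ≈ 0.99541
assert abs(compute_beta_satisfying(1000, eta) - 0.99) < 1e-9

The value of roughly 0.99541 also matches the lower bound reported for beta=0.99 in the brp_rewards4_16_5 output further below.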

def compute_eta_discard(N, nr_discarded, beta):
    '''
    Compute the lower bound on the satisfaction probability (eta) based on the
    confidence probability (beta)
    ...
    '''

    # Constraints discarded
    if nr_discarded >= N:
        eta_low = 0

    else:
        # Compute confidence level, compensated for the number of samples
        beta_bar = (1-beta)/N

        # Compute the lower bound on the satisfaction probability
        eta_low = 1 - computeBetaPPF(N, k=nr_discarded, d=1,
                                     beta=1-beta_bar)

    return eta_low
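
As a small illustration (again with assumed values N=1000 and beta=0.99): the more constraints are discarded, the weaker the returned lower bound on the satisfaction probability becomes.

for k in [0, 10, 100]:
    print('k =', k, '-> eta_low =', compute_eta_discard(1000, k, 0.99))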

def compute_beta_discard(N, nr_discarded, eta_low):
    '''
    Compute the confidence probability (beta) based on the lower bound on the
    satisfaction probability (eta)
    ...
    '''

    # Samples discarded
    if nr_discarded >= N:
        beta = 0

    else:
        # Compute the confidence level by which a given lower bound eta
        # is valid
        RHS = computeBetaCDF(N, k=nr_discarded, d=1, epsilon=1-eta_low)
        beta = 1 - N * (1-RHS)

    return max(0, beta)
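
Assuming computeBetaPPF is the inverse of computeBetaCDF for the same Beta distribution (as the names suggest), the two discard-based bounds are mutually consistent: substituting the eta bound back recovers beta, since 1 - N*(1 - (1 - (1-beta)/N)) = beta. A minimal round-trip sketch with illustrative values:

N, k, beta = 1000, 5, 0.99
eta_low = compute_eta_discard(N, k, beta)
assert abs(compute_beta_discard(N, k, eta_low) - beta) < 1e-6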
@@ -20,10 +20,12 @@ def parser():
    # isn't given
    P.add_argument('--model', action="store", dest='model', default=0)
    P.add_argument('--property', action="store", dest='property', default=0)
    P.add_argument('--bisimulation', action="store", dest='bisim',
                   default='strong')
    P.add_argument('--threshold', action="store", dest='threshold',
                   default=False)
    P.add_argument('--num_samples', action="store", dest='num_samples',
                   default=1000)
    P.add_argument('--num_iter', action="store", dest='num_iter', default=1)
    P.add_argument('--comparator', action="store", dest='comparator',
                   default=0)
@@ -80,6 +82,11 @@ def parse_settings():
    if any([c != "leq" and c != "geq" for c in comparator]):
        raise RuntimeError("Invalid direction type: should be 'leq' or 'geq'")

    if type(args.threshold) is bool:
        threshold = False
    else:
        threshold = float(args.threshold)

    # Store arguments in dictionary
    Z = {
        'model': args.model,
        ...
        'eta': eta,
        'beta': beta,
        #
        'threshold': threshold,
        'comparator': comparator,
        #
        'outfile': str(args.outfile),
......
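
The --threshold flag now defaults to the bool False, so parse_settings can distinguish "no threshold given" (keep False, which later selects the Theorem 1 branch) from a user-supplied value (cast to float). A standalone sketch of that behaviour, mirroring only the relevant argument:

import argparse
P = argparse.ArgumentParser()
P.add_argument('--threshold', action="store", dest='threshold', default=False)
assert type(P.parse_args([]).threshold) is bool                      # flag omitted
assert float(P.parse_args(['--threshold', '14.0']).threshold) == 14.0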

# -*- coding: utf-8 -*-
"""
Created on Fri Jun 17 14:29:15 2022

@author: Thom Badings
"""

import seaborn as sns
import matplotlib.pyplot as plt


def threshold_histogram(thresholds, N, outfile, bins=25):

    print('-- Create histogram of thresholds obtained using Theorem 1 (N=' +
          str(N) + ')...')

    sns.histplot(thresholds, binwidth=0.25, binrange=[12, 18])
    plt.xlabel('Threshold on spec.')
    plt.ylabel('Count')
    plt.xlim(8, 18)
    plt.xticks([8, 10, 12, 14, 16, 18])
    plt.tight_layout()

    # Save before show(), so the exported PDF is not an empty figure
    plt.savefig(outfile, format='pdf', bbox_inches='tight')
    plt.show()

    print('-- Histogram exported to: ' + str(outfile))
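
An illustrative call of threshold_histogram (the threshold values and output file name are made up for the example):

thresholds = [13.2, 14.1, 13.8, 15.0, 14.6]
threshold_histogram(thresholds, N=1000, outfile='threshold_histogram.pdf')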
@@ -177,7 +177,7 @@ def sample_results(N, parameters, model, properties, model_file,
            s = np.random.uniform(0.2, 0.8)
        else:
            s = np.random.uniform(1e-5, 1-1e-5)

        point[x] = stormpy.RationalRF(s)
......
OUTPUT FOR MODEL: models/brp/brp_rewards4_16_5.pm
Run time in Storm per 1000 samples: 0.758
- Lower bound sat.prob. for beta=0.99 is eta=0.99541

OUTPUT FOR MODEL: models/brp/brp_256_5.pm
Run time in Storm per 1000 samples: 3.127
- Lower bound sat.prob. for beta=0.999 is eta=0.93325

;Confidence probability (beta)
OUTPUT FOR MODEL: models/crowds/crowds_10_5.pm
Run time in Storm per 1000 samples: 0.228
- Lower bound sat.prob. for beta=0.99 is eta=0.63096