'''
This is the main file of the Python implementation for the paper:

    Badings et al. (2022). Scenario-Based Verification of Uncertain Parametric
    MDPs. International Journal on Software Tools for Technology Transfer.
    
See the README file in this folder for detailed instructions on how to use this
implementation.
'''

import numpy as np
import pandas as pd

from core.functions import ticDiff, tocDiff
from core.parser import parse_settings
from core.storm_interface import load_problem, sample_results
from core.compute_bounds import compute_avg_beta, compute_avg_eta

if __name__ == '__main__':

    # Fix the random seed so that the sampled results are reproducible
    np.random.seed(0)

    # Parse arguments provided via the command line into the settings dict Z
    Z = parse_settings()

    # Buffer of lines for the textual output (log) file
    buffer = ['OUTPUT FOR MODEL: ' + str(Z['model']), '\n']

    # Load the model and the properties to verify (via the Storm interface)
    parameters, model, properties = load_problem(Z['model'], Z['property'],
                                                 Z['bisimulation'])

    # Determine how many parameter samples we need in total
    totalN = Z['iterations'] * max(Z['Nsamples'])

    # Sample solutions (using Storm) and save the results plus run time
    ticDiff()
    sols = sample_results(totalN, parameters, model, properties,
                          Z['model'], Z['weather'])

    # Average Storm run time, reported per 1000 samples
    time = np.round(tocDiff(False) / totalN * 1000, 3)
    buffer += ['\n', 'Run time in Storm per 1000 samples: ' + str(time), '\n']

    # Initialize pandas objects to store results in.
    # An explicit dtype avoids the pandas warning for an empty, untyped Series.
    beta_results = pd.Series(name='Confidence probability (beta)', dtype=float)
    eta_results = pd.DataFrame(index=Z['Nsamples'])

    # For every number of samples
    for N in Z['Nsamples']:

        # For every comparator (index c is used to pick the matching eta)
        for c, C in enumerate(Z['comparator']):

            # We perform as many iterations as possible, given sample size N
            num_iter = totalN // N
            violated = np.zeros(num_iter)

            # For every iteration, count the samples violating the threshold
            for i in range(num_iter):

                # Depending on comparator (leq/geq), compute nr of violations
                if C == "leq":
                    violated[i] = np.sum(sols[i*N:(i+1)*N] > Z['threshold'])
                else:
                    violated[i] = np.sum(sols[i*N:(i+1)*N] < Z['threshold'])

            # Compute bounds on the confidence probability (beta), for a
            # fixed lower bound (eta) on the satisfaction probability
            if Z['eta'] != 0:
                print('\nCOMPUTE BETA BASED ON ETA (comparator: '+str(C)+')')

                key = str(N) + '-' + str(C)
                beta_results[key] = compute_avg_beta(violated, N, Z['eta'][c])

                buffer += ['\n- Confidence probability for eta=' +
                           str(Z['eta'][c])+' is beta='+str(beta_results[key])]

            # Compute bounds on the satisfaction probability (eta), for a
            # fixed confidence probability (beta)
            if Z['beta'] != 0:
                print('\nCOMPUTE ETA BASED ON BETA (comparator: '+str(C)+')')

                for bet in Z['beta']:
                    print('\nCompute eta for beta=' + str(bet))

                    key = str(bet) + '-' + str(C)
                    eta_results.loc[N, key] = compute_avg_eta(violated, N, bet)

                    # .loc avoids the chained-indexing anti-pattern
                    buffer += ['\n- Lower bound sat.prob. for beta=' +
                               str(bet)+' is eta='+str(eta_results.loc[N, key])]

    # Write logfile; the context manager guarantees the file is closed
    with open('output/' + Z['outfile'] + '.txt', "w") as f:
        f.writelines(buffer)

    # Export results for the confidence probability (if a sat.prob. was fixed)
    if Z['eta'] != 0:
        beta_results.to_csv('output/'+Z['outfile']+'_confprob.csv', sep=';')

    # Export results for the sat.prob. (if a confidence probability was fixed)
    if Z['beta'] != 0:
        eta_results.to_csv('output/'+Z['outfile']+'_satprob.csv', sep=';')

    print('\n---------------------\n')