Commit c83341b2 authored by Marc Schoolderman

experiment: express effects of all blocks in local terms (why3 0.87.3)

parent 5279ed0a
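
The commit wraps every straight-line fragment of the routines in an abstract block whose postconditions describe the fragment's effect purely in local terms, i.e. only in terms of the registers it reads and writes, while the Shadow module (S) records which registers a block may modify. A minimal sketch of the pattern, mirroring the abstract blocks in KaratAvr below (the instruction sequence and registers in this sketch are illustrative only):

S.init();
abstract
ensures { S.synchronized S.shadow reg }
ensures { uint 2 reg 12 = old (reg[2] * reg[7]) }
mul r2 r7;
movw r12 r0;
S.modify_r0(); S.modify_r1(); S.modify_r12(); S.modify_r13();
end;
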
module BV_asr_Lemmas
use import bv.BV8
use import int.Int
use import bv.Pow2int
use import int.EuclideanDivision
lemma asr_0: eq (asr zeros 1) zeros
lemma asr_1: eq (asr (of_int 1) 1) zeros
lemma asr_f: eq (asr ones 1) ones
lemma xor_0: forall w. 0 <= w < 256 -> to_uint (bw_xor (of_int w) zeros) = w
lemma xor_1: forall w. 0 <= w < 256 -> to_uint (bw_xor (of_int w) ones) = 255 - w
lemma or_0: forall w. bw_or zeros w = w
lemma pow2_72: pow2 72 = 0x1000000000000000000
lemma pow2_80: pow2 80 = 0x100000000000000000000
lemma pow2_88: pow2 88 = 0x10000000000000000000000
lemma pow2_96: pow2 96 = 0x1000000000000000000000000
lemma pow2_104: pow2 104 = 0x100000000000000000000000000
lemma pow2_112: pow2 112 = 0x10000000000000000000000000000
lemma pow2_120: pow2 120 = 0x1000000000000000000000000000000
lemma pow2_128: pow2 128 = 0x100000000000000000000000000000000
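(* each right-hand side above is 2^k written in hexadecimal: since 2^(4*m) = 16^m, pow2 72 = 16^18 is 0x1 followed by 18 hex zeros, and so on up to pow2 128 with 32 zeros *)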
end
module AvrModelLemmas
use import int.Int
use import map.Map
use import int.EuclideanDivision
use import bv.Pow2int
lemma register_file_invariant_strengthen:
forall m: map int int. (forall i. 0 <= m[i] < 256) -> (forall i j. 0 <= m[i]*m[j] <= 255*255)
lemma pow_split: forall k. k >= 0 -> pow2 (2*k) = pow2 k*pow2 k
end
module KaratAvr
use import avrmodel2.AVRint
use import int.Int
use import int.EuclideanDivision
use import bv.Pow2int
use import AvrModelLemmas
use BV_asr_Lemmas
use bv.BV8
let mul16 (): unit
ensures { uint 4 reg 12 = old(uint 2 reg 2 * uint 2 reg 7) }
=
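(* 16x16 -> 32 bit schoolbook product: operands in r2:r3 and r7:r8, result accumulated in r12..r15; r23 is cleared and only used to propagate carries *)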
clr r23;
mul r3 r8;
movw r14 r0;
mul r2 r7;
movw r12 r0;
mul r2 r8;
add r13 r0;
adc r14 r1;
adc r15 r23;
mul r3 r7;
add r13 r0;
adc r14 r1;
adc r15 r23
use import int.Abs
use avrmodel2.Shadow as S
lemma mul_bound_preserve:
forall x y l. 0 <= x <= l -> 0 <= y <= l -> x*y <= l*l
let karatsuba48_marked(): unit
(*
requires { uint 2 reg rZ <= uint 2 reg rX \/ uint 2 reg rZ >= uint 2 reg rX+6 }
requires { uint 2 reg rZ <= uint 2 reg rY \/ uint 2 reg rZ >= uint 2 reg rY+6 }
*)
requires { 32 <= uint 2 reg rX < pow2 15 }
requires { 32 <= uint 2 reg rY < pow2 15 }
requires { 32 <= uint 2 reg rZ < pow2 15 }
(*
requires { 32 <= ram_begin < ram_end <= 0x10000 }
requires { ram_begin <= uint 2 reg rX <= ram_end-7 }
requires { ram_begin <= uint 2 reg rY <= ram_end-6 }
requires { ram_begin <= uint 2 reg rZ <= ram_end-12 }
*)
ensures { uint 12 mem (old (uint 2 reg rZ)) = old (uint 6 mem (uint 2 reg rX) * uint 6 mem (uint 2 reg rY)) }
=
'S:
(* ; init zero registers *)
clr r22;
clr r23;
movw r12 r22;
movw r20 r22;
(* ;--- Compute L --- *)
ld_inc r2 rX;
ld_inc r3 rX;
ld_inc r4 rX;
ldd r5 rY 0;
ldd r6 rY 1;
ldd r7 rY 2;
'L00:
S.init();
abstract
ensures { S.synchronized S.shadow reg }
ensures { uint 6 reg 8 = old(uint 3 reg 2 * uint 3 reg 5) }
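(* 3x3-byte schoolbook product of a0..a2 (r2..r4) and b0..b2 (r5..r7) into r8..r13; r14,r15 serve as temporaries and r23 stays zero for the carry chain *)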
mul r2 r7;
movw r10 r0;
mul r2 r5;
movw r8 r0;
mul r2 r6;
add r9 r0;
adc r10 r1;
adc r11 r23;
mul r3 r7;
movw r14 r0;
mul r3 r5;
add r9 r0;
adc r10 r1;
adc r11 r14;
adc r15 r23;
mul r3 r6;
add r10 r0;
adc r11 r1;
adc r12 r15;
mul r4 r7;
movw r14 r0;
mul r4 r5;
add r10 r0;
adc r11 r1;
adc r12 r14;
adc r15 r23;
mul r4 r6;
add r11 r0;
adc r12 r1;
adc r13 r15;
S.modify_r0(); S.modify_r1(); S.modify_r8(); S.modify_r9(); S.modify_r10(); S.modify_r11(); S.modify_r12(); S.modify_r13(); S.modify_r14(); S.modify_r15();
end;
(* original position
std rZ 0 r8;
std rZ 1 r9;
std rZ 2 r10;
*)
(* ;--- load a3..a5 and b3..b5 --- *)
ld_inc r14 rX;
ld_inc r15 rX;
ld_inc r16 rX;
ldd r17 rY 3;
ldd r18 rY 4;
ldd r19 rY 5;
std rZ 0 r8;
std rZ 1 r9;
std rZ 2 r10;
'L11:
S.init();
abstract
ensures { S.synchronized S.shadow reg }
ensures { uint 3 reg 2 = old (abs (uint 3 reg 2 - uint 3 reg 14)) }
ensures { uint 3 reg 5 = old (abs (uint 3 reg 5 - uint 3 reg 17)) }
ensures { reg[26] = if old (uint 3 reg 2 < uint 3 reg 14) then 0xFF else 0x00 }
ensures { reg[27] = if old (uint 3 reg 5 < uint 3 reg 17) then 0xFF else 0x00 }
(* ;--- subtract a0-a3 --- *)
sub r2 r14;
sbc r3 r15;
sbc r4 r16;
(* ; 0xff if carry and 0x00 if no carry *)
sbc r26 r26;
(* ;--- subtract b0-b3 --- *)
sub r5 r17;
sbc r6 r18;
sbc r7 r19;
(* ; 0xff if carry and 0x00 if no carry *)
sbc r27 r27;
(* ;--- absolute values --- *)
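(* conditional negation: with mask m = 0x00 or 0xFF in r26 (resp. r27), xor-ing each byte with m and then subtracting m from every byte with borrow leaves the value unchanged for m = 0x00 and produces its two's complement, i.e. the absolute value of the difference, for m = 0xFF *)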
eor r2 r26;
eor r3 r26;
eor r4 r26;
eor r5 r27;
eor r6 r27;
eor r7 r27;
sub r2 r26;
sbc r3 r26;
sbc r4 r26;
sub r5 r27;
sbc r6 r27;
sbc r7 r27;
S.modify_r2();
S.modify_r3();
S.modify_r4();
S.modify_r5();
S.modify_r6();
S.modify_r7();
S.modify_r26();
S.modify_r27();
end;
(* ;--- Compute H + (l3l4l5) --- *)
(*
S.init();
*)
abstract
ensures { S.synchronized S.shadow reg }
ensures { uint 3 reg 11 + pow2 24*uint 3 reg 20 = old (uint 3 reg 11 + uint 3 reg 14*uint 3 reg 17) }
ensures { reg[18] = 0 /\ reg[19] = 0 }
mul r14 r19;
movw r24 r0;
mul r14 r17;
add r11 r0;
adc r12 r1;
adc r13 r24;
adc r25 r23;
mul r14 r18;
add r12 r0;
adc r13 r1;
adc r20 r25;
mul r15 r19;
movw r24 r0;
mul r15 r17;
add r12 r0;
adc r13 r1;
adc r20 r24;
adc r25 r23;
mul r15 r18;
add r13 r0;
adc r20 r1;
adc r21 r25;
mul r16 r19;
movw r24 r0;
mul r16 r17;
add r13 r0;
adc r20 r1;
adc r21 r24;
adc r25 r23;
mul r16 r18;
movw r18 r22;
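(* the movw above copies the zero pair r22:r23 into r18:r19 (hence the reg[18] = 0 /\ reg[19] = 0 postcondition), freeing r22 to serve as the top byte of the accumulator *)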
add r20 r0;
adc r21 r1;
adc r22 r25;
S.modify_r0(); S.modify_r1();
S.modify_r11(); S.modify_r12(); S.modify_r13();
S.modify_r15(); S.modify_r16(); S.modify_r17(); S.modify_r18(); S.modify_r19(); S.modify_r20(); S.modify_r21(); S.modify_r22();
S.modify_r24(); S.modify_r25();
end;
(* ;--- Compute M --- *)
(*
S.init();
*)
abstract
ensures { S.synchronized S.shadow reg }
ensures { uint 6 reg 14 = old (uint 3 reg 2*uint 3 reg 5) }
mul r2 r7;
movw r16 r0;
mul r2 r5;
movw r14 r0;
mul r2 r6;
add r15 r0;
adc r16 r1;
adc r17 r23;
mul r3 r7;
movw r24 r0;
mul r3 r5;
add r15 r0;
adc r16 r1;
adc r17 r24;
adc r25 r23;
mul r3 r6;
add r16 r0;
adc r17 r1;
adc r18 r25;
mul r4 r7;
movw r24 r0;
mul r4 r5;
add r16 r0;
adc r17 r1;
adc r18 r24;
adc r25 r23;
mul r4 r6;
add r17 r0;
adc r18 r1;
adc r19 r25;
S.modify_r0(); S.modify_r1();
S.modify_r14(); S.modify_r15(); S.modify_r16(); S.modify_r17(); S.modify_r18(); S.modify_r19();
S.modify_r24(); S.modify_r25();
end;
(* TODO optimize/analyse me? *)
(*
S.init();
*)
abstract
ensures { S.synchronized S.shadow reg }
ensures { uint 6 reg 8 + pow2 48*uint 3 reg 20 + ?cf*pow2 48 = old (uint 6 reg 8 + pow2 48*uint 3 reg 20 + uint 3 reg 11 + pow2 24*uint 3 reg 20) }
(*
ensures { old(uint 3 reg 8) + pow2 24*uint 6 reg 8 + pow2 72*uint 3 reg 20 + ?cf*pow2 72 = (pow2 24+1)*(at(uint 3 reg 2*uint 3 reg 5)'L00 + at(pow2 24*uint 3 reg 14*uint 3 reg 17)'L11) }
*)
(* TODO *)
'Pre:
(* ;--- process sign bit --- *)
(* original location
eor r26 r27;
com r26;
*)
(* ;--- add l3+h0 to l0 and h3 --- *)
add r8 r11;
adc r9 r12;
adc r10 r13;
adc r11 r20;
adc r12 r21;
adc r13 r22;
S.modify_r8(); S.modify_r9(); S.modify_r10(); S.modify_r11(); S.modify_r12(); S.modify_r13();
end;
(*
S.init();
*)
abstract
ensures { S.synchronized S.shadow reg }
(*
ensures { uint 6 reg 14 = ?cf*(pow2 48 - 1) + (if old (reg[26] = 0x00 <-> reg[27] = 0x00) then -1 else 1)*old(uint 6 reg 14) }
*)
ensures { uint 6 reg 14 = ?cf*(pow2 48 - 1) + (if old (reg[26] = reg[27]) then -1 else 1)*old(uint 6 reg 14) }
ensures { let cor = reg[23] + (pow2 8+pow2 16)*reg[0] in cor = old ?cf - ?cf \/ cor = pow2 24 + old ?cf - ?cf }
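(* how the block meets the first ensures above (a reading aid only): after eor/com, r26 is 0xFF exactly when the two borrow masks were equal; in that case the eor's below replace uint 6 reg 14 by (pow2 48 - 1) - uint 6 reg 14 and add r26 r26 sets the carry, so the carry consumed by the next block completes the two's-complement negation of M; otherwise r26 is 0x00, r14..r19 are left untouched and the carry is cleared *)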
(* ; merge carry and borrow *)
'B:
eor r26 r27; (* moved here *)
com r26;
assert { reg[26] = if at(reg[26] = reg[27])'B then 0xFF else 0x00 };
adc r23 r26;
mov r0 r23;
asr r0;
'B:
(* ; invert all bits or do nothing *)
eor r14 r26;
eor r15 r26;
eor r16 r26;
eor r17 r26;
eor r18 r26;
eor r19 r26;
add r26 r26; (* ; sets carry if R26 = 0xff *)
S.modify_r23(); S.modify_r26();
S.modify_r0();
S.modify_r14(); S.modify_r15(); S.modify_r16(); S.modify_r17(); S.modify_r18(); S.modify_r19();
end;
(* ; add in M *)
abstract ensures { S.synchronized S.shadow reg }
ensures { uint 6 reg 8 + pow2 48*uint 3 reg 20 + ?cf*pow2 72 = old(?cf + uint 6 reg 8 + pow2 48*uint 3 reg 20 + uint 6 reg 14 + pow2 48*(reg[23] + (pow2 8+pow2 16)*reg[0])) }
adc r8 r14;
adc r9 r15;
adc r10 r16;
adc r11 r17;
adc r12 r18;
adc r13 r19;
(* ; propagate carry/borrow *)
adc r20 r23;
adc r21 r0;
adc r22 r0;
S.modify_r8(); S.modify_r9(); S.modify_r10();
S.modify_r11(); S.modify_r12(); S.modify_r13();
S.modify_r20(); S.modify_r21(); S.modify_r22();
end;
(* the following 3 asserts establish a bounds result about multiplying uints -- can't this be put in a lemma? *)
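(* a sketch of such a lemma (hypothetical, not part of this development; it would sit at module level next to mul_bound_preserve):
lemma uint_product_bound:
forall x y n. 0 <= n -> 0 <= x < pow2 n -> 0 <= y < pow2 n -> 0 <= x*y <= (pow2 n - 1)*(pow2 n - 1)
instantiated at n = 48 it derives the product bound directly from the two operand bounds *)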
assert { 0 <= at(uint 3 reg 8)'L11 + pow2 24*uint 6 reg 8 + pow2 72*uint 3 reg 20 < pow2 96 };
assert { 0 <= at((uint 3 reg 2 + pow2 24*uint 3 reg 14))'L11 <= pow2 48-1 };
assert { 0 <= at((uint 3 reg 5 + pow2 24*uint 3 reg 17))'L11 <= pow2 48-1 };
assert { 0 <= at((uint 3 reg 2 + pow2 24*uint 3 reg 14)*(uint 3 reg 5 + pow2 24*uint 3 reg 17))'L11 <= (pow2 48-1)*(pow2 48-1) };
assert { at(uint 3 reg 8)'L11 + pow2 24*uint 6 reg 8 + pow2 72*uint 3 reg 20 + ?cf*pow2 96
= at( (uint 3 reg 2 + pow2 24*uint 3 reg 14)*(uint 3 reg 5 + pow2 24*uint 3 reg 17) )'L11 \/
at(uint 3 reg 8)'L11 + pow2 24*uint 6 reg 8 + pow2 72*uint 3 reg 20 + ?cf*pow2 96
= at( (uint 3 reg 2 + pow2 24*uint 3 reg 14)*(uint 3 reg 5 + pow2 24*uint 3 reg 17) )'L11 + pow2 96
};
assert { at(uint 3 reg 8)'L11 + pow2 24*uint 6 reg 8 + pow2 72*uint 3 reg 20
= at( (uint 3 reg 2 + pow2 24*uint 3 reg 14)*(uint 3 reg 5 + pow2 24*uint 3 reg 17) )'L11
};
assert { at(uint 3 reg 8)'L11 + pow2 24*uint 6 reg 8 + pow2 72*uint 3 reg 20 + ?cf*pow2 96
= at( (uint 3 reg 2 + pow2 24*uint 3 reg 14)*(uint 3 reg 5 + pow2 24*uint 3 reg 17) )'L11 + ?cf*pow2 96
};
std rZ 3 r8;
std rZ 4 r9;
std rZ 5 r10;
std rZ 6 r11;
std rZ 7 r12;
std rZ 8 r13;
std rZ 9 r20;
std rZ 10 r21;
std rZ 11 r22
let karatsuba64_marked()
requires { 32 <= uint 2 reg rX < pow2 15 }
requires { 32 <= uint 2 reg rY < pow2 15 }
requires { 32 <= uint 2 reg rZ < pow2 15 }
ensures { uint 16 mem (old (uint 2 reg rZ)) = old (uint 8 mem (uint 2 reg rX) * uint 8 mem (uint 2 reg rY)) }
=
'S:
(* init rZero registers *)
clr r20;
clr r21;
movw r16 r20;
(* compute l *)
ld_inc r2 rX;
ld_inc r3 rX;
ld_inc r4 rX;
ld_inc r5 rX;
ldd r6 rY 0;
ldd r7 rY 1;
ldd r8 rY 2;
ldd r9 rY 3;
'L00:
S.init();
abstract ensures { S.synchronized S.shadow reg }
ensures { uint 8 reg 10 = old(uint 4 reg 2 * uint 4 reg 6) }
mul r2 r8; (* a0*b2 *)
movw r12 r0;
mul r2 r6; (* a0*b0 *)
movw r10 r0;
mul r2 r7; (* a0*b1 *)
add r11 r0;
adc r12 r1;
adc r13 r21;
mul r3 r9; (* a1*b3 *)
movw r14 r0;
mul r2 r9; (* a0*b3 *)
movw r18 r0;
mul r3 r6; (* a1*b0 *)
add r11 r0;
adc r12 r1;
adc r13 r18;
adc r19 r21;
mul r3 r7; (* a1*b1 *)
add r12 r0;
adc r13 r1;
adc r19 r21;
mul r4 r9; (* a2*b3 *)
add r14 r19;
adc r15 r0;
adc r16 r1;
mul r4 r8; (* a2*b2 *)
movw r18 r0;
mul r4 r6; (* a2*b0 *)
add r12 r0;
adc r13 r1;
adc r14 r18;
adc r19 r21;
mul r3 r8; (* a1*b2 *)
add r13 r0;
adc r14 r1;
adc r19 r21;
mul r5 r9; (* a3*b3 *)
add r15 r19;
adc r16 r0;
adc r17 r1;
mul r5 r7; (* a3*b1 *)
movw r18 r0;
mul r4 r7; (* a2*b1 *)
add r13 r0;
adc r18 r1;
adc r19 r21;
mul r5 r6; (* a3*b0 *)
add r13 r0;
adc r18 r1;
adc r19 r21;
mul r5 r8; (* a3*b2 *)
add r14 r18;
adc r0 r19;
adc r1 r21;
add r15 r0;
adc r16 r1;
adc r17 r21;
S.modify_r0(); S.modify_r1();
S.modify_r10(); S.modify_r11();
S.modify_r12(); S.modify_r13();
S.modify_r14(); S.modify_r15();
S.modify_r16(); S.modify_r17();
S.modify_r18(); S.modify_r19();
end;
(* original location
std rZ 0 r10;
std rZ 1 r11;
std rZ 2 r12;
std rZ 3 r13;
*)
(* load a4..a7 and b4..b7 *)
ldd r22 rY 4;
ldd r23 rY 5;
ldd r24 rY 6;
ldd r25 rY 7;
movw r28 r20;
ld_inc r18 rX;
ld_inc r19 rX;
ld_inc r20 rX;
ld_inc r21 rX;
movw r26 r28;
std rZ 0 r10;
std rZ 1 r11;
std rZ 2 r12;
std rZ 3 r13;
'L11:
S.init();
abstract ensures { S.synchronized S.shadow reg }
ensures { uint 4 reg 2 = old (abs (uint 4 reg 2 - uint 4 reg 18)) }
ensures { uint 4 reg 6 = old (abs (uint 4 reg 6 - uint 4 reg 22)) }
ensures { ?tf = 0 <-> old((uint 4 reg 2 < uint 4 reg 18) <-> (uint 4 reg 6 < uint 4 reg 22)) }
'B:
(* subtract a0 a4 *)
sub r2 r18;
sbc r3 r19;
sbc r4 r20;
sbc r5 r21;
(* 0xff if carry and 0x00 if no carry *)
sbc r0 r0;
(* subtract b0 b4 *)
sub r6 r22;
sbc r7 r23;
sbc r8 r24;
sbc r9 r25;
(* 0xff if carry and 0x00 if no carry *)
sbc r1 r1;
(* absolute values *)
eor r2 r0;
eor r3 r0;
eor r4 r0;
eor r5 r0;
eor r6 r1;
eor r7 r1;
eor r8 r1;
eor r9 r1;
sub r2 r0;
sbc r3 r0;
sbc r4 r0;
sbc r5 r0;
sub r6 r1;
sbc r7 r1;
sbc r8 r1;
sbc r9 r1;
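(* combine the two borrow masks: bit 0 of r0 xor r1 is 1 exactly when the two subtractions disagreed on borrowing, i.e. when the differences have opposite signs; bst stores this bit in the T flag for later use *)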
eor r0 r1;
bst r0 0 ;
S.modify_r0(); S.modify_r1();
S.modify_r2(); S.modify_r3(); S.modify_r4(); S.modify_r5();
S.modify_r6(); S.modify_r7(); S.modify_r8(); S.modify_r9();
end;
S.init();
'Tmp:
abstract ensures { S.synchronized S.shadow reg }
ensures { uint 4 reg 14 + pow2 32*uint 2 reg 28 + pow2 48*uint 2 reg 18 = old (uint 4 reg 14 + uint 4 reg 18*uint 4 reg 22) }
(* compute h (l4 l5 l6 l7) *)
mul r18 r22;
add r14 r0;
adc r15 r1;
adc r16 r26;
adc r29 r26 ;
mul r18 r23;
add r15 r0;
adc r16 r1;
adc r29 r26;
mul r19 r22;
add r15 r0;
adc r16 r1;
adc r17 r29;
adc r28 r26;
mul r18 r24;
add r16 r0 ;
adc r17 r1;
adc r28 r26;
mul r19 r23;
add r16 r0;
adc r17 r1;
adc r28 r26;
mul r20 r22;
add r16 r0;
adc r17 r1;
adc r28 r26;
clr r29;
mul r18 r25;
add r17 r0;
adc r28 r1;
adc r29 r26;
mul r19 r24;
add r17 r0;
adc r28 r1;
adc r29 r26;
mul r20 r23;
add r17 r0;
adc r28 r1;
adc r29 r26;
mul r21 r22;
add r17 r0;
adc r28 r1;
adc r29 r26;
mul r19 r25;
movw r18 r26;
add r28 r0;
adc r29 r1;
adc r18 r26;
mul r20 r24;
add r28 r0;
adc r29 r1;
adc r18 r26;
mul r21 r23;
add r28 r0;
adc r29 r1;
adc r18 r26;
mul r20 r25;
add r29 r0;
adc r18 r1;
adc r19 r26;
mul r21 r24;
add r29 r0;
adc r18 r1;
adc r19 r26;
mul r21 r25;
add r18 r0;
adc r19 r1;
S.modify_r0(); S.modify_r1();
S.modify_r14(); S.modify_r15(); S.modify_r16(); S.modify_r17(); S.modify_r18(); S.modify_r19();
S.modify_r28(); S.modify_r29();
end;
S.init();
abstract ensures { S.synchronized S.shadow reg }
ensures { uint 6 reg 20 + pow2 48*uint 2 reg 2 = old (uint 4 reg 2*uint 4 reg 6) }
(* compute m *)
mul r2 r6;
movw r20 r0;
movw r22 r26;