| author | Son HO | 2023-09-18 13:59:38 +0200 |
|---|---|---|
| committer | GitHub | 2023-09-18 13:59:38 +0200 |
| commit | 28f4ea9ffe02d4204bb60273b6a77db7ed48781b (patch) | |
| tree | 156dc513299f3144a00d2a684d5e633aadf0cee6 | |
| parent | 242aca3092c6594206896ea62eb40395accc8459 (diff) | |
| parent | a053ad9739248ade939aa04355672362221883d7 (diff) | |
Merge pull request #37 from AeneasVerif/son_tuto
Tutorial for the Lean backend
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | README.md | 2 |
| -rw-r--r-- | backends/lean/Base/Arith/Int.lean | 8 |
| -rw-r--r-- | backends/lean/Base/Arith/Scalar.lean | 17 |
| -rw-r--r-- | backends/lean/Base/Primitives/Scalar.lean | 274 |
| -rw-r--r-- | backends/lean/Base/Progress/Base.lean | 57 |
| -rw-r--r-- | backends/lean/Base/Progress/Progress.lean | 14 |
| -rw-r--r-- | tests/lean/Tutorial.lean | 389 |
| -rw-r--r-- | tests/lean/lakefile.lean | 1 |
8 files changed, 691 insertions, 71 deletions
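The bulk of this merge is the new `tests/lean/Tutorial.lean` (389 lines), which walks through verifying Aeneas-generated code with the `progress` and `scalar_tac` tactics that the other touched files improve. For orientation, here is a condensed excerpt of the tutorial's opening example, lightly reformatted from the file as added below; it assumes the Aeneas `Base` library is on the Lake path, and the enclosing namespace is just a stand-in:

```lean
import Base
open Primitives Result

namespace TutorialSketch

-- Machine-integer arithmetic can overflow, so every operation returns a
-- `Result` and the function body is written with `do`/`←`.
def mul2_add1 (x : U32) : Result U32 := do
  let x1 ← x + x
  let x2 ← x1 + 1#u32
  ret x2

-- Hoare-style spec: if `2 * x + 1` fits in a `U32`, the call succeeds and
-- returns `2 * x + 1`. Each `progress` step consumes one monadic call by
-- looking up the registered `@[pspec]` lemma `U32.add_spec`.
theorem mul2_add1_spec (x : U32) (h : 2 * ↑x + 1 ≤ U32.max) :
    ∃ y, mul2_add1 x = ret y ∧ ↑y = 2 * ↑x + (1 : Int) := by
  rw [mul2_add1]
  progress as ⟨ x1 .. ⟩
  progress as ⟨ x2 .. ⟩
  simp at *; scalar_tac

end TutorialSketch
```

The tutorial then moves on to recursive definitions on a custom `CList` type and to partial (`divergent`) functions with `termination_by`/`decreasing_by` proofs. The full diff follows.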
@@ -105,6 +105,8 @@ support for partial functions and extrinsic proofs of termination (see and tactics specialized for monadic programs (see `./backends/lean/Base/Progress/Progress.lean` and `./backends/hol4/primitivesLib.sml`). +A tutorial for the Lean backend is available [here](./tests/lean/Tutorial.lean). + ## Formalization The translation has been formalized and published at ICFP2022: [Aeneas: Rust diff --git a/backends/lean/Base/Arith/Int.lean b/backends/lean/Base/Arith/Int.lean index eb6701c2..3359ecdb 100644 --- a/backends/lean/Base/Arith/Int.lean +++ b/backends/lean/Base/Arith/Int.lean @@ -211,9 +211,11 @@ def intTacPreprocess (extraPreprocess : Tactic.TacticM Unit) : Tactic.TacticM U let _ ← introHasIntPropInstances -- Extra preprocessing, before we split on the disjunctions extraPreprocess - -- Split - let asms ← introInstances ``PropHasImp.concl lookupPropHasImp - splitOnAsms asms.toList + -- Split - note that the extra-preprocessing step might actually have + -- proven the goal (by doing simplifications for instance) + Tactic.allGoals do + let asms ← introInstances ``PropHasImp.concl lookupPropHasImp + splitOnAsms asms.toList elab "int_tac_preprocess" : tactic => intTacPreprocess (do pure ()) diff --git a/backends/lean/Base/Arith/Scalar.lean b/backends/lean/Base/Arith/Scalar.lean index db672489..47751c8a 100644 --- a/backends/lean/Base/Arith/Scalar.lean +++ b/backends/lean/Base/Arith/Scalar.lean @@ -16,14 +16,15 @@ def scalarTacExtraPreprocess : Tactic.TacticM Unit := do add (← mkAppM ``Scalar.cMin_bound #[.const ``ScalarTy.Isize []]) add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Usize []]) add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Isize []]) - -- Reveal the concrete bounds + -- Reveal the concrete bounds, simplify calls to [ofInt] Utils.simpAt [``Scalar.min, ``Scalar.max, ``Scalar.cMin, ``Scalar.cMax, ``I8.min, ``I16.min, ``I32.min, ``I64.min, ``I128.min, ``I8.max, ``I16.max, ``I32.max, ``I64.max, ``I128.max, ``U8.min, ``U16.min, ``U32.min, ``U64.min, ``U128.min, ``U8.max, ``U16.max, ``U32.max, ``U64.max, ``U128.max, ``Usize.min - ] [] [] .wildcard + ] [``Scalar.ofInt_val_eq, ``Scalar.neq_to_neq_val] [] .wildcard + elab "scalar_tac_preprocess" : tactic => intTacPreprocess scalarTacExtraPreprocess @@ -50,4 +51,16 @@ example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by example (x : U32 × U32) : 0 ≤ x.fst.val := by scalar_tac +-- Checking that we properly handle [ofInt] +example : U32.ofInt 1 ≤ U32.max := by + scalar_tac + +example (x : Int) (h0 : 0 ≤ x) (h1 : x ≤ U32.max) : + U32.ofInt x (by constructor <;> scalar_tac) ≤ U32.max := by + scalar_tac + +-- Not equal +example (x : U32) (h0 : ¬ x = U32.ofInt 0) : 0 < x.val := by + scalar_tac + end Arith diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean index ffc969f3..55227a9f 100644 --- a/backends/lean/Base/Primitives/Scalar.lean +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -491,6 +491,36 @@ theorem Scalar.add_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] +@[cepspec] theorem Isize.add_spec {x y : Isize} + (hmin : Isize.min ≤ x.val + y.val) (hmax : x.val + y.val ≤ Isize.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := + Scalar.add_spec hmin hmax + +@[cepspec] theorem I8.add_spec {x y : I8} + (hmin : I8.min ≤ x.val + y.val) (hmax : x.val + y.val ≤ I8.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := + Scalar.add_spec hmin 
hmax + +@[cepspec] theorem I16.add_spec {x y : I16} + (hmin : I16.min ≤ x.val + y.val) (hmax : x.val + y.val ≤ I16.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := + Scalar.add_spec hmin hmax + +@[cepspec] theorem I32.add_spec {x y : I32} + (hmin : I32.min ≤ x.val + y.val) (hmax : x.val + y.val ≤ I32.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := + Scalar.add_spec hmin hmax + +@[cepspec] theorem I64.add_spec {x y : I64} + (hmin : I64.min ≤ x.val + y.val) (hmax : x.val + y.val ≤ I64.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := + Scalar.add_spec hmin hmax + +@[cepspec] theorem I128.add_spec {x y : I128} + (hmin : I128.min ≤ x.val + y.val) (hmax : x.val + y.val ≤ I128.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := + Scalar.add_spec hmin hmax + -- Generic theorem - shouldn't be used much @[cpspec] theorem Scalar.sub_spec {ty} {x y : Scalar ty} @@ -540,6 +570,36 @@ theorem Scalar.sub_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] +@[cepspec] theorem Isize.sub_spec {x y : Isize} (hmin : Isize.min ≤ x.val - y.val) + (hmax : x.val - y.val ≤ Isize.max) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := + Scalar.sub_spec hmin hmax + +@[cepspec] theorem I8.sub_spec {x y : I8} (hmin : I8.min ≤ x.val - y.val) + (hmax : x.val - y.val ≤ I8.max) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := + Scalar.sub_spec hmin hmax + +@[cepspec] theorem I16.sub_spec {x y : I16} (hmin : I16.min ≤ x.val - y.val) + (hmax : x.val - y.val ≤ I16.max) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := + Scalar.sub_spec hmin hmax + +@[cepspec] theorem I32.sub_spec {x y : I32} (hmin : I32.min ≤ x.val - y.val) + (hmax : x.val - y.val ≤ I32.max) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := + Scalar.sub_spec hmin hmax + +@[cepspec] theorem I64.sub_spec {x y : I64} (hmin : I64.min ≤ x.val - y.val) + (hmax : x.val - y.val ≤ I64.max) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := + Scalar.sub_spec hmin hmax + +@[cepspec] theorem I128.sub_spec {x y : I128} (hmin : I128.min ≤ x.val - y.val) + (hmax : x.val - y.val ≤ I128.max) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := + Scalar.sub_spec hmin hmax + -- Generic theorem - shouldn't be used much theorem Scalar.mul_spec {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val * y.val) @@ -586,6 +646,36 @@ theorem Scalar.mul_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] +@[cepspec] theorem Isize.mul_spec {x y : Isize} (hmin : Isize.min ≤ x.val * y.val) + (hmax : x.val * y.val ≤ Isize.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := + Scalar.mul_spec hmin hmax + +@[cepspec] theorem I8.mul_spec {x y : I8} (hmin : I8.min ≤ x.val * y.val) + (hmax : x.val * y.val ≤ I8.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := + Scalar.mul_spec hmin hmax + +@[cepspec] theorem I16.mul_spec {x y : I16} (hmin : I16.min ≤ x.val * y.val) + (hmax : x.val * y.val ≤ I16.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := + Scalar.mul_spec hmin hmax + +@[cepspec] theorem I32.mul_spec {x y : I32} (hmin : I32.min ≤ x.val * y.val) + (hmax : x.val * y.val ≤ I32.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := + Scalar.mul_spec hmin hmax + +@[cepspec] theorem I64.mul_spec {x y : I64} (hmin : I64.min ≤ x.val * y.val) + (hmax : x.val * y.val ≤ I64.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := + Scalar.mul_spec hmin 
hmax + +@[cepspec] theorem I128.mul_spec {x y : I128} (hmin : I128.min ≤ x.val * y.val) + (hmax : x.val * y.val ≤ I128.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := + Scalar.mul_spec hmin hmax + -- Generic theorem - shouldn't be used much @[cpspec] theorem Scalar.div_spec {ty} {x y : Scalar ty} @@ -639,6 +729,48 @@ theorem Scalar.div_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : S ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by apply Scalar.div_unsigned_spec <;> simp [Scalar.max, *] +@[cepspec] theorem Isize.div_spec (x : Isize) {y : Isize} + (hnz : y.val ≠ 0) + (hmin : Isize.min ≤ scalar_div x.val y.val) + (hmax : scalar_div x.val y.val ≤ Isize.max): + ∃ z, x / y = ret z ∧ z.val = scalar_div x.val y.val := + Scalar.div_spec hnz hmin hmax + +@[cepspec] theorem I8.div_spec (x : I8) {y : I8} + (hnz : y.val ≠ 0) + (hmin : I8.min ≤ scalar_div x.val y.val) + (hmax : scalar_div x.val y.val ≤ I8.max): + ∃ z, x / y = ret z ∧ z.val = scalar_div x.val y.val := + Scalar.div_spec hnz hmin hmax + +@[cepspec] theorem I16.div_spec (x : I16) {y : I16} + (hnz : y.val ≠ 0) + (hmin : I16.min ≤ scalar_div x.val y.val) + (hmax : scalar_div x.val y.val ≤ I16.max): + ∃ z, x / y = ret z ∧ z.val = scalar_div x.val y.val := + Scalar.div_spec hnz hmin hmax + +@[cepspec] theorem I32.div_spec (x : I32) {y : I32} + (hnz : y.val ≠ 0) + (hmin : I32.min ≤ scalar_div x.val y.val) + (hmax : scalar_div x.val y.val ≤ I32.max): + ∃ z, x / y = ret z ∧ z.val = scalar_div x.val y.val := + Scalar.div_spec hnz hmin hmax + +@[cepspec] theorem I64.div_spec (x : I64) {y : I64} + (hnz : y.val ≠ 0) + (hmin : I64.min ≤ scalar_div x.val y.val) + (hmax : scalar_div x.val y.val ≤ I64.max): + ∃ z, x / y = ret z ∧ z.val = scalar_div x.val y.val := + Scalar.div_spec hnz hmin hmax + +@[cepspec] theorem I128.div_spec (x : I128) {y : I128} + (hnz : y.val ≠ 0) + (hmin : I128.min ≤ scalar_div x.val y.val) + (hmax : scalar_div x.val y.val ≤ I128.max): + ∃ z, x / y = ret z ∧ z.val = scalar_div x.val y.val := + Scalar.div_spec hnz hmin hmax + -- Generic theorem - shouldn't be used much @[cpspec] theorem Scalar.rem_spec {ty} {x y : Scalar ty} @@ -692,76 +824,89 @@ theorem Scalar.rem_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : S ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by apply Scalar.rem_unsigned_spec <;> simp [Scalar.max, *] --- ofIntCore --- TODO: typeclass? -def Isize.ofIntCore := @Scalar.ofIntCore .Isize -def I8.ofIntCore := @Scalar.ofIntCore .I8 -def I16.ofIntCore := @Scalar.ofIntCore .I16 -def I32.ofIntCore := @Scalar.ofIntCore .I32 -def I64.ofIntCore := @Scalar.ofIntCore .I64 -def I128.ofIntCore := @Scalar.ofIntCore .I128 -def Usize.ofIntCore := @Scalar.ofIntCore .Usize -def U8.ofIntCore := @Scalar.ofIntCore .U8 -def U16.ofIntCore := @Scalar.ofIntCore .U16 -def U32.ofIntCore := @Scalar.ofIntCore .U32 -def U64.ofIntCore := @Scalar.ofIntCore .U64 -def U128.ofIntCore := @Scalar.ofIntCore .U128 - --- ofInt --- TODO: typeclass? 
-def Isize.ofInt := @Scalar.ofInt .Isize -def I8.ofInt := @Scalar.ofInt .I8 -def I16.ofInt := @Scalar.ofInt .I16 -def I32.ofInt := @Scalar.ofInt .I32 -def I64.ofInt := @Scalar.ofInt .I64 -def I128.ofInt := @Scalar.ofInt .I128 -def Usize.ofInt := @Scalar.ofInt .Usize -def U8.ofInt := @Scalar.ofInt .U8 -def U16.ofInt := @Scalar.ofInt .U16 -def U32.ofInt := @Scalar.ofInt .U32 -def U64.ofInt := @Scalar.ofInt .U64 -def U128.ofInt := @Scalar.ofInt .U128 - --- TODO: factor those lemmas out -@[simp] theorem Scalar.ofInt_val_eq {ty} (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty) : (Scalar.ofInt x h).val = x := by - simp [Scalar.ofInt, Scalar.ofIntCore] - -@[simp] theorem Isize.ofInt_val_eq (h : Scalar.min ScalarTy.Isize ≤ x ∧ x ≤ Scalar.max ScalarTy.Isize) : (Isize.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h - -@[simp] theorem I8.ofInt_val_eq (h : Scalar.min ScalarTy.I8 ≤ x ∧ x ≤ Scalar.max ScalarTy.I8) : (I8.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h - -@[simp] theorem I16.ofInt_val_eq (h : Scalar.min ScalarTy.I16 ≤ x ∧ x ≤ Scalar.max ScalarTy.I16) : (I16.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h - -@[simp] theorem I32.ofInt_val_eq (h : Scalar.min ScalarTy.I32 ≤ x ∧ x ≤ Scalar.max ScalarTy.I32) : (I32.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h - -@[simp] theorem I64.ofInt_val_eq (h : Scalar.min ScalarTy.I64 ≤ x ∧ x ≤ Scalar.max ScalarTy.I64) : (I64.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h - -@[simp] theorem I128.ofInt_val_eq (h : Scalar.min ScalarTy.I128 ≤ x ∧ x ≤ Scalar.max ScalarTy.I128) : (I128.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h +@[cepspec] theorem I8.rem_spec (x : I8) {y : I8} + (hnz : y.val ≠ 0) + (hmin : I8.min ≤ scalar_rem x.val y.val) + (hmax : scalar_rem x.val y.val ≤ I8.max): + ∃ z, x % y = ret z ∧ z.val = scalar_rem x.val y.val := + Scalar.rem_spec hnz hmin hmax -@[simp] theorem Usize.ofInt_val_eq (h : Scalar.min ScalarTy.Usize ≤ x ∧ x ≤ Scalar.max ScalarTy.Usize) : (Usize.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h +@[cepspec] theorem I16.rem_spec (x : I16) {y : I16} + (hnz : y.val ≠ 0) + (hmin : I16.min ≤ scalar_rem x.val y.val) + (hmax : scalar_rem x.val y.val ≤ I16.max): + ∃ z, x % y = ret z ∧ z.val = scalar_rem x.val y.val := + Scalar.rem_spec hnz hmin hmax -@[simp] theorem U8.ofInt_val_eq (h : Scalar.min ScalarTy.U8 ≤ x ∧ x ≤ Scalar.max ScalarTy.U8) : (U8.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h +@[cepspec] theorem I32.rem_spec (x : I32) {y : I32} + (hnz : y.val ≠ 0) + (hmin : I32.min ≤ scalar_rem x.val y.val) + (hmax : scalar_rem x.val y.val ≤ I32.max): + ∃ z, x % y = ret z ∧ z.val = scalar_rem x.val y.val := + Scalar.rem_spec hnz hmin hmax -@[simp] theorem U16.ofInt_val_eq (h : Scalar.min ScalarTy.U16 ≤ x ∧ x ≤ Scalar.max ScalarTy.U16) : (U16.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h +@[cepspec] theorem I64.rem_spec (x : I64) {y : I64} + (hnz : y.val ≠ 0) + (hmin : I64.min ≤ scalar_rem x.val y.val) + (hmax : scalar_rem x.val y.val ≤ I64.max): + ∃ z, x % y = ret z ∧ z.val = scalar_rem x.val y.val := + Scalar.rem_spec hnz hmin hmax -@[simp] theorem U32.ofInt_val_eq (h : Scalar.min ScalarTy.U32 ≤ x ∧ x ≤ Scalar.max ScalarTy.U32) : (U32.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h +@[cepspec] theorem I128.rem_spec (x : I128) {y : I128} + (hnz : y.val ≠ 0) + (hmin : I128.min ≤ scalar_rem x.val y.val) + (hmax : scalar_rem x.val y.val ≤ I128.max): + ∃ z, x % y = ret z ∧ z.val = scalar_rem x.val y.val := + Scalar.rem_spec hnz hmin hmax -@[simp] theorem 
U64.ofInt_val_eq (h : Scalar.min ScalarTy.U64 ≤ x ∧ x ≤ Scalar.max ScalarTy.U64) : (U64.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h +-- ofIntCore +-- TODO: typeclass? +@[reducible] def Isize.ofIntCore := @Scalar.ofIntCore .Isize +@[reducible] def I8.ofIntCore := @Scalar.ofIntCore .I8 +@[reducible] def I16.ofIntCore := @Scalar.ofIntCore .I16 +@[reducible] def I32.ofIntCore := @Scalar.ofIntCore .I32 +@[reducible] def I64.ofIntCore := @Scalar.ofIntCore .I64 +@[reducible] def I128.ofIntCore := @Scalar.ofIntCore .I128 +@[reducible] def Usize.ofIntCore := @Scalar.ofIntCore .Usize +@[reducible] def U8.ofIntCore := @Scalar.ofIntCore .U8 +@[reducible] def U16.ofIntCore := @Scalar.ofIntCore .U16 +@[reducible] def U32.ofIntCore := @Scalar.ofIntCore .U32 +@[reducible] def U64.ofIntCore := @Scalar.ofIntCore .U64 +@[reducible] def U128.ofIntCore := @Scalar.ofIntCore .U128 -@[simp] theorem U128.ofInt_val_eq (h : Scalar.min ScalarTy.U128 ≤ x ∧ x ≤ Scalar.max ScalarTy.U128) : (U128.ofInt x h).val = x := by - apply Scalar.ofInt_val_eq h +-- ofInt +-- TODO: typeclass? +@[reducible] def Isize.ofInt := @Scalar.ofInt .Isize +@[reducible] def I8.ofInt := @Scalar.ofInt .I8 +@[reducible] def I16.ofInt := @Scalar.ofInt .I16 +@[reducible] def I32.ofInt := @Scalar.ofInt .I32 +@[reducible] def I64.ofInt := @Scalar.ofInt .I64 +@[reducible] def I128.ofInt := @Scalar.ofInt .I128 +@[reducible] def Usize.ofInt := @Scalar.ofInt .Usize +@[reducible] def U8.ofInt := @Scalar.ofInt .U8 +@[reducible] def U16.ofInt := @Scalar.ofInt .U16 +@[reducible] def U32.ofInt := @Scalar.ofInt .U32 +@[reducible] def U64.ofInt := @Scalar.ofInt .U64 +@[reducible] def U128.ofInt := @Scalar.ofInt .U128 + +postfix:max "#isize" => Isize.ofInt +postfix:max "#i8" => I8.ofInt +postfix:max "#i16" => I16.ofInt +postfix:max "#i32" => I32.ofInt +postfix:max "#i64" => I64.ofInt +postfix:max "#i128" => I128.ofInt +postfix:max "#usize" => Usize.ofInt +postfix:max "#u8" => U8.ofInt +postfix:max "#u16" => U16.ofInt +postfix:max "#u32" => U32.ofInt +postfix:max "#u64" => U64.ofInt +postfix:max "#u128" => U128.ofInt + +-- Testing the notations +example : Result Usize := 0#usize + 1#usize +@[simp] theorem Scalar.ofInt_val_eq {ty} (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty) : (Scalar.ofInt x h).val = x := by + simp [Scalar.ofInt, Scalar.ofIntCore] -- Comparisons instance {ty} : LT (Scalar ty) where @@ -790,6 +935,9 @@ instance (ty : ScalarTy) : DecidableEq (Scalar ty) := instance (ty : ScalarTy) : CoeOut (Scalar ty) Int where coe := λ v => v.val +@[simp] theorem Scalar.neq_to_neq_val {ty} : ∀ {i j : Scalar ty}, (¬ i = j) ↔ ¬ i.val = j.val := by + intro i j; cases i; cases j; simp + -- -- We now define a type class that subsumes the various machine integer types, so -- -- as to write a concise definition for scalar_cast, rather than exhaustively -- -- enumerating all of the possible pairs. We remark that Rust has sane semantics diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index 6f820a84..76a92795 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -167,7 +167,8 @@ structure PSpecClassExprAttr where deriving Inhabited -- TODO: the original function doesn't define correctly the `addImportedFn`. Do a PR? 
-def mkMapDeclarationExtension [Inhabited α] (name : Name := by exact decl_name%) : IO (MapDeclarationExtension α) := +def mkMapDeclarationExtension [Inhabited α] (name : Name := by exact decl_name%) : + IO (MapDeclarationExtension α) := registerSimplePersistentEnvExtension { name := name, addImportedFn := fun a => a.foldl (fun s a => a.foldl (fun s (k, v) => s.insert k v) s) RBMap.empty, @@ -175,6 +176,54 @@ def mkMapDeclarationExtension [Inhabited α] (name : Name := by exact decl_name% toArrayFn := fun es => es.toArray.qsort (fun a b => Name.quickLt a.1 b.1) } +-- Declare an extension of maps of maps (using [RBMap]). +-- The important point is that we need to merge the bound values (which are maps). +def mkMapMapDeclarationExtension [Inhabited β] (ord : α → α → Ordering) + (name : Name := by exact decl_name%) : + IO (MapDeclarationExtension (RBMap α β ord)) := + registerSimplePersistentEnvExtension { + name := name, + addImportedFn := fun a => + a.foldl (fun s a => a.foldl ( + -- We need to merge the maps + fun s (k0, k1_to_v) => + match s.find? k0 with + | none => + -- No binding: insert one + s.insert k0 k1_to_v + | some m => + -- There is already a binding: merge + let m := RBMap.fold (fun m k v => m.insert k v) m k1_to_v + s.insert k0 m) + s) RBMap.empty, + addEntryFn := fun s n => s.insert n.1 n.2 , + toArrayFn := fun es => es.toArray.qsort (fun a b => Name.quickLt a.1 b.1) + } + +-- Declare an extension of maps of maps (using [HashMap]). +-- The important point is that we need to merge the bound values (which are maps). +def mkMapHashMapDeclarationExtension [BEq α] [Hashable α] [Inhabited β] + (name : Name := by exact decl_name%) : + IO (MapDeclarationExtension (HashMap α β)) := + registerSimplePersistentEnvExtension { + name := name, + addImportedFn := fun a => + a.foldl (fun s a => a.foldl ( + -- We need to merge the maps + fun s (k0, k1_to_v) => + match s.find? k0 with + | none => + -- No binding: insert one + s.insert k0 k1_to_v + | some m => + -- There is already a binding: merge + let m := HashMap.fold (fun m k v => m.insert k v) m k1_to_v + s.insert k0 m) + s) RBMap.empty, + addEntryFn := fun s n => s.insert n.1 n.2 , + toArrayFn := fun es => es.toArray.qsort (fun a b => Name.quickLt a.1 b.1) + } + /- The persistent map from function to pspec theorems. 
-/ initialize pspecAttr : PSpecAttr ← do let ext ← mkMapDeclarationExtension `pspecMap @@ -200,7 +249,8 @@ initialize pspecAttr : PSpecAttr ← do /- The persistent map from type classes to pspec theorems -/ initialize pspecClassAttr : PSpecClassAttr ← do - let ext : MapDeclarationExtension (NameMap Name) ← mkMapDeclarationExtension `pspecClassMap + let ext : MapDeclarationExtension (NameMap Name) ← + mkMapMapDeclarationExtension Name.quickCmp `pspecClassMap let attrImpl : AttributeImpl := { name := `cpspec descr := "Marks theorems to use for type classes with the `progress` tactic" @@ -231,7 +281,8 @@ initialize pspecClassAttr : PSpecClassAttr ← do /- The 2nd persistent map from type classes to pspec theorems -/ initialize pspecClassExprAttr : PSpecClassExprAttr ← do - let ext : MapDeclarationExtension (HashMap Expr Name) ← mkMapDeclarationExtension `pspecClassExprMap + let ext : MapDeclarationExtension (HashMap Expr Name) ← + mkMapHashMapDeclarationExtension `pspecClassExprMap let attrImpl : AttributeImpl := { name := `cepspec descr := "Marks theorems to use for type classes with the `progress` tactic" diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 4fd88e36..8b0759c5 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -243,21 +243,26 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL tryLookupApply keep ids splitPost asmTac fExpr "pspec theorem" pspec do -- It failed: try to lookup a *class* expr spec theorem (those are more -- specific than class spec theorems) + trace[Progress] "Failed using a pspec theorem: trying to lookup a pspec class expr theorem" let pspecClassExpr ← do match getFirstArg args with | none => pure none | some arg => do + trace[Progress] "Using: f:{fName}, arg: {arg}" let thName ← pspecClassExprAttr.find? fName arg pure (thName.map fun th => .Theorem th) tryLookupApply keep ids splitPost asmTac fExpr "pspec class expr theorem" pspecClassExpr do -- It failed: try to lookup a *class* spec theorem + trace[Progress] "Failed using a pspec class expr theorem: trying to lookup a pspec class theorem" let pspecClass ← do match ← getFirstArgAppName args with | none => pure none | some argName => do + trace[Progress] "Using: f: {fName}, arg: {argName}" let thName ← pspecClassAttr.find? fName argName pure (thName.map fun th => .Theorem th) tryLookupApply keep ids splitPost asmTac fExpr "pspec class theorem" pspecClass do + trace[Progress] "Failed using a pspec class theorem: trying to use a recursive assumption" -- Try a recursive call - we try the assumptions of kind "auxDecl" let ctx ← Lean.MonadLCtx.getLCtx let decls ← ctx.getAllDecls @@ -346,11 +351,14 @@ elab "progress" args:progressArgs : tactic => namespace Test open Primitives Result + -- Show the traces -- set_option trace.Progress true -- set_option pp.rawOnError true + -- The following commands display the databases of theorems -- #eval showStoredPSpec -- #eval showStoredPSpecClass + -- #eval showStoredPSpecExprClass example {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val + y.val) @@ -366,6 +374,12 @@ namespace Test progress keep h with Scalar.add_spec as ⟨ z ⟩ simp [*, h] + example {x y : U32} + (hmax : x.val + y.val ≤ U32.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + progress keep _ as ⟨ z, h1 .. 
⟩ + simp [*, h1] + /- Checking that universe instantiation works: the original spec uses `α : Type u` where u is quantified, while here we use `α : Type 0` -/ example {α : Type} (v: Vec α) (i: Usize) (x : α) diff --git a/tests/lean/Tutorial.lean b/tests/lean/Tutorial.lean new file mode 100644 index 00000000..840a606e --- /dev/null +++ b/tests/lean/Tutorial.lean @@ -0,0 +1,389 @@ +/- A tutorial about using Lean to verify properties of programs generated by Aeneas -/ +import Base + +open Primitives +open Result + +namespace Tutorial + +/-#===========================================================================# + # + # Simple Arithmetic Example + # + #===========================================================================#-/ + +/- As a first example, let's consider the function below. + -/ + +def mul2_add1 (x : U32) : Result U32 := do + let x1 ← x + x + let x2 ← x1 + 1#u32 + ret x2 + +/- There are several things to note. + + # Machine integers + ================== + Because Rust programs manipulate machine integers which occupy a fixed + size in memory, we model integers by using types like [U32], which is + the type of integers which take their values between 0 and 2^32 - 1 (inclusive). + [1#u32] is simply the constant 1 (seen as a [U32]). + + You can see a definition or its type by using the [#print] and [#check] commands. + It is also possible to jump to definitions (right-click + "Go to Definition" + in VS Code). + + For instance, you can see below that [U32] is defined in terms of a more generic + type [Scalar] (just move the cursor to the [#print] command below). + + -/ +#print U32 -- This shows the definition of [U32] + +#check mul2_add1 -- This shows the type of [mul2_add1] +#print mul2_add1 -- This show the full definition of [mul2_add1] + +/- # Syntax + ======== + Because machine integers are bounded, arithmetic operations can fail, for instance + because of an overflow: this is the reason why the output of [mul2_add1] uses + the [Result] type. In particular, addition can fail. + + We use a lightweight "do"-notation to write code which calls potentially failing + functions. In practice, all our function bodies start with a [do] keyword, + which enables using this lightweight syntax. After the [do], instead of writing + let-bindings as [let x1 := ...], we write them as: [let x1 ← ...]. We also + have lightweight notations for common operations like the addition. + + For instance, in [let x1 ← x + x], the [x + x] expression desugars to + [Scalar.add x x] and the [let x1 ← ...] desugars to a call to [bind]. + + The definition of [bind x f] is worth investigating. It simply checks whether + [x : Result ...] successfully evaluates to some value, in which case it + calls [f] with this value, and propagates the error otherwise. See the output + of the [#print] command below. + + *Remark:* in order to type the left-arrow symbol [←] you can type: [\l]. Generally + speaking, your editor can tell you how to type the symbols you see in Lean + code. For instance in VS Code, you can simply hover your mouse over the + symbol and a pop-up window will open displaying all the information you need. + -/ +#print Primitives.bind + +/- We show a desugared version of [mul2_add1] below: we remove the syntactic + sugar, and inline the definition of [bind] to make the matches over the + results explicit. 
+ -/ +def mul2_add1_desugared (x : U32) : Result U32 := + match Scalar.add x x with + | ret x1 => -- Success case + match Scalar.add x1 (U32.ofInt 1) with + | ret x2 => ret x2 + | error => error + | error => error -- Propagating the errors + +/- Now that we have seen how [mul2_add1] is defined precisely, we can prove + simple properties about it. For instance, what about proving that it evaluates + to [2 * x + 1]? + + We advise writing specifications in a Hoare-logic style, that is with + preconditions (requirements which must be satisfied by the inputs upon + calling the function) and postconditions (properties that we know about + the output after the function call). + + In the case of [mul2_add1] we could state the theorem as follows. + -/ + +theorem mul2_add1_spec + -- The input + (x : U32) + /- The precondition (we give it the name "h" to be able to refer to it in the proof). + We simply state that [2 * x + 1] must not overflow. + + The ↑ notation ("\u") is used to coerce values. Here, we coerce [x], which is + a bounded machine integer, to an unbounded mathematical integer, which is + easier to work with. Note that writing [↑x] is the same as writing [x.val]. + -/ + (h : 2 * ↑x + 1 ≤ U32.max) + /- The postcondition -/ + : ∃ y, mul2_add1 x = ret y ∧ -- The call succeeds + ↑ y = 2 * ↑x + (1 : Int) -- The output has the expected value + := by + /- The proof -/ + -- Start by a call to the rewriting tactic to reveal the body of [mul2_add1] + rw [mul2_add1] + /- Here we use the fact that if [x + x] doesn't overflow, then the addition + succeeds and returns the value we expect, as given by the theorem [U32.add_spec]. + Doing this properly requires a few manipulations: we need to instantiate + the theorem, introduce it in the context, destruct it to introduce [x1], etc. + We automate this with the [progress] tactic: [progress with th as ⟨ x1 .. ⟩] + uses theorem [th], instantiates it properly by looking at the goal, renames + the output to [x1] and further decomposes the postcondition of [th]. + + Note that it is possible to provide more inputs to name the assumptions + introduced by the postcondition (for instance: [as ⟨ x1, h ⟩]). + + If you look at the goal after the call to [progress], you wil see that: + - there is a new variable [x1] and an assumption stating that [↑x1 = ↑x + ↑x] + - the call [x + x] disappeared from the goal: we "progressed" by one step + + Remark: the theorem [U32.add_spec] actually has a precondition, namely that + the addition doesn't overflow. + In the present case, [progress] manages to prove this automatically by using + the fact that [2 * x + 1 < U32.max]. In case [progress] fails to prove a + precondition, it leaves it as a subgoal. + -/ + progress with U32.add_spec as ⟨ x1 ⟩ + /- We can call [progress] a second time for the second addition -/ + progress with U32.add_spec as ⟨ x2 ⟩ + /- We are now left with the remaining goal. We do this by calling the simplifier + then [scalar_tac], a tactic to solve arithmetic problems: + -/ + simp at * + scalar_tac + +/- The proof above works, but it can actually be simplified a bit. In particular, + it is a bit tedious to specify that [progress] should use [U32.add_spec], while + in most situations the theorem to use is obvious by looking at the function. + + For this reason, we provide the possibility of registering theorems in a database + so that [progress] can automatically look them up. This is done by marking + theorems with custom attributes, like [pspec] below. 
+ + Theorems in the standard library like [U32.add_spec] have already been marked with such + attributes, meaning we don't need to tell [progress] to use them. + -/ +@[pspec] -- the [pspec] attribute saves the theorem in a database, for [progress] to use it +theorem mul2_add1_spec2 (x : U32) (h : 2 * ↑x + 1 ≤ U32.max) + : ∃ y, mul2_add1 x = ret y ∧ + ↑ y = 2 * ↑x + (1 : Int) + := by + rw [mul2_add1] + progress as ⟨ x1 .. ⟩ -- [progress] automatically lookups [U32.add_spec] + progress as ⟨ x2 .. ⟩ -- same + simp at *; scalar_tac + +/- Because we marked [mul2_add1_spec2] theorem with [pspec], [progress] can + now automatically look it up. For instance, below: + -/ +-- A dummy function which uses [mul2_add1] +def use_mul2_add1 (x : U32) (y : U32) : Result U32 := do + let x1 ← mul2_add1 x + x1 + y + +@[pspec] +theorem use_mul2_add1_spec (x : U32) (y : U32) (h : 2 * ↑x + 1 + ↑y ≤ U32.max) : + ∃ z, use_mul2_add1 x y = ret z ∧ + ↑z = 2 * ↑x + (1 : Int) + ↑y := by + rw [use_mul2_add1] + -- Here we use [progress] on [mul2_add1] + progress as ⟨ x1 .. ⟩ + progress as ⟨ z .. ⟩ + simp at *; scalar_tac + + +/-#===========================================================================# + # + # Recursion + # + #===========================================================================#-/ + +/- We can have a look at more complex examples, for example recursive functions. -/ + +/- A custom list type. + + Original Rust code: + ``` + pub enum CList<T> { + CCons(T, Box<CList<T>>), + CNil, + } + ``` +-/ +inductive CList (T : Type) := +| CCons : T → CList T → CList T +| CNil : CList T + +-- Open the [CList] namespace, so that we can write [CCons] instead of [CList.CCons] +open CList + +/- A function accessing the ith element of a list. + + Original Rust code: + ``` + pub fn list_nth<'a, T>(l: &'a CList<T>, i: u32) -> &'a T { + match l { + List::CCons(x, tl) => { + if i == 0 { + return x; + } else { + return list_nth(tl, i - 1); + } + } + List::CNil => { + panic!() + } + } + } + ``` + -/ +divergent def list_nth (T : Type) (l : CList T) (i : U32) : Result T := + match l with + | CCons x tl => + if i = 0#u32 + then ret x + else do + let i1 ← i - 1#u32 + list_nth T tl i1 + | CNil => fail Error.panic + +/- Conversion to Lean's standard list type. + + Note that because we use the suffix "CList.", we can use the notation [l.to_list] + if [l] has type [CList ...]. + -/ +def CList.to_list {α : Type} (x : CList α) : List α := + match x with + | CNil => [] + | CCons hd tl => hd :: tl.to_list + +/- Let's prove that [list_nth] indeed accesses the ith element of the list. + + Remark: the parameter [Inhabited T] tells us that we must have an instance of the + typeclass [Inhabited] for the type [T]. As of today we can only use [index] with + inhabited types, that is to say types which are not empty (i.e., for which it is + possible to construct a value - for instance, [Int] is inhabited because we can exhibit + the value [0: Int]). This is a technical detail. + + Remark: we didn't mention it before, but we advise always writing inequalities + in the same direction (that is: use [<] and not [>]), because it helps the simplifier. + More specifically, if you have the assumption that [x > y] in the context, the simplifier + may not be able to rewrite [y < x] to [⊤]. 
+ -/ +theorem list_nth_spec {T : Type} [Inhabited T] (l : CList T) (i : U32) + -- Precondition: the index is in bounds + (h : ↑i < l.to_list.len) + -- Postcondition + : ∃ x, list_nth T l i = ret x ∧ + -- [x] is the ith element of [l] after conversion to [List] + x = l.to_list.index ↑i + := by + -- Here we have to be careful when unfolding the body of [list_nth]: we could + -- use the [simp] tactic, but it will sometimes loop on recursive definitions. + rw [list_nth] + -- Let's simply follow the structure of the function, by first matching on [l] + match l with + | CNil => + -- We can't get there: we can derive a contradiction from the precondition: + -- we have that [i < 0] (because [i < CNil.to_list.len]) and at the same + -- time [0 ≤ i] (because [i] is a [U32] unsigned integer). + -- First, let's simplify [to_list CNil] to [0] + simp [CList.to_list] at h + -- Proving we have a contradiction + scalar_tac + | CCons hd tl => + -- Simplify the match + simp only [] + -- Perform a case disjunction on [i]. + -- The notation [hi : ...] allows us to introduce an assumption in the + -- context, to remember the fact that in the branches we have [i = 0#u32] + -- and [¬ i = 0#u32]. + if hi: i = 0#u32 then + -- We can finish the proof simply by using the simplifier. + -- We decompose the proof into several calls on purpose, so that it is + -- easier to understand what is going on. + -- Simplify the condition and the [if then else] + simp [hi] + -- Prove the final equality + simp [CList.to_list] + else + -- The interesting branch + -- Simplify the condition and the [if then else] + simp [hi] + -- i0 := i - 1 + progress as ⟨ i1, hi1 ⟩ + -- [progress] can handle recursion + simp [CList.to_list] at h -- we need to simplify this inequality to prove the precondition + progress as ⟨ l1 ⟩ + -- Proving the postcondition + -- We need this to trigger the simplification of [index to.to_list i.val] + -- + -- Among other things, the call to [simp] below will apply the theorem + -- [List.index_nzero_cons], which has the precondition [i.val ≠ 0]. [simp] + -- can automatically use the assumptions/theorems we give it to prove + -- preconditions when applying rewriting lemmas. In the present case, + -- by giving it [*] as argument, we tell [simp] to use all the assumptions + -- to perform rewritings. In particular, it will use [i.val ≠ 0] to + -- apply [List.index_nzero_cons]. + have : i.val ≠ 0 := by scalar_tac -- Remark: [simp at hi] also works + simp [CList.to_list, *] + +/-#===========================================================================# + # + # Partial Functions + # + #===========================================================================#-/ + +/- Recursive functions may not terminate on all inputs. + + For instance, the function below only terminates on positive inputs (note + that we switched to signed integers), in which cases it behaves like the + identity. When we need to define such a potentially partial function, + we use the [divergent] keyword, which means that the function may diverge + (i.e., infinitely loop). + + We will skip the details of how [divergent] precisely handles non-termination. + All you need to know is that the [Result] type has actually 3 cases (we saw + the first 2 cases in the examples above): + - [ret]: successful computation + - [fail]: failure (panic because of overflow, etc.) 
+ - [div]: the computation doesn't terminate + + If in a theorem we state and prove that: + ``` + ∃ y, i32_id x = ret x + ``` + we not only prove that the function doesn't fail, but also that it terminates. + + *Remark*: in practice, whenever Aeneas generates a recursive function, it + annotates it with the [divergent] keyword. + -/ +divergent def i32_id (x : I32) : Result I32 := + if x = 0#i32 then ret 0#i32 + else do + let x1 ← x - 1#i32 + let x2 ← i32_id x1 + x2 + 1#i32 + +/- We can easily prove that [i32_id] behaves like the identity on positive inputs -/ +theorem i32_id_spec (x : I32) (h : 0 ≤ x.val) : + ∃ y, i32_id x = ret y ∧ x.val = y.val := by + rw [i32_id] + if hx : x = 0#i32 then + simp_all + else + simp [hx] + -- x - 1 + progress as ⟨ x1 ⟩ + -- Recursive call + progress as ⟨ x2 ⟩ + -- x2 + 1 + progress + -- Postcondition + simp; scalar_tac +-- Below: we have to prove that the recursive call performed in the proof terminates. +-- Otherwise, we could prove any result we want by simply writing a theorem which +-- uses itself in the proof. +-- +-- We first specify a decreasing value. Here, we state that [x], seen as a natural number, +-- decreases at every recursive call. +termination_by i32_id_spec x _ => x.val.toNat +-- And we now have to prove that it indeed decreases - you can skip this for now. +decreasing_by + -- We first need to "massage" the goal (in practice, all the proofs of [decreasing_by] + -- should start with a call to [simp_wf]). + simp_wf + -- Finish the proof + have : 1 ≤ x.val := by scalar_tac + simp [Int.toNat_sub_of_le, *] + +end Tutorial diff --git a/tests/lean/lakefile.lean b/tests/lean/lakefile.lean index cc63c48f..8acf6973 100644 --- a/tests/lean/lakefile.lean +++ b/tests/lean/lakefile.lean @@ -8,6 +8,7 @@ require Base from "../../backends/lean" package «tests» {} +@[default_target] lean_lib tutorial @[default_target] lean_lib betreeMain @[default_target] lean_lib constants @[default_target] lean_lib external |
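
Beyond the tutorial itself, the patch extends the proof infrastructure: `Primitives/Scalar.lean` gains the `0#usize`/`1#u32`-style literal notations together with `Scalar.ofInt_val_eq` and `Scalar.neq_to_neq_val`, and `Arith/Scalar.lean` feeds those lemmas to `scalar_tac`. The sketch below simply replays the tests added in the diff above (assuming `import Base` as in the tutorial):

```lean
import Base
open Primitives

-- The new postfix literals: `0#usize` is notation for `Usize.ofInt 0`.
-- (This is the notation test added in Primitives/Scalar.lean.)
example : Result Usize := 0#usize + 1#usize

-- `scalar_tac` now reduces `(Scalar.ofInt x h).val` to `x` via
-- `Scalar.ofInt_val_eq`, so goals mixing literals and bounds close directly.
example : U32.ofInt 1 ≤ U32.max := by
  scalar_tac

-- It also rewrites scalar disequalities to disequalities on `.val` via
-- `Scalar.neq_to_neq_val`, which makes hypotheses like `h0` usable.
example (x : U32) (h0 : ¬ x = U32.ofInt 0) : 0 < x.val := by
  scalar_tac
```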
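
The signed arithmetic lemmas added to `Primitives/Scalar.lean` (`Isize.add_spec`, `I8.add_spec`, …, plus the `sub`/`mul`/`div`/`rem` variants) are registered with `@[cepspec]`, so `progress` can find them on its own, as the unsigned example added to `Progress.lean` already demonstrates for `U32`. Below is an untested transposition of that `U32` test to `I32`; the extra `hmin` hypothesis is needed because the lower bound is not automatic for signed integers:

```lean
import Base
open Primitives Result

-- `progress` finds `I32.add_spec` in the `cepspec` database, proves its two
-- bound preconditions from `hmin`/`hmax`, and introduces `z` with its
-- postcondition; `simp` then closes the remaining equality.
example (x y : I32)
    (hmin : I32.min ≤ x.val + y.val) (hmax : x.val + y.val ≤ I32.max) :
    ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by
  progress keep _ as ⟨ z, h1 .. ⟩
  simp [*, h1]
```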