From acc09d5c69690f2c46cb1bacf290da5dcc268b24 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 6 Jun 2023 15:53:46 +0200 Subject: Remove the sorries from Primitives.lean --- backends/lean/Primitives.lean | 184 +++++++++++++++++++++++++++--------------- backends/lean/lakefile.lean | 1 + 2 files changed, 120 insertions(+), 65 deletions(-) (limited to 'backends') diff --git a/backends/lean/Primitives.lean b/backends/lean/Primitives.lean index 4a66a453..e7826fbf 100644 --- a/backends/lean/Primitives.lean +++ b/backends/lean/Primitives.lean @@ -2,9 +2,10 @@ import Lean import Lean.Meta.Tactic.Simp import Init.Data.List.Basic import Mathlib.Tactic.RunCmd +import Mathlib.Tactic.Linarith -------------------- --- ASSERT COMMAND -- +-- ASSERT COMMAND --Std. -------------------- open Lean Elab Command Term Meta @@ -249,27 +250,53 @@ def Scalar.cMax (ty : ScalarTy) : Int := | .Usize => U32.max | _ => Scalar.max ty -theorem Scalar.cMin_bound ty : Scalar.min ty <= Scalar.cMin ty := by sorry -theorem Scalar.cMax_bound ty : Scalar.min ty <= Scalar.cMin ty := by sorry +theorem Scalar.cMin_bound ty : Scalar.min ty ≤ Scalar.cMin ty := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> + simp [*] + +theorem Scalar.cMax_bound ty : Scalar.cMax ty ≤ Scalar.max ty := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> + simp [*] + +theorem Scalar.cMin_suffices ty (h : Scalar.cMin ty ≤ x) : Scalar.min ty ≤ x := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> + simp [*] at * + -- TODO: I would have expected terms like `-(1 + 1) ^ 63` to be simplified + linarith + +theorem Scalar.cMax_suffices ty (h : x ≤ Scalar.cMax ty) : x ≤ Scalar.max ty := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> + simp [*] at * <;> + -- TODO: I would have expected terms like `-(1 + 1) ^ 63` to be simplified + linarith structure Scalar (ty : ScalarTy) where val : Int - hmin : Scalar.min ty <= val - hmax : val <= Scalar.max ty + hmin : Scalar.min ty ≤ val + hmax : val ≤ Scalar.max ty theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) : - Scalar.cMin ty <= x && x <= Scalar.cMax ty -> - (decide (Scalar.min ty ≤ x) && decide (x ≤ Scalar.max ty)) = true - := by sorry + Scalar.cMin ty ≤ x ∧ x ≤ Scalar.cMax ty -> + Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty + := + λ h => by + apply And.intro <;> have hmin := Scalar.cMin_bound ty <;> have hmax := Scalar.cMax_bound ty <;> linarith def Scalar.ofIntCore {ty : ScalarTy} (x : Int) - (hmin : Scalar.min ty <= x) (hmax : x <= Scalar.max ty) : Scalar ty := + (hmin : Scalar.min ty ≤ x) (hmax : x ≤ Scalar.max ty) : Scalar ty := { val := x, hmin := hmin, hmax := hmax } def Scalar.ofInt {ty : ScalarTy} (x : Int) - (h : Scalar.min ty <= x && x <= Scalar.max ty) : Scalar ty := - let hmin: Scalar.min ty <= x := by sorry - let hmax: x <= Scalar.max ty := by sorry + (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty) : Scalar ty := + let ⟨ hmin, hmax ⟩ := h Scalar.ofIntCore x hmin hmax -- Further thoughts: look at what has been done here: @@ -279,12 +306,15 @@ def Scalar.ofInt {ty : ScalarTy} (x : Int) -- which both contain a fair amount of reasoning already! 
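-- Illustrative sketch (hypothetical, not part of this patch): with the `sorry`s
-- gone, a concrete scalar can be built by discharging the bound side-conditions
-- with `Scalar.bound_suffices` and `decide` (the `intlit` tactic defined further
-- down in this file packages exactly these two steps). The name `u32_two` is made
-- up for the example; run-time constructions instead go through `Scalar.tryMk`
-- below, which returns `fail integerOverflow` when the bounds are violated.
def u32_two : Scalar .U32 :=
  Scalar.ofInt 2 (Scalar.bound_suffices .U32 2 (by decide))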
def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) := -- TODO: write this with only one if then else - if hmin_cons: Scalar.cMin ty <= x || Scalar.min ty <= x then - if hmax_cons: x <= Scalar.cMax ty || x <= Scalar.max ty then - let hmin: Scalar.min ty <= x := by sorry - let hmax: x <= Scalar.max ty := by sorry - return Scalar.ofIntCore x hmin hmax - else fail integerOverflow + if h: (Scalar.cMin ty ≤ x || Scalar.min ty ≤ x) && (x ≤ Scalar.cMax ty || x ≤ Scalar.max ty) then + let h: Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty := by + simp at * + have ⟨ hmin, hmax ⟩ := h + have hbmin := Scalar.cMin_bound ty + have hbmax := Scalar.cMax_bound ty + cases hmin <;> cases hmax <;> apply And.intro <;> linarith + let ⟨ hmin, hmax ⟩ := h + return Scalar.ofIntCore x hmin hmax else fail integerOverflow def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val) @@ -292,11 +322,39 @@ def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tr def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero --- Checking that the % operation in Lean computes the same as the remainder operation in Rust -#assert 1 % 2 = (1:Int) -#assert (-1) % 2 = -1 -#assert 1 % (-2) = 1 -#assert (-1) % (-2) = -1 +-- Our custom remainder operation, which satisfies the semantics of Rust +-- TODO: is there a better way? +def scalar_rem (x y : Int) : Int := + if 0 ≤ x then |x| % |y| + else - (|x| % |y|) + +-- Our custom division operation, which satisfies the semantics of Rust +-- TODO: is there a better way? +def scalar_div (x y : Int) : Int := + if 0 ≤ x && 0 ≤ y then |x| / |y| + else if 0 ≤ x && y < 0 then - (|x| / |y|) + else if x < 0 && 0 ≤ y then - (|x| / |y|) + else |x| / |y| + +-- Checking that the remainder operation is correct +#assert scalar_rem 1 2 = 1 +#assert scalar_rem (-1) 2 = -1 +#assert scalar_rem 1 (-2) = 1 +#assert scalar_rem (-1) (-2) = -1 +#assert scalar_rem 7 3 = (1:Int) +#assert scalar_rem (-7) 3 = -1 +#assert scalar_rem 7 (-3) = 1 +#assert scalar_rem (-7) (-3) = -1 + +-- Checking that the division operation is correct +#assert scalar_div 3 2 = 1 +#assert scalar_div (-3) 2 = -1 +#assert scalar_div 3 (-2) = -1 +#assert scalar_div (-3) (-2) = 1 +#assert scalar_div 7 3 = 2 +#assert scalar_div (-7) 3 = -2 +#assert scalar_div 7 (-3) = -2 +#assert scalar_div (-7) (-3) = 2 def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero @@ -479,20 +537,29 @@ macro_rules -- VECTORS -- ------------- -def Vec (α : Type u) := { l : List α // List.length l <= Usize.max } +def Vec (α : Type u) := { l : List α // List.length l ≤ Usize.max } -def vec_new (α : Type u): Vec α := ⟨ [], by sorry ⟩ +def vec_new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ def vec_len (α : Type u) (v : Vec α) : Usize := let ⟨ v, l ⟩ := v - Usize.ofIntCore (List.length v) (by sorry) l + Usize.ofIntCore (List.length v) (by simp [Scalar.min]) l def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α) := - if h : List.length v.val <= U32.max || List.length v.val <= Usize.max then - return ⟨ List.concat v.val x, by sorry ⟩ + let nlen := List.length v.val + 1 + if h : nlen ≤ U32.max || nlen ≤ Usize.max then + have h : nlen ≤ Usize.max := by + simp at * + cases System.Platform.numBits_eq 
<;> + unfold System.Platform.numBits at * <;> + simp [*] at * <;> + try assumption + cases h <;> + linarith + return ⟨ List.concat v.val x, by simp at *; assumption ⟩ else fail maximumSizeExceeded @@ -506,30 +573,28 @@ def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α if i.val < List.length v.val then -- TODO: maybe we should redefine a list library which uses integers -- (instead of natural numbers) - let i : Nat := - match i.val with - | .ofNat n => n - | .negSucc n => by sorry -- TODO: we can't get here - let isLt: i < USize.size := by sorry - let i : Fin USize.size := { val := i, isLt := isLt } - .ret ⟨ List.set v.val i.val x, by - have h: List.length v.val <= Usize.max := v.property - rewrite [ List.length_set v.val i.val x ] + let i := i.val.toNat + .ret ⟨ List.set v.val i x, by + have h: List.length v.val ≤ Usize.max := v.property + simp [*] at * assumption ⟩ else .fail arrayOutOfBounds +def vec_index_to_fin {α : Type u} {v: Vec α} {i: Usize} (h : i.val < List.length v.val) : + Fin (List.length v.val) := + let j := i.val.toNat + let h: j < List.length v.val := by + have heq := @Int.toNat_lt (List.length v.val) i.val i.hmin + apply heq.mpr + assumption + ⟨j, h⟩ + def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α := - if i.val < List.length v.val then - let i : Nat := - match i.val with - | .ofNat n => n - | .negSucc n => by sorry -- TODO: we can't get here - let isLt: i < USize.size := by sorry - let i : Fin USize.size := { val := i, isLt := isLt } - let h: i < List.length v.val := by sorry - .ret (List.get v.val ⟨i.val, h⟩) + if h: i.val < List.length v.val then + let i := vec_index_to_fin h + .ret (List.get v.val i) else .fail arrayOutOfBounds @@ -540,29 +605,18 @@ def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := .fail arrayOutOfBounds def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α := - if i.val < List.length v.val then - let i : Nat := - match i.val with - | .ofNat n => n - | .negSucc n => by sorry -- TODO: we can't get here - let isLt: i < USize.size := by sorry - let i : Fin USize.size := { val := i, isLt := isLt } - let h: i < List.length v.val := by sorry - .ret (List.get v.val ⟨i.val, h⟩) + if h: i.val < List.length v.val then + let i := vec_index_to_fin h + .ret (List.get v.val i) else .fail arrayOutOfBounds def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := - if i.val < List.length v.val then - let i : Nat := - match i.val with - | .ofNat n => n - | .negSucc n => by sorry -- TODO: we can't get here - let isLt: i < USize.size := by sorry - let i : Fin USize.size := { val := i, isLt := isLt } - .ret ⟨ List.set v.val i.val x, by - have h: List.length v.val <= Usize.max := v.property - rewrite [ List.length_set v.val i.val x ] + if h: i.val < List.length v.val then + let i := vec_index_to_fin h + .ret ⟨ List.set v.val i x, by + have h: List.length v.val ≤ Usize.max := v.property + simp [*] at * assumption ⟩ else diff --git a/backends/lean/lakefile.lean b/backends/lean/lakefile.lean index 9633e1e8..c5e27d1c 100644 --- a/backends/lean/lakefile.lean +++ b/backends/lean/lakefile.lean @@ -1,6 +1,7 @@ import Lake open Lake DSL +-- Important: mathlib imports std4 and quote4: we mustn't add a `require std4` line require mathlib from git "https://github.com/leanprover-community/mathlib4.git" -- cgit v1.2.3 From e2fef1a5c986aff4c9975b1376bcc0fc0bb87940 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Fri, 9 Jun 2023 10:06:43 +0200 Subject: Reorganize a bit the Lean library 
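(Illustrative sketch, not part of the original commit message: the vector
primitives above now compose in the `Result` monad without any admitted goals.
The name `push_and_read` and the element type `U32` are made up for the example;
it pushes `x` and then reads it back at the old length.)

def push_and_read (v : Vec U32) (x : U32) : Result U32 := do
  let v1 ← vec_push_back U32 v x
  vec_index_fwd U32 v1 (vec_len U32 v)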
--- backends/lean/Base.lean | 1 + backends/lean/Base/Primitives.lean | 650 +++++++++++++++++++++++++++++++++++++ backends/lean/Primitives.lean | 637 ------------------------------------ backends/lean/lake-manifest.json | 8 +- backends/lean/lakefile.lean | 2 +- 5 files changed, 656 insertions(+), 642 deletions(-) create mode 100644 backends/lean/Base.lean create mode 100644 backends/lean/Base/Primitives.lean delete mode 100644 backends/lean/Primitives.lean (limited to 'backends') diff --git a/backends/lean/Base.lean b/backends/lean/Base.lean new file mode 100644 index 00000000..960b2bb5 --- /dev/null +++ b/backends/lean/Base.lean @@ -0,0 +1 @@ +import Base.Primitives diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean new file mode 100644 index 00000000..d3de1d10 --- /dev/null +++ b/backends/lean/Base/Primitives.lean @@ -0,0 +1,650 @@ +import Lean +import Lean.Meta.Tactic.Simp +import Init.Data.List.Basic +import Mathlib.Tactic.RunCmd +import Mathlib.Tactic.Linarith + +-------------------- +-- ASSERT COMMAND --Std. +-------------------- + +open Lean Elab Command Term Meta + +syntax (name := assert) "#assert" term: command + +@[command_elab assert] +unsafe +def assertImpl : CommandElab := fun (_stx: Syntax) => do + runTermElabM (fun _ => do + let r ← evalTerm Bool (mkConst ``Bool) _stx[1] + if not r then + logInfo "Assertion failed for: " + logInfo _stx[1] + logError "Expression reduced to false" + pure ()) + +#eval 2 == 2 +#assert (2 == 2) + +------------- +-- PRELUDE -- +------------- + +-- Results & monadic combinators + +inductive Error where + | assertionFailure: Error + | integerOverflow: Error + | divisionByZero: Error + | arrayOutOfBounds: Error + | maximumSizeExceeded: Error + | panic: Error +deriving Repr, BEq + +open Error + +inductive Result (α : Type u) where + | ret (v: α): Result α + | fail (e: Error): Result α +deriving Repr, BEq + +open Result + +instance Result_Inhabited (α : Type u) : Inhabited (Result α) := + Inhabited.mk (fail panic) + +/- HELPERS -/ + +def ret? {α: Type} (r: Result α): Bool := + match r with + | Result.ret _ => true + | Result.fail _ => false + +def massert (b:Bool) : Result Unit := + if b then .ret () else fail assertionFailure + +def eval_global {α: Type} (x: Result α) (_: ret? x): α := + match x with + | Result.fail _ => by contradiction + | Result.ret x => x + +/- DO-DSL SUPPORT -/ + +def bind (x: Result α) (f: α -> Result β) : Result β := + match x with + | ret v => f v + | fail v => fail v + +-- Allows using Result in do-blocks +instance : Bind Result where + bind := bind + +-- Allows using return x in do-blocks +instance : Pure Result where + pure := fun x => ret x + +/- CUSTOM-DSL SUPPORT -/ + +-- Let-binding the Result of a monadic operation is oftentimes not sufficient, +-- because we may need a hypothesis for equational reasoning in the scope. We +-- rely on subtype, and a custom let-binding operator, in effect recreating our +-- own variant of the do-dsl + +def Result.attach {α: Type} (o : Result α): Result { x : α // o = ret x } := + match o with + | .ret x => .ret ⟨x, rfl⟩ + | .fail e => .fail e + +macro "let" e:term " ⟵ " f:term : doElem => + `(doElem| let ⟨$e, h⟩ ← Result.attach $f) + +-- TODO: any way to factorize both definitions? +macro "let" e:term " <-- " f:term : doElem => + `(doElem| let ⟨$e, h⟩ ← Result.attach $f) + +-- We call the hypothesis `h`, in effect making it unavailable to the user +-- (because too much shadowing). 
But in practice, once can use the French single +-- quote notation (input with f< and f>), where `‹ h ›` finds a suitable +-- hypothesis in the context, this is equivalent to `have x: h := by assumption in x` +#eval do + let y <-- .ret (0: Nat) + let _: y = 0 := by cases ‹ ret 0 = ret y › ; decide + let r: { x: Nat // x = 0 } := ⟨ y, by assumption ⟩ + .ret r + +---------------------- +-- MACHINE INTEGERS -- +---------------------- + +-- We redefine our machine integers types. + +-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits` +-- using the simplifier, meaning that proofs do not depend on the compile-time value of +-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at +-- least officially, 16-bit microcontrollers, so this seems like a fine design decision +-- for now.) + +-- Note from Chris Bailey: "If there's more than one salient property of your +-- definition then the subtyping strategy might get messy, and the property part +-- of a subtype is less discoverable by the simplifier or tactics like +-- library_search." So, we will not add refinements on the return values of the +-- operations defined on Primitives, but will rather rely on custom lemmas to +-- invert on possible return values of the primitive operations. + +-- Machine integer constants, done via `ofNatCore`, which requires a proof that +-- the `Nat` fits within the desired integer type. We provide a custom tactic. + +open System.Platform.getNumBits + +-- TODO: is there a way of only importing System.Platform.getNumBits? +-- +@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val + +-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention. +-- We keep the F* convention for now. 
+@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1)) +@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1 +@[simp] def I8.min : Int := - (HPow.hPow 2 7) +@[simp] def I8.max : Int := HPow.hPow 2 7 - 1 +@[simp] def I16.min : Int := - (HPow.hPow 2 15) +@[simp] def I16.max : Int := HPow.hPow 2 15 - 1 +@[simp] def I32.min : Int := -(HPow.hPow 2 31) +@[simp] def I32.max : Int := HPow.hPow 2 31 - 1 +@[simp] def I64.min : Int := -(HPow.hPow 2 63) +@[simp] def I64.max : Int := HPow.hPow 2 63 - 1 +@[simp] def I128.min : Int := -(HPow.hPow 2 127) +@[simp] def I128.max : Int := HPow.hPow 2 127 - 1 +@[simp] def Usize.min : Int := 0 +@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1 +@[simp] def U8.min : Int := 0 +@[simp] def U8.max : Int := HPow.hPow 2 8 - 1 +@[simp] def U16.min : Int := 0 +@[simp] def U16.max : Int := HPow.hPow 2 16 - 1 +@[simp] def U32.min : Int := 0 +@[simp] def U32.max : Int := HPow.hPow 2 32 - 1 +@[simp] def U64.min : Int := 0 +@[simp] def U64.max : Int := HPow.hPow 2 64 - 1 +@[simp] def U128.min : Int := 0 +@[simp] def U128.max : Int := HPow.hPow 2 128 - 1 + +#assert (I8.min == -128) +#assert (I8.max == 127) +#assert (I16.min == -32768) +#assert (I16.max == 32767) +#assert (I32.min == -2147483648) +#assert (I32.max == 2147483647) +#assert (I64.min == -9223372036854775808) +#assert (I64.max == 9223372036854775807) +#assert (I128.min == -170141183460469231731687303715884105728) +#assert (I128.max == 170141183460469231731687303715884105727) +#assert (U8.min == 0) +#assert (U8.max == 255) +#assert (U16.min == 0) +#assert (U16.max == 65535) +#assert (U32.min == 0) +#assert (U32.max == 4294967295) +#assert (U64.min == 0) +#assert (U64.max == 18446744073709551615) +#assert (U128.min == 0) +#assert (U128.max == 340282366920938463463374607431768211455) + +inductive ScalarTy := +| Isize +| I8 +| I16 +| I32 +| I64 +| I128 +| Usize +| U8 +| U16 +| U32 +| U64 +| U128 + +def Scalar.min (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.min + | .I8 => I8.min + | .I16 => I16.min + | .I32 => I32.min + | .I64 => I64.min + | .I128 => I128.min + | .Usize => Usize.min + | .U8 => U8.min + | .U16 => U16.min + | .U32 => U32.min + | .U64 => U64.min + | .U128 => U128.min + +def Scalar.max (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.max + | .I8 => I8.max + | .I16 => I16.max + | .I32 => I32.max + | .I64 => I64.max + | .I128 => I128.max + | .Usize => Usize.max + | .U8 => U8.max + | .U16 => U16.max + | .U32 => U32.max + | .U64 => U64.max + | .U128 => U128.max + +-- "Conservative" bounds +-- We use those because we can't compare to the isize bounds (which can't +-- reduce at compile-time). Whenever we perform an arithmetic operation like +-- addition we need to check that the result is in bounds: we first compare +-- to the conservative bounds, which reduce, then compare to the real bounds. +-- This is useful for the various #asserts that we want to reduce at +-- type-checking time. 
+def Scalar.cMin (ty : ScalarTy) : Int := + match ty with + | .Isize => I32.min + | _ => Scalar.min ty + +def Scalar.cMax (ty : ScalarTy) : Int := + match ty with + | .Isize => I32.max + | .Usize => U32.max + | _ => Scalar.max ty + +theorem Scalar.cMin_bound ty : Scalar.min ty ≤ Scalar.cMin ty := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> + simp [*] + +theorem Scalar.cMax_bound ty : Scalar.cMax ty ≤ Scalar.max ty := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> + simp [*] + +theorem Scalar.cMin_suffices ty (h : Scalar.cMin ty ≤ x) : Scalar.min ty ≤ x := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> + simp [*] at * + -- TODO: I would have expected terms like `-(1 + 1) ^ 63` to be simplified + linarith + +theorem Scalar.cMax_suffices ty (h : x ≤ Scalar.cMax ty) : x ≤ Scalar.max ty := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> + simp [*] at * <;> + -- TODO: I would have expected terms like `-(1 + 1) ^ 63` to be simplified + linarith + +structure Scalar (ty : ScalarTy) where + val : Int + hmin : Scalar.min ty ≤ val + hmax : val ≤ Scalar.max ty +deriving Repr + +theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) : + Scalar.cMin ty ≤ x ∧ x ≤ Scalar.cMax ty -> + Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty + := + λ h => by + apply And.intro <;> have hmin := Scalar.cMin_bound ty <;> have hmax := Scalar.cMax_bound ty <;> linarith + +def Scalar.ofIntCore {ty : ScalarTy} (x : Int) + (hmin : Scalar.min ty ≤ x) (hmax : x ≤ Scalar.max ty) : Scalar ty := + { val := x, hmin := hmin, hmax := hmax } + +def Scalar.ofInt {ty : ScalarTy} (x : Int) + (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty) : Scalar ty := + -- Remark: we initially wrote: + -- let ⟨ hmin, hmax ⟩ := h + -- Scalar.ofIntCore x hmin hmax + -- We updated to the line below because a similar pattern in `Scalar.tryMk` + -- made reduction block. Both versions seem to work for `Scalar.ofInt`, though. + -- TODO: investigate + Scalar.ofIntCore x h.left h.right + +@[simp] def Scalar.check_bounds (ty : ScalarTy) (x : Int) : Bool := + (Scalar.cMin ty ≤ x || Scalar.min ty ≤ x) ∧ (x ≤ Scalar.cMax ty || x ≤ Scalar.max ty) + +theorem Scalar.check_bounds_prop {ty : ScalarTy} {x : Int} (h: Scalar.check_bounds ty x) : + Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty := by + simp at * + have ⟨ hmin, hmax ⟩ := h + have hbmin := Scalar.cMin_bound ty + have hbmax := Scalar.cMax_bound ty + cases hmin <;> cases hmax <;> apply And.intro <;> linarith + +-- Further thoughts: look at what has been done here: +-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean +-- and +-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean +-- which both contain a fair amount of reasoning already! +def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) := + if h:Scalar.check_bounds ty x then + -- If we do: + -- ``` + -- let ⟨ hmin, hmax ⟩ := (Scalar.check_bounds_prop h) + -- Scalar.ofIntCore x hmin hmax + -- ``` + -- then normalization blocks (for instance, some proofs which use reflexivity fail). 
+ -- However, the version below doesn't block reduction (TODO: investigate): + return Scalar.ofInt x (Scalar.check_bounds_prop h) + else fail integerOverflow + +def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val) + +def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero + +-- Our custom remainder operation, which satisfies the semantics of Rust +-- TODO: is there a better way? +def scalar_rem (x y : Int) : Int := + if 0 ≤ x then |x| % |y| + else - (|x| % |y|) + +-- Our custom division operation, which satisfies the semantics of Rust +-- TODO: is there a better way? +def scalar_div (x y : Int) : Int := + if 0 ≤ x && 0 ≤ y then |x| / |y| + else if 0 ≤ x && y < 0 then - (|x| / |y|) + else if x < 0 && 0 ≤ y then - (|x| / |y|) + else |x| / |y| + +-- Checking that the remainder operation is correct +#assert scalar_rem 1 2 = 1 +#assert scalar_rem (-1) 2 = -1 +#assert scalar_rem 1 (-2) = 1 +#assert scalar_rem (-1) (-2) = -1 +#assert scalar_rem 7 3 = (1:Int) +#assert scalar_rem (-7) 3 = -1 +#assert scalar_rem 7 (-3) = 1 +#assert scalar_rem (-7) (-3) = -1 + +-- Checking that the division operation is correct +#assert scalar_div 3 2 = 1 +#assert scalar_div (-3) 2 = -1 +#assert scalar_div 3 (-2) = -1 +#assert scalar_div (-3) (-2) = 1 +#assert scalar_div 7 3 = 2 +#assert scalar_div (-7) 3 = -2 +#assert scalar_div 7 (-3) = -2 +#assert scalar_div (-7) (-3) = 2 + +def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero + +def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + Scalar.tryMk ty (x.val + y.val) + +def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + Scalar.tryMk ty (x.val - y.val) + +def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + Scalar.tryMk ty (x.val * y.val) + +-- TODO: instances of +, -, * etc. for scalars + +-- Cast an integer from a [src_ty] to a [tgt_ty] +-- TODO: check the semantics of casts in Rust +def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) := + Scalar.tryMk tgt_ty x.val + +-- The scalar types +-- We declare the definitions as reducible so that Lean can unfold them (useful +-- for type class resolution for instance). +@[reducible] def Isize := Scalar .Isize +@[reducible] def I8 := Scalar .I8 +@[reducible] def I16 := Scalar .I16 +@[reducible] def I32 := Scalar .I32 +@[reducible] def I64 := Scalar .I64 +@[reducible] def I128 := Scalar .I128 +@[reducible] def Usize := Scalar .Usize +@[reducible] def U8 := Scalar .U8 +@[reducible] def U16 := Scalar .U16 +@[reducible] def U32 := Scalar .U32 +@[reducible] def U64 := Scalar .U64 +@[reducible] def U128 := Scalar .U128 + +-- TODO: below: not sure this is the best way. +-- Should we rather overload operations like +, -, etc.? +-- Also, it is possible to automate the generation of those definitions +-- with macros (but would it be a good idea? It would be less easy to +-- read the file, which is not supposed to change a lot) + +-- Negation + +/-- +Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce +one here. + +The notation typeclass for heterogeneous addition. +This enables the notation `- a : β` where `a : α`. 
+-/ +class HNeg (α : Type u) (β : outParam (Type v)) where + /-- `- a` computes the negation of `a`. + The meaning of this notation is type-dependent. -/ + hNeg : α → β + +prefix:75 "-" => HNeg.hNeg + +instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x +instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x +instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x +instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x +instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x +instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x + +-- Addition +instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where + hAdd x y := Scalar.add x y + +-- Substraction +instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where + hSub x y := Scalar.sub x y + +-- Multiplication +instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where + hMul x y := Scalar.mul x y + +-- Division +instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where + hDiv x y := Scalar.div x y + +-- Remainder +instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where + hMod x y := Scalar.rem x y + +-- ofIntCore +-- TODO: typeclass? +def Isize.ofIntCore := @Scalar.ofIntCore .Isize +def I8.ofIntCore := @Scalar.ofIntCore .I8 +def I16.ofIntCore := @Scalar.ofIntCore .I16 +def I32.ofIntCore := @Scalar.ofIntCore .I32 +def I64.ofIntCore := @Scalar.ofIntCore .I64 +def I128.ofIntCore := @Scalar.ofIntCore .I128 +def Usize.ofIntCore := @Scalar.ofIntCore .Usize +def U8.ofIntCore := @Scalar.ofIntCore .U8 +def U16.ofIntCore := @Scalar.ofIntCore .U16 +def U32.ofIntCore := @Scalar.ofIntCore .U32 +def U64.ofIntCore := @Scalar.ofIntCore .U64 +def U128.ofIntCore := @Scalar.ofIntCore .U128 + +-- ofInt +-- TODO: typeclass? +def Isize.ofInt := @Scalar.ofInt .Isize +def I8.ofInt := @Scalar.ofInt .I8 +def I16.ofInt := @Scalar.ofInt .I16 +def I32.ofInt := @Scalar.ofInt .I32 +def I64.ofInt := @Scalar.ofInt .I64 +def I128.ofInt := @Scalar.ofInt .I128 +def Usize.ofInt := @Scalar.ofInt .Usize +def U8.ofInt := @Scalar.ofInt .U8 +def U16.ofInt := @Scalar.ofInt .U16 +def U32.ofInt := @Scalar.ofInt .U32 +def U64.ofInt := @Scalar.ofInt .U64 +def U128.ofInt := @Scalar.ofInt .U128 + +-- Comparisons +instance {ty} : LT (Scalar ty) where + lt a b := LT.lt a.val b.val + +instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val + +instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt .. +instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe .. 
+ +theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j + | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl + +theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val := + h ▸ rfl + +theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) := + fun h' => absurd (val_eq_of_eq h') h + +instance (ty : ScalarTy) : DecidableEq (Scalar ty) := + fun i j => + match decEq i.val j.val with + | isTrue h => isTrue (Scalar.eq_of_val_eq h) + | isFalse h => isFalse (Scalar.ne_of_val_ne h) + +def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val + +-- Tactic to prove that integers are in bounds +syntax "intlit" : tactic + +macro_rules + | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide) + +-- -- We now define a type class that subsumes the various machine integer types, so +-- -- as to write a concise definition for scalar_cast, rather than exhaustively +-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics +-- -- and fails if a cast operation would involve a truncation or modulo. + +-- class MachineInteger (t: Type) where +-- size: Nat +-- val: t -> Fin size +-- ofNatCore: (n:Nat) -> LT.lt n size -> t + +-- set_option hygiene false in +-- run_cmd +-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do +-- Lean.Elab.Command.elabCommand (← `( +-- namespace $typeName +-- instance: MachineInteger $typeName where +-- size := size +-- val := val +-- ofNatCore := ofNatCore +-- end $typeName +-- )) + +-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on +-- -- Lean to infer `src`. + +-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst := +-- if h: MachineInteger.val x < MachineInteger.size dst then +-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h) +-- else +-- .fail integerOverflow + +------------- +-- VECTORS -- +------------- + +def Vec (α : Type u) := { l : List α // List.length l ≤ Usize.max } + +def vec_new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ + +def vec_len (α : Type u) (v : Vec α) : Usize := + let ⟨ v, l ⟩ := v + Usize.ofIntCore (List.length v) (by simp [Scalar.min]) l + +def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () + +def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α) + := + let nlen := List.length v.val + 1 + if h : nlen ≤ U32.max || nlen ≤ Usize.max then + have h : nlen ≤ Usize.max := by + simp at * + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> + simp [*] at * <;> + try assumption + cases h <;> + linarith + return ⟨ List.concat v.val x, by simp at *; assumption ⟩ + else + fail maximumSizeExceeded + +def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := + if i.val < List.length v.val then + .ret () + else + .fail arrayOutOfBounds + +def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := + if i.val < List.length v.val then + -- TODO: maybe we should redefine a list library which uses integers + -- (instead of natural numbers) + let i := i.val.toNat + .ret ⟨ List.set v.val i x, by + have h: List.length v.val ≤ Usize.max := v.property + simp [*] at * + assumption + ⟩ + else + .fail arrayOutOfBounds + +def vec_index_to_fin {α : Type u} {v: Vec α} {i: Usize} (h : i.val < List.length v.val) : + Fin (List.length v.val) := + let j := i.val.toNat + let h: j < List.length v.val := by + have heq := 
@Int.toNat_lt (List.length v.val) i.val i.hmin + apply heq.mpr + assumption + ⟨j, h⟩ + +def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α := + if h: i.val < List.length v.val then + let i := vec_index_to_fin h + .ret (List.get v.val i) + else + .fail arrayOutOfBounds + +def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := + if i.val < List.length v.val then + .ret () + else + .fail arrayOutOfBounds + +def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α := + if h: i.val < List.length v.val then + let i := vec_index_to_fin h + .ret (List.get v.val i) + else + .fail arrayOutOfBounds + +def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := + if h: i.val < List.length v.val then + let i := vec_index_to_fin h + .ret ⟨ List.set v.val i x, by + have h: List.length v.val ≤ Usize.max := v.property + simp [*] at * + assumption + ⟩ + else + .fail arrayOutOfBounds + +---------- +-- MISC -- +---------- + +@[simp] def mem_replace_fwd (a : Type) (x : a) (_ : a) : a := x +@[simp] def mem_replace_back (a : Type) (_ : a) (y : a) : a := y + +/-- Aeneas-translated function -- useful to reduce non-recursive definitions. + Use with `simp [ aeneas ]` -/ +register_simp_attr aeneas diff --git a/backends/lean/Primitives.lean b/backends/lean/Primitives.lean deleted file mode 100644 index e7826fbf..00000000 --- a/backends/lean/Primitives.lean +++ /dev/null @@ -1,637 +0,0 @@ -import Lean -import Lean.Meta.Tactic.Simp -import Init.Data.List.Basic -import Mathlib.Tactic.RunCmd -import Mathlib.Tactic.Linarith - --------------------- --- ASSERT COMMAND --Std. --------------------- - -open Lean Elab Command Term Meta - -syntax (name := assert) "#assert" term: command - -@[command_elab assert] -unsafe -def assertImpl : CommandElab := fun (_stx: Syntax) => do - runTermElabM (fun _ => do - let r ← evalTerm Bool (mkConst ``Bool) _stx[1] - if not r then - logInfo "Assertion failed for: " - logInfo _stx[1] - logError "Expression reduced to false" - pure ()) - -#eval 2 == 2 -#assert (2 == 2) - -------------- --- PRELUDE -- -------------- - --- Results & monadic combinators - -inductive Error where - | assertionFailure: Error - | integerOverflow: Error - | divisionByZero: Error - | arrayOutOfBounds: Error - | maximumSizeExceeded: Error - | panic: Error -deriving Repr, BEq - -open Error - -inductive Result (α : Type u) where - | ret (v: α): Result α - | fail (e: Error): Result α -deriving Repr, BEq - -open Result - -instance Result_Inhabited (α : Type u) : Inhabited (Result α) := - Inhabited.mk (fail panic) - -/- HELPERS -/ - -def ret? {α: Type} (r: Result α): Bool := - match r with - | Result.ret _ => true - | Result.fail _ => false - -def massert (b:Bool) : Result Unit := - if b then .ret () else fail assertionFailure - -def eval_global {α: Type} (x: Result α) (_: ret? x): α := - match x with - | Result.fail _ => by contradiction - | Result.ret x => x - -/- DO-DSL SUPPORT -/ - -def bind (x: Result α) (f: α -> Result β) : Result β := - match x with - | ret v => f v - | fail v => fail v - --- Allows using Result in do-blocks -instance : Bind Result where - bind := bind - --- Allows using return x in do-blocks -instance : Pure Result where - pure := fun x => ret x - -/- CUSTOM-DSL SUPPORT -/ - --- Let-binding the Result of a monadic operation is oftentimes not sufficient, --- because we may need a hypothesis for equational reasoning in the scope. 
We --- rely on subtype, and a custom let-binding operator, in effect recreating our --- own variant of the do-dsl - -def Result.attach {α: Type} (o : Result α): Result { x : α // o = ret x } := - match o with - | .ret x => .ret ⟨x, rfl⟩ - | .fail e => .fail e - -macro "let" e:term " ⟵ " f:term : doElem => - `(doElem| let ⟨$e, h⟩ ← Result.attach $f) - --- TODO: any way to factorize both definitions? -macro "let" e:term " <-- " f:term : doElem => - `(doElem| let ⟨$e, h⟩ ← Result.attach $f) - --- We call the hypothesis `h`, in effect making it unavailable to the user --- (because too much shadowing). But in practice, once can use the French single --- quote notation (input with f< and f>), where `‹ h ›` finds a suitable --- hypothesis in the context, this is equivalent to `have x: h := by assumption in x` -#eval do - let y <-- .ret (0: Nat) - let _: y = 0 := by cases ‹ ret 0 = ret y › ; decide - let r: { x: Nat // x = 0 } := ⟨ y, by assumption ⟩ - .ret r - ----------------------- --- MACHINE INTEGERS -- ----------------------- - --- We redefine our machine integers types. - --- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits` --- using the simplifier, meaning that proofs do not depend on the compile-time value of --- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at --- least officially, 16-bit microcontrollers, so this seems like a fine design decision --- for now.) - --- Note from Chris Bailey: "If there's more than one salient property of your --- definition then the subtyping strategy might get messy, and the property part --- of a subtype is less discoverable by the simplifier or tactics like --- library_search." So, we will not add refinements on the return values of the --- operations defined on Primitives, but will rather rely on custom lemmas to --- invert on possible return values of the primitive operations. - --- Machine integer constants, done via `ofNatCore`, which requires a proof that --- the `Nat` fits within the desired integer type. We provide a custom tactic. - -open System.Platform.getNumBits - --- TODO: is there a way of only importing System.Platform.getNumBits? --- -@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val - --- Remark: Lean seems to use < for the comparisons with the upper bounds by convention. --- We keep the F* convention for now. 
-@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1)) -@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1 -@[simp] def I8.min : Int := - (HPow.hPow 2 7) -@[simp] def I8.max : Int := HPow.hPow 2 7 - 1 -@[simp] def I16.min : Int := - (HPow.hPow 2 15) -@[simp] def I16.max : Int := HPow.hPow 2 15 - 1 -@[simp] def I32.min : Int := -(HPow.hPow 2 31) -@[simp] def I32.max : Int := HPow.hPow 2 31 - 1 -@[simp] def I64.min : Int := -(HPow.hPow 2 63) -@[simp] def I64.max : Int := HPow.hPow 2 63 - 1 -@[simp] def I128.min : Int := -(HPow.hPow 2 127) -@[simp] def I128.max : Int := HPow.hPow 2 127 - 1 -@[simp] def Usize.min : Int := 0 -@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1 -@[simp] def U8.min : Int := 0 -@[simp] def U8.max : Int := HPow.hPow 2 8 - 1 -@[simp] def U16.min : Int := 0 -@[simp] def U16.max : Int := HPow.hPow 2 16 - 1 -@[simp] def U32.min : Int := 0 -@[simp] def U32.max : Int := HPow.hPow 2 32 - 1 -@[simp] def U64.min : Int := 0 -@[simp] def U64.max : Int := HPow.hPow 2 64 - 1 -@[simp] def U128.min : Int := 0 -@[simp] def U128.max : Int := HPow.hPow 2 128 - 1 - -#assert (I8.min == -128) -#assert (I8.max == 127) -#assert (I16.min == -32768) -#assert (I16.max == 32767) -#assert (I32.min == -2147483648) -#assert (I32.max == 2147483647) -#assert (I64.min == -9223372036854775808) -#assert (I64.max == 9223372036854775807) -#assert (I128.min == -170141183460469231731687303715884105728) -#assert (I128.max == 170141183460469231731687303715884105727) -#assert (U8.min == 0) -#assert (U8.max == 255) -#assert (U16.min == 0) -#assert (U16.max == 65535) -#assert (U32.min == 0) -#assert (U32.max == 4294967295) -#assert (U64.min == 0) -#assert (U64.max == 18446744073709551615) -#assert (U128.min == 0) -#assert (U128.max == 340282366920938463463374607431768211455) - -inductive ScalarTy := -| Isize -| I8 -| I16 -| I32 -| I64 -| I128 -| Usize -| U8 -| U16 -| U32 -| U64 -| U128 - -def Scalar.min (ty : ScalarTy) : Int := - match ty with - | .Isize => Isize.min - | .I8 => I8.min - | .I16 => I16.min - | .I32 => I32.min - | .I64 => I64.min - | .I128 => I128.min - | .Usize => Usize.min - | .U8 => U8.min - | .U16 => U16.min - | .U32 => U32.min - | .U64 => U64.min - | .U128 => U128.min - -def Scalar.max (ty : ScalarTy) : Int := - match ty with - | .Isize => Isize.max - | .I8 => I8.max - | .I16 => I16.max - | .I32 => I32.max - | .I64 => I64.max - | .I128 => I128.max - | .Usize => Usize.max - | .U8 => U8.max - | .U16 => U16.max - | .U32 => U32.max - | .U64 => U64.max - | .U128 => U128.max - --- "Conservative" bounds --- We use those because we can't compare to the isize bounds (which can't --- reduce at compile-time). Whenever we perform an arithmetic operation like --- addition we need to check that the result is in bounds: we first compare --- to the conservative bounds, which reduce, then compare to the real bounds. --- This is useful for the various #asserts that we want to reduce at --- type-checking time. 
-def Scalar.cMin (ty : ScalarTy) : Int := - match ty with - | .Isize => I32.min - | _ => Scalar.min ty - -def Scalar.cMax (ty : ScalarTy) : Int := - match ty with - | .Isize => I32.max - | .Usize => U32.max - | _ => Scalar.max ty - -theorem Scalar.cMin_bound ty : Scalar.min ty ≤ Scalar.cMin ty := by - cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> - simp [*] - -theorem Scalar.cMax_bound ty : Scalar.cMax ty ≤ Scalar.max ty := by - cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> - simp [*] - -theorem Scalar.cMin_suffices ty (h : Scalar.cMin ty ≤ x) : Scalar.min ty ≤ x := by - cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> - simp [*] at * - -- TODO: I would have expected terms like `-(1 + 1) ^ 63` to be simplified - linarith - -theorem Scalar.cMax_suffices ty (h : x ≤ Scalar.cMax ty) : x ≤ Scalar.max ty := by - cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> - simp [*] at * <;> - -- TODO: I would have expected terms like `-(1 + 1) ^ 63` to be simplified - linarith - -structure Scalar (ty : ScalarTy) where - val : Int - hmin : Scalar.min ty ≤ val - hmax : val ≤ Scalar.max ty - -theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) : - Scalar.cMin ty ≤ x ∧ x ≤ Scalar.cMax ty -> - Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty - := - λ h => by - apply And.intro <;> have hmin := Scalar.cMin_bound ty <;> have hmax := Scalar.cMax_bound ty <;> linarith - -def Scalar.ofIntCore {ty : ScalarTy} (x : Int) - (hmin : Scalar.min ty ≤ x) (hmax : x ≤ Scalar.max ty) : Scalar ty := - { val := x, hmin := hmin, hmax := hmax } - -def Scalar.ofInt {ty : ScalarTy} (x : Int) - (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty) : Scalar ty := - let ⟨ hmin, hmax ⟩ := h - Scalar.ofIntCore x hmin hmax - --- Further thoughts: look at what has been done here: --- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean --- and --- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean --- which both contain a fair amount of reasoning already! -def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) := - -- TODO: write this with only one if then else - if h: (Scalar.cMin ty ≤ x || Scalar.min ty ≤ x) && (x ≤ Scalar.cMax ty || x ≤ Scalar.max ty) then - let h: Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty := by - simp at * - have ⟨ hmin, hmax ⟩ := h - have hbmin := Scalar.cMin_bound ty - have hbmax := Scalar.cMax_bound ty - cases hmin <;> cases hmax <;> apply And.intro <;> linarith - let ⟨ hmin, hmax ⟩ := h - return Scalar.ofIntCore x hmin hmax - else fail integerOverflow - -def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val) - -def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero - --- Our custom remainder operation, which satisfies the semantics of Rust --- TODO: is there a better way? -def scalar_rem (x y : Int) : Int := - if 0 ≤ x then |x| % |y| - else - (|x| % |y|) - --- Our custom division operation, which satisfies the semantics of Rust --- TODO: is there a better way? 
-def scalar_div (x y : Int) : Int := - if 0 ≤ x && 0 ≤ y then |x| / |y| - else if 0 ≤ x && y < 0 then - (|x| / |y|) - else if x < 0 && 0 ≤ y then - (|x| / |y|) - else |x| / |y| - --- Checking that the remainder operation is correct -#assert scalar_rem 1 2 = 1 -#assert scalar_rem (-1) 2 = -1 -#assert scalar_rem 1 (-2) = 1 -#assert scalar_rem (-1) (-2) = -1 -#assert scalar_rem 7 3 = (1:Int) -#assert scalar_rem (-7) 3 = -1 -#assert scalar_rem 7 (-3) = 1 -#assert scalar_rem (-7) (-3) = -1 - --- Checking that the division operation is correct -#assert scalar_div 3 2 = 1 -#assert scalar_div (-3) 2 = -1 -#assert scalar_div 3 (-2) = -1 -#assert scalar_div (-3) (-2) = 1 -#assert scalar_div 7 3 = 2 -#assert scalar_div (-7) 3 = -2 -#assert scalar_div 7 (-3) = -2 -#assert scalar_div (-7) (-3) = 2 - -def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero - -def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - Scalar.tryMk ty (x.val + y.val) - -def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - Scalar.tryMk ty (x.val - y.val) - -def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - Scalar.tryMk ty (x.val * y.val) - --- TODO: instances of +, -, * etc. for scalars - --- Cast an integer from a [src_ty] to a [tgt_ty] --- TODO: check the semantics of casts in Rust -def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) := - Scalar.tryMk tgt_ty x.val - --- The scalar types --- We declare the definitions as reducible so that Lean can unfold them (useful --- for type class resolution for instance). -@[reducible] def Isize := Scalar .Isize -@[reducible] def I8 := Scalar .I8 -@[reducible] def I16 := Scalar .I16 -@[reducible] def I32 := Scalar .I32 -@[reducible] def I64 := Scalar .I64 -@[reducible] def I128 := Scalar .I128 -@[reducible] def Usize := Scalar .Usize -@[reducible] def U8 := Scalar .U8 -@[reducible] def U16 := Scalar .U16 -@[reducible] def U32 := Scalar .U32 -@[reducible] def U64 := Scalar .U64 -@[reducible] def U128 := Scalar .U128 - --- TODO: below: not sure this is the best way. --- Should we rather overload operations like +, -, etc.? --- Also, it is possible to automate the generation of those definitions --- with macros (but would it be a good idea? It would be less easy to --- read the file, which is not supposed to change a lot) - --- Negation - -/-- -Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce -one here. - -The notation typeclass for heterogeneous addition. -This enables the notation `- a : β` where `a : α`. --/ -class HNeg (α : Type u) (β : outParam (Type v)) where - /-- `- a` computes the negation of `a`. - The meaning of this notation is type-dependent. 
-/ - hNeg : α → β - -prefix:75 "-" => HNeg.hNeg - -instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x -instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x -instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x -instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x -instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x -instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x - --- Addition -instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where - hAdd x y := Scalar.add x y - --- Substraction -instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where - hSub x y := Scalar.sub x y - --- Multiplication -instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where - hMul x y := Scalar.mul x y - --- Division -instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where - hDiv x y := Scalar.div x y - --- Remainder -instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where - hMod x y := Scalar.rem x y - --- ofIntCore --- TODO: typeclass? -def Isize.ofIntCore := @Scalar.ofIntCore .Isize -def I8.ofIntCore := @Scalar.ofIntCore .I8 -def I16.ofIntCore := @Scalar.ofIntCore .I16 -def I32.ofIntCore := @Scalar.ofIntCore .I32 -def I64.ofIntCore := @Scalar.ofIntCore .I64 -def I128.ofIntCore := @Scalar.ofIntCore .I128 -def Usize.ofIntCore := @Scalar.ofIntCore .Usize -def U8.ofIntCore := @Scalar.ofIntCore .U8 -def U16.ofIntCore := @Scalar.ofIntCore .U16 -def U32.ofIntCore := @Scalar.ofIntCore .U32 -def U64.ofIntCore := @Scalar.ofIntCore .U64 -def U128.ofIntCore := @Scalar.ofIntCore .U128 - --- ofInt --- TODO: typeclass? -def Isize.ofInt := @Scalar.ofInt .Isize -def I8.ofInt := @Scalar.ofInt .I8 -def I16.ofInt := @Scalar.ofInt .I16 -def I32.ofInt := @Scalar.ofInt .I32 -def I64.ofInt := @Scalar.ofInt .I64 -def I128.ofInt := @Scalar.ofInt .I128 -def Usize.ofInt := @Scalar.ofInt .Usize -def U8.ofInt := @Scalar.ofInt .U8 -def U16.ofInt := @Scalar.ofInt .U16 -def U32.ofInt := @Scalar.ofInt .U32 -def U64.ofInt := @Scalar.ofInt .U64 -def U128.ofInt := @Scalar.ofInt .U128 - --- Comparisons -instance {ty} : LT (Scalar ty) where - lt a b := LT.lt a.val b.val - -instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val - -instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt .. -instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe .. - -theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j - | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl - -theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val := - h ▸ rfl - -theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) := - fun h' => absurd (val_eq_of_eq h') h - -instance (ty : ScalarTy) : DecidableEq (Scalar ty) := - fun i j => - match decEq i.val j.val with - | isTrue h => isTrue (Scalar.eq_of_val_eq h) - | isFalse h => isFalse (Scalar.ne_of_val_ne h) - -def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val - --- Tactic to prove that integers are in bounds -syntax "intlit" : tactic - -macro_rules - | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide) - --- -- We now define a type class that subsumes the various machine integer types, so --- -- as to write a concise definition for scalar_cast, rather than exhaustively --- -- enumerating all of the possible pairs. We remark that Rust has sane semantics --- -- and fails if a cast operation would involve a truncation or modulo. 
- --- class MachineInteger (t: Type) where --- size: Nat --- val: t -> Fin size --- ofNatCore: (n:Nat) -> LT.lt n size -> t - --- set_option hygiene false in --- run_cmd --- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do --- Lean.Elab.Command.elabCommand (← `( --- namespace $typeName --- instance: MachineInteger $typeName where --- size := size --- val := val --- ofNatCore := ofNatCore --- end $typeName --- )) - --- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on --- -- Lean to infer `src`. - --- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst := --- if h: MachineInteger.val x < MachineInteger.size dst then --- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h) --- else --- .fail integerOverflow - -------------- --- VECTORS -- -------------- - -def Vec (α : Type u) := { l : List α // List.length l ≤ Usize.max } - -def vec_new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ - -def vec_len (α : Type u) (v : Vec α) : Usize := - let ⟨ v, l ⟩ := v - Usize.ofIntCore (List.length v) (by simp [Scalar.min]) l - -def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () - -def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α) - := - let nlen := List.length v.val + 1 - if h : nlen ≤ U32.max || nlen ≤ Usize.max then - have h : nlen ≤ Usize.max := by - simp at * - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> - simp [*] at * <;> - try assumption - cases h <;> - linarith - return ⟨ List.concat v.val x, by simp at *; assumption ⟩ - else - fail maximumSizeExceeded - -def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := - if i.val < List.length v.val then - .ret () - else - .fail arrayOutOfBounds - -def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := - if i.val < List.length v.val then - -- TODO: maybe we should redefine a list library which uses integers - -- (instead of natural numbers) - let i := i.val.toNat - .ret ⟨ List.set v.val i x, by - have h: List.length v.val ≤ Usize.max := v.property - simp [*] at * - assumption - ⟩ - else - .fail arrayOutOfBounds - -def vec_index_to_fin {α : Type u} {v: Vec α} {i: Usize} (h : i.val < List.length v.val) : - Fin (List.length v.val) := - let j := i.val.toNat - let h: j < List.length v.val := by - have heq := @Int.toNat_lt (List.length v.val) i.val i.hmin - apply heq.mpr - assumption - ⟨j, h⟩ - -def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α := - if h: i.val < List.length v.val then - let i := vec_index_to_fin h - .ret (List.get v.val i) - else - .fail arrayOutOfBounds - -def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := - if i.val < List.length v.val then - .ret () - else - .fail arrayOutOfBounds - -def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α := - if h: i.val < List.length v.val then - let i := vec_index_to_fin h - .ret (List.get v.val i) - else - .fail arrayOutOfBounds - -def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := - if h: i.val < List.length v.val then - let i := vec_index_to_fin h - .ret ⟨ List.set v.val i x, by - have h: List.length v.val ≤ Usize.max := v.property - simp [*] at * - assumption - ⟩ - else - .fail arrayOutOfBounds - ----------- --- MISC -- ----------- - -def mem_replace_fwd (a : Type) (x : a) (_ : a) : a := - x - -def mem_replace_back (a : Type) (_ : a) (y : a) : a := - y - -/-- 
Aeneas-translated function -- useful to reduce non-recursive definitions. - Use with `simp [ aeneas ]` -/ -register_simp_attr aeneas diff --git a/backends/lean/lake-manifest.json b/backends/lean/lake-manifest.json index 7b23fc19..e5d362fc 100644 --- a/backends/lean/lake-manifest.json +++ b/backends/lean/lake-manifest.json @@ -4,24 +4,24 @@ [{"git": {"url": "https://github.com/leanprover-community/mathlib4.git", "subDir?": null, - "rev": "f89ee53085b8aad0bacd3bc94d7ef4b8d9aba643", + "rev": "cdb1b898e4317567699181f27533182046ebc544", "name": "mathlib", "inputRev?": null}}, {"git": {"url": "https://github.com/gebner/quote4", "subDir?": null, - "rev": "2412c4fdf4a8b689f4467618e5e7b371ae5014aa", + "rev": "c71f94e34c1cda52eef5c93dc9da409ab2727420", "name": "Qq", "inputRev?": "master"}}, {"git": {"url": "https://github.com/JLimperg/aesop", "subDir?": null, - "rev": "7fe9ecd9339b0e1796e89d243b776849c305c690", + "rev": "ca73109cc40837bc61df8024c9016da4b4f99d4c", "name": "aesop", "inputRev?": "master"}}, {"git": {"url": "https://github.com/leanprover/std4", "subDir?": null, - "rev": "24897887905b3a1254b244369f5dd2cf6174b0ee", + "rev": "6932c4ea52914dc6b0488944e367459ddc4d01a6", "name": "std", "inputRev?": "main"}}]} diff --git a/backends/lean/lakefile.lean b/backends/lean/lakefile.lean index c5e27d1c..21a4a332 100644 --- a/backends/lean/lakefile.lean +++ b/backends/lean/lakefile.lean @@ -8,4 +8,4 @@ require mathlib from git package «base» {} @[default_target] -lean_lib «Primitives» {} +lean_lib «Base» {} -- cgit v1.2.3 From c034a7ea1335705ca1e1a7461fac257df6757d57 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Fri, 9 Jun 2023 16:07:39 +0200 Subject: Start working on extrinsic proofs of termination --- backends/lean/Base.lean | 1 + backends/lean/Base/Diverge.lean | 208 +++++++++++++++++++++++++++++++++++++ backends/lean/Base/Primitives.lean | 29 ++++-- backends/lean/lean-toolchain | 1 + 4 files changed, 232 insertions(+), 7 deletions(-) create mode 100644 backends/lean/Base/Diverge.lean create mode 100644 backends/lean/lean-toolchain (limited to 'backends') diff --git a/backends/lean/Base.lean b/backends/lean/Base.lean index 960b2bb5..92e87e6c 100644 --- a/backends/lean/Base.lean +++ b/backends/lean/Base.lean @@ -1 +1,2 @@ import Base.Primitives +import Base.Diverge diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean new file mode 100644 index 00000000..bd500c25 --- /dev/null +++ b/backends/lean/Base/Diverge.lean @@ -0,0 +1,208 @@ +import Lean +import Base.Primitives + +namespace Diverge + +open Primitives + +section Fix + +open Result + +variable {a b : Type} + +/-! # The least fixed point definition and its properties -/ + +def least_p (p : Nat → Prop) (n : Nat) : Prop := p n ∧ (∀ m, m < n → ¬ p m) +noncomputable def least (p : Nat → Prop) : Nat := + Classical.epsilon (least_p p) + +-- Auxiliary theorem for [least_spec]: if there exists an `n` satisfying `p`, +-- there there exists a least `m` satisfying `p`. +theorem least_spec_aux (p : Nat → Prop) : ∀ (n : Nat), (hn : p n) → ∃ m, least_p p m := by + apply Nat.strongRec' + intros n hi hn + -- Case disjunction on: is n the smallest n satisfying p? + match Classical.em (∀ m, m < n → ¬ p m) with + | .inl hlt => + -- Yes: trivial + exists n + | .inr hlt => + simp at * + let ⟨ m, ⟨ hmlt, hm ⟩ ⟩ := hlt + have hi := hi m hmlt hm + apply hi + +-- The specification of [least]: either `p` is never satisfied, or it is satisfied +-- by `least p` and no `n < least p` satisfies `p`. 
+theorem least_spec (p : Nat → Prop) : (∀ n, ¬ p n) ∨ (p (least p) ∧ ∀ n, n < least p → ¬ p n) := by + -- Case disjunction on the existence of an `n` which satisfies `p` + match Classical.em (∀ n, ¬ p n) with + | .inl h => + -- There doesn't exist: trivial + apply (Or.inl h) + | .inr h => + -- There exists: we simply use `least_spec_aux` in combination with the property + -- of the epsilon operator + simp at * + let ⟨ n, hn ⟩ := h + apply Or.inr + have hl := least_spec_aux p n hn + have he := Classical.epsilon_spec hl + apply he + +/-! # The fixed point definitions -/ + +def fix_fuel (n : Nat) (f : (a → Result b) → a → Result b) (x : a) : Result b := + match n with + | 0 => .div + | n + 1 => + f (fix_fuel n f) x + +@[simp] def fix_fuel_pred (f : (a → Result b) → a → Result b) (x : a) (n : Nat) := + not (div? (fix_fuel n f x)) + +def fix_fuel_P (f : (a → Result b) → a → Result b) (x : a) (n : Nat) : Prop := + fix_fuel_pred f x n + +noncomputable def fix (f : (a → Result b) → a → Result b) (x : a) : Result b := + fix_fuel (least (fix_fuel_P f x)) f x + +/-! # The proof of the fixed point equation -/ + +-- Monotonicity relation over results +-- TODO: generalize +def result_rel {a : Type u} (x1 x2 : Result a) : Prop := + match x1 with + | div => True + | fail _ => x2 = x1 + | ret _ => x2 = x1 -- TODO: generalize + +-- Monotonicity relation over monadic arrows +-- TODO: generalize +def marrow_rel (f g : a → Result b) : Prop := + ∀ x, result_rel (f x) (g x) + +-- Validity property for a body +def is_valid (f : (a → Result b) → a → Result b) : Prop := + ∀ {{g h}}, marrow_rel g h → marrow_rel (f g) (f h) + +/- + + -/ + +theorem fix_fuel_mono {f : (a → Result b) → a → Result b} (Hvalid : is_valid f) : + ∀ {{n m}}, n ≤ m → marrow_rel (fix_fuel n f) (fix_fuel m f) := by + intros n + induction n + case zero => simp [marrow_rel, fix_fuel, result_rel] + case succ n1 Hi => + intros m Hle x + simp [result_rel] + match m with + | 0 => + exfalso + -- TODO: annoying to do those conversions by hand - try zify? + have : n1 + 1 ≤ (0 : Int) := by simp [*] at * + have : 0 ≤ n1 := by simp [*] at * + linarith + | Nat.succ m1 => + simp_arith at Hle + simp [fix_fuel] + have Hi := Hi Hle + simp [is_valid] at Hvalid + have Hvalid := Hvalid Hi x + simp [result_rel] at Hvalid + apply Hvalid + +@[simp] theorem neg_fix_fuel_P {f : (a → Result b) → a → Result b} {x : a} {n : Nat} : + ¬ fix_fuel_P f x n ↔ (fix_fuel n f x = div) := by + simp [fix_fuel_P, div?] + cases fix_fuel n f x <;> simp + +theorem fix_fuel_fix_mono {f : (a → Result b) → a → Result b} (Hvalid : is_valid f) : + ∀ n, marrow_rel (fix_fuel n f) (fix f) := by + intros n x + simp [result_rel] + have Hl := least_spec (fix_fuel_P f x) + simp at Hl + match Hl with + | .inl Hl => simp [*] + | .inr ⟨ Hl, Hn ⟩ => + match Classical.em (fix_fuel n f x = div) with + | .inl Hd => + simp [*] + | .inr Hd => + have Hineq : least (fix_fuel_P f x) ≤ n := by + -- Proof by contradiction + cases Classical.em (least (fix_fuel_P f x) ≤ n) <;> simp [*] + simp at * + rename_i Hineq + have Hn := Hn n Hineq + contradiction + have Hfix : ¬ (fix f x = div) := by + simp [fix] + -- By property of the least upper bound + revert Hd Hl + -- TODO: there is no conversion to select the head of a function! + have : fix_fuel_P f x (least (fix_fuel_P f x)) = fix_fuel_pred f x (least (fix_fuel_P f x)) := + by simp[fix_fuel_P] + simp [this, div?] 
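+        -- The auxiliary equality has rephrased the goal in terms of `div?`; we can
+        -- now discard it and conclude by case analysis on the result of `fix_fuel`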
+ clear this + cases fix_fuel (least (fix_fuel_P f x)) f x <;> simp + have Hmono := fix_fuel_mono Hvalid Hineq x + simp [result_rel] at Hmono + -- TODO: there is no conversion to select the head of a function! + revert Hmono Hfix Hd + simp [fix] + -- TODO: it would be good if cases actually introduces an equation: this + -- way we wouldn't have to do all the book-keeping + cases fix_fuel (least (fix_fuel_P f x)) f x <;> cases fix_fuel n f x <;> + intros <;> simp [*] at * + +theorem fix_fuel_P_least {f : (a → Result b) → a → Result b} (Hvalid : is_valid f) : + ∀ {{x n}}, fix_fuel_P f x n → fix_fuel_P f x (least (fix_fuel_P f x)) := by sorry + +theorem fix_fixed_eq (f : (a → Result b) → a → Result b) (Hvalid : is_valid f) : + ∀ x, fix f x = f (fix f) x := by + intros x + -- conv => lhs; simp [fix] + -- Case disjunction: is there a fuel such that the execution successfully execute? + match Classical.em (∃ n, fix_fuel_P f x n) with + | .inr He => + -- No fuel: the fixed point evaluates to `div` + --simp [fix] at * + simp at * + simp [fix] + have He := He (Nat.succ (least (fix_fuel_P f x))) + simp [*, fix_fuel] at * + -- Use the monotonicity of `f` + have Hmono := fix_fuel_fix_mono Hvalid (least (fix_fuel_P f x)) x + simp [result_rel] at Hmono + simp [*] at * + -- TODO: we need a stronger validity predicate + sorry + | .inl ⟨ n, He ⟩ => + have Hl := fix_fuel_P_least Hvalid He + -- TODO: better control of simplification + have Heq : fix_fuel_P f x (least (fix_fuel_P f x)) = fix_fuel_pred f x (least (fix_fuel_P f x)) := + by simp [fix_fuel_P] + simp [Heq] at Hl; clear Heq + -- The least upper bound is > 0 + have ⟨ n, Hsucc ⟩ : ∃ n, least (fix_fuel_P f x) = Nat.succ n := by sorry + simp [Hsucc] at Hl + revert Hl + simp [*, div?, fix, fix_fuel] + -- Use the monotonicity + have Hineq : n ≤ Nat.succ n := by sorry + have Hmono := fix_fuel_fix_mono Hvalid n + have Hv := Hvalid Hmono x + -- Use functional extensionality + simp [result_rel, fix] at Hv + revert Hv + split <;> simp [*] <;> intros <;> simp [*] + + +end Fix + +end Diverge diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index d3de1d10..85e088fc 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -4,6 +4,8 @@ import Init.Data.List.Basic import Mathlib.Tactic.RunCmd import Mathlib.Tactic.Linarith +namespace Primitives + -------------------- -- ASSERT COMMAND --Std. -------------------- @@ -46,6 +48,7 @@ open Error inductive Result (α : Type u) where | ret (v: α): Result α | fail (e: Error): Result α + | div deriving Repr, BEq open Result @@ -53,20 +56,28 @@ open Result instance Result_Inhabited (α : Type u) : Inhabited (Result α) := Inhabited.mk (fail panic) +instance Result_Nonempty (α : Type u) : Nonempty (Result α) := + Nonempty.intro div + /- HELPERS -/ def ret? {α: Type} (r: Result α): Bool := match r with - | Result.ret _ => true - | Result.fail _ => false + | ret _ => true + | fail _ | div => false + +def div? {α: Type} (r: Result α): Bool := + match r with + | div => true + | ret _ | fail _ => false def massert (b:Bool) : Result Unit := - if b then .ret () else fail assertionFailure + if b then ret () else fail assertionFailure def eval_global {α: Type} (x: Result α) (_: ret? 
x): α := match x with - | Result.fail _ => by contradiction - | Result.ret x => x + | fail _ | div => by contradiction + | ret x => x /- DO-DSL SUPPORT -/ @@ -74,6 +85,7 @@ def bind (x: Result α) (f: α -> Result β) : Result β := match x with | ret v => f v | fail v => fail v + | div => div -- Allows using Result in do-blocks instance : Bind Result where @@ -92,8 +104,9 @@ instance : Pure Result where def Result.attach {α: Type} (o : Result α): Result { x : α // o = ret x } := match o with - | .ret x => .ret ⟨x, rfl⟩ - | .fail e => .fail e + | ret x => ret ⟨x, rfl⟩ + | fail e => fail e + | div => div macro "let" e:term " ⟵ " f:term : doElem => `(doElem| let ⟨$e, h⟩ ← Result.attach $f) @@ -648,3 +661,5 @@ def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec /-- Aeneas-translated function -- useful to reduce non-recursive definitions. Use with `simp [ aeneas ]` -/ register_simp_attr aeneas + +end Primitives diff --git a/backends/lean/lean-toolchain b/backends/lean/lean-toolchain new file mode 100644 index 00000000..1211e372 --- /dev/null +++ b/backends/lean/lean-toolchain @@ -0,0 +1 @@ +leanprover/lean4:nightly-2023-05-31 \ No newline at end of file -- cgit v1.2.3 From ef6204e1e1b0a21975fcd9e3d0e5aa7ec3d9125f Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 13 Jun 2023 16:22:08 +0200 Subject: Find sufficient validity criteria for Diverge.lean --- backends/lean/Base/Diverge.lean | 350 +++++++++++++++++++++++++++++++++++----- 1 file changed, 309 insertions(+), 41 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index bd500c25..b5264d0d 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -5,7 +5,7 @@ namespace Diverge open Primitives -section Fix +namespace Fix open Result @@ -79,19 +79,32 @@ def result_rel {a : Type u} (x1 x2 : Result a) : Prop := | ret _ => x2 = x1 -- TODO: generalize -- Monotonicity relation over monadic arrows +-- TODO: Kleisli arrow -- TODO: generalize def marrow_rel (f g : a → Result b) : Prop := ∀ x, result_rel (f x) (g x) --- Validity property for a body -def is_valid (f : (a → Result b) → a → Result b) : Prop := +-- Monotonicity property +def is_mono (f : (a → Result b) → a → Result b) : Prop := ∀ {{g h}}, marrow_rel g h → marrow_rel (f g) (f h) +-- "Continuity" property. +-- We need this, and this looks a lot like continuity. Also see this paper: +-- https://inria.hal.science/file/index/docid/216187/filename/tarski.pdf +def is_cont (f : (a → Result b) → a → Result b) : Prop := + ∀ x, (Hdiv : ∀ n, fix_fuel (.succ n) f x = div) → f (fix f) x = div + +-- Validity property for a body +structure is_valid (f : (a → Result b) → a → Result b) := + intro:: + hmono : is_mono f + hcont : is_cont f + /- -/ -theorem fix_fuel_mono {f : (a → Result b) → a → Result b} (Hvalid : is_valid f) : +theorem fix_fuel_mono {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : ∀ {{n m}}, n ≤ m → marrow_rel (fix_fuel n f) (fix_fuel m f) := by intros n induction n @@ -110,17 +123,16 @@ theorem fix_fuel_mono {f : (a → Result b) → a → Result b} (Hvalid : is_val simp_arith at Hle simp [fix_fuel] have Hi := Hi Hle - simp [is_valid] at Hvalid - have Hvalid := Hvalid Hi x - simp [result_rel] at Hvalid - apply Hvalid + have Hmono := Hmono Hi x + simp [result_rel] at Hmono + apply Hmono @[simp] theorem neg_fix_fuel_P {f : (a → Result b) → a → Result b} {x : a} {n : Nat} : ¬ fix_fuel_P f x n ↔ (fix_fuel n f x = div) := by simp [fix_fuel_P, div?] 
cases fix_fuel n f x <;> simp -theorem fix_fuel_fix_mono {f : (a → Result b) → a → Result b} (Hvalid : is_valid f) : +theorem fix_fuel_fix_mono {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : ∀ n, marrow_rel (fix_fuel n f) (fix f) := by intros n x simp [result_rel] @@ -150,7 +162,7 @@ theorem fix_fuel_fix_mono {f : (a → Result b) → a → Result b} (Hvalid : is simp [this, div?] clear this cases fix_fuel (least (fix_fuel_P f x)) f x <;> simp - have Hmono := fix_fuel_mono Hvalid Hineq x + have Hmono := fix_fuel_mono Hmono Hineq x simp [result_rel] at Hmono -- TODO: there is no conversion to select the head of a function! revert Hmono Hfix Hd @@ -160,9 +172,42 @@ theorem fix_fuel_fix_mono {f : (a → Result b) → a → Result b} (Hvalid : is cases fix_fuel (least (fix_fuel_P f x)) f x <;> cases fix_fuel n f x <;> intros <;> simp [*] at * -theorem fix_fuel_P_least {f : (a → Result b) → a → Result b} (Hvalid : is_valid f) : - ∀ {{x n}}, fix_fuel_P f x n → fix_fuel_P f x (least (fix_fuel_P f x)) := by sorry +theorem fix_fuel_P_least {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : + ∀ {{x n}}, fix_fuel_P f x n → fix_fuel_P f x (least (fix_fuel_P f x)) := by + intros x n Hf + have Hfmono := fix_fuel_fix_mono Hmono n x + revert Hf Hfmono + -- TODO: would be good to be able to unfold fix_fuel_P only on the left + simp [fix_fuel_P, div?, result_rel, fix] + cases fix_fuel n f x <;> simp_all +-- Prove the fixed point equation in the case there exists some fuel for which +-- the execution terminates +theorem fix_fixed_eq_terminates (f : (a → Result b) → a → Result b) (Hmono : is_mono f) + (x : a) (n : Nat) (He : fix_fuel_P f x n) : + fix f x = f (fix f) x := by + have Hl := fix_fuel_P_least Hmono He + -- TODO: better control of simplification + have Heq : fix_fuel_P f x (least (fix_fuel_P f x)) = fix_fuel_pred f x (least (fix_fuel_P f x)) := + by simp [fix_fuel_P] + simp [Heq] at Hl; clear Heq + -- The least upper bound is > 0 + have ⟨ n, Hsucc ⟩ : ∃ n, least (fix_fuel_P f x) = Nat.succ n := by + revert Hl + simp [div?] 
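+      -- If the least fuel were `0` we would have `fix_fuel 0 f x = div`,
+      -- contradicting `Hl`, so it must be a successor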
+ cases least (fix_fuel_P f x) <;> simp [fix_fuel] + simp [Hsucc] at Hl + revert Hl + simp [*, div?, fix, fix_fuel] + -- Use the monotonicity + have Hfixmono := fix_fuel_fix_mono Hmono n + have Hvm := Hmono Hfixmono x + -- Use functional extensionality + simp [result_rel, fix] at Hvm + revert Hvm + split <;> simp [*] <;> intros <;> simp [*] + +-- The final fixed point equation theorem fix_fixed_eq (f : (a → Result b) → a → Result b) (Hvalid : is_valid f) : ∀ x, fix f x = f (fix f) x := by intros x @@ -173,36 +218,259 @@ theorem fix_fixed_eq (f : (a → Result b) → a → Result b) (Hvalid : is_vali -- No fuel: the fixed point evaluates to `div` --simp [fix] at * simp at * - simp [fix] - have He := He (Nat.succ (least (fix_fuel_P f x))) - simp [*, fix_fuel] at * - -- Use the monotonicity of `f` - have Hmono := fix_fuel_fix_mono Hvalid (least (fix_fuel_P f x)) x - simp [result_rel] at Hmono - simp [*] at * - -- TODO: we need a stronger validity predicate - sorry - | .inl ⟨ n, He ⟩ => - have Hl := fix_fuel_P_least Hvalid He - -- TODO: better control of simplification - have Heq : fix_fuel_P f x (least (fix_fuel_P f x)) = fix_fuel_pred f x (least (fix_fuel_P f x)) := - by simp [fix_fuel_P] - simp [Heq] at Hl; clear Heq - -- The least upper bound is > 0 - have ⟨ n, Hsucc ⟩ : ∃ n, least (fix_fuel_P f x) = Nat.succ n := by sorry - simp [Hsucc] at Hl - revert Hl - simp [*, div?, fix, fix_fuel] - -- Use the monotonicity - have Hineq : n ≤ Nat.succ n := by sorry - have Hmono := fix_fuel_fix_mono Hvalid n - have Hv := Hvalid Hmono x - -- Use functional extensionality - simp [result_rel, fix] at Hv - revert Hv - split <;> simp [*] <;> intros <;> simp [*] - + conv => lhs; simp [fix] + have Hel := He (Nat.succ (least (fix_fuel_P f x))); simp [*, fix_fuel] at *; clear Hel + -- Use the "continuity" of `f` + have He : ∀ n, fix_fuel (.succ n) f x = div := by intros; simp [*] + have Hcont := Hvalid.hcont x He + simp [Hcont] + | .inl ⟨ n, He ⟩ => apply fix_fixed_eq_terminates f Hvalid.hmono x n He +/- +(∀ n, fix_fuel n f x = div) + +⊢ f (fun y => fix_fuel (least (fix_fuel_P f y)) f y) x = div + +(? x. p x) ==> p (epsilon p) + + +P (nf : a -> option Nat) := + match nf x with + | None => forall n, fix_fuel n f x = div + | Some n => fix_fuel n f x <> div + +TODO: theorem de Tarsky, +Gilles Dowek (Introduction à la théorie des langages de programmation) + +fix_f is f s.t.: f x = f (fix f) x ∧ ! g. 
g x = g (fix g) x ==> f <= g + +-/ + end Fix +namespace Ex1 + /- An example of use of the fixed-point -/ + open Fix + + variable {a : Type} (f : (List a × Int) → Result a) + + def list_nth_body (x : (List a × Int)) : Result a := + let (ls, i) := x + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else f (tl, i - 1) + + theorem list_nth_body_mono : is_mono (@list_nth_body a) := by + simp [is_mono]; intro g h Hr (ls, i); simp [result_rel, list_nth_body] + cases ls <;> simp + rename_i hd tl + -- TODO: making a case disjunction over `i = 0` is annoying, we need a more + -- general tactic for this + cases (Classical.em (Eq i 0)) <;> simp [*] at * + apply Hr + + theorem list_nth_body_cont : is_cont (@list_nth_body a) := by + rw [is_cont]; intro (ls, i) Hdiv; simp [list_nth_body, fix_fuel] at * + cases ls <;> simp at * + -- TODO: automate this + cases (Classical.em (Eq i 0)) <;> simp [*] at * + -- Recursive call + apply Hdiv + + noncomputable + def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) + + theorem list_nth_eq (ls : List a) (i : Int) : + list_nth ls i = + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else list_nth tl (i - 1) + := by + have Hvalid : is_valid (@list_nth_body a) := + is_valid.intro list_nth_body_mono list_nth_body_cont + have Heq := fix_fixed_eq (@list_nth_body a) Hvalid + simp [Heq, list_nth] + conv => lhs; rw [list_nth_body] + simp [Heq] + +end Ex1 + +namespace Ex2 + /- Higher-order example -/ + open Fix + + variable {a b : Type} + + /- An auxiliary function, which doesn't require the fixed-point -/ + def map (f : a → Result b) (ls : List a) : Result (List b) := + match ls with + | [] => .ret [] + | hd :: tl => + do + match f hd with + | .ret hd => + match map f tl with + | .ret tl => + .ret (hd :: tl) + | r => r + | .fail e => .fail e + | .div => .div + + theorem map_is_mono {{f g : a → Result b}} (Hr : marrow_rel f g) : + ∀ ls, result_rel (map f ls) (map g ls) := by + intro ls; induction ls <;> simp [result_rel, map] + case cons hd tl Hi => + have Hr1 := Hr hd; simp [result_rel] at Hr1 + -- TODO: reverting is annoying + revert Hr1 + cases f hd <;> intro Hr1 <;> simp [*] + -- ret case + simp [result_rel] at Hi + -- TODO: annoying + revert Hi + cases map f tl <;> intro Hi <;> simp [*] + + -- Auxiliary definition + def map_fix_fuel (n0 n1 : Nat) (f : (a → Result b) → a → Result b) (ls : List a) : Result (List b) := + match ls with + | [] => .ret [] + | hd :: tl => + do + match fix_fuel n0 f hd with + | .ret hd => + match map (fix_fuel n1 f) tl with + | .ret tl => + .ret (hd :: tl) + | r => r + | .fail e => .fail e + | .div => .div + + def exists_map_fix_fuel_not_div_imp {{f : (a → Result b) → a → Result b}} {{ls : List a}} + (Hmono : is_mono f) : + (∃ n0 n1, map_fix_fuel n0 n1 f ls ≠ .div) → + ∃ n2, map (fix_fuel n2 f) ls ≠ .div := by + intro ⟨ n0, n1, Hnd ⟩ + exists n0 + n1 + have Hineq0 : n0 ≤ n0 + n1 := by linarith + have Hineq1 : n1 ≤ n0 + n1 := by linarith + simp [map_fix_fuel] at Hnd + -- TODO: I would like a rewrite_once tactic + unfold map; simp + -- + revert Hnd + cases ls <;> simp + rename_i hd tl + -- Use the monotonicity of fix_fuel + have Hfmono := fix_fuel_mono Hmono Hineq0 hd + simp [result_rel] at Hfmono; revert Hfmono + cases fix_fuel n0 f hd <;> intro <;> simp [*] + -- Use the monotonicity of map + have Hfmono := fix_fuel_mono Hmono Hineq1 + have Hmmono := map_is_mono Hfmono tl + simp [result_rel] at Hmmono; revert Hmmono + cases map (fix_fuel n1 f) tl <;> intro <;> 
simp [*] + + -- TODO: it is simpler to prove the contrapositive of is_cont than is_cont itself. + -- The proof is still quite technical: think of a criteria whose proof is simpler + -- to automate. + theorem map_is_cont_contra_aux {{f : (a → Result b) → a → Result b}} (Hmono : is_mono f) : + ∀ ls, map (fix f) ls ≠ .div → + ∃ n0 n1, map_fix_fuel n0 n1 f ls ≠ .div + := by + intro ls; induction ls <;> simp [result_rel, map_fix_fuel, map] + simp [fix] + case cons hd tl Hi => + -- Instantiate the first n and do a case disjunction + intro Hf; exists (least (fix_fuel_P f hd)); revert Hf + cases fix_fuel (least (fix_fuel_P f hd)) f hd <;> simp + -- Use the induction hyp + have Hr := Classical.em (map (fix f) tl = .div) + simp [fix] at * + cases Hr <;> simp_all + have Hj : ∃ n2, map (fix_fuel n2 f) tl ≠ .div := exists_map_fix_fuel_not_div_imp Hmono Hi + revert Hj; intro ⟨ n2, Hj ⟩ + intro Hf; exists n2; revert Hf + revert Hj; cases map (fix_fuel n2 f) tl <;> simp_all + + theorem map_is_cont_contra {{f : (a → Result b) → a → Result b}} (Hmono : is_mono f) : + ∀ ls, map (fix f) ls ≠ .div → + ∃ n, map (fix_fuel n f) ls ≠ .div + := by + intro ls Hf + have Hc := map_is_cont_contra_aux Hmono ls Hf + apply exists_map_fix_fuel_not_div_imp <;> assumption + + theorem map_is_cont {{f : (a → Result b) → a → Result b}} (Hmono : is_mono f) : + ∀ ls, (Hc : ∀ n, map (fix_fuel n f) ls = .div) → + map (fix f) ls = .div + := by + intro ls Hc + -- TODO: is there a tactic for proofs by contraposition? + apply Classical.byContradiction; intro Hndiv + let ⟨ n, Hcc ⟩ := map_is_cont_contra Hmono ls Hndiv + simp_all + + /- An example which uses map -/ + inductive Tree (a : Type) := + | leaf (x : a) + | node (tl : List (Tree a)) + + def id_body (f : Tree a → Result (Tree a)) (t : Tree a) : Result (Tree a) := + match t with + | .leaf x => .ret (.leaf x) + | .node tl => + match map f tl with + | .div => .div + | .fail e => .fail e + | .ret tl => .ret (.node tl) + + theorem id_body_mono : is_mono (@id_body a) := by + simp [is_mono]; intro g h Hr t; simp [result_rel, id_body] + cases t <;> simp + rename_i tl + have Hmmono := map_is_mono Hr tl + revert Hmmono; simp [result_rel] + cases map g tl <;> simp_all + + theorem id_body_cont : is_cont (@id_body a) := by + rw [is_cont]; intro t Hdiv + simp [fix_fuel] at * + -- TODO: weird things are happening with the rewriter and the simplifier here + rw [id_body] + simp [id_body] at Hdiv + -- + cases t <;> simp at * + rename_i tl + -- TODO: automate this + have Hmc := map_is_cont id_body_mono tl + have Hdiv : ∀ (n : ℕ), map (fix_fuel n id_body) tl = Result.div := by + intro n + have Hdiv := Hdiv n; revert Hdiv + cases map (fix_fuel n id_body) tl <;> simp_all + have Hmc := Hmc Hdiv; revert Hmc + cases map (fix id_body) tl <;> simp_all + + noncomputable def id (t : Tree a) := fix id_body t + + theorem id_eq (t : Tree a) : + id t = + match t with + | .leaf x => .ret (.leaf x) + | .node tl => + match map id tl with + | .div => .div + | .fail e => .fail e + | .ret tl => .ret (.node tl) + := by + have Hvalid : is_valid (@id_body a) := + is_valid.intro id_body_mono id_body_cont + have Heq := fix_fixed_eq (@id_body a) Hvalid + conv => lhs; rw [id, Heq, id_body] + +end Ex2 + end Diverge -- cgit v1.2.3 From ccc97b46c166a45255096d3fec2444c90f7c5aaa Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 14 Jun 2023 11:24:58 +0200 Subject: Make minor modifications --- backends/lean/Base/Diverge.lean | 31 +++++++++++++++++++++++++------ backends/lean/Base/Primitives.lean | 1 + 2 files changed, 26 
insertions(+), 6 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index b5264d0d..37d8eb27 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -11,6 +11,27 @@ open Result variable {a b : Type} +/- +TODO: +- we want an easier to use cases: + - keeps in the goal an equation of the shape: `t = case` + - if called on Prop terms, uses Classical.em + Actually, the cases from mathlib seems already quite powerful + (https://leanprover-community.github.io/mathlib_docs/tactics.html#cases) + For instance: cases h : e + Also: cases_matching +- better split tactic +- we need conversions to operate on the head of applications. + Actually, something like this works: + ``` + conv at Hl => + apply congr_fun + simp [fix_fuel_P] + ``` + Maybe we need a rpt ... ; focus? +- simplifier/rewriter have a strange behavior sometimes +-/ + /-! # The least fixed point definition and its properties -/ def least_p (p : Nat → Prop) (n : Nat) : Prop := p n ∧ (∀ m, m < n → ¬ p m) @@ -115,9 +136,7 @@ theorem fix_fuel_mono {f : (a → Result b) → a → Result b} (Hmono : is_mono match m with | 0 => exfalso - -- TODO: annoying to do those conversions by hand - try zify? - have : n1 + 1 ≤ (0 : Int) := by simp [*] at * - have : 0 ≤ n1 := by simp [*] at * + zify at * linarith | Nat.succ m1 => simp_arith at Hle @@ -188,9 +207,9 @@ theorem fix_fixed_eq_terminates (f : (a → Result b) → a → Result b) (Hmono fix f x = f (fix f) x := by have Hl := fix_fuel_P_least Hmono He -- TODO: better control of simplification - have Heq : fix_fuel_P f x (least (fix_fuel_P f x)) = fix_fuel_pred f x (least (fix_fuel_P f x)) := - by simp [fix_fuel_P] - simp [Heq] at Hl; clear Heq + conv at Hl => + apply congr_fun + simp [fix_fuel_P] -- The least upper bound is > 0 have ⟨ n, Hsucc ⟩ : ∃ n, least (fix_fuel_P f x) = Nat.succ n := by revert Hl diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index 85e088fc..6b922143 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -526,6 +526,7 @@ instance (ty : ScalarTy) : DecidableEq (Scalar ty) := def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val -- Tactic to prove that integers are in bounds +-- TODO: use this: https://leanprover.zulipchat.com/#narrow/stream/270676-lean4/topic/instance.20with.20tactic.20autoparam syntax "intlit" : tactic macro_rules -- cgit v1.2.3 From 04cefd3b4f3d2c11cfc3542a5ad6fae31dae4796 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Sun, 18 Jun 2023 19:09:19 +0200 Subject: Make minor modifications --- backends/lean/Base.lean | 1 + backends/lean/Base/Primitives.lean | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base.lean b/backends/lean/Base.lean index 92e87e6c..f6a78bba 100644 --- a/backends/lean/Base.lean +++ b/backends/lean/Base.lean @@ -1,2 +1,3 @@ import Base.Primitives import Base.Diverge +import Base.TestTactics diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index 6b922143..d6cc0bad 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -20,9 +20,8 @@ def assertImpl : CommandElab := fun (_stx: Syntax) => do runTermElabM (fun _ => do let r ← evalTerm Bool (mkConst ``Bool) _stx[1] if not r then - logInfo "Assertion failed for: " - logInfo _stx[1] - logError "Expression reduced to false" + logInfo ("Assertion failed for:\n" ++ _stx[1]) + throwError ("Expression reduced to false:\n" 
++ _stx[1]) pure ()) #eval 2 == 2 -- cgit v1.2.3 From 75f5f8a68b0ce028689c1d880ec99448e6d8dc3a Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 19 Jun 2023 15:03:00 +0200 Subject: Make progress on making the proofs in Diverge more systematic --- backends/lean/Base/Diverge.lean | 260 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 252 insertions(+), 8 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 37d8eb27..0eff17e3 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -1,12 +1,76 @@ import Lean -import Base.Primitives +import Lean.Meta.Tactic.Simp +import Init.Data.List.Basic +import Mathlib.Tactic.RunCmd +import Mathlib.Tactic.Linarith +import Mathlib.Tactic.Tauto namespace Diverge -open Primitives +namespace Primitives + +inductive Error where + | assertionFailure: Error + | integerOverflow: Error + | divisionByZero: Error + | arrayOutOfBounds: Error + | maximumSizeExceeded: Error + | panic: Error +deriving Repr, BEq + +open Error + +inductive Result (α : Type u) where + | ret (v: α): Result α + | fail (e: Error): Result α + | div +deriving Repr, BEq + +open Result + +-- instance Result_Inhabited (α : Type u) : Inhabited (Result α) := +-- Inhabited.mk (fail panic) + +-- instance Result_Nonempty (α : Type u) : Nonempty (Result α) := +-- Nonempty.intro div + +def bind (x: Result α) (f: α -> Result β) : Result β := + match x with + | ret v => f v + | fail v => fail v + | div => div + +@[simp] theorem bind_ret (x : α) (f : α → Result β) : bind (.ret x) f = f x := by simp [bind] +@[simp] theorem bind_fail (x : Error) (f : α → Result β) : bind (.fail x) f = .fail x := by simp [bind] +@[simp] theorem bind_div (f : α → Result β) : bind .div f = .div := by simp [bind] + +-- Allows using Result in do-blocks +instance : Bind Result where + bind := bind + +-- Allows using return x in do-blocks +instance : Pure Result where + pure := fun x => ret x + +@[simp] theorem bind_tc_ret (x : α) (f : α → Result β) : + (do let y ← .ret x; f y) = f x := by simp [Bind.bind, bind] + +@[simp] theorem bind_tc_fail (x : Error) (f : α → Result β) : + (do let y ← fail x; f y) = fail x := by simp [Bind.bind, bind] + +@[simp] theorem bind_tc_div (f : α → Result β) : + (do let y ← div; f y) = div := by simp [Bind.bind, bind] + +def div? 
{α: Type} (r: Result α): Bool := + match r with + | div => true + | ret _ | fail _ => false + +end Primitives namespace Fix +open Primitives open Result variable {a b : Type} @@ -123,7 +187,7 @@ structure is_valid (f : (a → Result b) → a → Result b) := /- - -/ + -/ theorem fix_fuel_mono {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : ∀ {{n m}}, n ≤ m → marrow_rel (fix_fuel n f) (fix_fuel m f) := by @@ -244,7 +308,38 @@ theorem fix_fixed_eq (f : (a → Result b) → a → Result b) (Hvalid : is_vali have Hcont := Hvalid.hcont x He simp [Hcont] | .inl ⟨ n, He ⟩ => apply fix_fixed_eq_terminates f Hvalid.hmono x n He + + /- Making the proofs more systematic -/ + -- TODO: rewrite is_mono in terms of is_mono_p + def is_mono_p (body : (a → Result b) → Result b) : Prop := + ∀ {{g h}}, marrow_rel g h → result_rel (body g) (body h) + + @[simp] theorem is_mono_p_same (x : Result b) : + @is_mono_p a b (λ _ => x) := by + simp [is_mono_p, marrow_rel, result_rel] + split <;> simp + + -- TODO: generalize + @[simp] theorem is_mono_p_tail_rec (x : a) : + @is_mono_p a b (λ f => f x) := by + simp_all [is_mono_p, marrow_rel, result_rel] + + -- TODO: rewrite is_cont in terms of is_cont_p + def is_cont_p (f : (a → Result b) → a → Result b) + (body : (a → Result b) → Result b) : Prop := + (Hc : ∀ n, body (fix_fuel n f) = .div) → + body (fix f) = .div + + @[simp] theorem is_cont_p_same (f : (a → Result b) → a → Result b) (x : Result b) : + is_cont_p f (λ _ => x) := by + simp [is_cont_p] + + -- TODO: generalize + @[simp] theorem is_cont_p_tail_rec (f : (a → Result b) → a → Result b) (x : a) : + is_cont_p f (λ f => f x) := by + simp_all [is_cont_p, fix] + /- (∀ n, fix_fuel n f x = div) @@ -269,7 +364,7 @@ end Fix namespace Ex1 /- An example of use of the fixed-point -/ - open Fix + open Primitives Fix variable {a : Type} (f : (List a × Int) → Result a) @@ -298,6 +393,20 @@ namespace Ex1 -- Recursive call apply Hdiv + /- Making the monotonicity/continuity proofs more systematic -/ + + theorem list_nth_body_mono2 : ∀ x, is_mono_p (λ f => @list_nth_body a f x) := by + intro x + simp [list_nth_body] + split <;> simp + split <;> simp + + theorem list_nth_body_cont2: ∀ f x, is_cont_p f (λ f => @list_nth_body a f x) := by + intro f x + simp [list_nth_body] + split <;> simp + split <;> simp + noncomputable def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) @@ -319,8 +428,142 @@ namespace Ex1 end Ex1 namespace Ex2 + /- Same as Ex1, but we make the body of nth non tail-rec -/ + open Primitives Fix + + variable {a : Type} (f : (List a × Int) → Result a) + + def list_nth_body (x : (List a × Int)) : Result a := + let (ls, i) := x + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else + do + let y ← f (tl, i - 1) + .ret y + + -- Lean is good at applying lemmas: we can write a very general version + theorem is_mono_p_bind + (g : (a → Result b) → Result b) + (h : b → (a → Result b) → Result b) : + is_mono_p g → + (∀ y, is_mono_p (h y)) → + is_mono_p (λ f => do let y ← g f; h y f) := by + intro hg hh + simp [is_mono_p] + intro fg fh Hrgh + simp [marrow_rel, result_rel] + have hg := hg Hrgh; simp [result_rel] at hg + cases heq0: g fg <;> simp_all + rename_i y _ + have hh := hh y Hrgh; simp [result_rel] at hh + simp_all + + -- Lean is good at applying lemmas: we can write a very general version + theorem is_cont_p_bind + (f : (a → Result b) → a → Result b) + (g : (a → Result b) → Result b) + (h : b → (a → Result b) → Result b) : + is_cont_p f (λ f => g f) → + (∀ y, is_cont_p 
f (h y)) → + is_cont_p f (λ f => do let y ← g f; h y f) := by + intro Hg Hh + simp [is_cont_p] + intro Hdiv + -- Case on `g (fix... f)`: is there an n s.t. it terminates? + cases Classical.em (∀ n, g (fix_fuel n f) = .div) <;> rename_i Hn + . -- Case 1: g diverges + have Hg := Hg Hn + simp_all + . -- Case 2: g doesn't diverge + simp at Hn + let ⟨ n, Hn ⟩ := Hn + have Hdivn := Hdiv n + -- TODO: we need monotonicity of g and f + have Hgmono : is_mono_p g := by sorry + have Hfmono : is_mono f := by sorry + have Hffmono := fix_fuel_fix_mono Hfmono n + have Hgeq := Hgmono Hffmono + simp [result_rel] at Hgeq + cases Heq: g (fix_fuel n f) <;> rename_i y <;> simp_all + -- Remains the .ret case + -- TODO: we need monotonicity of h? + have Hhmono : is_mono_p (h y) := by sorry + -- Use Hdiv to prove that: ∀ n, h y (fix_fuel n f) = div + -- We do this in two steps: first we prove it for m ≥ n + have Hhdiv: ∀ m, h y (fix_fuel m f) = .div := by + have Hhdiv : ∀ m, n ≤ m → h y (fix_fuel m f) = .div := by + -- We use the fact that `g (fix_fuel n f) = .div`, combined with Hdiv + intro m Hle + have Hdivm := Hdiv m + -- Monotonicity of g + have Hffmono := fix_fuel_mono Hfmono Hle + have Hgmono := Hgmono Hffmono + -- We need to clear Hdiv because otherwise simp_all rewrites Hdivm with Hdiv + clear Hdiv + simp_all [result_rel] + intro m + -- TODO: we shouldn't need the excluded middle here because it is decidable + cases Classical.em (n ≤ m) <;> rename_i Hl + . apply Hhdiv; assumption + . simp at Hl + -- Make a case disjunction on `h y (fix_fuel m f)`: if it is not equal + -- to div, use the monotonicity of `h y` + have Hle : m ≤ n := by linarith + have Hffmono := fix_fuel_mono Hfmono Hle + have Hmono := Hhmono Hffmono + simp [result_rel] at Hmono + cases Heq: h y (fix_fuel m f) <;> simp_all + -- We can now use the continuity hypothesis for h + apply Hh; assumption + + + -- TODO: what is the name of this theorem? 
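+  -- (η-equality for functions is definitional in Lean 4, so both shapes should be
+  -- provable by `rfl`, e.g. `example (f : a → b) : (fun x => f x) = f := rfl`;
+  -- no dedicated lemma seems to be required.)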
+ -- theorem eta_app_eq (x : a) (f : a → b) : f x = (λ x => f x) x := by simp + -- theorem eta_eq (x : a) (f : a → b) : (λ x => f x) = f := by simp + + --set_option pp.funBinderTypes true + --set_option pp.explicit true + --set_option pp.notation false + + theorem list_nth_body_mono : ∀ x, is_mono_p (λ f => @list_nth_body a f x) := by + intro x + simp [list_nth_body] + split <;> simp + split <;> simp + apply is_mono_p_bind <;> intros <;> simp + + theorem list_nth_body_cont2: ∀ f x, is_cont_p f (λ f => @list_nth_body a f x) := by + intro f x + simp [list_nth_body] + split <;> simp + split <;> simp + + noncomputable + def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) + + theorem list_nth_eq (ls : List a) (i : Int) : + list_nth ls i = + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else list_nth tl (i - 1) + := by + have Hvalid : is_valid (@list_nth_body a) := + is_valid.intro list_nth_body_mono list_nth_body_cont + have Heq := fix_fixed_eq (@list_nth_body a) Hvalid + simp [Heq, list_nth] + conv => lhs; rw [list_nth_body] + simp [Heq] + +end Ex2 + +namespace Ex3 /- Higher-order example -/ - open Fix + open Primitives Fix variable {a b : Type} @@ -330,6 +573,7 @@ namespace Ex2 | [] => .ret [] | hd :: tl => do + -- TODO: monadic syntax match f hd with | .ret hd => match map f tl with @@ -423,7 +667,7 @@ namespace Ex2 have Hc := map_is_cont_contra_aux Hmono ls Hf apply exists_map_fix_fuel_not_div_imp <;> assumption - theorem map_is_cont {{f : (a → Result b) → a → Result b}} (Hmono : is_mono f) : + theorem map_is_cont {{f : (a → Result b) → a → Result b}} (Hmono : is_mono f) : ∀ ls, (Hc : ∀ n, map (fix_fuel n f) ls = .div) → map (fix f) ls = .div := by @@ -431,7 +675,7 @@ namespace Ex2 -- TODO: is there a tactic for proofs by contraposition? 
apply Classical.byContradiction; intro Hndiv let ⟨ n, Hcc ⟩ := map_is_cont_contra Hmono ls Hndiv - simp_all + simp_all /- An example which uses map -/ inductive Tree (a : Type) := @@ -490,6 +734,6 @@ namespace Ex2 have Heq := fix_fixed_eq (@id_body a) Hvalid conv => lhs; rw [id, Heq, id_body] -end Ex2 +end Ex3 end Diverge -- cgit v1.2.3 From 6297cdd89299452f8043f7aed75cf2eb01d31e24 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 19 Jun 2023 16:20:35 +0200 Subject: Further simplify the proofs in Diverge.lean --- backends/lean/Base/Diverge.lean | 273 ++++++++++++++++++++++------------------ 1 file changed, 151 insertions(+), 122 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 0eff17e3..759773c9 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -4,6 +4,7 @@ import Init.Data.List.Basic import Mathlib.Tactic.RunCmd import Mathlib.Tactic.Linarith import Mathlib.Tactic.Tauto +--import Mathlib.Logic namespace Diverge @@ -291,7 +292,7 @@ theorem fix_fixed_eq_terminates (f : (a → Result b) → a → Result b) (Hmono split <;> simp [*] <;> intros <;> simp [*] -- The final fixed point equation -theorem fix_fixed_eq (f : (a → Result b) → a → Result b) (Hvalid : is_valid f) : +theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_valid f) : ∀ x, fix f x = f (fix f) x := by intros x -- conv => lhs; simp [fix] @@ -320,7 +321,7 @@ theorem fix_fixed_eq (f : (a → Result b) → a → Result b) (Hvalid : is_vali simp [is_mono_p, marrow_rel, result_rel] split <;> simp - -- TODO: generalize + -- TODO: remove @[simp] theorem is_mono_p_tail_rec (x : a) : @is_mono_p a b (λ f => f x) := by simp_all [is_mono_p, marrow_rel, result_rel] @@ -335,30 +336,147 @@ theorem fix_fixed_eq (f : (a → Result b) → a → Result b) (Hvalid : is_vali is_cont_p f (λ _ => x) := by simp [is_cont_p] - -- TODO: generalize + -- TODO: remove @[simp] theorem is_cont_p_tail_rec (f : (a → Result b) → a → Result b) (x : a) : is_cont_p f (λ f => f x) := by simp_all [is_cont_p, fix] -/- -(∀ n, fix_fuel n f x = div) - -⊢ f (fun y => fix_fuel (least (fix_fuel_P f y)) f y) x = div - -(? x. p x) ==> p (epsilon p) - - -P (nf : a -> option Nat) := - match nf x with - | None => forall n, fix_fuel n f x = div - | Some n => fix_fuel n f x <> div + -- Lean is good at unification: we can write a very general version + theorem is_mono_p_bind + (g : (a → Result b) → Result b) + (h : b → (a → Result b) → Result b) : + is_mono_p g → + (∀ y, is_mono_p (h y)) → + is_mono_p (λ f => do let y ← g f; h y f) := by + intro hg hh + simp [is_mono_p] + intro fg fh Hrgh + simp [marrow_rel, result_rel] + have hg := hg Hrgh; simp [result_rel] at hg + cases heq0: g fg <;> simp_all + rename_i y _ + have hh := hh y Hrgh; simp [result_rel] at hh + simp_all -TODO: theorem de Tarsky, -Gilles Dowek (Introduction à la théorie des langages de programmation) + -- Lean is good at unification: we can write a very general version + -- (in particular, it will manage to figure out `g` and `h` when we + -- apply the lemma) + theorem is_cont_p_bind + (f : (a → Result b) → a → Result b) + (Hfmono : is_mono f) + (g : (a → Result b) → Result b) + (h : b → (a → Result b) → Result b) : + is_mono_p g → + is_cont_p f g → + (∀ y, is_mono_p (h y)) → + (∀ y, is_cont_p f (h y)) → + is_cont_p f (λ f => do let y ← g f; h y f) := by + intro Hgmono Hgcont Hhmono Hhcont + simp [is_cont_p] + intro Hdiv + -- Case on `g (fix... f)`: is there an n s.t. it terminates? 
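+    -- (if it never terminates, the continuity hypothesis on `g` applies directly;
+    -- otherwise monotonicity transports the terminating result from `fix_fuel n f` to `fix f`)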
+ cases Classical.em (∀ n, g (fix_fuel n f) = .div) <;> rename_i Hn + . -- Case 1: g diverges + have Hgcont := Hgcont Hn + simp_all + . -- Case 2: g doesn't diverge + simp at Hn + let ⟨ n, Hn ⟩ := Hn + have Hdivn := Hdiv n + have Hffmono := fix_fuel_fix_mono Hfmono n + have Hgeq := Hgmono Hffmono + simp [result_rel] at Hgeq + cases Heq: g (fix_fuel n f) <;> rename_i y <;> simp_all + -- Remains the .ret case + -- Use Hdiv to prove that: ∀ n, h y (fix_fuel n f) = div + -- We do this in two steps: first we prove it for m ≥ n + have Hhdiv: ∀ m, h y (fix_fuel m f) = .div := by + have Hhdiv : ∀ m, n ≤ m → h y (fix_fuel m f) = .div := by + -- We use the fact that `g (fix_fuel n f) = .div`, combined with Hdiv + intro m Hle + have Hdivm := Hdiv m + -- Monotonicity of g + have Hffmono := fix_fuel_mono Hfmono Hle + have Hgmono := Hgmono Hffmono + -- We need to clear Hdiv because otherwise simp_all rewrites Hdivm with Hdiv + clear Hdiv + simp_all [result_rel] + intro m + -- TODO: we shouldn't need the excluded middle here because it is decidable + cases Classical.em (n ≤ m) <;> rename_i Hl + . apply Hhdiv; assumption + . simp at Hl + -- Make a case disjunction on `h y (fix_fuel m f)`: if it is not equal + -- to div, use the monotonicity of `h y` + have Hle : m ≤ n := by linarith + have Hffmono := fix_fuel_mono Hfmono Hle + have Hmono := Hhmono y Hffmono + simp [result_rel] at Hmono + cases Heq: h y (fix_fuel m f) <;> simp_all + -- We can now use the continuity hypothesis for h + apply Hhcont; assumption -fix_f is f s.t.: f x = f (fix f) x ∧ ! g. g x = g (fix g) x ==> f <= g + -- TODO: move + def is_valid_p (f : (a → Result b) → a → Result b) + (body : (a → Result b) → Result b) : Prop := + is_mono_p body ∧ + (is_mono f → is_cont_p f body) + + @[simp] theorem is_valid_p_same (f : (a → Result b) → a → Result b) (x : Result b) : + is_valid_p f (λ _ => x) := by + simp [is_valid_p] + + @[simp] theorem is_valid_p_rec (f : (a → Result b) → a → Result b) (x : a) : + is_valid_p f (λ f => f x) := by + simp [is_valid_p] + + -- Lean is good at unification: we can write a very general version + -- (in particular, it will manage to figure out `g` and `h` when we + -- apply the lemma) + theorem is_valid_p_bind + {{f : (a → Result b) → a → Result b}} + {{g : (a → Result b) → Result b}} + {{h : b → (a → Result b) → Result b}} + (Hgvalid : is_valid_p f g) + (Hhvalid : ∀ y, is_valid_p f (h y)) : + is_valid_p f (λ f => do let y ← g f; h y f) := by + let ⟨ Hgmono, Hgcont ⟩ := Hgvalid + -- TODO: conversion to move forall below and conjunction? + simp [is_valid_p, forall_and] at Hhvalid + have ⟨ Hhmono, Hhcont ⟩ := Hhvalid + simp [← imp_forall_iff] at Hhcont + simp [is_valid_p]; constructor + . -- Monotonicity + apply is_mono_p_bind <;> assumption + . 
-- Continuity + intro Hfmono + have Hgcont := Hgcont Hfmono + have Hhcont := Hhcont Hfmono + apply is_cont_p_bind <;> assumption + + theorem is_valid_p_imp_is_valid {{body : (a → Result b) → a → Result b}} + (Hvalid : ∀ f x, is_valid_p f (λ f => body f x)) : + is_valid body := by + have Hmono : is_mono body := by + intro f h Hr x + have Hmono := Hvalid (λ _ _ => .div) x + have Hmono := Hmono.left + apply Hmono; assumption + have Hcont : is_cont body := by + intro x Hdiv + have Hcont := (Hvalid body x).right Hmono + simp [is_cont_p] at Hcont + apply Hcont + intro n + have Hdiv := Hdiv n + simp [fix_fuel] at Hdiv + simp [*] + apply is_valid.intro Hmono Hcont --/ + theorem is_valid_p_fix_fixed_eq {{body : (a → Result b) → a → Result b}} + (Hvalid : ∀ f x, is_valid_p f (λ f => body f x)) : + ∀ x, fix body x = body (fix body) x := + fix_fixed_eq (is_valid_p_imp_is_valid Hvalid) end Fix @@ -420,7 +538,7 @@ namespace Ex1 := by have Hvalid : is_valid (@list_nth_body a) := is_valid.intro list_nth_body_mono list_nth_body_cont - have Heq := fix_fixed_eq (@list_nth_body a) Hvalid + have Heq := fix_fixed_eq Hvalid simp [Heq, list_nth] conv => lhs; rw [list_nth_body] simp [Heq] @@ -444,117 +562,28 @@ namespace Ex2 let y ← f (tl, i - 1) .ret y - -- Lean is good at applying lemmas: we can write a very general version - theorem is_mono_p_bind - (g : (a → Result b) → Result b) - (h : b → (a → Result b) → Result b) : - is_mono_p g → - (∀ y, is_mono_p (h y)) → - is_mono_p (λ f => do let y ← g f; h y f) := by - intro hg hh - simp [is_mono_p] - intro fg fh Hrgh - simp [marrow_rel, result_rel] - have hg := hg Hrgh; simp [result_rel] at hg - cases heq0: g fg <;> simp_all - rename_i y _ - have hh := hh y Hrgh; simp [result_rel] at hh - simp_all - - -- Lean is good at applying lemmas: we can write a very general version - theorem is_cont_p_bind - (f : (a → Result b) → a → Result b) - (g : (a → Result b) → Result b) - (h : b → (a → Result b) → Result b) : - is_cont_p f (λ f => g f) → - (∀ y, is_cont_p f (h y)) → - is_cont_p f (λ f => do let y ← g f; h y f) := by - intro Hg Hh - simp [is_cont_p] - intro Hdiv - -- Case on `g (fix... f)`: is there an n s.t. it terminates? - cases Classical.em (∀ n, g (fix_fuel n f) = .div) <;> rename_i Hn - . -- Case 1: g diverges - have Hg := Hg Hn - simp_all - . -- Case 2: g doesn't diverge - simp at Hn - let ⟨ n, Hn ⟩ := Hn - have Hdivn := Hdiv n - -- TODO: we need monotonicity of g and f - have Hgmono : is_mono_p g := by sorry - have Hfmono : is_mono f := by sorry - have Hffmono := fix_fuel_fix_mono Hfmono n - have Hgeq := Hgmono Hffmono - simp [result_rel] at Hgeq - cases Heq: g (fix_fuel n f) <;> rename_i y <;> simp_all - -- Remains the .ret case - -- TODO: we need monotonicity of h? - have Hhmono : is_mono_p (h y) := by sorry - -- Use Hdiv to prove that: ∀ n, h y (fix_fuel n f) = div - -- We do this in two steps: first we prove it for m ≥ n - have Hhdiv: ∀ m, h y (fix_fuel m f) = .div := by - have Hhdiv : ∀ m, n ≤ m → h y (fix_fuel m f) = .div := by - -- We use the fact that `g (fix_fuel n f) = .div`, combined with Hdiv - intro m Hle - have Hdivm := Hdiv m - -- Monotonicity of g - have Hffmono := fix_fuel_mono Hfmono Hle - have Hgmono := Hgmono Hffmono - -- We need to clear Hdiv because otherwise simp_all rewrites Hdivm with Hdiv - clear Hdiv - simp_all [result_rel] - intro m - -- TODO: we shouldn't need the excluded middle here because it is decidable - cases Classical.em (n ≤ m) <;> rename_i Hl - . apply Hhdiv; assumption - . 
simp at Hl - -- Make a case disjunction on `h y (fix_fuel m f)`: if it is not equal - -- to div, use the monotonicity of `h y` - have Hle : m ≤ n := by linarith - have Hffmono := fix_fuel_mono Hfmono Hle - have Hmono := Hhmono Hffmono - simp [result_rel] at Hmono - cases Heq: h y (fix_fuel m f) <;> simp_all - -- We can now use the continuity hypothesis for h - apply Hh; assumption - - - -- TODO: what is the name of this theorem? - -- theorem eta_app_eq (x : a) (f : a → b) : f x = (λ x => f x) x := by simp - -- theorem eta_eq (x : a) (f : a → b) : (λ x => f x) = f := by simp - - --set_option pp.funBinderTypes true - --set_option pp.explicit true - --set_option pp.notation false - - theorem list_nth_body_mono : ∀ x, is_mono_p (λ f => @list_nth_body a f x) := by - intro x - simp [list_nth_body] - split <;> simp - split <;> simp - apply is_mono_p_bind <;> intros <;> simp - - theorem list_nth_body_cont2: ∀ f x, is_cont_p f (λ f => @list_nth_body a f x) := by + theorem list_nth_body_valid: ∀ f x, is_valid_p f (λ f => @list_nth_body a f x) := by intro f x simp [list_nth_body] split <;> simp split <;> simp + apply is_valid_p_bind <;> intros <;> simp_all noncomputable def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) theorem list_nth_eq (ls : List a) (i : Int) : - list_nth ls i = - match ls with - | [] => .fail .panic - | hd :: tl => - if i = 0 then .ret hd - else list_nth tl (i - 1) + (list_nth ls i = + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else + do + let y ← list_nth tl (i - 1) + .ret y) := by - have Hvalid : is_valid (@list_nth_body a) := - is_valid.intro list_nth_body_mono list_nth_body_cont - have Heq := fix_fixed_eq (@list_nth_body a) Hvalid + have Heq := is_valid_p_fix_fixed_eq (@list_nth_body_valid a) simp [Heq, list_nth] conv => lhs; rw [list_nth_body] simp [Heq] @@ -731,7 +760,7 @@ namespace Ex3 := by have Hvalid : is_valid (@id_body a) := is_valid.intro id_body_mono id_body_cont - have Heq := fix_fixed_eq (@id_body a) Hvalid + have Heq := fix_fixed_eq Hvalid conv => lhs; rw [id, Heq, id_body] end Ex3 -- cgit v1.2.3 From 34a471c02d6c49aa34b7f353b28b90b09a69864a Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 19 Jun 2023 17:10:24 +0200 Subject: Simplify the id example in Diverge.lean --- backends/lean/Base/Diverge.lean | 119 ++++++++++++++++++++++++++++++++-------- 1 file changed, 95 insertions(+), 24 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 759773c9..2c764c5e 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -74,7 +74,7 @@ namespace Fix open Primitives open Result -variable {a b : Type} +variable {a b c d : Type} /- TODO: @@ -292,6 +292,7 @@ theorem fix_fixed_eq_terminates (f : (a → Result b) → a → Result b) (Hmono split <;> simp [*] <;> intros <;> simp [*] -- The final fixed point equation +-- TODO: remove the `forall x` theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_valid f) : ∀ x, fix f x = f (fix f) x := by intros x @@ -313,26 +314,26 @@ theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_va /- Making the proofs more systematic -/ -- TODO: rewrite is_mono in terms of is_mono_p - def is_mono_p (body : (a → Result b) → Result b) : Prop := + def is_mono_p (body : (a → Result b) → Result c) : Prop := ∀ {{g h}}, marrow_rel g h → result_rel (body g) (body h) - @[simp] theorem is_mono_p_same (x : Result b) : - @is_mono_p a b (λ _ => x) := by + @[simp] theorem 
is_mono_p_same (x : Result c) : + @is_mono_p a b c (λ _ => x) := by simp [is_mono_p, marrow_rel, result_rel] split <;> simp -- TODO: remove @[simp] theorem is_mono_p_tail_rec (x : a) : - @is_mono_p a b (λ f => f x) := by + @is_mono_p a b b (λ f => f x) := by simp_all [is_mono_p, marrow_rel, result_rel] -- TODO: rewrite is_cont in terms of is_cont_p def is_cont_p (f : (a → Result b) → a → Result b) - (body : (a → Result b) → Result b) : Prop := + (body : (a → Result b) → Result c) : Prop := (Hc : ∀ n, body (fix_fuel n f) = .div) → body (fix f) = .div - @[simp] theorem is_cont_p_same (f : (a → Result b) → a → Result b) (x : Result b) : + @[simp] theorem is_cont_p_same (f : (a → Result b) → a → Result b) (x : Result c) : is_cont_p f (λ _ => x) := by simp [is_cont_p] @@ -343,8 +344,8 @@ theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_va -- Lean is good at unification: we can write a very general version theorem is_mono_p_bind - (g : (a → Result b) → Result b) - (h : b → (a → Result b) → Result b) : + (g : (a → Result b) → Result c) + (h : c → (a → Result b) → Result d) : is_mono_p g → (∀ y, is_mono_p (h y)) → is_mono_p (λ f => do let y ← g f; h y f) := by @@ -364,8 +365,8 @@ theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_va theorem is_cont_p_bind (f : (a → Result b) → a → Result b) (Hfmono : is_mono f) - (g : (a → Result b) → Result b) - (h : b → (a → Result b) → Result b) : + (g : (a → Result b) → Result c) + (h : c → (a → Result b) → Result d) : is_mono_p g → is_cont_p f g → (∀ y, is_mono_p (h y)) → @@ -417,12 +418,12 @@ theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_va apply Hhcont; assumption -- TODO: move - def is_valid_p (f : (a → Result b) → a → Result b) - (body : (a → Result b) → Result b) : Prop := + def is_valid_p (k : (a → Result b) → a → Result b) + (body : (a → Result b) → Result c) : Prop := is_mono_p body ∧ - (is_mono f → is_cont_p f body) + (is_mono k → is_cont_p k body) - @[simp] theorem is_valid_p_same (f : (a → Result b) → a → Result b) (x : Result b) : + @[simp] theorem is_valid_p_same (f : (a → Result b) → a → Result b) (x : Result c) : is_valid_p f (λ _ => x) := by simp [is_valid_p] @@ -435,8 +436,8 @@ theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_va -- apply the lemma) theorem is_valid_p_bind {{f : (a → Result b) → a → Result b}} - {{g : (a → Result b) → Result b}} - {{h : b → (a → Result b) → Result b}} + {{g : (a → Result b) → Result c}} + {{h : c → (a → Result b) → Result d}} (Hgvalid : is_valid_p f g) (Hhvalid : ∀ y, is_valid_p f (h y)) : is_valid_p f (λ f => do let y ← g f; h y f) := by @@ -473,10 +474,12 @@ theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_va simp [*] apply is_valid.intro Hmono Hcont + -- TODO: functional extensionality theorem is_valid_p_fix_fixed_eq {{body : (a → Result b) → a → Result b}} (Hvalid : ∀ f x, is_valid_p f (λ f => body f x)) : - ∀ x, fix body x = body (fix body) x := - fix_fixed_eq (is_valid_p_imp_is_valid Hvalid) + fix body = body (fix body) := by + apply funext + exact fix_fixed_eq (is_valid_p_imp_is_valid Hvalid) end Fix @@ -562,8 +565,8 @@ namespace Ex2 let y ← f (tl, i - 1) .ret y - theorem list_nth_body_valid: ∀ f x, is_valid_p f (λ f => @list_nth_body a f x) := by - intro f x + theorem list_nth_body_valid: ∀ k x, is_valid_p k (λ k => @list_nth_body a k x) := by + intro k x simp [list_nth_body] split <;> simp split <;> simp @@ -584,9 +587,8 @@ namespace Ex2 .ret y) := by have Heq := is_valid_p_fix_fixed_eq 
(@list_nth_body_valid a) - simp [Heq, list_nth] - conv => lhs; rw [list_nth_body] - simp [Heq] + simp [list_nth] + conv => lhs; rw [Heq] end Ex2 @@ -765,4 +767,73 @@ namespace Ex3 end Ex3 +namespace Ex4 + /- Higher-order example -/ + open Primitives Fix + + variable {a b : Type} + + /- An auxiliary function, which doesn't require the fixed-point -/ + def map (f : a → Result b) (ls : List a) : Result (List b) := + match ls with + | [] => .ret [] + | hd :: tl => + do + let hd ← f hd + let tl ← map f tl + .ret (hd :: tl) + + /- The validity theorem for `map`, generic in `f` -/ + /- TODO: rename the condition to k in all the lemma statements -/ + theorem map_is_valid + {{f : (a → Result b) → a → Result c}} + (Hfvalid : ∀ k x, is_valid_p k (λ k => f k x)) + (k : (a → Result b) → a → Result b) + (ls : List a) : + is_valid_p k (λ k => map (f k) ls) := by + induction ls <;> simp [map] + apply is_valid_p_bind <;> simp_all + intros + apply is_valid_p_bind <;> simp_all + + /- An example which uses map -/ + inductive Tree (a : Type) := + | leaf (x : a) + | node (tl : List (Tree a)) + + def id_body (f : Tree a → Result (Tree a)) (t : Tree a) : Result (Tree a) := + match t with + | .leaf x => .ret (.leaf x) + | .node tl => + do + let tl ← map f tl + .ret (.node tl) + + /- TODO: make the naming consistent (suffix with "_is") -/ + theorem id_body_is_valid : + ∀ k x, is_valid_p k (λ k => @id_body a k x) := by + intro k x + simp [id_body] + split <;> simp + apply is_valid_p_bind <;> simp_all + -- We have to show that `map k tl` is valid + apply map_is_valid; simp + + noncomputable def id (t : Tree a) := fix id_body t + + theorem id_eq (t : Tree a) : + (id t = + match t with + | .leaf x => .ret (.leaf x) + | .node tl => + do + let tl ← map id tl + .ret (.node tl)) + := by + have Heq := is_valid_p_fix_fixed_eq (@id_body_is_valid a) + simp [id] + conv => lhs; rw [Heq]; simp; rw [id_body] + +end Ex4 + end Diverge -- cgit v1.2.3 From 5d8eea6504d9dcfa43844d5ba51c7abf6c589701 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 19 Jun 2023 17:26:51 +0200 Subject: Remove the obsolete examples from Diverge --- backends/lean/Base/Diverge.lean | 213 +--------------------------------------- 1 file changed, 5 insertions(+), 208 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 2c764c5e..0e3e96c3 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -497,33 +497,8 @@ namespace Ex1 if i = 0 then .ret hd else f (tl, i - 1) - theorem list_nth_body_mono : is_mono (@list_nth_body a) := by - simp [is_mono]; intro g h Hr (ls, i); simp [result_rel, list_nth_body] - cases ls <;> simp - rename_i hd tl - -- TODO: making a case disjunction over `i = 0` is annoying, we need a more - -- general tactic for this - cases (Classical.em (Eq i 0)) <;> simp [*] at * - apply Hr - - theorem list_nth_body_cont : is_cont (@list_nth_body a) := by - rw [is_cont]; intro (ls, i) Hdiv; simp [list_nth_body, fix_fuel] at * - cases ls <;> simp at * - -- TODO: automate this - cases (Classical.em (Eq i 0)) <;> simp [*] at * - -- Recursive call - apply Hdiv - - /- Making the monotonicity/continuity proofs more systematic -/ - - theorem list_nth_body_mono2 : ∀ x, is_mono_p (λ f => @list_nth_body a f x) := by - intro x - simp [list_nth_body] - split <;> simp - split <;> simp - - theorem list_nth_body_cont2: ∀ f x, is_cont_p f (λ f => @list_nth_body a f x) := by - intro f x + theorem list_nth_body_valid: ∀ k x, is_valid_p k (λ k => @list_nth_body a k x) := by + 
intro k x simp [list_nth_body] split <;> simp split <;> simp @@ -539,12 +514,9 @@ namespace Ex1 if i = 0 then .ret hd else list_nth tl (i - 1) := by - have Hvalid : is_valid (@list_nth_body a) := - is_valid.intro list_nth_body_mono list_nth_body_cont - have Heq := fix_fixed_eq Hvalid - simp [Heq, list_nth] - conv => lhs; rw [list_nth_body] - simp [Heq] + have Heq := is_valid_p_fix_fixed_eq (@list_nth_body_valid a) + simp [list_nth] + conv => lhs; rw [Heq] end Ex1 @@ -592,181 +564,6 @@ namespace Ex2 end Ex2 -namespace Ex3 - /- Higher-order example -/ - open Primitives Fix - - variable {a b : Type} - - /- An auxiliary function, which doesn't require the fixed-point -/ - def map (f : a → Result b) (ls : List a) : Result (List b) := - match ls with - | [] => .ret [] - | hd :: tl => - do - -- TODO: monadic syntax - match f hd with - | .ret hd => - match map f tl with - | .ret tl => - .ret (hd :: tl) - | r => r - | .fail e => .fail e - | .div => .div - - theorem map_is_mono {{f g : a → Result b}} (Hr : marrow_rel f g) : - ∀ ls, result_rel (map f ls) (map g ls) := by - intro ls; induction ls <;> simp [result_rel, map] - case cons hd tl Hi => - have Hr1 := Hr hd; simp [result_rel] at Hr1 - -- TODO: reverting is annoying - revert Hr1 - cases f hd <;> intro Hr1 <;> simp [*] - -- ret case - simp [result_rel] at Hi - -- TODO: annoying - revert Hi - cases map f tl <;> intro Hi <;> simp [*] - - -- Auxiliary definition - def map_fix_fuel (n0 n1 : Nat) (f : (a → Result b) → a → Result b) (ls : List a) : Result (List b) := - match ls with - | [] => .ret [] - | hd :: tl => - do - match fix_fuel n0 f hd with - | .ret hd => - match map (fix_fuel n1 f) tl with - | .ret tl => - .ret (hd :: tl) - | r => r - | .fail e => .fail e - | .div => .div - - def exists_map_fix_fuel_not_div_imp {{f : (a → Result b) → a → Result b}} {{ls : List a}} - (Hmono : is_mono f) : - (∃ n0 n1, map_fix_fuel n0 n1 f ls ≠ .div) → - ∃ n2, map (fix_fuel n2 f) ls ≠ .div := by - intro ⟨ n0, n1, Hnd ⟩ - exists n0 + n1 - have Hineq0 : n0 ≤ n0 + n1 := by linarith - have Hineq1 : n1 ≤ n0 + n1 := by linarith - simp [map_fix_fuel] at Hnd - -- TODO: I would like a rewrite_once tactic - unfold map; simp - -- - revert Hnd - cases ls <;> simp - rename_i hd tl - -- Use the monotonicity of fix_fuel - have Hfmono := fix_fuel_mono Hmono Hineq0 hd - simp [result_rel] at Hfmono; revert Hfmono - cases fix_fuel n0 f hd <;> intro <;> simp [*] - -- Use the monotonicity of map - have Hfmono := fix_fuel_mono Hmono Hineq1 - have Hmmono := map_is_mono Hfmono tl - simp [result_rel] at Hmmono; revert Hmmono - cases map (fix_fuel n1 f) tl <;> intro <;> simp [*] - - -- TODO: it is simpler to prove the contrapositive of is_cont than is_cont itself. - -- The proof is still quite technical: think of a criteria whose proof is simpler - -- to automate. 
- theorem map_is_cont_contra_aux {{f : (a → Result b) → a → Result b}} (Hmono : is_mono f) : - ∀ ls, map (fix f) ls ≠ .div → - ∃ n0 n1, map_fix_fuel n0 n1 f ls ≠ .div - := by - intro ls; induction ls <;> simp [result_rel, map_fix_fuel, map] - simp [fix] - case cons hd tl Hi => - -- Instantiate the first n and do a case disjunction - intro Hf; exists (least (fix_fuel_P f hd)); revert Hf - cases fix_fuel (least (fix_fuel_P f hd)) f hd <;> simp - -- Use the induction hyp - have Hr := Classical.em (map (fix f) tl = .div) - simp [fix] at * - cases Hr <;> simp_all - have Hj : ∃ n2, map (fix_fuel n2 f) tl ≠ .div := exists_map_fix_fuel_not_div_imp Hmono Hi - revert Hj; intro ⟨ n2, Hj ⟩ - intro Hf; exists n2; revert Hf - revert Hj; cases map (fix_fuel n2 f) tl <;> simp_all - - theorem map_is_cont_contra {{f : (a → Result b) → a → Result b}} (Hmono : is_mono f) : - ∀ ls, map (fix f) ls ≠ .div → - ∃ n, map (fix_fuel n f) ls ≠ .div - := by - intro ls Hf - have Hc := map_is_cont_contra_aux Hmono ls Hf - apply exists_map_fix_fuel_not_div_imp <;> assumption - - theorem map_is_cont {{f : (a → Result b) → a → Result b}} (Hmono : is_mono f) : - ∀ ls, (Hc : ∀ n, map (fix_fuel n f) ls = .div) → - map (fix f) ls = .div - := by - intro ls Hc - -- TODO: is there a tactic for proofs by contraposition? - apply Classical.byContradiction; intro Hndiv - let ⟨ n, Hcc ⟩ := map_is_cont_contra Hmono ls Hndiv - simp_all - - /- An example which uses map -/ - inductive Tree (a : Type) := - | leaf (x : a) - | node (tl : List (Tree a)) - - def id_body (f : Tree a → Result (Tree a)) (t : Tree a) : Result (Tree a) := - match t with - | .leaf x => .ret (.leaf x) - | .node tl => - match map f tl with - | .div => .div - | .fail e => .fail e - | .ret tl => .ret (.node tl) - - theorem id_body_mono : is_mono (@id_body a) := by - simp [is_mono]; intro g h Hr t; simp [result_rel, id_body] - cases t <;> simp - rename_i tl - have Hmmono := map_is_mono Hr tl - revert Hmmono; simp [result_rel] - cases map g tl <;> simp_all - - theorem id_body_cont : is_cont (@id_body a) := by - rw [is_cont]; intro t Hdiv - simp [fix_fuel] at * - -- TODO: weird things are happening with the rewriter and the simplifier here - rw [id_body] - simp [id_body] at Hdiv - -- - cases t <;> simp at * - rename_i tl - -- TODO: automate this - have Hmc := map_is_cont id_body_mono tl - have Hdiv : ∀ (n : ℕ), map (fix_fuel n id_body) tl = Result.div := by - intro n - have Hdiv := Hdiv n; revert Hdiv - cases map (fix_fuel n id_body) tl <;> simp_all - have Hmc := Hmc Hdiv; revert Hmc - cases map (fix id_body) tl <;> simp_all - - noncomputable def id (t : Tree a) := fix id_body t - - theorem id_eq (t : Tree a) : - id t = - match t with - | .leaf x => .ret (.leaf x) - | .node tl => - match map id tl with - | .div => .div - | .fail e => .fail e - | .ret tl => .ret (.node tl) - := by - have Hvalid : is_valid (@id_body a) := - is_valid.intro id_body_mono id_body_cont - have Heq := fix_fixed_eq Hvalid - conv => lhs; rw [id, Heq, id_body] - -end Ex3 - namespace Ex4 /- Higher-order example -/ open Primitives Fix -- cgit v1.2.3 From 8db6718d06023ffa77035b29ec92cec03ee838bc Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 19 Jun 2023 18:13:29 +0200 Subject: Add an example with even/odd in Diverge.lean --- backends/lean/Base/Diverge.lean | 120 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 119 insertions(+), 1 deletion(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 0e3e96c3..2e77c5e0 100644 --- 
a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -564,6 +564,124 @@ namespace Ex2 end Ex2 +namespace Ex3 + /- Mutually recursive functions -/ + open Primitives Fix + + /- Because we have mutually recursive functions, we use a sum for the inputs + and the output types: + - inputs: the sum allows to select the function to call in the recursive + calls (and the functions may not have the same input types) + - outpus: this case is degenerate because `even` and `odd` both have the + return type `Bool`, but generally speaking we need a sum type because + the functions in the mutually recursive group may not have the same + return type. + -/ + variable (f : (Int ⊕ Int) → Result (Bool ⊕ Bool)) + + def is_even_is_odd_body (x : (Int ⊕ Int)) : Result (Bool ⊕ Bool) := + match x with + | .inl i => + -- Body of `is_even` + if i = 0 + then .ret (.inl true) -- We return .inl because this is `is_even` + else + do + let b ← + do + -- Call `odd`: we need to wrap the input value in `.inr`, then + -- extract the output value + let r ← f (.inr (i- 1)) + match r with + | .inl _ => .fail .panic -- Invalid output + | .inr b => .ret b + -- Wrap the return value + .ret (.inl b) + | .inr i => + -- Body of `is_odd` + if i = 0 + then .ret (.inr false) -- We return .inr because this is `is_odd` + else + do + let b ← + do + -- Call `is_even`: we need to wrap the input value in .inr, then + -- extract the output value + let r ← f (.inl (i- 1)) + match r with + | .inl b => .ret b + | .inr _ => .fail .panic -- Invalid output + -- Wrap the return value + .ret (.inr b) + + theorem is_even_is_odd_body_is_valid: + ∀ k x, is_valid_p k (λ k => is_even_is_odd_body k x) := by + intro k x + simp [is_even_is_odd_body] + split <;> simp <;> split <;> simp + apply is_valid_p_bind; simp + intros; split <;> simp + apply is_valid_p_bind; simp + intros; split <;> simp + + noncomputable + def is_even (i : Int): Result Bool := + do + let r ← fix is_even_is_odd_body (.inl i) + match r with + | .inl b => .ret b + | .inr _ => .fail .panic + + noncomputable + def is_odd (i : Int): Result Bool := + do + let r ← fix is_even_is_odd_body (.inr i) + match r with + | .inl _ => .fail .panic + | .inr b => .ret b + + -- TODO: move + -- TODO: this is not enough + theorem swap_if_bind {a b : Type} (e : Prop) [Decidable e] (x y : Result a) (f : a → Result b) : + (do + let z ← (if e then x else y) + f z) + = + (if e then do let z ← x; f z + else do let z ← y; f z) := by + split <;> simp + + theorem is_even_eq (i : Int) : + is_even i = (if i = 0 then .ret true else is_odd (i - 1)) + := by + have Heq := is_valid_p_fix_fixed_eq is_even_is_odd_body_is_valid + simp [is_even, is_odd] + conv => lhs; rw [Heq]; simp; rw [is_even_is_odd_body]; simp + -- Very annoying: we need to swap the matches + -- Doing this with rewriting lemmas is hard generally speaking + -- (especially as we may have to generate lemmas for user-defined + -- inductives on the fly). 
+ -- The simplest is to repeatedly split then simplify (we identify + -- the outer match or monadic let-binding, and split on its scrutinee) + split <;> simp + cases H0 : fix is_even_is_odd_body (Sum.inr (i - 1)) <;> simp + rename_i v + split <;> simp + + theorem is_odd_eq (i : Int) : + is_odd i = (if i = 0 then .ret false else is_even (i - 1)) + := by + have Heq := is_valid_p_fix_fixed_eq is_even_is_odd_body_is_valid + simp [is_even, is_odd] + conv => lhs; rw [Heq]; simp; rw [is_even_is_odd_body]; simp + -- Same remark as for `even` + split <;> simp + cases H0 : fix is_even_is_odd_body (Sum.inl (i - 1)) <;> simp + rename_i v + split <;> simp + +end Ex3 + namespace Ex4 /- Higher-order example -/ open Primitives Fix @@ -581,7 +699,7 @@ namespace Ex4 .ret (hd :: tl) /- The validity theorem for `map`, generic in `f` -/ - /- TODO: rename the condition to k in all the lemma statements -/ + /- TODO: rename the continuation to k in all the lemma statements -/ theorem map_is_valid {{f : (a → Result b) → a → Result c}} (Hfvalid : ∀ k x, is_valid_p k (λ k => f k x)) -- cgit v1.2.3 From a2670f4d097075c23b9affceb8ed8498b73c4b8c Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 19 Jun 2023 18:52:29 +0200 Subject: Cleanup Diverge.lean --- backends/lean/Base/Diverge.lean | 657 ++++++++++++++++++------------------- backends/lean/Base/Primitives.lean | 13 + 2 files changed, 333 insertions(+), 337 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 2e77c5e0..65c061bd 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -3,12 +3,32 @@ import Lean.Meta.Tactic.Simp import Init.Data.List.Basic import Mathlib.Tactic.RunCmd import Mathlib.Tactic.Linarith -import Mathlib.Tactic.Tauto ---import Mathlib.Logic + +/- +TODO: +- we want an easier to use cases: + - keeps in the goal an equation of the shape: `t = case` + - if called on Prop terms, uses Classical.em + Actually, the cases from mathlib seems already quite powerful + (https://leanprover-community.github.io/mathlib_docs/tactics.html#cases) + For instance: cases h : e + Also: cases_matching +- better split tactic +- we need conversions to operate on the head of applications. + Actually, something like this works: + ``` + conv at Hl => + apply congr_fun + simp [fix_fuel_P] + ``` + Maybe we need a rpt ... ; focus? +- simplifier/rewriter have a strange behavior sometimes +-/ namespace Diverge namespace Primitives +/-! # Copy-pasting from Primitives to make the file self-contained -/ inductive Error where | assertionFailure: Error @@ -29,12 +49,6 @@ deriving Repr, BEq open Result --- instance Result_Inhabited (α : Type u) : Inhabited (Result α) := --- Inhabited.mk (fail panic) - --- instance Result_Nonempty (α : Type u) : Nonempty (Result α) := --- Nonempty.intro div - def bind (x: Result α) (f: α -> Result β) : Result β := match x with | ret v => f v @@ -71,312 +85,276 @@ end Primitives namespace Fix -open Primitives -open Result - -variable {a b c d : Type} - -/- -TODO: -- we want an easier to use cases: - - keeps in the goal an equation of the shape: `t = case` - - if called on Prop terms, uses Classical.em - Actually, the cases from mathlib seems already quite powerful - (https://leanprover-community.github.io/mathlib_docs/tactics.html#cases) - For instance: cases h : e - Also: cases_matching -- better split tactic -- we need conversions to operate on the head of applications. 
- Actually, something like this works: - ``` - conv at Hl => - apply congr_fun - simp [fix_fuel_P] - ``` - Maybe we need a rpt ... ; focus? -- simplifier/rewriter have a strange behavior sometimes --/ - -/-! # The least fixed point definition and its properties -/ - -def least_p (p : Nat → Prop) (n : Nat) : Prop := p n ∧ (∀ m, m < n → ¬ p m) -noncomputable def least (p : Nat → Prop) : Nat := - Classical.epsilon (least_p p) - --- Auxiliary theorem for [least_spec]: if there exists an `n` satisfying `p`, --- there there exists a least `m` satisfying `p`. -theorem least_spec_aux (p : Nat → Prop) : ∀ (n : Nat), (hn : p n) → ∃ m, least_p p m := by - apply Nat.strongRec' - intros n hi hn - -- Case disjunction on: is n the smallest n satisfying p? - match Classical.em (∀ m, m < n → ¬ p m) with - | .inl hlt => - -- Yes: trivial - exists n - | .inr hlt => - simp at * - let ⟨ m, ⟨ hmlt, hm ⟩ ⟩ := hlt - have hi := hi m hmlt hm - apply hi - --- The specification of [least]: either `p` is never satisfied, or it is satisfied --- by `least p` and no `n < least p` satisfies `p`. -theorem least_spec (p : Nat → Prop) : (∀ n, ¬ p n) ∨ (p (least p) ∧ ∀ n, n < least p → ¬ p n) := by - -- Case disjunction on the existence of an `n` which satisfies `p` - match Classical.em (∀ n, ¬ p n) with - | .inl h => - -- There doesn't exist: trivial - apply (Or.inl h) - | .inr h => - -- There exists: we simply use `least_spec_aux` in combination with the property - -- of the epsilon operator - simp at * - let ⟨ n, hn ⟩ := h - apply Or.inr - have hl := least_spec_aux p n hn - have he := Classical.epsilon_spec hl - apply he - -/-! # The fixed point definitions -/ - -def fix_fuel (n : Nat) (f : (a → Result b) → a → Result b) (x : a) : Result b := - match n with - | 0 => .div - | n + 1 => - f (fix_fuel n f) x - -@[simp] def fix_fuel_pred (f : (a → Result b) → a → Result b) (x : a) (n : Nat) := - not (div? (fix_fuel n f x)) - -def fix_fuel_P (f : (a → Result b) → a → Result b) (x : a) (n : Nat) : Prop := - fix_fuel_pred f x n - -noncomputable def fix (f : (a → Result b) → a → Result b) (x : a) : Result b := - fix_fuel (least (fix_fuel_P f x)) f x - -/-! # The proof of the fixed point equation -/ - --- Monotonicity relation over results --- TODO: generalize -def result_rel {a : Type u} (x1 x2 : Result a) : Prop := - match x1 with - | div => True - | fail _ => x2 = x1 - | ret _ => x2 = x1 -- TODO: generalize - --- Monotonicity relation over monadic arrows --- TODO: Kleisli arrow --- TODO: generalize -def marrow_rel (f g : a → Result b) : Prop := - ∀ x, result_rel (f x) (g x) - --- Monotonicity property -def is_mono (f : (a → Result b) → a → Result b) : Prop := - ∀ {{g h}}, marrow_rel g h → marrow_rel (f g) (f h) - --- "Continuity" property. --- We need this, and this looks a lot like continuity. Also see this paper: --- https://inria.hal.science/file/index/docid/216187/filename/tarski.pdf -def is_cont (f : (a → Result b) → a → Result b) : Prop := - ∀ x, (Hdiv : ∀ n, fix_fuel (.succ n) f x = div) → f (fix f) x = div - --- Validity property for a body -structure is_valid (f : (a → Result b) → a → Result b) := - intro:: - hmono : is_mono f - hcont : is_cont f - -/- - - -/ - -theorem fix_fuel_mono {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : - ∀ {{n m}}, n ≤ m → marrow_rel (fix_fuel n f) (fix_fuel m f) := by - intros n - induction n - case zero => simp [marrow_rel, fix_fuel, result_rel] - case succ n1 Hi => - intros m Hle x + open Primitives + open Result + + variable {a b c d : Type} + + /-! 
# The least fixed point definition and its properties -/ + + def least_p (p : Nat → Prop) (n : Nat) : Prop := p n ∧ (∀ m, m < n → ¬ p m) + noncomputable def least (p : Nat → Prop) : Nat := + Classical.epsilon (least_p p) + + -- Auxiliary theorem for [least_spec]: if there exists an `n` satisfying `p`, + -- there there exists a least `m` satisfying `p`. + theorem least_spec_aux (p : Nat → Prop) : ∀ (n : Nat), (hn : p n) → ∃ m, least_p p m := by + apply Nat.strongRec' + intros n hi hn + -- Case disjunction on: is n the smallest n satisfying p? + match Classical.em (∀ m, m < n → ¬ p m) with + | .inl hlt => + -- Yes: trivial + exists n + | .inr hlt => + simp at * + let ⟨ m, ⟨ hmlt, hm ⟩ ⟩ := hlt + have hi := hi m hmlt hm + apply hi + + -- The specification of [least]: either `p` is never satisfied, or it is satisfied + -- by `least p` and no `n < least p` satisfies `p`. + theorem least_spec (p : Nat → Prop) : (∀ n, ¬ p n) ∨ (p (least p) ∧ ∀ n, n < least p → ¬ p n) := by + -- Case disjunction on the existence of an `n` which satisfies `p` + match Classical.em (∀ n, ¬ p n) with + | .inl h => + -- There doesn't exist: trivial + apply (Or.inl h) + | .inr h => + -- There exists: we simply use `least_spec_aux` in combination with the property + -- of the epsilon operator + simp at * + let ⟨ n, hn ⟩ := h + apply Or.inr + have hl := least_spec_aux p n hn + have he := Classical.epsilon_spec hl + apply he + + /-! # The fixed point definitions -/ + + def fix_fuel (n : Nat) (f : (a → Result b) → a → Result b) (x : a) : Result b := + match n with + | 0 => .div + | n + 1 => + f (fix_fuel n f) x + + @[simp] def fix_fuel_pred (f : (a → Result b) → a → Result b) (x : a) (n : Nat) := + not (div? (fix_fuel n f x)) + + def fix_fuel_P (f : (a → Result b) → a → Result b) (x : a) (n : Nat) : Prop := + fix_fuel_pred f x n + + noncomputable def fix (f : (a → Result b) → a → Result b) (x : a) : Result b := + fix_fuel (least (fix_fuel_P f x)) f x + + /-! # The validity property -/ + + -- Monotonicity relation over results + -- TODO: generalize (we should parameterize the definition by a relation over `a`) + def result_rel {a : Type u} (x1 x2 : Result a) : Prop := + match x1 with + | div => True + | fail _ => x2 = x1 + | ret _ => x2 = x1 -- TODO: generalize + + -- Monotonicity relation over monadic arrows (i.e., Kleisli arrows) + def karrow_rel (k1 k2 : a → Result b) : Prop := + ∀ x, result_rel (k1 x) (k2 x) + + -- Monotonicity property for function bodies + def is_mono (f : (a → Result b) → a → Result b) : Prop := + ∀ {{k1 k2}}, karrow_rel k1 k2 → karrow_rel (f k1) (f k2) + + -- "Continuity" property. + -- We need this, and this looks a lot like continuity. Also see this paper: + -- https://inria.hal.science/file/index/docid/216187/filename/tarski.pdf + -- We define our "continuity" criteria so that it gives us what we need to + -- prove the fixed-point equation, and we can also easily manipulate it. + def is_cont (f : (a → Result b) → a → Result b) : Prop := + ∀ x, (Hdiv : ∀ n, fix_fuel (.succ n) f x = div) → f (fix f) x = div + + /-! 
# The proof of the fixed-point equation -/ + theorem fix_fuel_mono {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : + ∀ {{n m}}, n ≤ m → karrow_rel (fix_fuel n f) (fix_fuel m f) := by + intros n + induction n + case zero => simp [karrow_rel, fix_fuel, result_rel] + case succ n1 Hi => + intros m Hle x + simp [result_rel] + match m with + | 0 => + exfalso + zify at * + linarith + | Nat.succ m1 => + simp_arith at Hle + simp [fix_fuel] + have Hi := Hi Hle + have Hmono := Hmono Hi x + simp [result_rel] at Hmono + apply Hmono + + @[simp] theorem neg_fix_fuel_P {f : (a → Result b) → a → Result b} {x : a} {n : Nat} : + ¬ fix_fuel_P f x n ↔ (fix_fuel n f x = div) := by + simp [fix_fuel_P, div?] + cases fix_fuel n f x <;> simp + + theorem fix_fuel_fix_mono {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : + ∀ n, karrow_rel (fix_fuel n f) (fix f) := by + intros n x simp [result_rel] - match m with - | 0 => - exfalso - zify at * - linarith - | Nat.succ m1 => - simp_arith at Hle - simp [fix_fuel] - have Hi := Hi Hle - have Hmono := Hmono Hi x - simp [result_rel] at Hmono - apply Hmono - -@[simp] theorem neg_fix_fuel_P {f : (a → Result b) → a → Result b} {x : a} {n : Nat} : - ¬ fix_fuel_P f x n ↔ (fix_fuel n f x = div) := by - simp [fix_fuel_P, div?] - cases fix_fuel n f x <;> simp - -theorem fix_fuel_fix_mono {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : - ∀ n, marrow_rel (fix_fuel n f) (fix f) := by - intros n x - simp [result_rel] - have Hl := least_spec (fix_fuel_P f x) - simp at Hl - match Hl with - | .inl Hl => simp [*] - | .inr ⟨ Hl, Hn ⟩ => - match Classical.em (fix_fuel n f x = div) with - | .inl Hd => - simp [*] - | .inr Hd => - have Hineq : least (fix_fuel_P f x) ≤ n := by - -- Proof by contradiction - cases Classical.em (least (fix_fuel_P f x) ≤ n) <;> simp [*] - simp at * - rename_i Hineq - have Hn := Hn n Hineq - contradiction - have Hfix : ¬ (fix f x = div) := by - simp [fix] - -- By property of the least upper bound - revert Hd Hl - -- TODO: there is no conversion to select the head of a function! - have : fix_fuel_P f x (least (fix_fuel_P f x)) = fix_fuel_pred f x (least (fix_fuel_P f x)) := - by simp[fix_fuel_P] - simp [this, div?] - clear this - cases fix_fuel (least (fix_fuel_P f x)) f x <;> simp - have Hmono := fix_fuel_mono Hmono Hineq x - simp [result_rel] at Hmono - -- TODO: there is no conversion to select the head of a function! 
- revert Hmono Hfix Hd - simp [fix] - -- TODO: it would be good if cases actually introduces an equation: this - -- way we wouldn't have to do all the book-keeping - cases fix_fuel (least (fix_fuel_P f x)) f x <;> cases fix_fuel n f x <;> - intros <;> simp [*] at * - -theorem fix_fuel_P_least {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : - ∀ {{x n}}, fix_fuel_P f x n → fix_fuel_P f x (least (fix_fuel_P f x)) := by - intros x n Hf - have Hfmono := fix_fuel_fix_mono Hmono n x - revert Hf Hfmono - -- TODO: would be good to be able to unfold fix_fuel_P only on the left - simp [fix_fuel_P, div?, result_rel, fix] - cases fix_fuel n f x <;> simp_all - --- Prove the fixed point equation in the case there exists some fuel for which --- the execution terminates -theorem fix_fixed_eq_terminates (f : (a → Result b) → a → Result b) (Hmono : is_mono f) - (x : a) (n : Nat) (He : fix_fuel_P f x n) : - fix f x = f (fix f) x := by - have Hl := fix_fuel_P_least Hmono He - -- TODO: better control of simplification - conv at Hl => - apply congr_fun - simp [fix_fuel_P] - -- The least upper bound is > 0 - have ⟨ n, Hsucc ⟩ : ∃ n, least (fix_fuel_P f x) = Nat.succ n := by + have Hl := least_spec (fix_fuel_P f x) + simp at Hl + match Hl with + | .inl Hl => simp [*] + | .inr ⟨ Hl, Hn ⟩ => + match Classical.em (fix_fuel n f x = div) with + | .inl Hd => + simp [*] + | .inr Hd => + have Hineq : least (fix_fuel_P f x) ≤ n := by + -- Proof by contradiction + cases Classical.em (least (fix_fuel_P f x) ≤ n) <;> simp [*] + simp at * + rename_i Hineq + have Hn := Hn n Hineq + contradiction + have Hfix : ¬ (fix f x = div) := by + simp [fix] + -- By property of the least upper bound + revert Hd Hl + -- TODO: there is no conversion to select the head of a function! + conv => lhs; apply congr_fun; apply congr_fun; apply congr_fun; simp [fix_fuel_P, div?] + cases fix_fuel (least (fix_fuel_P f x)) f x <;> simp + have Hmono := fix_fuel_mono Hmono Hineq x + simp [result_rel] at Hmono + simp [fix] at * + cases Heq: fix_fuel (least (fix_fuel_P f x)) f x <;> + cases Heq':fix_fuel n f x <;> + simp_all + + theorem fix_fuel_P_least {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : + ∀ {{x n}}, fix_fuel_P f x n → fix_fuel_P f x (least (fix_fuel_P f x)) := by + intros x n Hf + have Hfmono := fix_fuel_fix_mono Hmono n x + -- TODO: there is no conversion to select the head of a function! + conv => apply congr_fun; simp [fix_fuel_P] + simp [fix_fuel_P] at Hf + revert Hf Hfmono + simp [div?, result_rel, fix] + cases fix_fuel n f x <;> simp_all + + -- Prove the fixed point equation in the case there exists some fuel for which + -- the execution terminates + theorem fix_fixed_eq_terminates (f : (a → Result b) → a → Result b) (Hmono : is_mono f) + (x : a) (n : Nat) (He : fix_fuel_P f x n) : + fix f x = f (fix f) x := by + have Hl := fix_fuel_P_least Hmono He + -- TODO: better control of simplification + conv at Hl => + apply congr_fun + simp [fix_fuel_P] + -- The least upper bound is > 0 + have ⟨ n, Hsucc ⟩ : ∃ n, least (fix_fuel_P f x) = Nat.succ n := by + revert Hl + simp [div?] + cases least (fix_fuel_P f x) <;> simp [fix_fuel] + simp [Hsucc] at Hl revert Hl - simp [div?] 
- cases least (fix_fuel_P f x) <;> simp [fix_fuel] - simp [Hsucc] at Hl - revert Hl - simp [*, div?, fix, fix_fuel] - -- Use the monotonicity - have Hfixmono := fix_fuel_fix_mono Hmono n - have Hvm := Hmono Hfixmono x - -- Use functional extensionality - simp [result_rel, fix] at Hvm - revert Hvm - split <;> simp [*] <;> intros <;> simp [*] - --- The final fixed point equation --- TODO: remove the `forall x` -theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_valid f) : - ∀ x, fix f x = f (fix f) x := by - intros x - -- conv => lhs; simp [fix] - -- Case disjunction: is there a fuel such that the execution successfully execute? - match Classical.em (∃ n, fix_fuel_P f x n) with - | .inr He => - -- No fuel: the fixed point evaluates to `div` - --simp [fix] at * - simp at * - conv => lhs; simp [fix] - have Hel := He (Nat.succ (least (fix_fuel_P f x))); simp [*, fix_fuel] at *; clear Hel - -- Use the "continuity" of `f` - have He : ∀ n, fix_fuel (.succ n) f x = div := by intros; simp [*] - have Hcont := Hvalid.hcont x He - simp [Hcont] - | .inl ⟨ n, He ⟩ => apply fix_fixed_eq_terminates f Hvalid.hmono x n He - - /- Making the proofs more systematic -/ - - -- TODO: rewrite is_mono in terms of is_mono_p - def is_mono_p (body : (a → Result b) → Result c) : Prop := - ∀ {{g h}}, marrow_rel g h → result_rel (body g) (body h) - - @[simp] theorem is_mono_p_same (x : Result c) : + simp [*, div?, fix, fix_fuel] + -- Use the monotonicity + have Hfixmono := fix_fuel_fix_mono Hmono n + have Hvm := Hmono Hfixmono x + -- Use functional extensionality + simp [result_rel, fix] at Hvm + revert Hvm + split <;> simp [*] <;> intros <;> simp [*] + + -- The final fixed point equation + -- TODO: remove the `forall x` + theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} + (Hmono : is_mono f) (Hcont : is_cont f) : + ∀ x, fix f x = f (fix f) x := by + intros x + -- Case disjunction: is there a fuel such that the execution successfully execute? + match Classical.em (∃ n, fix_fuel_P f x n) with + | .inr He => + -- No fuel: the fixed point evaluates to `div` + --simp [fix] at * + simp at * + conv => lhs; simp [fix] + have Hel := He (Nat.succ (least (fix_fuel_P f x))); simp [*, fix_fuel] at *; clear Hel + -- Use the "continuity" of `f` + have He : ∀ n, fix_fuel (.succ n) f x = div := by intros; simp [*] + have Hcont := Hcont x He + simp [Hcont] + | .inl ⟨ n, He ⟩ => apply fix_fixed_eq_terminates f Hmono x n He + + + /-! 
# Making the proofs of validity manageable (and automatable) -/ + + -- Monotonicity property for expressions + def is_mono_p (e : (a → Result b) → Result c) : Prop := + ∀ {{k1 k2}}, karrow_rel k1 k2 → result_rel (e k1) (e k2) + + theorem is_mono_p_same (x : Result c) : @is_mono_p a b c (λ _ => x) := by - simp [is_mono_p, marrow_rel, result_rel] + simp [is_mono_p, karrow_rel, result_rel] split <;> simp - -- TODO: remove - @[simp] theorem is_mono_p_tail_rec (x : a) : + theorem is_mono_p_rec (x : a) : @is_mono_p a b b (λ f => f x) := by - simp_all [is_mono_p, marrow_rel, result_rel] + simp_all [is_mono_p, karrow_rel, result_rel] - -- TODO: rewrite is_cont in terms of is_cont_p - def is_cont_p (f : (a → Result b) → a → Result b) - (body : (a → Result b) → Result c) : Prop := - (Hc : ∀ n, body (fix_fuel n f) = .div) → - body (fix f) = .div - - @[simp] theorem is_cont_p_same (f : (a → Result b) → a → Result b) (x : Result c) : - is_cont_p f (λ _ => x) := by - simp [is_cont_p] - - -- TODO: remove - @[simp] theorem is_cont_p_tail_rec (f : (a → Result b) → a → Result b) (x : a) : - is_cont_p f (λ f => f x) := by - simp_all [is_cont_p, fix] - - -- Lean is good at unification: we can write a very general version + -- The important lemma about `is_mono_p` theorem is_mono_p_bind (g : (a → Result b) → Result c) (h : c → (a → Result b) → Result d) : is_mono_p g → (∀ y, is_mono_p (h y)) → - is_mono_p (λ f => do let y ← g f; h y f) := by + is_mono_p (λ k => do let y ← g k; h y k) := by intro hg hh simp [is_mono_p] intro fg fh Hrgh - simp [marrow_rel, result_rel] + simp [karrow_rel, result_rel] have hg := hg Hrgh; simp [result_rel] at hg cases heq0: g fg <;> simp_all rename_i y _ have hh := hh y Hrgh; simp [result_rel] at hh simp_all - -- Lean is good at unification: we can write a very general version - -- (in particular, it will manage to figure out `g` and `h` when we - -- apply the lemma) + -- Continuity property for expressions - note that we take the continuation + -- as parameter + def is_cont_p (k : (a → Result b) → a → Result b) + (e : (a → Result b) → Result c) : Prop := + (Hc : ∀ n, e (fix_fuel n k) = .div) → + e (fix k) = .div + + theorem is_cont_p_same (k : (a → Result b) → a → Result b) (x : Result c) : + is_cont_p k (λ _ => x) := by + simp [is_cont_p] + + theorem is_cont_p_rec (f : (a → Result b) → a → Result b) (x : a) : + is_cont_p f (λ f => f x) := by + simp_all [is_cont_p, fix] + + -- The important lemma about `is_cont_p` theorem is_cont_p_bind - (f : (a → Result b) → a → Result b) - (Hfmono : is_mono f) + (k : (a → Result b) → a → Result b) + (Hkmono : is_mono k) (g : (a → Result b) → Result c) (h : c → (a → Result b) → Result d) : is_mono_p g → - is_cont_p f g → + is_cont_p k g → (∀ y, is_mono_p (h y)) → - (∀ y, is_cont_p f (h y)) → - is_cont_p f (λ f => do let y ← g f; h y f) := by + (∀ y, is_cont_p k (h y)) → + is_cont_p k (λ k => do let y ← g k; h y k) := by intro Hgmono Hgcont Hhmono Hhcont simp [is_cont_p] intro Hdiv - -- Case on `g (fix... f)`: is there an n s.t. it terminates? - cases Classical.em (∀ n, g (fix_fuel n f) = .div) <;> rename_i Hn + -- Case on `g (fix... k)`: is there an n s.t. it terminates? + cases Classical.em (∀ n, g (fix_fuel n k) = .div) <;> rename_i Hn . 
-- Case 1: g diverges have Hgcont := Hgcont Hn simp_all @@ -384,20 +362,20 @@ theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_va simp at Hn let ⟨ n, Hn ⟩ := Hn have Hdivn := Hdiv n - have Hffmono := fix_fuel_fix_mono Hfmono n + have Hffmono := fix_fuel_fix_mono Hkmono n have Hgeq := Hgmono Hffmono simp [result_rel] at Hgeq - cases Heq: g (fix_fuel n f) <;> rename_i y <;> simp_all + cases Heq: g (fix_fuel n k) <;> rename_i y <;> simp_all -- Remains the .ret case -- Use Hdiv to prove that: ∀ n, h y (fix_fuel n f) = div -- We do this in two steps: first we prove it for m ≥ n - have Hhdiv: ∀ m, h y (fix_fuel m f) = .div := by - have Hhdiv : ∀ m, n ≤ m → h y (fix_fuel m f) = .div := by + have Hhdiv: ∀ m, h y (fix_fuel m k) = .div := by + have Hhdiv : ∀ m, n ≤ m → h y (fix_fuel m k) = .div := by -- We use the fact that `g (fix_fuel n f) = .div`, combined with Hdiv intro m Hle have Hdivm := Hdiv m -- Monotonicity of g - have Hffmono := fix_fuel_mono Hfmono Hle + have Hffmono := fix_fuel_mono Hkmono Hle have Hgmono := Hgmono Hffmono -- We need to clear Hdiv because otherwise simp_all rewrites Hdivm with Hdiv clear Hdiv @@ -407,42 +385,41 @@ theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_va cases Classical.em (n ≤ m) <;> rename_i Hl . apply Hhdiv; assumption . simp at Hl - -- Make a case disjunction on `h y (fix_fuel m f)`: if it is not equal + -- Make a case disjunction on `h y (fix_fuel m k)`: if it is not equal -- to div, use the monotonicity of `h y` have Hle : m ≤ n := by linarith - have Hffmono := fix_fuel_mono Hfmono Hle + have Hffmono := fix_fuel_mono Hkmono Hle have Hmono := Hhmono y Hffmono simp [result_rel] at Hmono - cases Heq: h y (fix_fuel m f) <;> simp_all + cases Heq: h y (fix_fuel m k) <;> simp_all -- We can now use the continuity hypothesis for h apply Hhcont; assumption - -- TODO: move + -- The validity property for an expression def is_valid_p (k : (a → Result b) → a → Result b) - (body : (a → Result b) → Result c) : Prop := - is_mono_p body ∧ - (is_mono k → is_cont_p k body) + (e : (a → Result b) → Result c) : Prop := + is_mono_p e ∧ + (is_mono k → is_cont_p k e) - @[simp] theorem is_valid_p_same (f : (a → Result b) → a → Result b) (x : Result c) : - is_valid_p f (λ _ => x) := by - simp [is_valid_p] + @[simp] theorem is_valid_p_same (k : (a → Result b) → a → Result b) (x : Result c) : + is_valid_p k (λ _ => x) := by + simp [is_valid_p, is_mono_p_same, is_cont_p_same] - @[simp] theorem is_valid_p_rec (f : (a → Result b) → a → Result b) (x : a) : - is_valid_p f (λ f => f x) := by - simp [is_valid_p] + @[simp] theorem is_valid_p_rec (k : (a → Result b) → a → Result b) (x : a) : + is_valid_p k (λ k => k x) := by + simp_all [is_valid_p, is_mono_p_rec, is_cont_p_rec] -- Lean is good at unification: we can write a very general version -- (in particular, it will manage to figure out `g` and `h` when we -- apply the lemma) theorem is_valid_p_bind - {{f : (a → Result b) → a → Result b}} + {{k : (a → Result b) → a → Result b}} {{g : (a → Result b) → Result c}} {{h : c → (a → Result b) → Result d}} - (Hgvalid : is_valid_p f g) - (Hhvalid : ∀ y, is_valid_p f (h y)) : - is_valid_p f (λ f => do let y ← g f; h y f) := by + (Hgvalid : is_valid_p k g) + (Hhvalid : ∀ y, is_valid_p k (h y)) : + is_valid_p k (λ k => do let y ← g k; h y k) := by let ⟨ Hgmono, Hgcont ⟩ := Hgvalid - -- TODO: conversion to move forall below and conjunction? 
simp [is_valid_p, forall_and] at Hhvalid have ⟨ Hhmono, Hhcont ⟩ := Hhvalid simp [← imp_forall_iff] at Hhcont @@ -450,36 +427,37 @@ theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} (Hvalid : is_va . -- Monotonicity apply is_mono_p_bind <;> assumption . -- Continuity - intro Hfmono - have Hgcont := Hgcont Hfmono - have Hhcont := Hhcont Hfmono + intro Hkmono + have Hgcont := Hgcont Hkmono + have Hhcont := Hhcont Hkmono apply is_cont_p_bind <;> assumption - theorem is_valid_p_imp_is_valid {{body : (a → Result b) → a → Result b}} - (Hvalid : ∀ f x, is_valid_p f (λ f => body f x)) : - is_valid body := by - have Hmono : is_mono body := by + theorem is_valid_p_imp_is_valid {{e : (a → Result b) → a → Result b}} + (Hvalid : ∀ k x, is_valid_p k (λ k => e k x)) : + is_mono e ∧ is_cont e := by + have Hmono : is_mono e := by intro f h Hr x have Hmono := Hvalid (λ _ _ => .div) x have Hmono := Hmono.left apply Hmono; assumption - have Hcont : is_cont body := by + have Hcont : is_cont e := by intro x Hdiv - have Hcont := (Hvalid body x).right Hmono + have Hcont := (Hvalid e x).right Hmono simp [is_cont_p] at Hcont apply Hcont intro n have Hdiv := Hdiv n simp [fix_fuel] at Hdiv simp [*] - apply is_valid.intro Hmono Hcont + simp [*] -- TODO: functional extensionality - theorem is_valid_p_fix_fixed_eq {{body : (a → Result b) → a → Result b}} - (Hvalid : ∀ f x, is_valid_p f (λ f => body f x)) : - fix body = body (fix body) := by + theorem is_valid_p_fix_fixed_eq {{e : (a → Result b) → a → Result b}} + (Hvalid : ∀ k x, is_valid_p k (λ k => e k x)) : + fix e = e (fix e) := by apply funext - exact fix_fixed_eq (is_valid_p_imp_is_valid Hvalid) + have ⟨ Hmono, Hcont ⟩ := is_valid_p_imp_is_valid Hvalid + exact fix_fixed_eq Hmono Hcont end Fix @@ -487,7 +465,7 @@ namespace Ex1 /- An example of use of the fixed-point -/ open Primitives Fix - variable {a : Type} (f : (List a × Int) → Result a) + variable {a : Type} (k : (List a × Int) → Result a) def list_nth_body (x : (List a × Int)) : Result a := let (ls, i) := x @@ -495,9 +473,9 @@ namespace Ex1 | [] => .fail .panic | hd :: tl => if i = 0 then .ret hd - else f (tl, i - 1) + else k (tl, i - 1) - theorem list_nth_body_valid: ∀ k x, is_valid_p k (λ k => @list_nth_body a k x) := by + theorem list_nth_body_is_valid: ∀ k x, is_valid_p k (λ k => @list_nth_body a k x) := by intro k x simp [list_nth_body] split <;> simp @@ -506,6 +484,7 @@ namespace Ex1 noncomputable def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) + -- The unfolding equation theorem list_nth_eq (ls : List a) (i : Int) : list_nth ls i = match ls with @@ -514,17 +493,18 @@ namespace Ex1 if i = 0 then .ret hd else list_nth tl (i - 1) := by - have Heq := is_valid_p_fix_fixed_eq (@list_nth_body_valid a) + have Heq := is_valid_p_fix_fixed_eq (@list_nth_body_is_valid a) simp [list_nth] conv => lhs; rw [Heq] end Ex1 namespace Ex2 - /- Same as Ex1, but we make the body of nth non tail-rec -/ + /- Same as Ex1, but we make the body of nth non tail-rec (this is mostly + to see what happens when there are let-bindings) -/ open Primitives Fix - variable {a : Type} (f : (List a × Int) → Result a) + variable {a : Type} (k : (List a × Int) → Result a) def list_nth_body (x : (List a × Int)) : Result a := let (ls, i) := x @@ -534,10 +514,10 @@ namespace Ex2 if i = 0 then .ret hd else do - let y ← f (tl, i - 1) + let y ← k (tl, i - 1) .ret y - theorem list_nth_body_valid: ∀ k x, is_valid_p k (λ k => @list_nth_body a k x) := by + theorem list_nth_body_is_valid: ∀ k x, is_valid_p k (λ k => 
@list_nth_body a k x) := by intro k x simp [list_nth_body] split <;> simp @@ -547,6 +527,7 @@ namespace Ex2 noncomputable def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) + -- The unfolding equation theorem list_nth_eq (ls : List a) (i : Int) : (list_nth ls i = match ls with @@ -558,7 +539,7 @@ namespace Ex2 let y ← list_nth tl (i - 1) .ret y) := by - have Heq := is_valid_p_fix_fixed_eq (@list_nth_body_valid a) + have Heq := is_valid_p_fix_fixed_eq (@list_nth_body_is_valid a) simp [list_nth] conv => lhs; rw [Heq] @@ -577,7 +558,7 @@ namespace Ex3 the functions in the mutually recursive group may not have the same return type. -/ - variable (f : (Int ⊕ Int) → Result (Bool ⊕ Bool)) + variable (k : (Int ⊕ Int) → Result (Bool ⊕ Bool)) def is_even_is_odd_body (x : (Int ⊕ Int)) : Result (Bool ⊕ Bool) := match x with @@ -591,7 +572,7 @@ namespace Ex3 do -- Call `odd`: we need to wrap the input value in `.inr`, then -- extract the output value - let r ← f (.inr (i- 1)) + let r ← k (.inr (i- 1)) match r with | .inl _ => .fail .panic -- Invalid output | .inr b => .ret b @@ -607,7 +588,7 @@ namespace Ex3 do -- Call `is_even`: we need to wrap the input value in .inr, then -- extract the output value - let r ← f (.inl (i- 1)) + let r ← k (.inl (i- 1)) match r with | .inl b => .ret b | .inr _ => .fail .panic -- Invalid output @@ -642,7 +623,8 @@ namespace Ex3 -- TODO: move -- TODO: this is not enough - theorem swap_if_bind {a b : Type} (e : Prop) [Decidable e] (x y : Result a) (f : a → Result b) : + theorem swap_if_bind {a b : Type} (e : Prop) [Decidable e] + (x y : Result a) (f : a → Result b) : (do let z ← (if e then x else y) f z) @@ -651,6 +633,7 @@ namespace Ex3 else do let z ← y; f z) := by split <;> simp + -- The unfolding equation for `is_even` theorem is_even_eq (i : Int) : is_even i = (if i = 0 then .ret true else is_odd (i - 1)) := by @@ -668,6 +651,7 @@ namespace Ex3 rename_i v split <;> simp + -- The unfolding equation for `is_odd` theorem is_odd_eq (i : Int) : is_odd i = (if i = 0 then .ret false else is_even (i - 1)) := by @@ -699,7 +683,6 @@ namespace Ex4 .ret (hd :: tl) /- The validity theorem for `map`, generic in `f` -/ - /- TODO: rename the continuation to k in all the lemma statements -/ theorem map_is_valid {{f : (a → Result b) → a → Result c}} (Hfvalid : ∀ k x, is_valid_p k (λ k => f k x)) @@ -724,7 +707,6 @@ namespace Ex4 let tl ← map f tl .ret (.node tl) - /- TODO: make the naming consistent (suffix with "_is") -/ theorem id_body_is_valid : ∀ k x, is_valid_p k (λ k => @id_body a k x) := by intro k x @@ -736,6 +718,7 @@ namespace Ex4 noncomputable def id (t : Tree a) := fix id_body t + -- The unfolding equation theorem id_eq (t : Tree a) : (id t = match t with diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index d6cc0bad..1185a07d 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -94,6 +94,10 @@ instance : Bind Result where instance : Pure Result where pure := fun x => ret x +@[simp] theorem bind_ret (x : α) (f : α → Result β) : bind (.ret x) f = f x := by simp [bind] +@[simp] theorem bind_fail (x : Error) (f : α → Result β) : bind (.fail x) f = .fail x := by simp [bind] +@[simp] theorem bind_div (f : α → Result β) : bind .div f = .div := by simp [bind] + /- CUSTOM-DSL SUPPORT -/ -- Let-binding the Result of a monadic operation is oftentimes not sufficient, @@ -124,6 +128,15 @@ macro "let" e:term " <-- " f:term : doElem => let r: { x: Nat // x = 0 } := ⟨ y, by assumption ⟩ 
.ret r +@[simp] theorem bind_tc_ret (x : α) (f : α → Result β) : + (do let y ← .ret x; f y) = f x := by simp [Bind.bind, bind] + +@[simp] theorem bind_tc_fail (x : Error) (f : α → Result β) : + (do let y ← fail x; f y) = fail x := by simp [Bind.bind, bind] + +@[simp] theorem bind_tc_div (f : α → Result β) : + (do let y ← div; f y) = div := by simp [Bind.bind, bind] + ---------------------- -- MACHINE INTEGERS -- ---------------------- -- cgit v1.2.3 From 393748cc3dd0f43a79d2342379008bbf445f116d Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 20 Jun 2023 12:30:39 +0200 Subject: Remove the use of fun. ext. in Diverge.lean --- backends/lean/Base/Diverge.lean | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 65c061bd..97ffa214 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -271,9 +271,7 @@ namespace Fix revert Hvm split <;> simp [*] <;> intros <;> simp [*] - -- The final fixed point equation - -- TODO: remove the `forall x` - theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} + theorem fix_fixed_eq_forall {{f : (a → Result b) → a → Result b}} (Hmono : is_mono f) (Hcont : is_cont f) : ∀ x, fix f x = f (fix f) x := by intros x @@ -291,6 +289,14 @@ namespace Fix simp [Hcont] | .inl ⟨ n, He ⟩ => apply fix_fixed_eq_terminates f Hmono x n He + -- The final fixed point equation + theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} + (Hmono : is_mono f) (Hcont : is_cont f) : + fix f = f (fix f) := by + have Heq := fix_fixed_eq_forall Hmono Hcont + have Heq1 : fix f = (λ x => fix f x) := by simp + rw [Heq1] + conv => lhs; ext; simp [Heq] /-! # Making the proofs of validity manageable (and automatable) -/ @@ -451,11 +457,9 @@ namespace Fix simp [*] simp [*] - -- TODO: functional extensionality theorem is_valid_p_fix_fixed_eq {{e : (a → Result b) → a → Result b}} (Hvalid : ∀ k x, is_valid_p k (λ k => e k x)) : fix e = e (fix e) := by - apply funext have ⟨ Hmono, Hcont ⟩ := is_valid_p_imp_is_valid Hvalid exact fix_fixed_eq Hmono Hcont @@ -484,7 +488,7 @@ namespace Ex1 noncomputable def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) - -- The unfolding equation + -- The unfolding equation - diverges if `i < 0` theorem list_nth_eq (ls : List a) (i : Int) : list_nth ls i = match ls with @@ -527,7 +531,7 @@ namespace Ex2 noncomputable def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) - -- The unfolding equation + -- The unfolding equation - diverges if `i < 0` theorem list_nth_eq (ls : List a) (i : Int) : (list_nth ls i = match ls with @@ -553,10 +557,10 @@ namespace Ex3 and the output types: - inputs: the sum allows to select the function to call in the recursive calls (and the functions may not have the same input types) - - outpus: this case is degenerate because `even` and `odd` both have the + - outputs: this case is degenerate because `even` and `odd` have the same return type `Bool`, but generally speaking we need a sum type because - the functions in the mutually recursive group may not have the same - return type. + the functions in the mutually recursive group may have different + return types. 
-/ variable (k : (Int ⊕ Int) → Result (Bool ⊕ Bool)) @@ -565,7 +569,7 @@ namespace Ex3 | .inl i => -- Body of `is_even` if i = 0 - then .ret (.inl true) -- We return .inl because this is `is_even` + then .ret (.inl true) -- We use .inl because this is `is_even` else do let b ← @@ -581,7 +585,7 @@ namespace Ex3 | .inr i => -- Body of `is_odd` if i = 0 - then .ret (.inr false) -- We return .inr because this is `is_odd` + then .ret (.inr false) -- We use .inr because this is `is_odd` else do let b ← @@ -633,7 +637,7 @@ namespace Ex3 else do let z ← y; f z) := by split <;> simp - -- The unfolding equation for `is_even` + -- The unfolding equation for `is_even` - diverges if `i < 0` theorem is_even_eq (i : Int) : is_even i = (if i = 0 then .ret true else is_odd (i - 1)) := by @@ -651,9 +655,9 @@ namespace Ex3 rename_i v split <;> simp - -- The unfolding equation for `is_odd` + -- The unfolding equation for `is_odd` - diverges if `i < 0` theorem is_odd_eq (i : Int) : - is_odd i = (if i = 0 then .ret false else is_even (i - 1)) + is_odd i = (if i = 0 then .ret false else is_even (i - 1)) := by have Heq := is_valid_p_fix_fixed_eq is_even_is_odd_body_is_valid simp [is_even, is_odd] -- cgit v1.2.3 From 3971da603ee54d373b4c73d6a20b3d83dea7b5b9 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 21 Jun 2023 16:20:25 +0200 Subject: Start working on Arith.lean --- backends/lean/Base.lean | 1 + backends/lean/Base/Arith.lean | 221 ++++++++++++++++++++++++++++++++++++++++ backends/lean/Base/Diverge.lean | 4 +- 3 files changed, 224 insertions(+), 2 deletions(-) create mode 100644 backends/lean/Base/Arith.lean (limited to 'backends') diff --git a/backends/lean/Base.lean b/backends/lean/Base.lean index f6a78bba..6e9ff873 100644 --- a/backends/lean/Base.lean +++ b/backends/lean/Base.lean @@ -1,3 +1,4 @@ import Base.Primitives import Base.Diverge import Base.TestTactics +import Base.Arith diff --git a/backends/lean/Base/Arith.lean b/backends/lean/Base/Arith.lean new file mode 100644 index 00000000..6339f218 --- /dev/null +++ b/backends/lean/Base/Arith.lean @@ -0,0 +1,221 @@ +/- This file contains tactics to solve arithmetic goals -/ + +import Lean +import Lean.Meta.Tactic.Simp +import Init.Data.List.Basic +import Mathlib.Tactic.RunCmd +import Mathlib.Tactic.Linarith +import Base.Primitives + +/- +Mathlib tactics: +- rcases: https://leanprover-community.github.io/mathlib_docs/tactics.html#rcases +- split_ifs: https://leanprover-community.github.io/mathlib_docs/tactics.html#split_ifs +- norm_num: https://leanprover-community.github.io/mathlib_docs/tactics.html#norm_num +- should we use linarith or omega? +- hint: https://leanprover-community.github.io/mathlib_docs/tactics.html#hint +- classical: https://leanprover-community.github.io/mathlib_docs/tactics.html#classical +-/ + +namespace List + + -- TODO: I could not find this function?? + @[simp] def flatten {a : Type u} : List (List a) → List a + | [] => [] + | x :: ls => x ++ flatten ls + +end List + +namespace Lean + +namespace LocalContext + + open Lean Lean.Elab Command Term Lean.Meta + + -- Small utility: return the list of declarations in the context, from + -- the last to the first. 
+ def getAllDecls (lctx : Lean.LocalContext) : MetaM (List Lean.LocalDecl) := + lctx.foldrM (fun d ls => do let d ← instantiateLocalDeclMVars d; pure (d :: ls)) [] + + -- Return the list of declarations in the context, but filter the + -- declarations which are considered as implementation details + def getDecls (lctx : Lean.LocalContext) : MetaM (List Lean.LocalDecl) := do + let ls ← lctx.getAllDecls + pure (ls.filter (fun d => not d.isImplementationDetail)) + +end LocalContext + +end Lean + +namespace Arith + +open Primitives + +--set_option pp.explicit true +--set_option pp.notation false +--set_option pp.coercions false + +-- TODO: move +instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val + +-- TODO: move +/- Remark: we can't write the following instance because of restrictions about + the type class parameters (`ty` doesn't appear in the return type, which is + forbidden): + + ``` + instance Scalar.cast (ty : ScalarTy) : Coe (Scalar ty) Int where coe := λ v => v.val + ``` + -/ +def Scalar.toInt {ty : ScalarTy} (x : Scalar ty) : Int := x.val + +-- We use this type-class to test if an expression is a scalar (if we manage +-- to lookup an instance of this type-class, then it is) +class IsScalar (a : Type) where + +instance (ty : ScalarTy) : IsScalar (Scalar ty) where + +--example (ty : ScalarTy) : IsScalar (Scalar ty) := _ + +open Lean Lean.Elab Command Term Lean.Meta + +-- Return true if the expression is a scalar expression +def isScalarExpr (e : Expr) : MetaM Bool := do + -- Try to convert the expression to a scalar + -- TODO: I tried to do it with Lean.Meta.mkAppM but it didn't work: how + -- do we allow Lean to perform (controlled) unfoldings for instantiation + -- purposes? + let r ← Lean.observing? do + let ty ← Lean.Meta.inferType e + let isScalar ← mkAppM `Arith.IsScalar #[ty] + let isScalar ← trySynthInstance isScalar + match isScalar with + | LOption.some x => some x + | _ => none + match r with + | .some _ => pure true + | _ => pure false + +-- Explore a term and return the set of scalar expressions found inside +partial def collectScalarExprsAux (hs : HashSet Expr) (e : Expr) : MetaM (HashSet Expr) := do + -- We do it in a very simpler manner: we deconstruct applications, + -- and recursively explore the sub-expressions. Note that we do + -- not go inside foralls and abstractions (should we?). + e.withApp fun f args => do + let hs ← if ← isScalarExpr f then pure (hs.insert f) else pure hs + let hs ← args.foldlM collectScalarExprsAux hs + pure hs + +-- Explore a term and return the list of scalar expressions found inside +def collectScalarExprs (e : Expr) : MetaM (HashSet Expr) := + collectScalarExprsAux HashSet.empty e + +-- Collect the scalar expressions in the context +def getScalarExprsFromMainCtx : Tactic.TacticM (HashSet Expr) := do + Lean.Elab.Tactic.withMainContext do + -- Get the local context + let ctx ← Lean.MonadLCtx.getLCtx + -- Just a matter of precaution + let ctx ← instantiateLCtxMVars ctx + -- Initialize the hashset + let hs := HashSet.empty + -- Explore the declarations + let decls ← ctx.getDecls + let hs ← decls.foldlM (fun hs d => collectScalarExprsAux hs d.toExpr) hs + -- Return + pure hs + + +#check TSyntax +#check mkIdent +-- TODO: addDecl? 
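
Note: `IsScalar` above is used purely as a tag. `isScalarExpr` asks whether an instance can be synthesized for the type of an expression and treats success as membership. A minimal, self-contained sketch of that pattern at the user level, with invented names (`IsTagged`, `describeTagged` are not part of the development):

```lean
-- Sketch of the tag-class pattern; the real class is `IsScalar`,
-- instantiated at the `Scalar ty` types.
class IsTagged (α : Type) where

instance : IsTagged Nat where
instance : IsTagged Int where

-- Succeeds only because instance search finds `IsTagged Nat`:
example : IsTagged Nat := inferInstance

-- A definition usable only at tagged types:
def describeTagged (α : Type) [IsTagged α] : String := "tagged type"

#eval describeTagged Nat -- "tagged type"
#eval describeTagged Int -- "tagged type"
```

`trySynthInstance` performs the same search programmatically, which is why an empty class suffices: only the existence of the instance matters.
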
+-- Project the scalar expressions into the context, to retrieve the bound inequalities +-- def projectScalarExpr (e: Expr) : Tactic.TacticM Unit := do +-- let e ← `($e) +-- let e ← Lean.Elab.Term.elabTerm `($e) none +-- Lean.Elab.Tactic.evalCases `($e) + +elab "list_scalar_exprs" : tactic => do + let hs ← getScalarExprsFromMainCtx + hs.forM fun e => do + dbg_trace f!"+ Scalar expression: {e}" + +#check LocalContext + +elab "list_local_decls_1" : tactic => + Lean.Elab.Tactic.withMainContext do + -- Get the local context + let ctx ← Lean.MonadLCtx.getLCtx + let ctx ← instantiateLCtxMVars ctx + let decls ← ctx.getDecls + -- Filter the scalar expressions + let decls ← decls.filterMapM fun decl: Lean.LocalDecl => do + let declExpr := decl.toExpr + let declName := decl.userName + let declType ← Lean.Meta.inferType declExpr + dbg_trace f!"+ local decl: name: {declName} | expr: {declExpr} | ty: {declType}" + -- Try to convert the expression to a scalar + -- TODO: I tried to do it with Lean.Meta.mkAppM but it didn't work: how + -- do we allow Lean to perform (controlled) unfoldings for instantiation + -- purposes? + let r ← Lean.observing? do + let isScalar ← mkAppM `Arith.IsScalar #[declType] + let isScalar ← trySynthInstance isScalar + match isScalar with + | LOption.some x => some x + | _ => none + match r with + | .some _ => dbg_trace f!" Scalar expression"; pure r + | _ => dbg_trace f!" Not a scalar"; pure .none + pure () + -- match ← Lean.observing? (Lean.Meta.mkAppM `Scalar.toInt #[decl.toExpr]) with + -- | .none => dbg_trace f!" Not a scalar" + -- | .some _ => dbg_trace f!" Scalar expression" + +#check Lean.Environment.addDecl +#check Expr +#check LocalContext +#check MVarId +#check Lean.Elab.Tactic.setGoals +#check Lean.Elab.Tactic.Context +#check withLocalDecl +#check Lean.MVarId.assert +#check LocalDecl + +-- Insert x = 3 in the context +elab "custom_let" : tactic => + -- I don't think we need that + Lean.Elab.Tactic.withMainContext do + -- + let type := (Expr.const `Nat []) + let val : Expr := .lit (.natVal 3) + let n := `x -- the name is "x" + withLetDecl n type val fun nval => do + -- For debugging + let lctx ← Lean.MonadLCtx.getLCtx + let fid := nval.fvarId! + let decl := lctx.get! fid + dbg_trace f!" nval: \"{decl.userName}\" ({nval}) : {decl.type} := {decl.value}" + -- + -- Tranform the main goal `m0?` to `let x = nval in m1?` + let mvarId ← Tactic.getMainGoal + let newMVar ← mkFreshExprSyntheticOpaqueMVar (← mvarId.getType) + let newVal ← mkLetFVars #[nval] newMVar + -- Focus on the current goal + Tactic.focus do + -- Assign the main goal. + -- We must do this *after* we focused on the current goal, because + -- after we assigned the meta variable the goal is considered as solved + mvarId.assign newVal + -- Replace the list of goals with the new goal - we can do this because + -- we focused on the current goal + Lean.Elab.Tactic.setGoals [newMVar.mvarId!] 
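
Note: at the user level, the goal surgery performed by `custom_let` (turning `?m0` into `let x := v; ?m1` and refocusing on `?m1`) corresponds to what the built-in `let` and `have` tactics produce. A small sketch of the two resulting goal shapes, using only standard tactics (an analogue for intuition, not the custom elaborator itself):

```lean
-- Goal shapes produced by the standard tactics (illustration only).
example : Nat := by
  let x := 3         -- context: `x : Nat := 3`; the body of `x` stays visible
  exact x

example : Nat := by
  have y : Nat := 3  -- context: `y : Nat`; the body `3` is forgotten
  exact y
```

A later commit in this series generalizes the custom elaborator with an `asLet` flag that chooses between these two shapes.
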
+ +example : Nat := by + custom_let + apply x + +example (x : Bool) : Nat := by + cases x <;> custom_let <;> apply x + +end Arith diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 97ffa214..1ff34516 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -703,12 +703,12 @@ namespace Ex4 | leaf (x : a) | node (tl : List (Tree a)) - def id_body (f : Tree a → Result (Tree a)) (t : Tree a) : Result (Tree a) := + def id_body (k : Tree a → Result (Tree a)) (t : Tree a) : Result (Tree a) := match t with | .leaf x => .ret (.leaf x) | .node tl => do - let tl ← map f tl + let tl ← map k tl .ret (.node tl) theorem id_body_is_valid : -- cgit v1.2.3 From 34f1f4d877b32002cd292cb1fe27969184efcf94 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 22 Jun 2023 10:37:13 +0200 Subject: Finish the custom_let tactic --- backends/lean/Base/Arith.lean | 44 +++++++++++++++++++++---------------------- 1 file changed, 21 insertions(+), 23 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith.lean b/backends/lean/Base/Arith.lean index 6339f218..d4611deb 100644 --- a/backends/lean/Base/Arith.lean +++ b/backends/lean/Base/Arith.lean @@ -168,36 +168,27 @@ elab "list_local_decls_1" : tactic => | .some _ => dbg_trace f!" Scalar expression"; pure r | _ => dbg_trace f!" Not a scalar"; pure .none pure () - -- match ← Lean.observing? (Lean.Meta.mkAppM `Scalar.toInt #[decl.toExpr]) with - -- | .none => dbg_trace f!" Not a scalar" - -- | .some _ => dbg_trace f!" Scalar expression" -#check Lean.Environment.addDecl -#check Expr -#check LocalContext -#check MVarId -#check Lean.Elab.Tactic.setGoals -#check Lean.Elab.Tactic.Context -#check withLocalDecl -#check Lean.MVarId.assert -#check LocalDecl - --- Insert x = 3 in the context -elab "custom_let" : tactic => +def evalCustomLet (name : Name) (val : Syntax) : Tactic.TacticM Unit := -- I don't think we need that Lean.Elab.Tactic.withMainContext do -- - let type := (Expr.const `Nat []) - let val : Expr := .lit (.natVal 3) - let n := `x -- the name is "x" - withLetDecl n type val fun nval => do + let val ← elabTerm val .none + let type ← inferType val + -- In some situations, the type will be left as a metavariable (for instance, + -- if the term is `3`, Lean has the choice between `Nat` and `Int` and will + -- not choose): we force the instantiation of the meta-variable + synthesizeSyntheticMVarsUsingDefault + -- Insert the new declaration + withLetDecl name type val fun nval => do -- For debugging let lctx ← Lean.MonadLCtx.getLCtx let fid := nval.fvarId! let decl := lctx.get! fid - dbg_trace f!" nval: \"{decl.userName}\" ({nval}) : {decl.type} := {decl.value}" + -- Remark: logInfo instantiates the mvars (contrary to dbg_trace): + logInfo m!" new decl: \"{decl.userName}\" ({nval}) : {decl.type} := {decl.value}" -- - -- Tranform the main goal `m0?` to `let x = nval in m1?` + -- Tranform the main goal `?m0` to `let x = nval in ?m1` let mvarId ← Tactic.getMainGoal let newMVar ← mkFreshExprSyntheticOpaqueMVar (← mvarId.getType) let newVal ← mkLetFVars #[nval] newMVar @@ -211,11 +202,18 @@ elab "custom_let" : tactic => -- we focused on the current goal Lean.Elab.Tactic.setGoals [newMVar.mvarId!] 
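
Note: the comment about the literal `3` refers to Lean's default-instance mechanism: without an expected type, the type metavariable created for a numeric literal is resolved to `Nat`, which is why the tactic forces `synthesizeSyntheticMVarsUsingDefault` before reading off the type. A small illustration of the surface behaviour (the name `defaultedLiteral` is only for the example):

```lean
def defaultedLiteral := 3
#check defaultedLiteral  -- defaultedLiteral : Nat (the default instance picked `Nat`)
#check (3 : Int)         -- 3 : Int
example : Int := 3       -- an expected type of `Int` overrides the default
```
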
+elab "custom_let " n:ident " := " v:term : tactic => + evalCustomLet n.getId v + +-- Insert x = 3 in the context +elab "custom_let " n:ident " := " v:term : tactic => + evalCustomLet n.getId v + example : Nat := by - custom_let + custom_let x := 4 apply x example (x : Bool) : Nat := by - cases x <;> custom_let <;> apply x + cases x <;> custom_let x := 3 <;> apply x end Arith -- cgit v1.2.3 From 9421b215a8911bc545eb524b8b07e7ca2eb717f3 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 22 Jun 2023 16:02:09 +0200 Subject: Make intro_has_prop_instances work --- backends/lean/Base/Arith.lean | 171 +++++++++++++++++++++++++++++++++++------- 1 file changed, 145 insertions(+), 26 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith.lean b/backends/lean/Base/Arith.lean index d4611deb..a792deb2 100644 --- a/backends/lean/Base/Arith.lean +++ b/backends/lean/Base/Arith.lean @@ -75,7 +75,28 @@ class IsScalar (a : Type) where instance (ty : ScalarTy) : IsScalar (Scalar ty) where ---example (ty : ScalarTy) : IsScalar (Scalar ty) := _ +example (ty : ScalarTy) : IsScalar (Scalar ty) := inferInstance + +-- TODO: lookup doesn't work +class HasProp {a : Type} (x : a) where + prop_ty : Prop + prop : prop_ty + +class HasProp' (a : Type) where + prop_ty : a → Prop + prop : ∀ x:a, prop_ty x + +instance {ty : ScalarTy} (x : Scalar x) : HasProp x where + -- prop_ty is inferred + prop := And.intro x.hmin x.hmax + +instance (ty : ScalarTy) : HasProp' (Scalar ty) where + -- prop_ty is inferred + prop := λ x => And.intro x.hmin x.hmax + +example {a : Type} (x : a) [HasProp x] : Prop := + let i : HasProp x := inferInstance + i.prop_ty open Lean Lean.Elab Command Term Lean.Meta @@ -96,6 +117,40 @@ def isScalarExpr (e : Expr) : MetaM Bool := do | .some _ => pure true | _ => pure false +#check @HasProp'.prop + +-- Return an instance of `HasProp` for `e` if it has some +def lookupHasProp (e : Expr) : MetaM (Option Expr) := do + logInfo f!"lookupHasProp" + -- TODO: do we need Lean.observing? + -- This actually eliminates the error messages + Lean.observing? do + logInfo f!"lookupHasProp: observing" + let ty ← Lean.Meta.inferType e + let hasProp ← mkAppM `Arith.HasProp' #[ty] + let hasPropInst ← trySynthInstance hasProp + match hasPropInst with + | LOption.some i => + logInfo m!"Found HasProp instance" + let i_prop ← mkProjection i `prop + some (← mkAppM' i_prop #[e]) + | _ => none + +-- Auxiliary function for `collectHasPropInstances` +private partial def collectHasPropInstancesAux (hs : HashSet Expr) (e : Expr) : MetaM (HashSet Expr) := do + -- We do it in a very simpler manner: we deconstruct applications, + -- and recursively explore the sub-expressions. Note that we do + -- not go inside foralls and abstractions (should we?). + e.withApp fun f args => do + let hasPropInst ← lookupHasProp f + let hs := Option.getD (hasPropInst.map hs.insert) hs + let hs ← args.foldlM collectHasPropInstancesAux hs + pure hs + +-- Explore a term and return the instances of `HasProp` found for the sub-expressions +def collectHasPropInstances (e : Expr) : MetaM (HashSet Expr) := + collectHasPropInstancesAux HashSet.empty e + -- Explore a term and return the set of scalar expressions found inside partial def collectScalarExprsAux (hs : HashSet Expr) (e : Expr) : MetaM (HashSet Expr) := do -- We do it in a very simpler manner: we deconstruct applications, @@ -125,22 +180,43 @@ def getScalarExprsFromMainCtx : Tactic.TacticM (HashSet Expr) := do -- Return pure hs - -#check TSyntax -#check mkIdent --- TODO: addDecl? 
--- Project the scalar expressions into the context, to retrieve the bound inequalities --- def projectScalarExpr (e: Expr) : Tactic.TacticM Unit := do --- let e ← `($e) --- let e ← Lean.Elab.Term.elabTerm `($e) none --- Lean.Elab.Tactic.evalCases `($e) +-- Collect the instances of `HasProp` for the subexpressions in the context +def getHasPropInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do + Lean.Elab.Tactic.withMainContext do + -- Get the local context + let ctx ← Lean.MonadLCtx.getLCtx + -- Just a matter of precaution + let ctx ← instantiateLCtxMVars ctx + -- Initialize the hashset + let hs := HashSet.empty + -- Explore the declarations + let decls ← ctx.getDecls + let hs ← decls.foldlM (fun hs d => collectHasPropInstancesAux hs d.toExpr) hs + -- Return + pure hs elab "list_scalar_exprs" : tactic => do + logInfo m!"Listing scalar expressions" let hs ← getScalarExprsFromMainCtx hs.forM fun e => do - dbg_trace f!"+ Scalar expression: {e}" + logInfo m!"+ Scalar expression: {e}" + +example (x y : U32) (z : Usize) : True := by + list_scalar_exprs + simp + +elab "display_has_prop_instances" : tactic => do + logInfo m!"Displaying the HasProp instances" + let hs ← getHasPropInstancesFromMainCtx + hs.forM fun e => do + logInfo m!"+ HasProp instance: {e}" -#check LocalContext +example (x : U32) : True := by + let i : HasProp' U32 := inferInstance + have p := @HasProp'.prop _ i x + simp only [HasProp'.prop_ty] at p + display_has_prop_instances + simp elab "list_local_decls_1" : tactic => Lean.Elab.Tactic.withMainContext do @@ -169,18 +245,12 @@ elab "list_local_decls_1" : tactic => | _ => dbg_trace f!" Not a scalar"; pure .none pure () -def evalCustomLet (name : Name) (val : Syntax) : Tactic.TacticM Unit := +def evalAddDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool := false) : Tactic.TacticM Unit := -- I don't think we need that Lean.Elab.Tactic.withMainContext do - -- - let val ← elabTerm val .none - let type ← inferType val - -- In some situations, the type will be left as a metavariable (for instance, - -- if the term is `3`, Lean has the choice between `Nat` and `Int` and will - -- not choose): we force the instantiation of the meta-variable - synthesizeSyntheticMVarsUsingDefault -- Insert the new declaration - withLetDecl name type val fun nval => do + let withDecl := if asLet then withLetDecl name type val else withLocalDeclD name type + withDecl fun nval => do -- For debugging let lctx ← Lean.MonadLCtx.getLCtx let fid := nval.fvarId! @@ -192,6 +262,11 @@ def evalCustomLet (name : Name) (val : Syntax) : Tactic.TacticM Unit := let mvarId ← Tactic.getMainGoal let newMVar ← mkFreshExprSyntheticOpaqueMVar (← mvarId.getType) let newVal ← mkLetFVars #[nval] newMVar + -- There are two cases: + -- - asLet is true: newVal is `let $name := $val in $newMVar` + -- - asLet is false: ewVal is `λ $name => $newMVar` + -- We need to apply it to `val` + let newVal := if asLet then newVal else mkAppN newVal #[val] -- Focus on the current goal Tactic.focus do -- Assign the main goal. @@ -202,18 +277,62 @@ def evalCustomLet (name : Name) (val : Syntax) : Tactic.TacticM Unit := -- we focused on the current goal Lean.Elab.Tactic.setGoals [newMVar.mvarId!] 
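-- A small illustrative sketch of the `asLet := false` case described above (the
-- tactic name `add_opaque_nat` and the choice of `Nat`/`3` are arbitrary): the
-- new declaration is introduced as an opaque hypothesis `y : Nat`, whose value
-- is not recorded in the context (with `asLet := true` we would get the
-- let-binding `y : Nat := 3` instead).
elab "add_opaque_nat" : tactic => do
  let type := (Expr.const `Nat [])
  let val : Expr := .lit (.natVal 3)
  evalAddDecl `y val type (asLet := false)

example : Nat := by
  add_opaque_nat
  apply y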
-elab "custom_let " n:ident " := " v:term : tactic => - evalCustomLet n.getId v +def evalAddDeclSyntax (name : Name) (val : Syntax) (asLet : Bool := false) : Tactic.TacticM Unit := + -- I don't think we need that + Lean.Elab.Tactic.withMainContext do + -- + let val ← elabTerm val .none + let type ← inferType val + -- In some situations, the type will be left as a metavariable (for instance, + -- if the term is `3`, Lean has the choice between `Nat` and `Int` and will + -- not choose): we force the instantiation of the meta-variable + synthesizeSyntheticMVarsUsingDefault + -- + evalAddDecl name val type asLet --- Insert x = 3 in the context elab "custom_let " n:ident " := " v:term : tactic => - evalCustomLet n.getId v + evalAddDeclSyntax n.getId v (asLet := true) + +elab "custom_have " n:ident " := " v:term : tactic => + evalAddDeclSyntax n.getId v (asLet := false) example : Nat := by custom_let x := 4 - apply x + custom_have y := 4 + apply y example (x : Bool) : Nat := by cases x <;> custom_let x := 3 <;> apply x +#check mkIdent +#check Syntax + +-- Lookup the instances of `HasProp' for all the sub-expressions in the context, +-- and introduce the corresponding assumptions +elab "intro_has_prop_instances" : tactic => do + logInfo m!"Introducing the HasProp instances" + let hs ← getHasPropInstancesFromMainCtx + hs.forM fun e => do + let type ← inferType e + let name := `h + evalAddDecl name e type (asLet := false) + -- Simplify to unfold the `prop_ty` projector + --let simpTheorems := ++ [``HasProp'.prop_ty] + let simpTheorems ← Tactic.simpOnlyBuiltins.foldlM (·.addConst ·) ({} : SimpTheorems) + -- Add the equational theorem for `HashProp'.prop_ty` + let simpTheorems ← simpTheorems.addDeclToUnfold ``HasProp'.prop_ty + let congrTheorems ← getSimpCongrTheorems + let ctx : Simp.Context := { simpTheorems := #[simpTheorems], congrTheorems } + -- Where to apply the simplifier + let loc := Tactic.Location.targets #[mkIdent name] false + -- Apply the simplifier + let _ ← Tactic.simpLocation ctx (discharge? 
:= .none) loc + pure () + -- simpLocation + +example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by + intro_has_prop_instances + simp [*] + + end Arith -- cgit v1.2.3 From 6b319ece09b0f8a02529dd98bc20ffcb843020d6 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 26 Jun 2023 17:33:17 +0200 Subject: Make minor modifications to Arith.lean --- backends/lean/Base/Arith.lean | 64 +++++++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 27 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith.lean b/backends/lean/Base/Arith.lean index a792deb2..bb776b55 100644 --- a/backends/lean/Base/Arith.lean +++ b/backends/lean/Base/Arith.lean @@ -5,6 +5,8 @@ import Lean.Meta.Tactic.Simp import Init.Data.List.Basic import Mathlib.Tactic.RunCmd import Mathlib.Tactic.Linarith +-- TODO: there is no Omega tactic for now - it seems it hasn't been ported yet +--import Mathlib.Tactic.Omega import Base.Primitives /- @@ -77,26 +79,19 @@ instance (ty : ScalarTy) : IsScalar (Scalar ty) where example (ty : ScalarTy) : IsScalar (Scalar ty) := inferInstance --- TODO: lookup doesn't work -class HasProp {a : Type} (x : a) where - prop_ty : Prop - prop : prop_ty - -class HasProp' (a : Type) where +-- Remark: I tried a version of the shape `HasProp {a : Type} (x : a)` +-- but the lookup didn't work +class HasProp (a : Type) where prop_ty : a → Prop prop : ∀ x:a, prop_ty x -instance {ty : ScalarTy} (x : Scalar x) : HasProp x where - -- prop_ty is inferred - prop := And.intro x.hmin x.hmax - -instance (ty : ScalarTy) : HasProp' (Scalar ty) where +instance (ty : ScalarTy) : HasProp (Scalar ty) where -- prop_ty is inferred prop := λ x => And.intro x.hmin x.hmax -example {a : Type} (x : a) [HasProp x] : Prop := - let i : HasProp x := inferInstance - i.prop_ty +instance (a : Type) : HasProp (Vec a) where + prop_ty := λ v => v.val.length ≤ Scalar.max ScalarTy.Usize + prop := λ ⟨ _, l ⟩ => l open Lean Lean.Elab Command Term Lean.Meta @@ -117,8 +112,6 @@ def isScalarExpr (e : Expr) : MetaM Bool := do | .some _ => pure true | _ => pure false -#check @HasProp'.prop - -- Return an instance of `HasProp` for `e` if it has some def lookupHasProp (e : Expr) : MetaM (Option Expr) := do logInfo f!"lookupHasProp" @@ -127,7 +120,7 @@ def lookupHasProp (e : Expr) : MetaM (Option Expr) := do Lean.observing? 
do logInfo f!"lookupHasProp: observing" let ty ← Lean.Meta.inferType e - let hasProp ← mkAppM `Arith.HasProp' #[ty] + let hasProp ← mkAppM ``Arith.HasProp #[ty] let hasPropInst ← trySynthInstance hasProp match hasPropInst with | LOption.some i => @@ -212,9 +205,9 @@ elab "display_has_prop_instances" : tactic => do logInfo m!"+ HasProp instance: {e}" example (x : U32) : True := by - let i : HasProp' U32 := inferInstance - have p := @HasProp'.prop _ i x - simp only [HasProp'.prop_ty] at p + let i : HasProp U32 := inferInstance + have p := @HasProp.prop _ i x + simp only [HasProp.prop_ty] at p display_has_prop_instances simp @@ -304,10 +297,7 @@ example : Nat := by example (x : Bool) : Nat := by cases x <;> custom_let x := 3 <;> apply x -#check mkIdent -#check Syntax - --- Lookup the instances of `HasProp' for all the sub-expressions in the context, +-- Lookup the instances of `HasProp for all the sub-expressions in the context, -- and introduce the corresponding assumptions elab "intro_has_prop_instances" : tactic => do logInfo m!"Introducing the HasProp instances" @@ -317,10 +307,9 @@ elab "intro_has_prop_instances" : tactic => do let name := `h evalAddDecl name e type (asLet := false) -- Simplify to unfold the `prop_ty` projector - --let simpTheorems := ++ [``HasProp'.prop_ty] let simpTheorems ← Tactic.simpOnlyBuiltins.foldlM (·.addConst ·) ({} : SimpTheorems) -- Add the equational theorem for `HashProp'.prop_ty` - let simpTheorems ← simpTheorems.addDeclToUnfold ``HasProp'.prop_ty + let simpTheorems ← simpTheorems.addDeclToUnfold ``HasProp.prop_ty let congrTheorems ← getSimpCongrTheorems let ctx : Simp.Context := { simpTheorems := #[simpTheorems], congrTheorems } -- Where to apply the simplifier @@ -328,11 +317,32 @@ elab "intro_has_prop_instances" : tactic => do -- Apply the simplifier let _ ← Tactic.simpLocation ctx (discharge? 
:= .none) loc pure () - -- simpLocation example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by intro_has_prop_instances simp [*] +example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by + intro_has_prop_instances + simp_all [Scalar.max, Scalar.min] + +-- A tactic to solve linear arithmetic goals +syntax "int_tac" : tactic +macro_rules + | `(tactic| int_tac) => + `(tactic| + intro_has_prop_instances; + have := Scalar.cMin_bound ScalarTy.Usize; + have := Scalar.cMin_bound ScalarTy.Isize; + have := Scalar.cMax_bound ScalarTy.Usize; + have := Scalar.cMax_bound ScalarTy.Isize; + simp only [*, Scalar.max, Scalar.min, Scalar.cMin, Scalar.cMax] at *; + linarith) + +example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by + int_tac + +example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by + int_tac end Arith -- cgit v1.2.3 From ffdc2f47bc4b21df491e1a2efb6cd19637fb399b Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 26 Jun 2023 17:38:49 +0200 Subject: Start working on a better encoding of mut rec defs for Diverge --- backends/lean/Base/Diverge.lean | 102 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 100 insertions(+), 2 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 1ff34516..a5cf3459 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -550,7 +550,7 @@ namespace Ex2 end Ex2 namespace Ex3 - /- Mutually recursive functions -/ + /- Mutually recursive functions - first encoding (see Ex4 for a better encoding) -/ open Primitives Fix /- Because we have mutually recursive functions, we use a sum for the inputs @@ -671,6 +671,104 @@ namespace Ex3 end Ex3 namespace Ex4 + /- Mutually recursive functions - 2nd encoding -/ + open Primitives Fix + + attribute [local simp] List.get + + /- We make the input type and output types dependent on a parameter -/ + @[simp] def input_ty (i : Fin 2) : Type := + [Int, Int].get i + + @[simp] def output_ty (i : Fin 2) : Type := + [Bool, Bool].get i + + /- The continuation -/ + variable (k : (i : Fin 2) → input_ty i → Result (output_ty i)) + + /- The bodies are more natural -/ + def is_even_body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i : Int) : Result Bool := + if i = 0 + then .ret true + else do + let b ← k 1 (i - 1) + .ret b + + def is_odd_body (i : Int) : Result Bool := + if i = 0 + then .ret false + else do + let b ← k 0 (i - 1) + .ret b + + inductive Funs : List (Type 0) → List (Type 0) → Type 1 := + | Nil : Funs [] [] + | Cons {ity oty : Type 0} {itys otys : List (Type 0)} (f : ity → Result oty) (tl : Funs itys otys) : Funs (ity :: itys) (oty :: otys) + + theorem Funs.length_eq {itys otys : List (Type 0)} (fl : Funs itys otys) : itys.length = otys.length := + match fl with + | .Nil => by simp + | .Cons f tl => + have h:= Funs.length_eq tl + by simp [h] + + @[simp] def Funs.cast_fin {itys otys : List (Type 0)} (fl : Funs itys otys) (i : Fin itys.length) : Fin otys.length := + ⟨ i.val, by have h:= fl.length_eq; have h1:= i.isLt; simp_all ⟩ + + @[simp] def bodies (k : (i : Fin 2) → input_ty i → Result (output_ty i)) : Funs [Int, Int] [Bool, Bool] := + Funs.Cons (is_even_body k) (Funs.Cons (is_odd_body k) Funs.Nil) + + @[simp] def get_fun {itys otys : List (Type 0)} (fl : Funs itys otys) : + (i : Fin itys.length) → itys.get i → Result (otys.get (fl.cast_fin i)) := + match fl with + | .Nil => λ i => by have h:= i.isLt; simp at h + | @Funs.Cons ity oty itys1 otys1 f tl => + λ i => + if h: 
i.val = 0 then + Eq.mp (by cases i; simp_all [List.get]) f + else + let j := i.val - 1 + have Hj: j < itys1.length := by + have Hi := i.isLt + simp at Hi + revert Hi + cases Heq: i.val <;> simp_all + simp_arith + let j: Fin itys1.length := ⟨ j, Hj ⟩ + Eq.mp (by cases Heq: i; rename_i val isLt; cases Heq': j; rename_i val' isLt; cases val <;> simp_all [List.get]) (get_fun tl j) + + def body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i: Fin 2) : input_ty i → Result (output_ty i) := get_fun (bodies k) i + + def fix_ {n : Nat} {ity oty : Fin n → Type 0} (f : ((i:Fin n) → ity i → Result (oty i)) → (i:Fin n) → ity i → Result (oty i)) : + (i:Fin n) → ity i → Result (oty i) := + sorry + + theorem body_fix_eq : fix_ body = body (fix_ body) := sorry + + def is_even (i : Int) : Result Bool := fix_ body 0 i + def is_odd (i : Int) : Result Bool := fix_ body 1 i + + theorem is_even_eq (i : Int) : is_even i = + (if i = 0 + then .ret true + else do + let b ← is_odd (i - 1) + .ret b) := by + simp [is_even, is_odd]; + conv => lhs; rw [body_fix_eq] + + theorem is_odd_eq (i : Int) : is_odd i = + (if i = 0 + then .ret false + else do + let b ← is_even (i - 1) + .ret b) := by + simp [is_even, is_odd]; + conv => lhs; rw [body_fix_eq] + +end Ex4 + +namespace Ex5 /- Higher-order example -/ open Primitives Fix @@ -736,6 +834,6 @@ namespace Ex4 simp [id] conv => lhs; rw [Heq]; simp; rw [id_body] -end Ex4 +end Ex5 end Diverge -- cgit v1.2.3 From 87fd14e74fe00752df7759372093543ae77a51ae Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 26 Jun 2023 18:33:26 +0200 Subject: Make the definitions in Diverge.Fix dependently typed --- backends/lean/Base/Diverge.lean | 95 ++++++++++++++++++++--------------------- 1 file changed, 47 insertions(+), 48 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index a5cf3459..d65e77a1 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -88,7 +88,8 @@ namespace Fix open Primitives open Result - variable {a b c d : Type} + variable {a : Type} {b : a → Type} + variable {c d : Type} /-! # The least fixed point definition and its properties -/ @@ -132,19 +133,23 @@ namespace Fix /-! # The fixed point definitions -/ - def fix_fuel (n : Nat) (f : (a → Result b) → a → Result b) (x : a) : Result b := + def fix_fuel (n : Nat) (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : + Result (b x) := match n with | 0 => .div | n + 1 => f (fix_fuel n f) x - @[simp] def fix_fuel_pred (f : (a → Result b) → a → Result b) (x : a) (n : Nat) := + @[simp] def fix_fuel_pred (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (x : a) (n : Nat) := not (div? (fix_fuel n f x)) - def fix_fuel_P (f : (a → Result b) → a → Result b) (x : a) (n : Nat) : Prop := + def fix_fuel_P (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (x : a) (n : Nat) : Prop := fix_fuel_pred f x n - noncomputable def fix (f : (a → Result b) → a → Result b) (x : a) : Result b := + noncomputable + def fix (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : Result (b x) := fix_fuel (least (fix_fuel_P f x)) f x /-! 
# The validity property -/ @@ -158,11 +163,11 @@ namespace Fix | ret _ => x2 = x1 -- TODO: generalize -- Monotonicity relation over monadic arrows (i.e., Kleisli arrows) - def karrow_rel (k1 k2 : a → Result b) : Prop := + def karrow_rel (k1 k2 : (x:a) → Result (b x)) : Prop := ∀ x, result_rel (k1 x) (k2 x) -- Monotonicity property for function bodies - def is_mono (f : (a → Result b) → a → Result b) : Prop := + def is_mono (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) : Prop := ∀ {{k1 k2}}, karrow_rel k1 k2 → karrow_rel (f k1) (f k2) -- "Continuity" property. @@ -170,11 +175,12 @@ namespace Fix -- https://inria.hal.science/file/index/docid/216187/filename/tarski.pdf -- We define our "continuity" criteria so that it gives us what we need to -- prove the fixed-point equation, and we can also easily manipulate it. - def is_cont (f : (a → Result b) → a → Result b) : Prop := + def is_cont (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) : Prop := ∀ x, (Hdiv : ∀ n, fix_fuel (.succ n) f x = div) → f (fix f) x = div /-! # The proof of the fixed-point equation -/ - theorem fix_fuel_mono {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : + theorem fix_fuel_mono {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} + (Hmono : is_mono f) : ∀ {{n m}}, n ≤ m → karrow_rel (fix_fuel n f) (fix_fuel m f) := by intros n induction n @@ -195,12 +201,13 @@ namespace Fix simp [result_rel] at Hmono apply Hmono - @[simp] theorem neg_fix_fuel_P {f : (a → Result b) → a → Result b} {x : a} {n : Nat} : + @[simp] theorem neg_fix_fuel_P + {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} {x : a} {n : Nat} : ¬ fix_fuel_P f x n ↔ (fix_fuel n f x = div) := by simp [fix_fuel_P, div?] cases fix_fuel n f x <;> simp - theorem fix_fuel_fix_mono {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : + theorem fix_fuel_fix_mono {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} (Hmono : is_mono f) : ∀ n, karrow_rel (fix_fuel n f) (fix f) := by intros n x simp [result_rel] @@ -234,7 +241,7 @@ namespace Fix cases Heq':fix_fuel n f x <;> simp_all - theorem fix_fuel_P_least {f : (a → Result b) → a → Result b} (Hmono : is_mono f) : + theorem fix_fuel_P_least {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} (Hmono : is_mono f) : ∀ {{x n}}, fix_fuel_P f x n → fix_fuel_P f x (least (fix_fuel_P f x)) := by intros x n Hf have Hfmono := fix_fuel_fix_mono Hmono n x @@ -247,7 +254,7 @@ namespace Fix -- Prove the fixed point equation in the case there exists some fuel for which -- the execution terminates - theorem fix_fixed_eq_terminates (f : (a → Result b) → a → Result b) (Hmono : is_mono f) + theorem fix_fixed_eq_terminates (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (Hmono : is_mono f) (x : a) (n : Nat) (He : fix_fuel_P f x n) : fix f x = f (fix f) x := by have Hl := fix_fuel_P_least Hmono He @@ -271,7 +278,7 @@ namespace Fix revert Hvm split <;> simp [*] <;> intros <;> simp [*] - theorem fix_fixed_eq_forall {{f : (a → Result b) → a → Result b}} + theorem fix_fixed_eq_forall {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} (Hmono : is_mono f) (Hcont : is_cont f) : ∀ x, fix f x = f (fix f) x := by intros x @@ -290,7 +297,7 @@ namespace Fix | .inl ⟨ n, He ⟩ => apply fix_fixed_eq_terminates f Hmono x n He -- The final fixed point equation - theorem fix_fixed_eq {{f : (a → Result b) → a → Result b}} + theorem fix_fixed_eq {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} (Hmono : is_mono f) (Hcont : is_cont f) : fix f = f (fix f) := by have Heq := fix_fixed_eq_forall Hmono Hcont @@ -301,7 +308,7 @@ 
namespace Fix /-! # Making the proofs of validity manageable (and automatable) -/ -- Monotonicity property for expressions - def is_mono_p (e : (a → Result b) → Result c) : Prop := + def is_mono_p (e : ((x:a) → Result (b x)) → Result c) : Prop := ∀ {{k1 k2}}, karrow_rel k1 k2 → result_rel (e k1) (e k2) theorem is_mono_p_same (x : Result c) : @@ -310,16 +317,17 @@ namespace Fix split <;> simp theorem is_mono_p_rec (x : a) : - @is_mono_p a b b (λ f => f x) := by + @is_mono_p a b (b x) (λ f => f x) := by simp_all [is_mono_p, karrow_rel, result_rel] -- The important lemma about `is_mono_p` + -- TODO: generalize d? theorem is_mono_p_bind - (g : (a → Result b) → Result c) - (h : c → (a → Result b) → Result d) : + (g : ((x:a) → Result (b x)) → Result c) + (h : c → ((x:a) → Result (b x)) → Result d) : is_mono_p g → (∀ y, is_mono_p (h y)) → - is_mono_p (λ k => do let y ← g k; h y k) := by + @is_mono_p a b d (λ k => do let y ← g k; h y k) := by intro hg hh simp [is_mono_p] intro fg fh Hrgh @@ -332,25 +340,26 @@ namespace Fix -- Continuity property for expressions - note that we take the continuation -- as parameter - def is_cont_p (k : (a → Result b) → a → Result b) - (e : (a → Result b) → Result c) : Prop := + def is_cont_p (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (e : ((x:a) → Result (b x)) → Result c) : Prop := (Hc : ∀ n, e (fix_fuel n k) = .div) → e (fix k) = .div - theorem is_cont_p_same (k : (a → Result b) → a → Result b) (x : Result c) : + theorem is_cont_p_same (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (x : Result c) : is_cont_p k (λ _ => x) := by simp [is_cont_p] - theorem is_cont_p_rec (f : (a → Result b) → a → Result b) (x : a) : + theorem is_cont_p_rec (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : is_cont_p f (λ f => f x) := by simp_all [is_cont_p, fix] -- The important lemma about `is_cont_p` theorem is_cont_p_bind - (k : (a → Result b) → a → Result b) + (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (Hkmono : is_mono k) - (g : (a → Result b) → Result c) - (h : c → (a → Result b) → Result d) : + (g : ((x:a) → Result (b x)) → Result c) + (h : c → ((x:a) → Result (b x)) → Result d) : is_mono_p g → is_cont_p k g → (∀ y, is_mono_p (h y)) → @@ -402,16 +411,18 @@ namespace Fix apply Hhcont; assumption -- The validity property for an expression - def is_valid_p (k : (a → Result b) → a → Result b) - (e : (a → Result b) → Result c) : Prop := + def is_valid_p (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (e : ((x:a) → Result (b x)) → Result c) : Prop := is_mono_p e ∧ (is_mono k → is_cont_p k e) - @[simp] theorem is_valid_p_same (k : (a → Result b) → a → Result b) (x : Result c) : + @[simp] theorem is_valid_p_same + (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : Result c) : is_valid_p k (λ _ => x) := by simp [is_valid_p, is_mono_p_same, is_cont_p_same] - @[simp] theorem is_valid_p_rec (k : (a → Result b) → a → Result b) (x : a) : + @[simp] theorem is_valid_p_rec + (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : is_valid_p k (λ k => k x) := by simp_all [is_valid_p, is_mono_p_rec, is_cont_p_rec] @@ -419,9 +430,9 @@ namespace Fix -- (in particular, it will manage to figure out `g` and `h` when we -- apply the lemma) theorem is_valid_p_bind - {{k : (a → Result b) → a → Result b}} - {{g : (a → Result b) → Result c}} - {{h : c → (a → Result b) → Result d}} + {{k : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} + {{g : ((x:a) → Result (b x)) → Result c}} + {{h : c → ((x:a) → Result (b x)) → Result d}} (Hgvalid : is_valid_p k 
g) (Hhvalid : ∀ y, is_valid_p k (h y)) : is_valid_p k (λ k => do let y ← g k; h y k) := by @@ -438,7 +449,7 @@ namespace Fix have Hhcont := Hhcont Hkmono apply is_cont_p_bind <;> assumption - theorem is_valid_p_imp_is_valid {{e : (a → Result b) → a → Result b}} + theorem is_valid_p_imp_is_valid {{e : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} (Hvalid : ∀ k x, is_valid_p k (λ k => e k x)) : is_mono e ∧ is_cont e := by have Hmono : is_mono e := by @@ -457,7 +468,7 @@ namespace Fix simp [*] simp [*] - theorem is_valid_p_fix_fixed_eq {{e : (a → Result b) → a → Result b}} + theorem is_valid_p_fix_fixed_eq {{e : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} (Hvalid : ∀ k x, is_valid_p k (λ k => e k x)) : fix e = e (fix e) := by have ⟨ Hmono, Hcont ⟩ := is_valid_p_imp_is_valid Hvalid @@ -625,18 +636,6 @@ namespace Ex3 | .inl _ => .fail .panic | .inr b => .ret b - -- TODO: move - -- TODO: this is not enough - theorem swap_if_bind {a b : Type} (e : Prop) [Decidable e] - (x y : Result a) (f : a → Result b) : - (do - let z ← (if e then x else y) - f z) - = - (if e then do let z ← x; f z - else do let z ← y; f z) := by - split <;> simp - -- The unfolding equation for `is_even` - diverges if `i < 0` theorem is_even_eq (i : Int) : is_even i = (if i = 0 then .ret true else is_odd (i - 1)) -- cgit v1.2.3 From 4cc411a30b19f5c5eea67b2e4da232337af8f12b Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 26 Jun 2023 18:40:47 +0200 Subject: Generalize some definitions --- backends/lean/Base/Diverge.lean | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index d65e77a1..0c1028bd 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -321,7 +321,6 @@ namespace Fix simp_all [is_mono_p, karrow_rel, result_rel] -- The important lemma about `is_mono_p` - -- TODO: generalize d? 
theorem is_mono_p_bind (g : ((x:a) → Result (b x)) → Result c) (h : c → ((x:a) → Result (b x)) → Result d) : @@ -700,24 +699,28 @@ namespace Ex4 let b ← k 0 (i - 1) .ret b - inductive Funs : List (Type 0) → List (Type 0) → Type 1 := + inductive Funs : List (Type u) → List (Type u) → Type (u + 1) := | Nil : Funs [] [] - | Cons {ity oty : Type 0} {itys otys : List (Type 0)} (f : ity → Result oty) (tl : Funs itys otys) : Funs (ity :: itys) (oty :: otys) + | Cons {ity oty : Type u} {itys otys : List (Type u)} + (f : ity → Result oty) (tl : Funs itys otys) : Funs (ity :: itys) (oty :: otys) - theorem Funs.length_eq {itys otys : List (Type 0)} (fl : Funs itys otys) : itys.length = otys.length := + theorem Funs.length_eq {itys otys : List (Type)} (fl : Funs itys otys) : + itys.length = otys.length := match fl with | .Nil => by simp | .Cons f tl => have h:= Funs.length_eq tl by simp [h] - @[simp] def Funs.cast_fin {itys otys : List (Type 0)} (fl : Funs itys otys) (i : Fin itys.length) : Fin otys.length := + @[simp] def Funs.cast_fin {itys otys : List (Type)} + (fl : Funs itys otys) (i : Fin itys.length) : Fin otys.length := ⟨ i.val, by have h:= fl.length_eq; have h1:= i.isLt; simp_all ⟩ - @[simp] def bodies (k : (i : Fin 2) → input_ty i → Result (output_ty i)) : Funs [Int, Int] [Bool, Bool] := + @[simp] def bodies (k : (i : Fin 2) → input_ty i → Result (output_ty i)) : + Funs [Int, Int] [Bool, Bool] := Funs.Cons (is_even_body k) (Funs.Cons (is_odd_body k) Funs.Nil) - @[simp] def get_fun {itys otys : List (Type 0)} (fl : Funs itys otys) : + @[simp] def get_fun {itys otys : List (Type)} (fl : Funs itys otys) : (i : Fin itys.length) → itys.get i → Result (otys.get (fl.cast_fin i)) := match fl with | .Nil => λ i => by have h:= i.isLt; simp at h @@ -734,11 +737,18 @@ namespace Ex4 cases Heq: i.val <;> simp_all simp_arith let j: Fin itys1.length := ⟨ j, Hj ⟩ - Eq.mp (by cases Heq: i; rename_i val isLt; cases Heq': j; rename_i val' isLt; cases val <;> simp_all [List.get]) (get_fun tl j) - - def body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i: Fin 2) : input_ty i → Result (output_ty i) := get_fun (bodies k) i - - def fix_ {n : Nat} {ity oty : Fin n → Type 0} (f : ((i:Fin n) → ity i → Result (oty i)) → (i:Fin n) → ity i → Result (oty i)) : + Eq.mp + (by + cases Heq: i; rename_i val isLt; + cases Heq': j; rename_i val' isLt; + cases val <;> simp_all [List.get]) + (get_fun tl j) + + def body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i: Fin 2) : + input_ty i → Result (output_ty i) := get_fun (bodies k) i + + def fix_ {n : Nat} {ity oty : Fin n → Type} + (f : ((i:Fin n) → ity i → Result (oty i)) → (i:Fin n) → ity i → Result (oty i)) : (i:Fin n) → ity i → Result (oty i) := sorry -- cgit v1.2.3 From f4ee75da0959ff06ce4cfaab817de540fcd0433f Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 26 Jun 2023 19:28:03 +0200 Subject: Add FixI in Diverge --- backends/lean/Base/Diverge.lean | 127 +++++++++++++++++++++++++++++++++------- 1 file changed, 105 insertions(+), 22 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 0c1028bd..907075d7 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -448,17 +448,20 @@ namespace Fix have Hhcont := Hhcont Hkmono apply is_cont_p_bind <;> assumption - theorem is_valid_p_imp_is_valid {{e : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} - (Hvalid : ∀ k x, is_valid_p k (λ k => e k x)) : - is_mono e ∧ is_cont e := by - have Hmono : is_mono e := by + def 
is_valid (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) : Prop := + ∀ k x, is_valid_p k (λ k => f k x) + + theorem is_valid_p_imp_is_valid {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} + (Hvalid : is_valid f) : + is_mono f ∧ is_cont f := by + have Hmono : is_mono f := by intro f h Hr x have Hmono := Hvalid (λ _ _ => .div) x have Hmono := Hmono.left apply Hmono; assumption - have Hcont : is_cont e := by + have Hcont : is_cont f := by intro x Hdiv - have Hcont := (Hvalid e x).right Hmono + have Hcont := (Hvalid f x).right Hmono simp [is_cont_p] at Hcont apply Hcont intro n @@ -467,14 +470,96 @@ namespace Fix simp [*] simp [*] - theorem is_valid_p_fix_fixed_eq {{e : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} - (Hvalid : ∀ k x, is_valid_p k (λ k => e k x)) : - fix e = e (fix e) := by + theorem is_valid_fix_fixed_eq {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} + (Hvalid : is_valid f) : + fix f = f (fix f) := by have ⟨ Hmono, Hcont ⟩ := is_valid_p_imp_is_valid Hvalid exact fix_fixed_eq Hmono Hcont end Fix +namespace FixI + /- Indexed fixed-point: definitions with indexed types, convenient to use for mutually + recursive definitions. We simply port the definitions and proofs from Fix to a more + specific case. + -/ + open Primitives Fix + + -- The index type + variable {id : Type} + + -- The input/output types + variable {a b : id → Type} + + -- Monotonicity relation over monadic arrows (i.e., Kleisli arrows) + def karrow_rel (k1 k2 : (i:id) → a i → Result (b i)) : Prop := + ∀ i x, result_rel (k1 i x) (k2 i x) + + def kk_to_gen (k : (i:id) → a i → Result (b i)) : + (x: (i:id) × a i) → Result (b x.fst) := + λ ⟨ i, x ⟩ => k i x + + def kk_of_gen (k : (x: (i:id) × a i) → Result (b x.fst)) : + (i:id) → a i → Result (b i) := + λ i x => k ⟨ i, x ⟩ + + def k_to_gen (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : + ((x: (i:id) × a i) → Result (b x.fst)) → (x: (i:id) × a i) → Result (b x.fst) := + λ kk => kk_to_gen (k (kk_of_gen kk)) + + def k_of_gen (k : ((x: (i:id) × a i) → Result (b x.fst)) → (x: (i:id) × a i) → Result (b x.fst)) : + ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i) := + λ kk => kk_of_gen (k (kk_to_gen kk)) + + def e_to_gen (e : ((i:id) → a i → Result (b i)) → Result c) : + ((x: (i:id) × a i) → Result (b x.fst)) → Result c := + λ k => e (kk_of_gen k) + + def is_valid_p (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) + (e : ((i:id) → a i → Result (b i)) → Result c) : Prop := + Fix.is_valid_p (k_to_gen k) (e_to_gen e) + + def is_valid (f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : Prop := + ∀ k i x, is_valid_p k (λ k => f k i x) + + @[simp] theorem kk_to_gen_kk_of_gen + (k : (x: (i:id) × a i) → Result (b x.fst)) : + kk_to_gen (kk_of_gen kk) = kk := by + simp [kk_to_gen, kk_of_gen] + + @[simp] theorem k_to_gen_k_of_gen + (k : ((x: (i:id) × a i) → Result (b x.fst)) → (x: (i:id) × a i) → Result (b x.fst)) : + k_to_gen (k_of_gen kk) = kk := by + simp [k_to_gen, k_of_gen] + apply funext + intro kk_1 + -- TODO: some simplifications don't work + simp [kk_to_gen, kk_of_gen] + + noncomputable def fix + (f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : + (i:id) → a i → Result (b i) := + kk_of_gen (Fix.fix (k_to_gen f)) + + theorem is_valid_fix_fixed_eq + {{f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)}} + (Hvalid : is_valid f) : + fix f = f (fix f) := by + have Hvalid' : Fix.is_valid (k_to_gen f) := by + intro k x + simp [is_valid, is_valid_p] at Hvalid + --simp 
[Fix.is_valid_p] + let ⟨ i, x ⟩ := x + have Hvalid := Hvalid (k_of_gen k) i x + -- TODO: some simplifications don't work + simp [k_to_gen, k_of_gen, kk_to_gen, kk_of_gen] at Hvalid + refine Hvalid + have Heq := Fix.is_valid_fix_fixed_eq Hvalid' + simp [fix] + conv => lhs; rw [Heq] + +end FixI + namespace Ex1 /- An example of use of the fixed-point -/ open Primitives Fix @@ -507,7 +592,7 @@ namespace Ex1 if i = 0 then .ret hd else list_nth tl (i - 1) := by - have Heq := is_valid_p_fix_fixed_eq (@list_nth_body_is_valid a) + have Heq := is_valid_fix_fixed_eq (@list_nth_body_is_valid a) simp [list_nth] conv => lhs; rw [Heq] @@ -553,7 +638,7 @@ namespace Ex2 let y ← list_nth tl (i - 1) .ret y) := by - have Heq := is_valid_p_fix_fixed_eq (@list_nth_body_is_valid a) + have Heq := is_valid_fix_fixed_eq (@list_nth_body_is_valid a) simp [list_nth] conv => lhs; rw [Heq] @@ -639,7 +724,7 @@ namespace Ex3 theorem is_even_eq (i : Int) : is_even i = (if i = 0 then .ret true else is_odd (i - 1)) := by - have Heq := is_valid_p_fix_fixed_eq is_even_is_odd_body_is_valid + have Heq := is_valid_fix_fixed_eq is_even_is_odd_body_is_valid simp [is_even, is_odd] conv => lhs; rw [Heq]; simp; rw [is_even_is_odd_body]; simp -- Very annoying: we need to swap the matches @@ -657,7 +742,7 @@ namespace Ex3 theorem is_odd_eq (i : Int) : is_odd i = (if i = 0 then .ret false else is_even (i - 1)) := by - have Heq := is_valid_p_fix_fixed_eq is_even_is_odd_body_is_valid + have Heq := is_valid_fix_fixed_eq is_even_is_odd_body_is_valid simp [is_even, is_odd] conv => lhs; rw [Heq]; simp; rw [is_even_is_odd_body]; simp -- Same remark as for `even` @@ -670,7 +755,7 @@ end Ex3 namespace Ex4 /- Mutually recursive functions - 2nd encoding -/ - open Primitives Fix + open Primitives FixI attribute [local simp] List.get @@ -747,15 +832,13 @@ namespace Ex4 def body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i: Fin 2) : input_ty i → Result (output_ty i) := get_fun (bodies k) i - def fix_ {n : Nat} {ity oty : Fin n → Type} - (f : ((i:Fin n) → ity i → Result (oty i)) → (i:Fin n) → ity i → Result (oty i)) : - (i:Fin n) → ity i → Result (oty i) := - sorry + theorem body_is_valid : is_valid body := by sorry - theorem body_fix_eq : fix_ body = body (fix_ body) := sorry + theorem body_fix_eq : fix body = body (fix body) := + is_valid_fix_fixed_eq body_is_valid - def is_even (i : Int) : Result Bool := fix_ body 0 i - def is_odd (i : Int) : Result Bool := fix_ body 1 i + noncomputable def is_even (i : Int) : Result Bool := fix body 0 i + noncomputable def is_odd (i : Int) : Result Bool := fix body 1 i theorem is_even_eq (i : Int) : is_even i = (if i = 0 @@ -839,7 +922,7 @@ namespace Ex5 let tl ← map id tl .ret (.node tl)) := by - have Heq := is_valid_p_fix_fixed_eq (@id_body_is_valid a) + have Heq := is_valid_fix_fixed_eq (@id_body_is_valid a) simp [id] conv => lhs; rw [Heq]; simp; rw [id_body] -- cgit v1.2.3 From 7f3604c21bb9f923aecb98917b5c7a33bafd1bcb Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 26 Jun 2023 23:41:01 +0200 Subject: Make minor modifications --- backends/lean/Base/Diverge.lean | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 907075d7..f3fa4815 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -522,20 +522,6 @@ namespace FixI def is_valid (f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : Prop := ∀ k i x, is_valid_p k (λ k => f k i x) - @[simp] theorem 
kk_to_gen_kk_of_gen - (k : (x: (i:id) × a i) → Result (b x.fst)) : - kk_to_gen (kk_of_gen kk) = kk := by - simp [kk_to_gen, kk_of_gen] - - @[simp] theorem k_to_gen_k_of_gen - (k : ((x: (i:id) × a i) → Result (b x.fst)) → (x: (i:id) × a i) → Result (b x.fst)) : - k_to_gen (k_of_gen kk) = kk := by - simp [k_to_gen, k_of_gen] - apply funext - intro kk_1 - -- TODO: some simplifications don't work - simp [kk_to_gen, kk_of_gen] - noncomputable def fix (f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : (i:id) → a i → Result (b i) := @@ -548,10 +534,8 @@ namespace FixI have Hvalid' : Fix.is_valid (k_to_gen f) := by intro k x simp [is_valid, is_valid_p] at Hvalid - --simp [Fix.is_valid_p] let ⟨ i, x ⟩ := x have Hvalid := Hvalid (k_of_gen k) i x - -- TODO: some simplifications don't work simp [k_to_gen, k_of_gen, kk_to_gen, kk_of_gen] at Hvalid refine Hvalid have Heq := Fix.is_valid_fix_fixed_eq Hvalid' -- cgit v1.2.3 From 0a62cf3f7d58b31c75344172bad1032e14a4082f Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 27 Jun 2023 10:52:07 +0200 Subject: Finish the proofs which use FixI --- backends/lean/Base/Diverge.lean | 199 +++++++++++++++++++++++++++++++--------- 1 file changed, 157 insertions(+), 42 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index f3fa4815..76f0543a 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -542,6 +542,147 @@ namespace FixI simp [fix] conv => lhs; rw [Heq] + /- Some utilities to define the mutually recursive functions -/ + + inductive Funs : List (Type u) → List (Type u) → Type (u + 1) := + | Nil : Funs [] [] + | Cons {ity oty : Type u} {itys otys : List (Type u)} + (f : ity → Result oty) (tl : Funs itys otys) : Funs (ity :: itys) (oty :: otys) + + theorem Funs.length_eq {itys otys : List (Type)} (fl : Funs itys otys) : + itys.length = otys.length := + match fl with + | .Nil => by simp + | .Cons f tl => + have h:= Funs.length_eq tl + by simp [h] + + @[simp] def Funs.cast_fin {itys otys : List (Type)} + (fl : Funs itys otys) (i : Fin itys.length) : Fin otys.length := + ⟨ i.val, by have h:= fl.length_eq; have h1:= i.isLt; simp_all ⟩ + + def get_fun {itys otys : List (Type)} (fl : Funs itys otys) : + (i : Fin itys.length) → itys.get i → Result (otys.get (fl.cast_fin i)) := + match fl with + | .Nil => λ i => by have h:= i.isLt; simp at h + | @Funs.Cons ity oty itys1 otys1 f tl => + λ i => + if h: i.val = 0 then + Eq.mp (by cases i; simp_all [List.get]) f + else + let j := i.val - 1 + have Hj: j < itys1.length := by + have Hi := i.isLt + simp at Hi + revert Hi + cases Heq: i.val <;> simp_all + simp_arith + let j: Fin itys1.length := ⟨ j, Hj ⟩ + Eq.mp + (by + cases Heq: i; rename_i val isLt; + cases Heq': j; rename_i val' isLt; + cases val <;> simp_all [List.get]) + (get_fun tl j) + + + -- TODO: move + theorem add_one_le_iff_le_ne (n m : Nat) (h1 : m ≤ n) (h2 : m ≠ n) : m + 1 ≤ n := by + -- Damn, those proofs on natural numbers are hard - I wish Omega was in mathlib4... 
+ simp [Nat.add_one_le_iff] + simp [Nat.lt_iff_le_and_ne] + simp_all + + def for_all_fin_aux {n : Nat} (f : Fin n → Prop) (m : Nat) (h : m ≤ n) : Prop := + if heq: m = n then True + else + f ⟨ m, by simp_all [Nat.lt_iff_le_and_ne] ⟩ ∧ + for_all_fin_aux f (m + 1) (by simp_all [add_one_le_iff_le_ne]) + termination_by for_all_fin_aux n _ m h => n - m + decreasing_by + simp_wf + apply Nat.sub_add_lt_sub <;> simp_all + simp_all [add_one_le_iff_le_ne] + + def for_all_fin {n : Nat} (f : Fin n → Prop) := for_all_fin_aux f 0 (by simp) + + theorem for_all_fin_aux_imp_forall {n : Nat} (f : Fin n → Prop) (m : Nat) : + (h : m ≤ n) → + for_all_fin_aux f m h → ∀ i, m ≤ i.val → f i + := by + generalize h: (n - m) = k + revert m + induction k + case zero => + simp_all + intro m h1 h2 + have h: n = m := by + linarith + unfold for_all_fin_aux; simp_all + simp_all + -- There is no i s.t. m ≤ i + intro i h3; cases i; simp_all + linarith + case succ k hi => + simp_all + intro m hk hmn + intro hf i hmi + have hne: m ≠ n := by + have hineq := Nat.lt_of_sub_eq_succ hk + linarith + -- m = i? + if heq: m = i then + -- Yes: simply use the `for_all_fin_aux` hyp + unfold for_all_fin_aux at hf + simp_all + tauto + else + -- No: use the induction hypothesis + have hlt: m < i := by simp_all [Nat.lt_iff_le_and_ne] + have hineq: m + 1 ≤ n := by + have hineq := Nat.lt_of_sub_eq_succ hk + simp [*, Nat.add_one_le_iff] + have heq1: n - (m + 1) = k := by + -- TODO: very annoying arithmetic proof + simp [Nat.sub_eq_iff_eq_add hineq] + have hineq1: m ≤ n := by linarith + simp [Nat.sub_eq_iff_eq_add hineq1] at hk + simp_arith [hk] + have hi := hi (m + 1) heq1 hineq + apply hi <;> simp_all + . unfold for_all_fin_aux at hf + simp_all + . simp_all [add_one_le_iff_le_ne] + + theorem for_all_fin_imp_forall (n : Nat) (f : Fin n → Prop) : + for_all_fin f → ∀ i, f i + := by + intro Hf i + apply for_all_fin_aux_imp_forall <;> try assumption + simp + + /- Automating the proofs -/ + @[simp] theorem is_valid_p_same + (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) (x : Result c) : + is_valid_p k (λ _ => x) := by + simp [is_valid_p, k_to_gen, e_to_gen] + + @[simp] theorem is_valid_p_rec + (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) (i : id) (x : a i) : + is_valid_p k (λ k => k i x) := by + simp [is_valid_p, k_to_gen, e_to_gen, kk_to_gen, kk_of_gen] + + theorem is_valid_p_bind + {{k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)}} + {{g : ((i:id) → a i → Result (b i)) → Result c}} + {{h : c → ((i:id) → a i → Result (b i)) → Result d}} + (Hgvalid : is_valid_p k g) + (Hhvalid : ∀ y, is_valid_p k (h y)) : + is_valid_p k (λ k => do let y ← g k; h y k) := by + apply Fix.is_valid_p_bind + . apply Hgvalid + . 
apply Hhvalid + end FixI namespace Ex1 @@ -768,55 +909,29 @@ namespace Ex4 let b ← k 0 (i - 1) .ret b - inductive Funs : List (Type u) → List (Type u) → Type (u + 1) := - | Nil : Funs [] [] - | Cons {ity oty : Type u} {itys otys : List (Type u)} - (f : ity → Result oty) (tl : Funs itys otys) : Funs (ity :: itys) (oty :: otys) - - theorem Funs.length_eq {itys otys : List (Type)} (fl : Funs itys otys) : - itys.length = otys.length := - match fl with - | .Nil => by simp - | .Cons f tl => - have h:= Funs.length_eq tl - by simp [h] - - @[simp] def Funs.cast_fin {itys otys : List (Type)} - (fl : Funs itys otys) (i : Fin itys.length) : Fin otys.length := - ⟨ i.val, by have h:= fl.length_eq; have h1:= i.isLt; simp_all ⟩ - @[simp] def bodies (k : (i : Fin 2) → input_ty i → Result (output_ty i)) : Funs [Int, Int] [Bool, Bool] := Funs.Cons (is_even_body k) (Funs.Cons (is_odd_body k) Funs.Nil) - @[simp] def get_fun {itys otys : List (Type)} (fl : Funs itys otys) : - (i : Fin itys.length) → itys.get i → Result (otys.get (fl.cast_fin i)) := - match fl with - | .Nil => λ i => by have h:= i.isLt; simp at h - | @Funs.Cons ity oty itys1 otys1 f tl => - λ i => - if h: i.val = 0 then - Eq.mp (by cases i; simp_all [List.get]) f - else - let j := i.val - 1 - have Hj: j < itys1.length := by - have Hi := i.isLt - simp at Hi - revert Hi - cases Heq: i.val <;> simp_all - simp_arith - let j: Fin itys1.length := ⟨ j, Hj ⟩ - Eq.mp - (by - cases Heq: i; rename_i val isLt; - cases Heq': j; rename_i val' isLt; - cases val <;> simp_all [List.get]) - (get_fun tl j) - def body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i: Fin 2) : input_ty i → Result (output_ty i) := get_fun (bodies k) i - theorem body_is_valid : is_valid body := by sorry + theorem body_is_valid : is_valid body := by + -- Split the proof into proofs of validity of the individual bodies + rw [is_valid] + simp [body] + intro k + apply for_all_fin_imp_forall + simp [for_all_fin] + repeat (unfold for_all_fin_aux; simp) + simp [get_fun] + (repeat (apply And.intro)) <;> intro x <;> simp at x <;> + simp [is_even_body, is_odd_body] + -- Prove the validity of the individual bodies + . split <;> simp + apply is_valid_p_bind <;> simp + . split <;> simp + apply is_valid_p_bind <;> simp theorem body_fix_eq : fix body = body (fix body) := is_valid_fix_fixed_eq body_is_valid -- cgit v1.2.3 From 8db1af5afcb414b502a58a87f6bdcc1c08cbe3d2 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 27 Jun 2023 18:22:22 +0200 Subject: Finish some proofs in Diverge --- backends/lean/Base/Diverge.lean | 119 ++++++++++++++++++++++++++++++++++------ 1 file changed, 102 insertions(+), 17 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index 76f0543a..c97674dd 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -544,28 +544,40 @@ namespace FixI /- Some utilities to define the mutually recursive functions -/ - inductive Funs : List (Type u) → List (Type u) → Type (u + 1) := - | Nil : Funs [] [] + -- TODO: use more + @[simp] def kk_ty (id : Type) (a b : id → Type) := (i:id) → a i → Result (b i) + @[simp] def k_ty (id : Type) (a b : id → Type) := kk_ty id a b → kk_ty id a b + + -- Initially, we had left out the parameters id, a and b. 
+ -- However, by parameterizing Funs with those parameters, we can state + -- and prove lemmas like Funs.is_valid_p_is_valid_p + inductive Funs (id : Type) (a b : id → Type) : + List (Type u) → List (Type u) → Type (u + 1) := + | Nil : Funs id a b [] [] | Cons {ity oty : Type u} {itys otys : List (Type u)} - (f : ity → Result oty) (tl : Funs itys otys) : Funs (ity :: itys) (oty :: otys) + (f : kk_ty id a b → ity → Result oty) (tl : Funs id a b itys otys) : + Funs id a b (ity :: itys) (oty :: otys) - theorem Funs.length_eq {itys otys : List (Type)} (fl : Funs itys otys) : - itys.length = otys.length := + theorem Funs.length_eq {itys otys : List (Type)} (fl : Funs id a b itys otys) : + otys.length = itys.length := match fl with | .Nil => by simp | .Cons f tl => have h:= Funs.length_eq tl by simp [h] + def fin_cast {n m : Nat} (h : m = n) (i : Fin n) : Fin m := + ⟨ i.val, by have h1:= i.isLt; simp_all ⟩ + @[simp] def Funs.cast_fin {itys otys : List (Type)} - (fl : Funs itys otys) (i : Fin itys.length) : Fin otys.length := - ⟨ i.val, by have h:= fl.length_eq; have h1:= i.isLt; simp_all ⟩ + (fl : Funs id a b itys otys) (i : Fin itys.length) : Fin otys.length := + fin_cast (fl.length_eq) i - def get_fun {itys otys : List (Type)} (fl : Funs itys otys) : - (i : Fin itys.length) → itys.get i → Result (otys.get (fl.cast_fin i)) := + def get_fun {itys otys : List (Type)} (fl : Funs id a b itys otys) : + (i : Fin itys.length) → kk_ty id a b → itys.get i → Result (otys.get (fl.cast_fin i)) := match fl with | .Nil => λ i => by have h:= i.isLt; simp at h - | @Funs.Cons ity oty itys1 otys1 f tl => + | @Funs.Cons id a b ity oty itys1 otys1 f tl => λ i => if h: i.val = 0 then Eq.mp (by cases i; simp_all [List.get]) f @@ -582,10 +594,9 @@ namespace FixI (by cases Heq: i; rename_i val isLt; cases Heq': j; rename_i val' isLt; - cases val <;> simp_all [List.get]) + cases val <;> simp_all [List.get, fin_cast]) (get_fun tl j) - -- TODO: move theorem add_one_le_iff_le_ne (n m : Nat) (h1 : m ≤ n) (h2 : m ≠ n) : m + 1 ≤ n := by -- Damn, those proofs on natural numbers are hard - I wish Omega was in mathlib4... @@ -612,7 +623,7 @@ namespace FixI := by generalize h: (n - m) = k revert m - induction k + induction k -- TODO: induction h rather? case zero => simp_all intro m h1 h2 @@ -683,6 +694,65 @@ namespace FixI . apply Hgvalid . 
apply Hhvalid + def Funs.is_valid_p + (k : k_ty id a b) + (fl : Funs id a b itys otys) : + Prop := + match fl with + | .Nil => True + | .Cons f fl => (∀ x, FixI.is_valid_p k (λ k => f k x)) ∧ fl.is_valid_p k + + #check Subtype + def Funs.is_valid_p_is_valid_p_aux + {k : k_ty id a b} + {itys otys : List Type} + (Heq : List.length otys = List.length itys) + (fl : Funs id a b itys otys) (Hvalid : is_valid_p k fl) : + ∀ (i : Fin itys.length) (x : itys.get i), FixI.is_valid_p k (fun k => get_fun fl i k x) := by + -- Prepare the induction + have ⟨ n, Hn ⟩ : { n : Nat // itys.length = n } := ⟨ itys.length, by rfl ⟩ + revert itys otys Heq fl Hvalid + induction n + -- + case zero => + intro itys otys Heq fl Hvalid Hlen; + have Heq: itys = [] := by cases itys <;> simp_all + have Heq: otys = [] := by cases otys <;> simp_all + intro i x + simp_all + have Hi := i.isLt + simp_all + case succ n Hn => + intro itys otys Heq fl Hvalid Hlen i x; + cases fl <;> simp at * + rename_i ity oty itys otys f fl + have ⟨ Hvf, Hvalid ⟩ := Hvalid + have Hvf1: is_valid_p k fl := by + simp_all [Funs.is_valid_p] + have Hn := @Hn itys otys (by simp[*]) fl Hvf1 (by simp [*]) + -- Case disjunction on i + match i with + | ⟨ 0, _ ⟩ => + simp at x + simp [get_fun] + apply (Hvf x) + | ⟨ .succ j, HiLt ⟩ => + simp_arith at HiLt + simp at x + let j : Fin (List.length itys) := ⟨ j, by simp_arith [HiLt] ⟩ + have Hn := Hn j x + apply Hn + + def Funs.is_valid_p_is_valid_p + (itys otys : List (Type)) (Heq: otys.length = itys.length := by decide) + (k : k_ty (Fin (List.length itys)) (List.get itys) fun i => List.get otys (fin_cast Heq i)) + (fl : Funs (Fin itys.length) itys.get (λ i => otys.get (fin_cast Heq i)) itys otys) : + fl.is_valid_p k → + ∀ (i : Fin itys.length) (x : itys.get i), FixI.is_valid_p k (fun k => get_fun fl i k x) + := by + intro Hvalid + apply is_valid_p_is_valid_p_aux <;> simp [*] + end FixI namespace Ex1 @@ -909,12 +979,12 @@ namespace Ex4 let b ← k 0 (i - 1) .ret b - @[simp] def bodies (k : (i : Fin 2) → input_ty i → Result (output_ty i)) : - Funs [Int, Int] [Bool, Bool] := - Funs.Cons (is_even_body k) (Funs.Cons (is_odd_body k) Funs.Nil) + @[simp] def bodies : + Funs (Fin 2) input_ty output_ty [Int, Int] [Bool, Bool] := + Funs.Cons (is_even_body) (Funs.Cons (is_odd_body) Funs.Nil) def body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i: Fin 2) : - input_ty i → Result (output_ty i) := get_fun (bodies k) i + input_ty i → Result (output_ty i) := get_fun bodies i k theorem body_is_valid : is_valid body := by -- Split the proof into proofs of validity of the individual bodies @@ -933,6 +1003,21 @@ namespace Ex4 . split <;> simp apply is_valid_p_bind <;> simp + theorem body_is_valid' : is_valid body := by + -- Split the proof into proofs of validity of the individual bodies + rw [is_valid] + simp [body] + intro k + apply (Funs.is_valid_p_is_valid_p [Int, Int] [Bool, Bool]) + simp [Funs.is_valid_p] + (repeat (apply And.intro)) <;> intro x <;> simp at x <;> + simp [is_even_body, is_odd_body] + -- Prove the validity of the individual bodies + . split <;> simp + apply is_valid_p_bind <;> simp + . 
split <;> simp + apply is_valid_p_bind <;> simp + theorem body_fix_eq : fix body = body (fix body) := is_valid_fix_fixed_eq body_is_valid -- cgit v1.2.3 From 2554a0a64d761a82789b7eacbfa3ca2c88eec7df Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 27 Jun 2023 19:06:14 +0200 Subject: Reduce the time spent on some proofs --- backends/lean/Base/Diverge.lean | 48 +++++++++++++++++------------------------ 1 file changed, 20 insertions(+), 28 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index c97674dd..c62e6dd5 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -25,6 +25,11 @@ TODO: - simplifier/rewriter have a strange behavior sometimes -/ + +/- TODO: this is very useful, but is there more? -/ +set_option profiler true +set_option profiler.threshold 100 + namespace Diverge namespace Primitives @@ -533,10 +538,10 @@ namespace FixI fix f = f (fix f) := by have Hvalid' : Fix.is_valid (k_to_gen f) := by intro k x - simp [is_valid, is_valid_p] at Hvalid + simp only [is_valid, is_valid_p] at Hvalid let ⟨ i, x ⟩ := x have Hvalid := Hvalid (k_of_gen k) i x - simp [k_to_gen, k_of_gen, kk_to_gen, kk_of_gen] at Hvalid + simp only [k_to_gen, k_of_gen, kk_to_gen, kk_of_gen] at Hvalid refine Hvalid have Heq := Fix.is_valid_fix_fixed_eq Hvalid' simp [fix] @@ -612,7 +617,7 @@ namespace FixI termination_by for_all_fin_aux n _ m h => n - m decreasing_by simp_wf - apply Nat.sub_add_lt_sub <;> simp_all + apply Nat.sub_add_lt_sub <;> simp simp_all [add_one_le_iff_le_ne] def for_all_fin {n : Nat} (f : Fin n → Prop) := for_all_fin_aux f 0 (by simp) @@ -665,6 +670,7 @@ namespace FixI simp_all . simp_all [add_one_le_iff_le_ne] + -- TODO: this is not necessary anymore theorem for_all_fin_imp_forall (n : Nat) (f : Fin n → Prop) : for_all_fin f → ∀ i, f i := by @@ -702,7 +708,6 @@ namespace FixI | .Nil => True | .Cons f fl => (∀ x, FixI.is_valid_p k (λ k => f k x)) ∧ fl.is_valid_p k - #check Subtype def Funs.is_valid_p_is_valid_p_aux {k : k_ty id a b} {itys otys : List Type} @@ -724,11 +729,11 @@ namespace FixI simp_all case succ n Hn => intro itys otys Heq fl Hvalid Hlen i x; - cases fl <;> simp at * + cases fl <;> simp at Hlen i x Heq Hvalid rename_i ity oty itys otys f fl have ⟨ Hvf, Hvalid ⟩ := Hvalid have Hvf1: is_valid_p k fl := by - simp_all [Funs.is_valid_p] + simp [Hvalid, Funs.is_valid_p] have Hn := @Hn itys otys (by simp[*]) fl Hvf1 (by simp [*]) -- Case disjunction on i match i with @@ -989,29 +994,12 @@ namespace Ex4 theorem body_is_valid : is_valid body := by -- Split the proof into proofs of validity of the individual bodies rw [is_valid] - simp [body] - intro k - apply for_all_fin_imp_forall - simp [for_all_fin] - repeat (unfold for_all_fin_aux; simp) - simp [get_fun] - (repeat (apply And.intro)) <;> intro x <;> simp at x <;> - simp [is_even_body, is_odd_body] - -- Prove the validity of the individual bodies - . split <;> simp - apply is_valid_p_bind <;> simp - . split <;> simp - apply is_valid_p_bind <;> simp - - theorem body_is_valid' : is_valid body := by - -- Split the proof into proofs of validity of the individual bodies - rw [is_valid] - simp [body] + simp only [body] intro k apply (Funs.is_valid_p_is_valid_p [Int, Int] [Bool, Bool]) simp [Funs.is_valid_p] (repeat (apply And.intro)) <;> intro x <;> simp at x <;> - simp [is_even_body, is_odd_body] + simp only [is_even_body, is_odd_body] -- Prove the validity of the individual bodies . 
split <;> simp apply is_valid_p_bind <;> simp @@ -1088,11 +1076,15 @@ namespace Ex5 theorem id_body_is_valid : ∀ k x, is_valid_p k (λ k => @id_body a k x) := by intro k x - simp [id_body] + simp only [id_body] split <;> simp - apply is_valid_p_bind <;> simp_all + apply is_valid_p_bind <;> simp [*] -- We have to show that `map k tl` is valid - apply map_is_valid; simp + apply map_is_valid; + -- Remark: if we don't do the intro, then the last step is expensive: + -- "typeclass inference of Nonempty took 119ms" + intro k x + simp only [is_valid_p_same, is_valid_p_rec] noncomputable def id (t : Tree a) := fix id_body t -- cgit v1.2.3 From 19bde89b84619defc2a822c3bf96bdca9c97eee7 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 28 Jun 2023 12:16:10 +0200 Subject: Reorganize backends/lean/Base --- backends/lean/Base/Diverge.lean | 1104 +---------------------------- backends/lean/Base/Diverge/Base.lean | 1105 ++++++++++++++++++++++++++++++ backends/lean/Base/Diverge/Elab.lean | 182 +++++ backends/lean/Base/Diverge/ElabBase.lean | 9 + 4 files changed, 1298 insertions(+), 1102 deletions(-) create mode 100644 backends/lean/Base/Diverge/Base.lean create mode 100644 backends/lean/Base/Diverge/Elab.lean create mode 100644 backends/lean/Base/Diverge/ElabBase.lean (limited to 'backends') diff --git a/backends/lean/Base/Diverge.lean b/backends/lean/Base/Diverge.lean index c62e6dd5..c9a2eec2 100644 --- a/backends/lean/Base/Diverge.lean +++ b/backends/lean/Base/Diverge.lean @@ -3,1105 +3,5 @@ import Lean.Meta.Tactic.Simp import Init.Data.List.Basic import Mathlib.Tactic.RunCmd import Mathlib.Tactic.Linarith - -/- -TODO: -- we want an easier to use cases: - - keeps in the goal an equation of the shape: `t = case` - - if called on Prop terms, uses Classical.em - Actually, the cases from mathlib seems already quite powerful - (https://leanprover-community.github.io/mathlib_docs/tactics.html#cases) - For instance: cases h : e - Also: cases_matching -- better split tactic -- we need conversions to operate on the head of applications. - Actually, something like this works: - ``` - conv at Hl => - apply congr_fun - simp [fix_fuel_P] - ``` - Maybe we need a rpt ... ; focus? -- simplifier/rewriter have a strange behavior sometimes --/ - - -/- TODO: this is very useful, but is there more? -/ -set_option profiler true -set_option profiler.threshold 100 - -namespace Diverge - -namespace Primitives -/-! 
# Copy-pasting from Primitives to make the file self-contained -/ - -inductive Error where - | assertionFailure: Error - | integerOverflow: Error - | divisionByZero: Error - | arrayOutOfBounds: Error - | maximumSizeExceeded: Error - | panic: Error -deriving Repr, BEq - -open Error - -inductive Result (α : Type u) where - | ret (v: α): Result α - | fail (e: Error): Result α - | div -deriving Repr, BEq - -open Result - -def bind (x: Result α) (f: α -> Result β) : Result β := - match x with - | ret v => f v - | fail v => fail v - | div => div - -@[simp] theorem bind_ret (x : α) (f : α → Result β) : bind (.ret x) f = f x := by simp [bind] -@[simp] theorem bind_fail (x : Error) (f : α → Result β) : bind (.fail x) f = .fail x := by simp [bind] -@[simp] theorem bind_div (f : α → Result β) : bind .div f = .div := by simp [bind] - --- Allows using Result in do-blocks -instance : Bind Result where - bind := bind - --- Allows using return x in do-blocks -instance : Pure Result where - pure := fun x => ret x - -@[simp] theorem bind_tc_ret (x : α) (f : α → Result β) : - (do let y ← .ret x; f y) = f x := by simp [Bind.bind, bind] - -@[simp] theorem bind_tc_fail (x : Error) (f : α → Result β) : - (do let y ← fail x; f y) = fail x := by simp [Bind.bind, bind] - -@[simp] theorem bind_tc_div (f : α → Result β) : - (do let y ← div; f y) = div := by simp [Bind.bind, bind] - -def div? {α: Type} (r: Result α): Bool := - match r with - | div => true - | ret _ | fail _ => false - -end Primitives - -namespace Fix - - open Primitives - open Result - - variable {a : Type} {b : a → Type} - variable {c d : Type} - - /-! # The least fixed point definition and its properties -/ - - def least_p (p : Nat → Prop) (n : Nat) : Prop := p n ∧ (∀ m, m < n → ¬ p m) - noncomputable def least (p : Nat → Prop) : Nat := - Classical.epsilon (least_p p) - - -- Auxiliary theorem for [least_spec]: if there exists an `n` satisfying `p`, - -- there there exists a least `m` satisfying `p`. - theorem least_spec_aux (p : Nat → Prop) : ∀ (n : Nat), (hn : p n) → ∃ m, least_p p m := by - apply Nat.strongRec' - intros n hi hn - -- Case disjunction on: is n the smallest n satisfying p? - match Classical.em (∀ m, m < n → ¬ p m) with - | .inl hlt => - -- Yes: trivial - exists n - | .inr hlt => - simp at * - let ⟨ m, ⟨ hmlt, hm ⟩ ⟩ := hlt - have hi := hi m hmlt hm - apply hi - - -- The specification of [least]: either `p` is never satisfied, or it is satisfied - -- by `least p` and no `n < least p` satisfies `p`. - theorem least_spec (p : Nat → Prop) : (∀ n, ¬ p n) ∨ (p (least p) ∧ ∀ n, n < least p → ¬ p n) := by - -- Case disjunction on the existence of an `n` which satisfies `p` - match Classical.em (∀ n, ¬ p n) with - | .inl h => - -- There doesn't exist: trivial - apply (Or.inl h) - | .inr h => - -- There exists: we simply use `least_spec_aux` in combination with the property - -- of the epsilon operator - simp at * - let ⟨ n, hn ⟩ := h - apply Or.inr - have hl := least_spec_aux p n hn - have he := Classical.epsilon_spec hl - apply he - - /-! # The fixed point definitions -/ - - def fix_fuel (n : Nat) (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : - Result (b x) := - match n with - | 0 => .div - | n + 1 => - f (fix_fuel n f) x - - @[simp] def fix_fuel_pred (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) - (x : a) (n : Nat) := - not (div? 
(fix_fuel n f x)) - - def fix_fuel_P (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) - (x : a) (n : Nat) : Prop := - fix_fuel_pred f x n - - noncomputable - def fix (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : Result (b x) := - fix_fuel (least (fix_fuel_P f x)) f x - - /-! # The validity property -/ - - -- Monotonicity relation over results - -- TODO: generalize (we should parameterize the definition by a relation over `a`) - def result_rel {a : Type u} (x1 x2 : Result a) : Prop := - match x1 with - | div => True - | fail _ => x2 = x1 - | ret _ => x2 = x1 -- TODO: generalize - - -- Monotonicity relation over monadic arrows (i.e., Kleisli arrows) - def karrow_rel (k1 k2 : (x:a) → Result (b x)) : Prop := - ∀ x, result_rel (k1 x) (k2 x) - - -- Monotonicity property for function bodies - def is_mono (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) : Prop := - ∀ {{k1 k2}}, karrow_rel k1 k2 → karrow_rel (f k1) (f k2) - - -- "Continuity" property. - -- We need this, and this looks a lot like continuity. Also see this paper: - -- https://inria.hal.science/file/index/docid/216187/filename/tarski.pdf - -- We define our "continuity" criteria so that it gives us what we need to - -- prove the fixed-point equation, and we can also easily manipulate it. - def is_cont (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) : Prop := - ∀ x, (Hdiv : ∀ n, fix_fuel (.succ n) f x = div) → f (fix f) x = div - - /-! # The proof of the fixed-point equation -/ - theorem fix_fuel_mono {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} - (Hmono : is_mono f) : - ∀ {{n m}}, n ≤ m → karrow_rel (fix_fuel n f) (fix_fuel m f) := by - intros n - induction n - case zero => simp [karrow_rel, fix_fuel, result_rel] - case succ n1 Hi => - intros m Hle x - simp [result_rel] - match m with - | 0 => - exfalso - zify at * - linarith - | Nat.succ m1 => - simp_arith at Hle - simp [fix_fuel] - have Hi := Hi Hle - have Hmono := Hmono Hi x - simp [result_rel] at Hmono - apply Hmono - - @[simp] theorem neg_fix_fuel_P - {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} {x : a} {n : Nat} : - ¬ fix_fuel_P f x n ↔ (fix_fuel n f x = div) := by - simp [fix_fuel_P, div?] - cases fix_fuel n f x <;> simp - - theorem fix_fuel_fix_mono {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} (Hmono : is_mono f) : - ∀ n, karrow_rel (fix_fuel n f) (fix f) := by - intros n x - simp [result_rel] - have Hl := least_spec (fix_fuel_P f x) - simp at Hl - match Hl with - | .inl Hl => simp [*] - | .inr ⟨ Hl, Hn ⟩ => - match Classical.em (fix_fuel n f x = div) with - | .inl Hd => - simp [*] - | .inr Hd => - have Hineq : least (fix_fuel_P f x) ≤ n := by - -- Proof by contradiction - cases Classical.em (least (fix_fuel_P f x) ≤ n) <;> simp [*] - simp at * - rename_i Hineq - have Hn := Hn n Hineq - contradiction - have Hfix : ¬ (fix f x = div) := by - simp [fix] - -- By property of the least upper bound - revert Hd Hl - -- TODO: there is no conversion to select the head of a function! - conv => lhs; apply congr_fun; apply congr_fun; apply congr_fun; simp [fix_fuel_P, div?] 
- cases fix_fuel (least (fix_fuel_P f x)) f x <;> simp - have Hmono := fix_fuel_mono Hmono Hineq x - simp [result_rel] at Hmono - simp [fix] at * - cases Heq: fix_fuel (least (fix_fuel_P f x)) f x <;> - cases Heq':fix_fuel n f x <;> - simp_all - - theorem fix_fuel_P_least {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} (Hmono : is_mono f) : - ∀ {{x n}}, fix_fuel_P f x n → fix_fuel_P f x (least (fix_fuel_P f x)) := by - intros x n Hf - have Hfmono := fix_fuel_fix_mono Hmono n x - -- TODO: there is no conversion to select the head of a function! - conv => apply congr_fun; simp [fix_fuel_P] - simp [fix_fuel_P] at Hf - revert Hf Hfmono - simp [div?, result_rel, fix] - cases fix_fuel n f x <;> simp_all - - -- Prove the fixed point equation in the case there exists some fuel for which - -- the execution terminates - theorem fix_fixed_eq_terminates (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (Hmono : is_mono f) - (x : a) (n : Nat) (He : fix_fuel_P f x n) : - fix f x = f (fix f) x := by - have Hl := fix_fuel_P_least Hmono He - -- TODO: better control of simplification - conv at Hl => - apply congr_fun - simp [fix_fuel_P] - -- The least upper bound is > 0 - have ⟨ n, Hsucc ⟩ : ∃ n, least (fix_fuel_P f x) = Nat.succ n := by - revert Hl - simp [div?] - cases least (fix_fuel_P f x) <;> simp [fix_fuel] - simp [Hsucc] at Hl - revert Hl - simp [*, div?, fix, fix_fuel] - -- Use the monotonicity - have Hfixmono := fix_fuel_fix_mono Hmono n - have Hvm := Hmono Hfixmono x - -- Use functional extensionality - simp [result_rel, fix] at Hvm - revert Hvm - split <;> simp [*] <;> intros <;> simp [*] - - theorem fix_fixed_eq_forall {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} - (Hmono : is_mono f) (Hcont : is_cont f) : - ∀ x, fix f x = f (fix f) x := by - intros x - -- Case disjunction: is there a fuel such that the execution successfully execute? - match Classical.em (∃ n, fix_fuel_P f x n) with - | .inr He => - -- No fuel: the fixed point evaluates to `div` - --simp [fix] at * - simp at * - conv => lhs; simp [fix] - have Hel := He (Nat.succ (least (fix_fuel_P f x))); simp [*, fix_fuel] at *; clear Hel - -- Use the "continuity" of `f` - have He : ∀ n, fix_fuel (.succ n) f x = div := by intros; simp [*] - have Hcont := Hcont x He - simp [Hcont] - | .inl ⟨ n, He ⟩ => apply fix_fixed_eq_terminates f Hmono x n He - - -- The final fixed point equation - theorem fix_fixed_eq {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} - (Hmono : is_mono f) (Hcont : is_cont f) : - fix f = f (fix f) := by - have Heq := fix_fixed_eq_forall Hmono Hcont - have Heq1 : fix f = (λ x => fix f x) := by simp - rw [Heq1] - conv => lhs; ext; simp [Heq] - - /-! 
# Making the proofs of validity manageable (and automatable) -/ - - -- Monotonicity property for expressions - def is_mono_p (e : ((x:a) → Result (b x)) → Result c) : Prop := - ∀ {{k1 k2}}, karrow_rel k1 k2 → result_rel (e k1) (e k2) - - theorem is_mono_p_same (x : Result c) : - @is_mono_p a b c (λ _ => x) := by - simp [is_mono_p, karrow_rel, result_rel] - split <;> simp - - theorem is_mono_p_rec (x : a) : - @is_mono_p a b (b x) (λ f => f x) := by - simp_all [is_mono_p, karrow_rel, result_rel] - - -- The important lemma about `is_mono_p` - theorem is_mono_p_bind - (g : ((x:a) → Result (b x)) → Result c) - (h : c → ((x:a) → Result (b x)) → Result d) : - is_mono_p g → - (∀ y, is_mono_p (h y)) → - @is_mono_p a b d (λ k => do let y ← g k; h y k) := by - intro hg hh - simp [is_mono_p] - intro fg fh Hrgh - simp [karrow_rel, result_rel] - have hg := hg Hrgh; simp [result_rel] at hg - cases heq0: g fg <;> simp_all - rename_i y _ - have hh := hh y Hrgh; simp [result_rel] at hh - simp_all - - -- Continuity property for expressions - note that we take the continuation - -- as parameter - def is_cont_p (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) - (e : ((x:a) → Result (b x)) → Result c) : Prop := - (Hc : ∀ n, e (fix_fuel n k) = .div) → - e (fix k) = .div - - theorem is_cont_p_same (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) - (x : Result c) : - is_cont_p k (λ _ => x) := by - simp [is_cont_p] - - theorem is_cont_p_rec (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : - is_cont_p f (λ f => f x) := by - simp_all [is_cont_p, fix] - - -- The important lemma about `is_cont_p` - theorem is_cont_p_bind - (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) - (Hkmono : is_mono k) - (g : ((x:a) → Result (b x)) → Result c) - (h : c → ((x:a) → Result (b x)) → Result d) : - is_mono_p g → - is_cont_p k g → - (∀ y, is_mono_p (h y)) → - (∀ y, is_cont_p k (h y)) → - is_cont_p k (λ k => do let y ← g k; h y k) := by - intro Hgmono Hgcont Hhmono Hhcont - simp [is_cont_p] - intro Hdiv - -- Case on `g (fix... k)`: is there an n s.t. it terminates? - cases Classical.em (∀ n, g (fix_fuel n k) = .div) <;> rename_i Hn - . -- Case 1: g diverges - have Hgcont := Hgcont Hn - simp_all - . -- Case 2: g doesn't diverge - simp at Hn - let ⟨ n, Hn ⟩ := Hn - have Hdivn := Hdiv n - have Hffmono := fix_fuel_fix_mono Hkmono n - have Hgeq := Hgmono Hffmono - simp [result_rel] at Hgeq - cases Heq: g (fix_fuel n k) <;> rename_i y <;> simp_all - -- Remains the .ret case - -- Use Hdiv to prove that: ∀ n, h y (fix_fuel n f) = div - -- We do this in two steps: first we prove it for m ≥ n - have Hhdiv: ∀ m, h y (fix_fuel m k) = .div := by - have Hhdiv : ∀ m, n ≤ m → h y (fix_fuel m k) = .div := by - -- We use the fact that `g (fix_fuel n f) = .div`, combined with Hdiv - intro m Hle - have Hdivm := Hdiv m - -- Monotonicity of g - have Hffmono := fix_fuel_mono Hkmono Hle - have Hgmono := Hgmono Hffmono - -- We need to clear Hdiv because otherwise simp_all rewrites Hdivm with Hdiv - clear Hdiv - simp_all [result_rel] - intro m - -- TODO: we shouldn't need the excluded middle here because it is decidable - cases Classical.em (n ≤ m) <;> rename_i Hl - . apply Hhdiv; assumption - . 
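        -- Case ¬ (n ≤ m), i.e. m < n: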
simp at Hl - -- Make a case disjunction on `h y (fix_fuel m k)`: if it is not equal - -- to div, use the monotonicity of `h y` - have Hle : m ≤ n := by linarith - have Hffmono := fix_fuel_mono Hkmono Hle - have Hmono := Hhmono y Hffmono - simp [result_rel] at Hmono - cases Heq: h y (fix_fuel m k) <;> simp_all - -- We can now use the continuity hypothesis for h - apply Hhcont; assumption - - -- The validity property for an expression - def is_valid_p (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) - (e : ((x:a) → Result (b x)) → Result c) : Prop := - is_mono_p e ∧ - (is_mono k → is_cont_p k e) - - @[simp] theorem is_valid_p_same - (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : Result c) : - is_valid_p k (λ _ => x) := by - simp [is_valid_p, is_mono_p_same, is_cont_p_same] - - @[simp] theorem is_valid_p_rec - (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : - is_valid_p k (λ k => k x) := by - simp_all [is_valid_p, is_mono_p_rec, is_cont_p_rec] - - -- Lean is good at unification: we can write a very general version - -- (in particular, it will manage to figure out `g` and `h` when we - -- apply the lemma) - theorem is_valid_p_bind - {{k : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} - {{g : ((x:a) → Result (b x)) → Result c}} - {{h : c → ((x:a) → Result (b x)) → Result d}} - (Hgvalid : is_valid_p k g) - (Hhvalid : ∀ y, is_valid_p k (h y)) : - is_valid_p k (λ k => do let y ← g k; h y k) := by - let ⟨ Hgmono, Hgcont ⟩ := Hgvalid - simp [is_valid_p, forall_and] at Hhvalid - have ⟨ Hhmono, Hhcont ⟩ := Hhvalid - simp [← imp_forall_iff] at Hhcont - simp [is_valid_p]; constructor - . -- Monotonicity - apply is_mono_p_bind <;> assumption - . -- Continuity - intro Hkmono - have Hgcont := Hgcont Hkmono - have Hhcont := Hhcont Hkmono - apply is_cont_p_bind <;> assumption - - def is_valid (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) : Prop := - ∀ k x, is_valid_p k (λ k => f k x) - - theorem is_valid_p_imp_is_valid {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} - (Hvalid : is_valid f) : - is_mono f ∧ is_cont f := by - have Hmono : is_mono f := by - intro f h Hr x - have Hmono := Hvalid (λ _ _ => .div) x - have Hmono := Hmono.left - apply Hmono; assumption - have Hcont : is_cont f := by - intro x Hdiv - have Hcont := (Hvalid f x).right Hmono - simp [is_cont_p] at Hcont - apply Hcont - intro n - have Hdiv := Hdiv n - simp [fix_fuel] at Hdiv - simp [*] - simp [*] - - theorem is_valid_fix_fixed_eq {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} - (Hvalid : is_valid f) : - fix f = f (fix f) := by - have ⟨ Hmono, Hcont ⟩ := is_valid_p_imp_is_valid Hvalid - exact fix_fixed_eq Hmono Hcont - -end Fix - -namespace FixI - /- Indexed fixed-point: definitions with indexed types, convenient to use for mutually - recursive definitions. We simply port the definitions and proofs from Fix to a more - specific case. 
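     For intuition: a family of mutually recursive functions is represented as a single
     continuation of type `(i:id) → a i → Result (b i)`, where the index `i` selects the
     function and `a i`, `b i` give its input and output types. The `.._to_gen`/`.._of_gen`
     definitions below convert between this indexed form and the un-indexed form used by
     `Fix`, by bundling the index together with the argument in a sigma type. A rough
     sketch, using the names defined below:
     ```
     kk           : (i:id) → a i → Result (b i)          -- indexed form
     kk_to_gen kk : (x:(i:id) × a i) → Result (b x.fst)  -- form expected by Fix
     kk_of_gen    -- inverse direction: `kk_of_gen (kk_to_gen kk)` unfolds to `fun i x => kk i x`
     ```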
- -/ - open Primitives Fix - - -- The index type - variable {id : Type} - - -- The input/output types - variable {a b : id → Type} - - -- Monotonicity relation over monadic arrows (i.e., Kleisli arrows) - def karrow_rel (k1 k2 : (i:id) → a i → Result (b i)) : Prop := - ∀ i x, result_rel (k1 i x) (k2 i x) - - def kk_to_gen (k : (i:id) → a i → Result (b i)) : - (x: (i:id) × a i) → Result (b x.fst) := - λ ⟨ i, x ⟩ => k i x - - def kk_of_gen (k : (x: (i:id) × a i) → Result (b x.fst)) : - (i:id) → a i → Result (b i) := - λ i x => k ⟨ i, x ⟩ - - def k_to_gen (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : - ((x: (i:id) × a i) → Result (b x.fst)) → (x: (i:id) × a i) → Result (b x.fst) := - λ kk => kk_to_gen (k (kk_of_gen kk)) - - def k_of_gen (k : ((x: (i:id) × a i) → Result (b x.fst)) → (x: (i:id) × a i) → Result (b x.fst)) : - ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i) := - λ kk => kk_of_gen (k (kk_to_gen kk)) - - def e_to_gen (e : ((i:id) → a i → Result (b i)) → Result c) : - ((x: (i:id) × a i) → Result (b x.fst)) → Result c := - λ k => e (kk_of_gen k) - - def is_valid_p (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) - (e : ((i:id) → a i → Result (b i)) → Result c) : Prop := - Fix.is_valid_p (k_to_gen k) (e_to_gen e) - - def is_valid (f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : Prop := - ∀ k i x, is_valid_p k (λ k => f k i x) - - noncomputable def fix - (f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : - (i:id) → a i → Result (b i) := - kk_of_gen (Fix.fix (k_to_gen f)) - - theorem is_valid_fix_fixed_eq - {{f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)}} - (Hvalid : is_valid f) : - fix f = f (fix f) := by - have Hvalid' : Fix.is_valid (k_to_gen f) := by - intro k x - simp only [is_valid, is_valid_p] at Hvalid - let ⟨ i, x ⟩ := x - have Hvalid := Hvalid (k_of_gen k) i x - simp only [k_to_gen, k_of_gen, kk_to_gen, kk_of_gen] at Hvalid - refine Hvalid - have Heq := Fix.is_valid_fix_fixed_eq Hvalid' - simp [fix] - conv => lhs; rw [Heq] - - /- Some utilities to define the mutually recursive functions -/ - - -- TODO: use more - @[simp] def kk_ty (id : Type) (a b : id → Type) := (i:id) → a i → Result (b i) - @[simp] def k_ty (id : Type) (a b : id → Type) := kk_ty id a b → kk_ty id a b - - -- Initially, we had left out the parameters id, a and b. 
- -- However, by parameterizing Funs with those parameters, we can state - -- and prove lemmas like Funs.is_valid_p_is_valid_p - inductive Funs (id : Type) (a b : id → Type) : - List (Type u) → List (Type u) → Type (u + 1) := - | Nil : Funs id a b [] [] - | Cons {ity oty : Type u} {itys otys : List (Type u)} - (f : kk_ty id a b → ity → Result oty) (tl : Funs id a b itys otys) : - Funs id a b (ity :: itys) (oty :: otys) - - theorem Funs.length_eq {itys otys : List (Type)} (fl : Funs id a b itys otys) : - otys.length = itys.length := - match fl with - | .Nil => by simp - | .Cons f tl => - have h:= Funs.length_eq tl - by simp [h] - - def fin_cast {n m : Nat} (h : m = n) (i : Fin n) : Fin m := - ⟨ i.val, by have h1:= i.isLt; simp_all ⟩ - - @[simp] def Funs.cast_fin {itys otys : List (Type)} - (fl : Funs id a b itys otys) (i : Fin itys.length) : Fin otys.length := - fin_cast (fl.length_eq) i - - def get_fun {itys otys : List (Type)} (fl : Funs id a b itys otys) : - (i : Fin itys.length) → kk_ty id a b → itys.get i → Result (otys.get (fl.cast_fin i)) := - match fl with - | .Nil => λ i => by have h:= i.isLt; simp at h - | @Funs.Cons id a b ity oty itys1 otys1 f tl => - λ i => - if h: i.val = 0 then - Eq.mp (by cases i; simp_all [List.get]) f - else - let j := i.val - 1 - have Hj: j < itys1.length := by - have Hi := i.isLt - simp at Hi - revert Hi - cases Heq: i.val <;> simp_all - simp_arith - let j: Fin itys1.length := ⟨ j, Hj ⟩ - Eq.mp - (by - cases Heq: i; rename_i val isLt; - cases Heq': j; rename_i val' isLt; - cases val <;> simp_all [List.get, fin_cast]) - (get_fun tl j) - - -- TODO: move - theorem add_one_le_iff_le_ne (n m : Nat) (h1 : m ≤ n) (h2 : m ≠ n) : m + 1 ≤ n := by - -- Damn, those proofs on natural numbers are hard - I wish Omega was in mathlib4... - simp [Nat.add_one_le_iff] - simp [Nat.lt_iff_le_and_ne] - simp_all - - def for_all_fin_aux {n : Nat} (f : Fin n → Prop) (m : Nat) (h : m ≤ n) : Prop := - if heq: m = n then True - else - f ⟨ m, by simp_all [Nat.lt_iff_le_and_ne] ⟩ ∧ - for_all_fin_aux f (m + 1) (by simp_all [add_one_le_iff_le_ne]) - termination_by for_all_fin_aux n _ m h => n - m - decreasing_by - simp_wf - apply Nat.sub_add_lt_sub <;> simp - simp_all [add_one_le_iff_le_ne] - - def for_all_fin {n : Nat} (f : Fin n → Prop) := for_all_fin_aux f 0 (by simp) - - theorem for_all_fin_aux_imp_forall {n : Nat} (f : Fin n → Prop) (m : Nat) : - (h : m ≤ n) → - for_all_fin_aux f m h → ∀ i, m ≤ i.val → f i - := by - generalize h: (n - m) = k - revert m - induction k -- TODO: induction h rather? - case zero => - simp_all - intro m h1 h2 - have h: n = m := by - linarith - unfold for_all_fin_aux; simp_all - simp_all - -- There is no i s.t. m ≤ i - intro i h3; cases i; simp_all - linarith - case succ k hi => - simp_all - intro m hk hmn - intro hf i hmi - have hne: m ≠ n := by - have hineq := Nat.lt_of_sub_eq_succ hk - linarith - -- m = i? - if heq: m = i then - -- Yes: simply use the `for_all_fin_aux` hyp - unfold for_all_fin_aux at hf - simp_all - tauto - else - -- No: use the induction hypothesis - have hlt: m < i := by simp_all [Nat.lt_iff_le_and_ne] - have hineq: m + 1 ≤ n := by - have hineq := Nat.lt_of_sub_eq_succ hk - simp [*, Nat.add_one_le_iff] - have heq1: n - (m + 1) = k := by - -- TODO: very annoying arithmetic proof - simp [Nat.sub_eq_iff_eq_add hineq] - have hineq1: m ≤ n := by linarith - simp [Nat.sub_eq_iff_eq_add hineq1] at hk - simp_arith [hk] - have hi := hi (m + 1) heq1 hineq - apply hi <;> simp_all - . unfold for_all_fin_aux at hf - simp_all - . 
simp_all [add_one_le_iff_le_ne] - - -- TODO: this is not necessary anymore - theorem for_all_fin_imp_forall (n : Nat) (f : Fin n → Prop) : - for_all_fin f → ∀ i, f i - := by - intro Hf i - apply for_all_fin_aux_imp_forall <;> try assumption - simp - - /- Automating the proofs -/ - @[simp] theorem is_valid_p_same - (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) (x : Result c) : - is_valid_p k (λ _ => x) := by - simp [is_valid_p, k_to_gen, e_to_gen] - - @[simp] theorem is_valid_p_rec - (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) (i : id) (x : a i) : - is_valid_p k (λ k => k i x) := by - simp [is_valid_p, k_to_gen, e_to_gen, kk_to_gen, kk_of_gen] - - theorem is_valid_p_bind - {{k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)}} - {{g : ((i:id) → a i → Result (b i)) → Result c}} - {{h : c → ((i:id) → a i → Result (b i)) → Result d}} - (Hgvalid : is_valid_p k g) - (Hhvalid : ∀ y, is_valid_p k (h y)) : - is_valid_p k (λ k => do let y ← g k; h y k) := by - apply Fix.is_valid_p_bind - . apply Hgvalid - . apply Hhvalid - - def Funs.is_valid_p - (k : k_ty id a b) - (fl : Funs id a b itys otys) : - Prop := - match fl with - | .Nil => True - | .Cons f fl => (∀ x, FixI.is_valid_p k (λ k => f k x)) ∧ fl.is_valid_p k - - def Funs.is_valid_p_is_valid_p_aux - {k : k_ty id a b} - {itys otys : List Type} - (Heq : List.length otys = List.length itys) - (fl : Funs id a b itys otys) (Hvalid : is_valid_p k fl) : - ∀ (i : Fin itys.length) (x : itys.get i), FixI.is_valid_p k (fun k => get_fun fl i k x) := by - -- Prepare the induction - have ⟨ n, Hn ⟩ : { n : Nat // itys.length = n } := ⟨ itys.length, by rfl ⟩ - revert itys otys Heq fl Hvalid - induction n - -- - case zero => - intro itys otys Heq fl Hvalid Hlen; - have Heq: itys = [] := by cases itys <;> simp_all - have Heq: otys = [] := by cases otys <;> simp_all - intro i x - simp_all - have Hi := i.isLt - simp_all - case succ n Hn => - intro itys otys Heq fl Hvalid Hlen i x; - cases fl <;> simp at Hlen i x Heq Hvalid - rename_i ity oty itys otys f fl - have ⟨ Hvf, Hvalid ⟩ := Hvalid - have Hvf1: is_valid_p k fl := by - simp [Hvalid, Funs.is_valid_p] - have Hn := @Hn itys otys (by simp[*]) fl Hvf1 (by simp [*]) - -- Case disjunction on i - match i with - | ⟨ 0, _ ⟩ => - simp at x - simp [get_fun] - apply (Hvf x) - | ⟨ .succ j, HiLt ⟩ => - simp_arith at HiLt - simp at x - let j : Fin (List.length itys) := ⟨ j, by simp_arith [HiLt] ⟩ - have Hn := Hn j x - apply Hn - - def Funs.is_valid_p_is_valid_p - (itys otys : List (Type)) (Heq: otys.length = itys.length := by decide) - (k : k_ty (Fin (List.length itys)) (List.get itys) fun i => List.get otys (fin_cast Heq i)) - (fl : Funs (Fin itys.length) itys.get (λ i => otys.get (fin_cast Heq i)) itys otys) : - fl.is_valid_p k → - ∀ (i : Fin itys.length) (x : itys.get i), FixI.is_valid_p k (fun k => get_fun fl i k x) - := by - intro Hvalid - apply is_valid_p_is_valid_p_aux <;> simp [*] - -end FixI - -namespace Ex1 - /- An example of use of the fixed-point -/ - open Primitives Fix - - variable {a : Type} (k : (List a × Int) → Result a) - - def list_nth_body (x : (List a × Int)) : Result a := - let (ls, i) := x - match ls with - | [] => .fail .panic - | hd :: tl => - if i = 0 then .ret hd - else k (tl, i - 1) - - theorem list_nth_body_is_valid: ∀ k x, is_valid_p k (λ k => @list_nth_body a k x) := by - intro k x - simp [list_nth_body] - split <;> simp - split <;> simp - - noncomputable - def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) - - -- 
The unfolding equation - diverges if `i < 0` - theorem list_nth_eq (ls : List a) (i : Int) : - list_nth ls i = - match ls with - | [] => .fail .panic - | hd :: tl => - if i = 0 then .ret hd - else list_nth tl (i - 1) - := by - have Heq := is_valid_fix_fixed_eq (@list_nth_body_is_valid a) - simp [list_nth] - conv => lhs; rw [Heq] - -end Ex1 - -namespace Ex2 - /- Same as Ex1, but we make the body of nth non tail-rec (this is mostly - to see what happens when there are let-bindings) -/ - open Primitives Fix - - variable {a : Type} (k : (List a × Int) → Result a) - - def list_nth_body (x : (List a × Int)) : Result a := - let (ls, i) := x - match ls with - | [] => .fail .panic - | hd :: tl => - if i = 0 then .ret hd - else - do - let y ← k (tl, i - 1) - .ret y - - theorem list_nth_body_is_valid: ∀ k x, is_valid_p k (λ k => @list_nth_body a k x) := by - intro k x - simp [list_nth_body] - split <;> simp - split <;> simp - apply is_valid_p_bind <;> intros <;> simp_all - - noncomputable - def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) - - -- The unfolding equation - diverges if `i < 0` - theorem list_nth_eq (ls : List a) (i : Int) : - (list_nth ls i = - match ls with - | [] => .fail .panic - | hd :: tl => - if i = 0 then .ret hd - else - do - let y ← list_nth tl (i - 1) - .ret y) - := by - have Heq := is_valid_fix_fixed_eq (@list_nth_body_is_valid a) - simp [list_nth] - conv => lhs; rw [Heq] - -end Ex2 - -namespace Ex3 - /- Mutually recursive functions - first encoding (see Ex4 for a better encoding) -/ - open Primitives Fix - - /- Because we have mutually recursive functions, we use a sum for the inputs - and the output types: - - inputs: the sum allows to select the function to call in the recursive - calls (and the functions may not have the same input types) - - outputs: this case is degenerate because `even` and `odd` have the same - return type `Bool`, but generally speaking we need a sum type because - the functions in the mutually recursive group may have different - return types. 
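     Concretely, the recursive call `is_odd (i - 1)` made from the body of `is_even` is
     encoded below as a call to the continuation `k` on the `.inr` injection; the result,
     of type `Bool ⊕ Bool`, then has to be destructed to recover the `Bool` (the `.inl`
     case cannot happen, but still has to be handled):
     ```
     let r ← k (.inr (i - 1))
     match r with
     | .inl _ => .fail .panic -- Invalid output
     | .inr b => .ret b
     ```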
- -/ - variable (k : (Int ⊕ Int) → Result (Bool ⊕ Bool)) - - def is_even_is_odd_body (x : (Int ⊕ Int)) : Result (Bool ⊕ Bool) := - match x with - | .inl i => - -- Body of `is_even` - if i = 0 - then .ret (.inl true) -- We use .inl because this is `is_even` - else - do - let b ← - do - -- Call `odd`: we need to wrap the input value in `.inr`, then - -- extract the output value - let r ← k (.inr (i- 1)) - match r with - | .inl _ => .fail .panic -- Invalid output - | .inr b => .ret b - -- Wrap the return value - .ret (.inl b) - | .inr i => - -- Body of `is_odd` - if i = 0 - then .ret (.inr false) -- We use .inr because this is `is_odd` - else - do - let b ← - do - -- Call `is_even`: we need to wrap the input value in .inr, then - -- extract the output value - let r ← k (.inl (i- 1)) - match r with - | .inl b => .ret b - | .inr _ => .fail .panic -- Invalid output - -- Wrap the return value - .ret (.inr b) - - theorem is_even_is_odd_body_is_valid: - ∀ k x, is_valid_p k (λ k => is_even_is_odd_body k x) := by - intro k x - simp [is_even_is_odd_body] - split <;> simp <;> split <;> simp - apply is_valid_p_bind; simp - intros; split <;> simp - apply is_valid_p_bind; simp - intros; split <;> simp - - noncomputable - def is_even (i : Int): Result Bool := - do - let r ← fix is_even_is_odd_body (.inl i) - match r with - | .inl b => .ret b - | .inr _ => .fail .panic - - noncomputable - def is_odd (i : Int): Result Bool := - do - let r ← fix is_even_is_odd_body (.inr i) - match r with - | .inl _ => .fail .panic - | .inr b => .ret b - - -- The unfolding equation for `is_even` - diverges if `i < 0` - theorem is_even_eq (i : Int) : - is_even i = (if i = 0 then .ret true else is_odd (i - 1)) - := by - have Heq := is_valid_fix_fixed_eq is_even_is_odd_body_is_valid - simp [is_even, is_odd] - conv => lhs; rw [Heq]; simp; rw [is_even_is_odd_body]; simp - -- Very annoying: we need to swap the matches - -- Doing this with rewriting lemmas is hard generally speaking - -- (especially as we may have to generate lemmas for user-defined - -- inductives on the fly). 
- -- The simplest is to repeatedly split then simplify (we identify - -- the outer match or monadic let-binding, and split on its scrutinee) - split <;> simp - cases H0 : fix is_even_is_odd_body (Sum.inr (i - 1)) <;> simp - rename_i v - split <;> simp - - -- The unfolding equation for `is_odd` - diverges if `i < 0` - theorem is_odd_eq (i : Int) : - is_odd i = (if i = 0 then .ret false else is_even (i - 1)) - := by - have Heq := is_valid_fix_fixed_eq is_even_is_odd_body_is_valid - simp [is_even, is_odd] - conv => lhs; rw [Heq]; simp; rw [is_even_is_odd_body]; simp - -- Same remark as for `even` - split <;> simp - cases H0 : fix is_even_is_odd_body (Sum.inl (i - 1)) <;> simp - rename_i v - split <;> simp - -end Ex3 - -namespace Ex4 - /- Mutually recursive functions - 2nd encoding -/ - open Primitives FixI - - attribute [local simp] List.get - - /- We make the input type and output types dependent on a parameter -/ - @[simp] def input_ty (i : Fin 2) : Type := - [Int, Int].get i - - @[simp] def output_ty (i : Fin 2) : Type := - [Bool, Bool].get i - - /- The continuation -/ - variable (k : (i : Fin 2) → input_ty i → Result (output_ty i)) - - /- The bodies are more natural -/ - def is_even_body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i : Int) : Result Bool := - if i = 0 - then .ret true - else do - let b ← k 1 (i - 1) - .ret b - - def is_odd_body (i : Int) : Result Bool := - if i = 0 - then .ret false - else do - let b ← k 0 (i - 1) - .ret b - - @[simp] def bodies : - Funs (Fin 2) input_ty output_ty [Int, Int] [Bool, Bool] := - Funs.Cons (is_even_body) (Funs.Cons (is_odd_body) Funs.Nil) - - def body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i: Fin 2) : - input_ty i → Result (output_ty i) := get_fun bodies i k - - theorem body_is_valid : is_valid body := by - -- Split the proof into proofs of validity of the individual bodies - rw [is_valid] - simp only [body] - intro k - apply (Funs.is_valid_p_is_valid_p [Int, Int] [Bool, Bool]) - simp [Funs.is_valid_p] - (repeat (apply And.intro)) <;> intro x <;> simp at x <;> - simp only [is_even_body, is_odd_body] - -- Prove the validity of the individual bodies - . split <;> simp - apply is_valid_p_bind <;> simp - . 
split <;> simp - apply is_valid_p_bind <;> simp - - theorem body_fix_eq : fix body = body (fix body) := - is_valid_fix_fixed_eq body_is_valid - - noncomputable def is_even (i : Int) : Result Bool := fix body 0 i - noncomputable def is_odd (i : Int) : Result Bool := fix body 1 i - - theorem is_even_eq (i : Int) : is_even i = - (if i = 0 - then .ret true - else do - let b ← is_odd (i - 1) - .ret b) := by - simp [is_even, is_odd]; - conv => lhs; rw [body_fix_eq] - - theorem is_odd_eq (i : Int) : is_odd i = - (if i = 0 - then .ret false - else do - let b ← is_even (i - 1) - .ret b) := by - simp [is_even, is_odd]; - conv => lhs; rw [body_fix_eq] - -end Ex4 - -namespace Ex5 - /- Higher-order example -/ - open Primitives Fix - - variable {a b : Type} - - /- An auxiliary function, which doesn't require the fixed-point -/ - def map (f : a → Result b) (ls : List a) : Result (List b) := - match ls with - | [] => .ret [] - | hd :: tl => - do - let hd ← f hd - let tl ← map f tl - .ret (hd :: tl) - - /- The validity theorem for `map`, generic in `f` -/ - theorem map_is_valid - {{f : (a → Result b) → a → Result c}} - (Hfvalid : ∀ k x, is_valid_p k (λ k => f k x)) - (k : (a → Result b) → a → Result b) - (ls : List a) : - is_valid_p k (λ k => map (f k) ls) := by - induction ls <;> simp [map] - apply is_valid_p_bind <;> simp_all - intros - apply is_valid_p_bind <;> simp_all - - /- An example which uses map -/ - inductive Tree (a : Type) := - | leaf (x : a) - | node (tl : List (Tree a)) - - def id_body (k : Tree a → Result (Tree a)) (t : Tree a) : Result (Tree a) := - match t with - | .leaf x => .ret (.leaf x) - | .node tl => - do - let tl ← map k tl - .ret (.node tl) - - theorem id_body_is_valid : - ∀ k x, is_valid_p k (λ k => @id_body a k x) := by - intro k x - simp only [id_body] - split <;> simp - apply is_valid_p_bind <;> simp [*] - -- We have to show that `map k tl` is valid - apply map_is_valid; - -- Remark: if we don't do the intro, then the last step is expensive: - -- "typeclass inference of Nonempty took 119ms" - intro k x - simp only [is_valid_p_same, is_valid_p_rec] - - noncomputable def id (t : Tree a) := fix id_body t - - -- The unfolding equation - theorem id_eq (t : Tree a) : - (id t = - match t with - | .leaf x => .ret (.leaf x) - | .node tl => - do - let tl ← map id tl - .ret (.node tl)) - := by - have Heq := is_valid_fix_fixed_eq (@id_body_is_valid a) - simp [id] - conv => lhs; rw [Heq]; simp; rw [id_body] - -end Ex5 - -end Diverge +import Base.Diverge.Base +import Base.Diverge.Elab diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean new file mode 100644 index 00000000..0f92e682 --- /dev/null +++ b/backends/lean/Base/Diverge/Base.lean @@ -0,0 +1,1105 @@ +import Lean +import Lean.Meta.Tactic.Simp +import Init.Data.List.Basic +import Mathlib.Tactic.RunCmd +import Mathlib.Tactic.Linarith + +/- +TODO: +- we want an easier to use cases: + - keeps in the goal an equation of the shape: `t = case` + - if called on Prop terms, uses Classical.em + Actually, the cases from mathlib seems already quite powerful + (https://leanprover-community.github.io/mathlib_docs/tactics.html#cases) + For instance: cases h : e + Also: cases_matching +- better split tactic +- we need conversions to operate on the head of applications. + Actually, something like this works: + ``` + conv at Hl => + apply congr_fun + simp [fix_fuel_P] + ``` + Maybe we need a rpt ... ; focus? 
+- simplifier/rewriter have a strange behavior sometimes +-/ + + +/- TODO: this is very useful, but is there more? -/ +set_option profiler true +set_option profiler.threshold 100 + +namespace Diverge + +namespace Primitives +/-! # Copy-pasting from Primitives to make the file self-contained -/ + +inductive Error where + | assertionFailure: Error + | integerOverflow: Error + | divisionByZero: Error + | arrayOutOfBounds: Error + | maximumSizeExceeded: Error + | panic: Error +deriving Repr, BEq + +open Error + +inductive Result (α : Type u) where + | ret (v: α): Result α + | fail (e: Error): Result α + | div +deriving Repr, BEq + +open Result + +def bind (x: Result α) (f: α -> Result β) : Result β := + match x with + | ret v => f v + | fail v => fail v + | div => div + +@[simp] theorem bind_ret (x : α) (f : α → Result β) : bind (.ret x) f = f x := by simp [bind] +@[simp] theorem bind_fail (x : Error) (f : α → Result β) : bind (.fail x) f = .fail x := by simp [bind] +@[simp] theorem bind_div (f : α → Result β) : bind .div f = .div := by simp [bind] + +-- Allows using Result in do-blocks +instance : Bind Result where + bind := bind + +-- Allows using return x in do-blocks +instance : Pure Result where + pure := fun x => ret x + +@[simp] theorem bind_tc_ret (x : α) (f : α → Result β) : + (do let y ← .ret x; f y) = f x := by simp [Bind.bind, bind] + +@[simp] theorem bind_tc_fail (x : Error) (f : α → Result β) : + (do let y ← fail x; f y) = fail x := by simp [Bind.bind, bind] + +@[simp] theorem bind_tc_div (f : α → Result β) : + (do let y ← div; f y) = div := by simp [Bind.bind, bind] + +def div? {α: Type} (r: Result α): Bool := + match r with + | div => true + | ret _ | fail _ => false + +end Primitives + +namespace Fix + + open Primitives + open Result + + variable {a : Type} {b : a → Type} + variable {c d : Type} + + /-! # The least fixed point definition and its properties -/ + + def least_p (p : Nat → Prop) (n : Nat) : Prop := p n ∧ (∀ m, m < n → ¬ p m) + noncomputable def least (p : Nat → Prop) : Nat := + Classical.epsilon (least_p p) + + -- Auxiliary theorem for [least_spec]: if there exists an `n` satisfying `p`, + -- there there exists a least `m` satisfying `p`. + theorem least_spec_aux (p : Nat → Prop) : ∀ (n : Nat), (hn : p n) → ∃ m, least_p p m := by + apply Nat.strongRec' + intros n hi hn + -- Case disjunction on: is n the smallest n satisfying p? + match Classical.em (∀ m, m < n → ¬ p m) with + | .inl hlt => + -- Yes: trivial + exists n + | .inr hlt => + simp at * + let ⟨ m, ⟨ hmlt, hm ⟩ ⟩ := hlt + have hi := hi m hmlt hm + apply hi + + -- The specification of [least]: either `p` is never satisfied, or it is satisfied + -- by `least p` and no `n < least p` satisfies `p`. + theorem least_spec (p : Nat → Prop) : (∀ n, ¬ p n) ∨ (p (least p) ∧ ∀ n, n < least p → ¬ p n) := by + -- Case disjunction on the existence of an `n` which satisfies `p` + match Classical.em (∀ n, ¬ p n) with + | .inl h => + -- There doesn't exist: trivial + apply (Or.inl h) + | .inr h => + -- There exists: we simply use `least_spec_aux` in combination with the property + -- of the epsilon operator + simp at * + let ⟨ n, hn ⟩ := h + apply Or.inr + have hl := least_spec_aux p n hn + have he := Classical.epsilon_spec hl + apply he + + /-! 
# The fixed point definitions -/ + + def fix_fuel (n : Nat) (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : + Result (b x) := + match n with + | 0 => .div + | n + 1 => + f (fix_fuel n f) x + + @[simp] def fix_fuel_pred (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (x : a) (n : Nat) := + not (div? (fix_fuel n f x)) + + def fix_fuel_P (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (x : a) (n : Nat) : Prop := + fix_fuel_pred f x n + + noncomputable + def fix (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : Result (b x) := + fix_fuel (least (fix_fuel_P f x)) f x + + /-! # The validity property -/ + + -- Monotonicity relation over results + -- TODO: generalize (we should parameterize the definition by a relation over `a`) + def result_rel {a : Type u} (x1 x2 : Result a) : Prop := + match x1 with + | div => True + | fail _ => x2 = x1 + | ret _ => x2 = x1 -- TODO: generalize + + -- Monotonicity relation over monadic arrows (i.e., Kleisli arrows) + def karrow_rel (k1 k2 : (x:a) → Result (b x)) : Prop := + ∀ x, result_rel (k1 x) (k2 x) + + -- Monotonicity property for function bodies + def is_mono (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) : Prop := + ∀ {{k1 k2}}, karrow_rel k1 k2 → karrow_rel (f k1) (f k2) + + -- "Continuity" property. + -- We need this, and this looks a lot like continuity. Also see this paper: + -- https://inria.hal.science/file/index/docid/216187/filename/tarski.pdf + -- We define our "continuity" criteria so that it gives us what we need to + -- prove the fixed-point equation, and we can also easily manipulate it. + def is_cont (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) : Prop := + ∀ x, (Hdiv : ∀ n, fix_fuel (.succ n) f x = div) → f (fix f) x = div + + /-! # The proof of the fixed-point equation -/ + theorem fix_fuel_mono {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} + (Hmono : is_mono f) : + ∀ {{n m}}, n ≤ m → karrow_rel (fix_fuel n f) (fix_fuel m f) := by + intros n + induction n + case zero => simp [karrow_rel, fix_fuel, result_rel] + case succ n1 Hi => + intros m Hle x + simp [result_rel] + match m with + | 0 => + exfalso + zify at * + linarith + | Nat.succ m1 => + simp_arith at Hle + simp [fix_fuel] + have Hi := Hi Hle + have Hmono := Hmono Hi x + simp [result_rel] at Hmono + apply Hmono + + @[simp] theorem neg_fix_fuel_P + {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} {x : a} {n : Nat} : + ¬ fix_fuel_P f x n ↔ (fix_fuel n f x = div) := by + simp [fix_fuel_P, div?] + cases fix_fuel n f x <;> simp + + theorem fix_fuel_fix_mono {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} (Hmono : is_mono f) : + ∀ n, karrow_rel (fix_fuel n f) (fix f) := by + intros n x + simp [result_rel] + have Hl := least_spec (fix_fuel_P f x) + simp at Hl + match Hl with + | .inl Hl => simp [*] + | .inr ⟨ Hl, Hn ⟩ => + match Classical.em (fix_fuel n f x = div) with + | .inl Hd => + simp [*] + | .inr Hd => + have Hineq : least (fix_fuel_P f x) ≤ n := by + -- Proof by contradiction + cases Classical.em (least (fix_fuel_P f x) ≤ n) <;> simp [*] + simp at * + rename_i Hineq + have Hn := Hn n Hineq + contradiction + have Hfix : ¬ (fix f x = div) := by + simp [fix] + -- By property of the least upper bound + revert Hd Hl + -- TODO: there is no conversion to select the head of a function! + conv => lhs; apply congr_fun; apply congr_fun; apply congr_fun; simp [fix_fuel_P, div?] 
+ cases fix_fuel (least (fix_fuel_P f x)) f x <;> simp + have Hmono := fix_fuel_mono Hmono Hineq x + simp [result_rel] at Hmono + simp [fix] at * + cases Heq: fix_fuel (least (fix_fuel_P f x)) f x <;> + cases Heq':fix_fuel n f x <;> + simp_all + + theorem fix_fuel_P_least {f : ((x:a) → Result (b x)) → (x:a) → Result (b x)} (Hmono : is_mono f) : + ∀ {{x n}}, fix_fuel_P f x n → fix_fuel_P f x (least (fix_fuel_P f x)) := by + intros x n Hf + have Hfmono := fix_fuel_fix_mono Hmono n x + -- TODO: there is no conversion to select the head of a function! + conv => apply congr_fun; simp [fix_fuel_P] + simp [fix_fuel_P] at Hf + revert Hf Hfmono + simp [div?, result_rel, fix] + cases fix_fuel n f x <;> simp_all + + -- Prove the fixed point equation in the case there exists some fuel for which + -- the execution terminates + theorem fix_fixed_eq_terminates (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (Hmono : is_mono f) + (x : a) (n : Nat) (He : fix_fuel_P f x n) : + fix f x = f (fix f) x := by + have Hl := fix_fuel_P_least Hmono He + -- TODO: better control of simplification + conv at Hl => + apply congr_fun + simp [fix_fuel_P] + -- The least upper bound is > 0 + have ⟨ n, Hsucc ⟩ : ∃ n, least (fix_fuel_P f x) = Nat.succ n := by + revert Hl + simp [div?] + cases least (fix_fuel_P f x) <;> simp [fix_fuel] + simp [Hsucc] at Hl + revert Hl + simp [*, div?, fix, fix_fuel] + -- Use the monotonicity + have Hfixmono := fix_fuel_fix_mono Hmono n + have Hvm := Hmono Hfixmono x + -- Use functional extensionality + simp [result_rel, fix] at Hvm + revert Hvm + split <;> simp [*] <;> intros <;> simp [*] + + theorem fix_fixed_eq_forall {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} + (Hmono : is_mono f) (Hcont : is_cont f) : + ∀ x, fix f x = f (fix f) x := by + intros x + -- Case disjunction: is there a fuel such that the execution successfully execute? + match Classical.em (∃ n, fix_fuel_P f x n) with + | .inr He => + -- No fuel: the fixed point evaluates to `div` + --simp [fix] at * + simp at * + conv => lhs; simp [fix] + have Hel := He (Nat.succ (least (fix_fuel_P f x))); simp [*, fix_fuel] at *; clear Hel + -- Use the "continuity" of `f` + have He : ∀ n, fix_fuel (.succ n) f x = div := by intros; simp [*] + have Hcont := Hcont x He + simp [Hcont] + | .inl ⟨ n, He ⟩ => apply fix_fixed_eq_terminates f Hmono x n He + + -- The final fixed point equation + theorem fix_fixed_eq {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} + (Hmono : is_mono f) (Hcont : is_cont f) : + fix f = f (fix f) := by + have Heq := fix_fixed_eq_forall Hmono Hcont + have Heq1 : fix f = (λ x => fix f x) := by simp + rw [Heq1] + conv => lhs; ext; simp [Heq] + + /-! 
# Making the proofs of validity manageable (and automatable) -/ + + -- Monotonicity property for expressions + def is_mono_p (e : ((x:a) → Result (b x)) → Result c) : Prop := + ∀ {{k1 k2}}, karrow_rel k1 k2 → result_rel (e k1) (e k2) + + theorem is_mono_p_same (x : Result c) : + @is_mono_p a b c (λ _ => x) := by + simp [is_mono_p, karrow_rel, result_rel] + split <;> simp + + theorem is_mono_p_rec (x : a) : + @is_mono_p a b (b x) (λ f => f x) := by + simp_all [is_mono_p, karrow_rel, result_rel] + + -- The important lemma about `is_mono_p` + theorem is_mono_p_bind + (g : ((x:a) → Result (b x)) → Result c) + (h : c → ((x:a) → Result (b x)) → Result d) : + is_mono_p g → + (∀ y, is_mono_p (h y)) → + @is_mono_p a b d (λ k => do let y ← g k; h y k) := by + intro hg hh + simp [is_mono_p] + intro fg fh Hrgh + simp [karrow_rel, result_rel] + have hg := hg Hrgh; simp [result_rel] at hg + cases heq0: g fg <;> simp_all + rename_i y _ + have hh := hh y Hrgh; simp [result_rel] at hh + simp_all + + -- Continuity property for expressions - note that we take the continuation + -- as parameter + def is_cont_p (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (e : ((x:a) → Result (b x)) → Result c) : Prop := + (Hc : ∀ n, e (fix_fuel n k) = .div) → + e (fix k) = .div + + theorem is_cont_p_same (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (x : Result c) : + is_cont_p k (λ _ => x) := by + simp [is_cont_p] + + theorem is_cont_p_rec (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : + is_cont_p f (λ f => f x) := by + simp_all [is_cont_p, fix] + + -- The important lemma about `is_cont_p` + theorem is_cont_p_bind + (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (Hkmono : is_mono k) + (g : ((x:a) → Result (b x)) → Result c) + (h : c → ((x:a) → Result (b x)) → Result d) : + is_mono_p g → + is_cont_p k g → + (∀ y, is_mono_p (h y)) → + (∀ y, is_cont_p k (h y)) → + is_cont_p k (λ k => do let y ← g k; h y k) := by + intro Hgmono Hgcont Hhmono Hhcont + simp [is_cont_p] + intro Hdiv + -- Case on `g (fix... k)`: is there an n s.t. it terminates? + cases Classical.em (∀ n, g (fix_fuel n k) = .div) <;> rename_i Hn + . -- Case 1: g diverges + have Hgcont := Hgcont Hn + simp_all + . -- Case 2: g doesn't diverge + simp at Hn + let ⟨ n, Hn ⟩ := Hn + have Hdivn := Hdiv n + have Hffmono := fix_fuel_fix_mono Hkmono n + have Hgeq := Hgmono Hffmono + simp [result_rel] at Hgeq + cases Heq: g (fix_fuel n k) <;> rename_i y <;> simp_all + -- Remains the .ret case + -- Use Hdiv to prove that: ∀ n, h y (fix_fuel n f) = div + -- We do this in two steps: first we prove it for m ≥ n + have Hhdiv: ∀ m, h y (fix_fuel m k) = .div := by + have Hhdiv : ∀ m, n ≤ m → h y (fix_fuel m k) = .div := by + -- We use the fact that `g (fix_fuel n f) = .div`, combined with Hdiv + intro m Hle + have Hdivm := Hdiv m + -- Monotonicity of g + have Hffmono := fix_fuel_mono Hkmono Hle + have Hgmono := Hgmono Hffmono + -- We need to clear Hdiv because otherwise simp_all rewrites Hdivm with Hdiv + clear Hdiv + simp_all [result_rel] + intro m + -- TODO: we shouldn't need the excluded middle here because it is decidable + cases Classical.em (n ≤ m) <;> rename_i Hl + . apply Hhdiv; assumption + . 
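        -- Case ¬ (n ≤ m), i.e. m < n: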
simp at Hl + -- Make a case disjunction on `h y (fix_fuel m k)`: if it is not equal + -- to div, use the monotonicity of `h y` + have Hle : m ≤ n := by linarith + have Hffmono := fix_fuel_mono Hkmono Hle + have Hmono := Hhmono y Hffmono + simp [result_rel] at Hmono + cases Heq: h y (fix_fuel m k) <;> simp_all + -- We can now use the continuity hypothesis for h + apply Hhcont; assumption + + -- The validity property for an expression + def is_valid_p (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (e : ((x:a) → Result (b x)) → Result c) : Prop := + is_mono_p e ∧ + (is_mono k → is_cont_p k e) + + @[simp] theorem is_valid_p_same + (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : Result c) : + is_valid_p k (λ _ => x) := by + simp [is_valid_p, is_mono_p_same, is_cont_p_same] + + @[simp] theorem is_valid_p_rec + (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : + is_valid_p k (λ k => k x) := by + simp_all [is_valid_p, is_mono_p_rec, is_cont_p_rec] + + -- Lean is good at unification: we can write a very general version + -- (in particular, it will manage to figure out `g` and `h` when we + -- apply the lemma) + theorem is_valid_p_bind + {{k : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} + {{g : ((x:a) → Result (b x)) → Result c}} + {{h : c → ((x:a) → Result (b x)) → Result d}} + (Hgvalid : is_valid_p k g) + (Hhvalid : ∀ y, is_valid_p k (h y)) : + is_valid_p k (λ k => do let y ← g k; h y k) := by + let ⟨ Hgmono, Hgcont ⟩ := Hgvalid + simp [is_valid_p, forall_and] at Hhvalid + have ⟨ Hhmono, Hhcont ⟩ := Hhvalid + simp [← imp_forall_iff] at Hhcont + simp [is_valid_p]; constructor + . -- Monotonicity + apply is_mono_p_bind <;> assumption + . -- Continuity + intro Hkmono + have Hgcont := Hgcont Hkmono + have Hhcont := Hhcont Hkmono + apply is_cont_p_bind <;> assumption + + def is_valid (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) : Prop := + ∀ k x, is_valid_p k (λ k => f k x) + + theorem is_valid_p_imp_is_valid {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} + (Hvalid : is_valid f) : + is_mono f ∧ is_cont f := by + have Hmono : is_mono f := by + intro f h Hr x + have Hmono := Hvalid (λ _ _ => .div) x + have Hmono := Hmono.left + apply Hmono; assumption + have Hcont : is_cont f := by + intro x Hdiv + have Hcont := (Hvalid f x).right Hmono + simp [is_cont_p] at Hcont + apply Hcont + intro n + have Hdiv := Hdiv n + simp [fix_fuel] at Hdiv + simp [*] + simp [*] + + theorem is_valid_fix_fixed_eq {{f : ((x:a) → Result (b x)) → (x:a) → Result (b x)}} + (Hvalid : is_valid f) : + fix f = f (fix f) := by + have ⟨ Hmono, Hcont ⟩ := is_valid_p_imp_is_valid Hvalid + exact fix_fixed_eq Hmono Hcont + +end Fix + +namespace FixI + /- Indexed fixed-point: definitions with indexed types, convenient to use for mutually + recursive definitions. We simply port the definitions and proofs from Fix to a more + specific case. 
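     The intended workflow is the one illustrated by Ex4 below (roughly):
     ```
     -- 1. write one body per function, abstracted over the continuation `k`
     -- 2. pack the bodies:       bodies : Funs (Fin 2) input_ty output_ty [Int, Int] [Bool, Bool]
     -- 3. tie them together:     body k i := get_fun bodies i k
     -- 4. prove its validity:    body_is_valid : is_valid body
     -- 5. take the fixed point:  is_even i := fix body 0 i, is_odd i := fix body 1 i
     -- 6. derive the unfolding equations from `is_valid_fix_fixed_eq body_is_valid`
     ```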
+ -/ + open Primitives Fix + + -- The index type + variable {id : Type} + + -- The input/output types + variable {a b : id → Type} + + -- Monotonicity relation over monadic arrows (i.e., Kleisli arrows) + def karrow_rel (k1 k2 : (i:id) → a i → Result (b i)) : Prop := + ∀ i x, result_rel (k1 i x) (k2 i x) + + def kk_to_gen (k : (i:id) → a i → Result (b i)) : + (x: (i:id) × a i) → Result (b x.fst) := + λ ⟨ i, x ⟩ => k i x + + def kk_of_gen (k : (x: (i:id) × a i) → Result (b x.fst)) : + (i:id) → a i → Result (b i) := + λ i x => k ⟨ i, x ⟩ + + def k_to_gen (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : + ((x: (i:id) × a i) → Result (b x.fst)) → (x: (i:id) × a i) → Result (b x.fst) := + λ kk => kk_to_gen (k (kk_of_gen kk)) + + def k_of_gen (k : ((x: (i:id) × a i) → Result (b x.fst)) → (x: (i:id) × a i) → Result (b x.fst)) : + ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i) := + λ kk => kk_of_gen (k (kk_to_gen kk)) + + def e_to_gen (e : ((i:id) → a i → Result (b i)) → Result c) : + ((x: (i:id) × a i) → Result (b x.fst)) → Result c := + λ k => e (kk_of_gen k) + + def is_valid_p (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) + (e : ((i:id) → a i → Result (b i)) → Result c) : Prop := + Fix.is_valid_p (k_to_gen k) (e_to_gen e) + + def is_valid (f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : Prop := + ∀ k i x, is_valid_p k (λ k => f k i x) + + noncomputable def fix + (f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : + (i:id) → a i → Result (b i) := + kk_of_gen (Fix.fix (k_to_gen f)) + + theorem is_valid_fix_fixed_eq + {{f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)}} + (Hvalid : is_valid f) : + fix f = f (fix f) := by + have Hvalid' : Fix.is_valid (k_to_gen f) := by + intro k x + simp only [is_valid, is_valid_p] at Hvalid + let ⟨ i, x ⟩ := x + have Hvalid := Hvalid (k_of_gen k) i x + simp only [k_to_gen, k_of_gen, kk_to_gen, kk_of_gen] at Hvalid + refine Hvalid + have Heq := Fix.is_valid_fix_fixed_eq Hvalid' + simp [fix] + conv => lhs; rw [Heq] + + /- Some utilities to define the mutually recursive functions -/ + + -- TODO: use more + @[simp] def kk_ty (id : Type) (a b : id → Type) := (i:id) → a i → Result (b i) + @[simp] def k_ty (id : Type) (a b : id → Type) := kk_ty id a b → kk_ty id a b + + -- Initially, we had left out the parameters id, a and b. 
+ -- However, by parameterizing Funs with those parameters, we can state + -- and prove lemmas like Funs.is_valid_p_is_valid_p + inductive Funs (id : Type) (a b : id → Type) : + List (Type u) → List (Type u) → Type (u + 1) := + | Nil : Funs id a b [] [] + | Cons {ity oty : Type u} {itys otys : List (Type u)} + (f : kk_ty id a b → ity → Result oty) (tl : Funs id a b itys otys) : + Funs id a b (ity :: itys) (oty :: otys) + + theorem Funs.length_eq {itys otys : List (Type)} (fl : Funs id a b itys otys) : + otys.length = itys.length := + match fl with + | .Nil => by simp + | .Cons f tl => + have h:= Funs.length_eq tl + by simp [h] + + def fin_cast {n m : Nat} (h : m = n) (i : Fin n) : Fin m := + ⟨ i.val, by have h1:= i.isLt; simp_all ⟩ + + @[simp] def Funs.cast_fin {itys otys : List (Type)} + (fl : Funs id a b itys otys) (i : Fin itys.length) : Fin otys.length := + fin_cast (fl.length_eq) i + + def get_fun {itys otys : List (Type)} (fl : Funs id a b itys otys) : + (i : Fin itys.length) → kk_ty id a b → itys.get i → Result (otys.get (fl.cast_fin i)) := + match fl with + | .Nil => λ i => by have h:= i.isLt; simp at h + | @Funs.Cons id a b ity oty itys1 otys1 f tl => + λ i => + if h: i.val = 0 then + Eq.mp (by cases i; simp_all [List.get]) f + else + let j := i.val - 1 + have Hj: j < itys1.length := by + have Hi := i.isLt + simp at Hi + revert Hi + cases Heq: i.val <;> simp_all + simp_arith + let j: Fin itys1.length := ⟨ j, Hj ⟩ + Eq.mp + (by + cases Heq: i; rename_i val isLt; + cases Heq': j; rename_i val' isLt; + cases val <;> simp_all [List.get, fin_cast]) + (get_fun tl j) + + -- TODO: move + theorem add_one_le_iff_le_ne (n m : Nat) (h1 : m ≤ n) (h2 : m ≠ n) : m + 1 ≤ n := by + -- Damn, those proofs on natural numbers are hard - I wish Omega was in mathlib4... + simp [Nat.add_one_le_iff] + simp [Nat.lt_iff_le_and_ne] + simp_all + + def for_all_fin_aux {n : Nat} (f : Fin n → Prop) (m : Nat) (h : m ≤ n) : Prop := + if heq: m = n then True + else + f ⟨ m, by simp_all [Nat.lt_iff_le_and_ne] ⟩ ∧ + for_all_fin_aux f (m + 1) (by simp_all [add_one_le_iff_le_ne]) + termination_by for_all_fin_aux n _ m h => n - m + decreasing_by + simp_wf + apply Nat.sub_add_lt_sub <;> simp + simp_all [add_one_le_iff_le_ne] + + def for_all_fin {n : Nat} (f : Fin n → Prop) := for_all_fin_aux f 0 (by simp) + + theorem for_all_fin_aux_imp_forall {n : Nat} (f : Fin n → Prop) (m : Nat) : + (h : m ≤ n) → + for_all_fin_aux f m h → ∀ i, m ≤ i.val → f i + := by + generalize h: (n - m) = k + revert m + induction k -- TODO: induction h rather? + case zero => + simp_all + intro m h1 h2 + have h: n = m := by + linarith + unfold for_all_fin_aux; simp_all + simp_all + -- There is no i s.t. m ≤ i + intro i h3; cases i; simp_all + linarith + case succ k hi => + simp_all + intro m hk hmn + intro hf i hmi + have hne: m ≠ n := by + have hineq := Nat.lt_of_sub_eq_succ hk + linarith + -- m = i? + if heq: m = i then + -- Yes: simply use the `for_all_fin_aux` hyp + unfold for_all_fin_aux at hf + simp_all + tauto + else + -- No: use the induction hypothesis + have hlt: m < i := by simp_all [Nat.lt_iff_le_and_ne] + have hineq: m + 1 ≤ n := by + have hineq := Nat.lt_of_sub_eq_succ hk + simp [*, Nat.add_one_le_iff] + have heq1: n - (m + 1) = k := by + -- TODO: very annoying arithmetic proof + simp [Nat.sub_eq_iff_eq_add hineq] + have hineq1: m ≤ n := by linarith + simp [Nat.sub_eq_iff_eq_add hineq1] at hk + simp_arith [hk] + have hi := hi (m + 1) heq1 hineq + apply hi <;> simp_all + . unfold for_all_fin_aux at hf + simp_all + . 
simp_all [add_one_le_iff_le_ne] + + -- TODO: this is not necessary anymore + theorem for_all_fin_imp_forall (n : Nat) (f : Fin n → Prop) : + for_all_fin f → ∀ i, f i + := by + intro Hf i + apply for_all_fin_aux_imp_forall <;> try assumption + simp + + /- Automating the proofs -/ + @[simp] theorem is_valid_p_same + (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) (x : Result c) : + is_valid_p k (λ _ => x) := by + simp [is_valid_p, k_to_gen, e_to_gen] + + @[simp] theorem is_valid_p_rec + (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) (i : id) (x : a i) : + is_valid_p k (λ k => k i x) := by + simp [is_valid_p, k_to_gen, e_to_gen, kk_to_gen, kk_of_gen] + + theorem is_valid_p_bind + {{k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)}} + {{g : ((i:id) → a i → Result (b i)) → Result c}} + {{h : c → ((i:id) → a i → Result (b i)) → Result d}} + (Hgvalid : is_valid_p k g) + (Hhvalid : ∀ y, is_valid_p k (h y)) : + is_valid_p k (λ k => do let y ← g k; h y k) := by + apply Fix.is_valid_p_bind + . apply Hgvalid + . apply Hhvalid + + def Funs.is_valid_p + (k : k_ty id a b) + (fl : Funs id a b itys otys) : + Prop := + match fl with + | .Nil => True + | .Cons f fl => (∀ x, FixI.is_valid_p k (λ k => f k x)) ∧ fl.is_valid_p k + + def Funs.is_valid_p_is_valid_p_aux + {k : k_ty id a b} + {itys otys : List Type} + (Heq : List.length otys = List.length itys) + (fl : Funs id a b itys otys) (Hvalid : is_valid_p k fl) : + ∀ (i : Fin itys.length) (x : itys.get i), FixI.is_valid_p k (fun k => get_fun fl i k x) := by + -- Prepare the induction + have ⟨ n, Hn ⟩ : { n : Nat // itys.length = n } := ⟨ itys.length, by rfl ⟩ + revert itys otys Heq fl Hvalid + induction n + -- + case zero => + intro itys otys Heq fl Hvalid Hlen; + have Heq: itys = [] := by cases itys <;> simp_all + have Heq: otys = [] := by cases otys <;> simp_all + intro i x + simp_all + have Hi := i.isLt + simp_all + case succ n Hn => + intro itys otys Heq fl Hvalid Hlen i x; + cases fl <;> simp at Hlen i x Heq Hvalid + rename_i ity oty itys otys f fl + have ⟨ Hvf, Hvalid ⟩ := Hvalid + have Hvf1: is_valid_p k fl := by + simp [Hvalid, Funs.is_valid_p] + have Hn := @Hn itys otys (by simp[*]) fl Hvf1 (by simp [*]) + -- Case disjunction on i + match i with + | ⟨ 0, _ ⟩ => + simp at x + simp [get_fun] + apply (Hvf x) + | ⟨ .succ j, HiLt ⟩ => + simp_arith at HiLt + simp at x + let j : Fin (List.length itys) := ⟨ j, by simp_arith [HiLt] ⟩ + have Hn := Hn j x + apply Hn + + def Funs.is_valid_p_is_valid_p + (itys otys : List (Type)) (Heq: otys.length = itys.length := by decide) + (k : k_ty (Fin (List.length itys)) (List.get itys) fun i => List.get otys (fin_cast Heq i)) + (fl : Funs (Fin itys.length) itys.get (λ i => otys.get (fin_cast Heq i)) itys otys) : + fl.is_valid_p k → + ∀ (i : Fin itys.length) (x : itys.get i), FixI.is_valid_p k (fun k => get_fun fl i k x) + := by + intro Hvalid + apply is_valid_p_is_valid_p_aux <;> simp [*] + +end FixI + +namespace Ex1 + /- An example of use of the fixed-point -/ + open Primitives Fix + + variable {a : Type} (k : (List a × Int) → Result a) + + def list_nth_body (x : (List a × Int)) : Result a := + let (ls, i) := x + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else k (tl, i - 1) + + theorem list_nth_body_is_valid: ∀ k x, is_valid_p k (λ k => @list_nth_body a k x) := by + intro k x + simp [list_nth_body] + split <;> simp + split <;> simp + + noncomputable + def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) + + -- 
The unfolding equation - diverges if `i < 0` + theorem list_nth_eq (ls : List a) (i : Int) : + list_nth ls i = + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else list_nth tl (i - 1) + := by + have Heq := is_valid_fix_fixed_eq (@list_nth_body_is_valid a) + simp [list_nth] + conv => lhs; rw [Heq] + +end Ex1 + +namespace Ex2 + /- Same as Ex1, but we make the body of nth non tail-rec (this is mostly + to see what happens when there are let-bindings) -/ + open Primitives Fix + + variable {a : Type} (k : (List a × Int) → Result a) + + def list_nth_body (x : (List a × Int)) : Result a := + let (ls, i) := x + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else + do + let y ← k (tl, i - 1) + .ret y + + theorem list_nth_body_is_valid: ∀ k x, is_valid_p k (λ k => @list_nth_body a k x) := by + intro k x + simp [list_nth_body] + split <;> simp + split <;> simp + apply is_valid_p_bind <;> intros <;> simp_all + + noncomputable + def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) + + -- The unfolding equation - diverges if `i < 0` + theorem list_nth_eq (ls : List a) (i : Int) : + (list_nth ls i = + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else + do + let y ← list_nth tl (i - 1) + .ret y) + := by + have Heq := is_valid_fix_fixed_eq (@list_nth_body_is_valid a) + simp [list_nth] + conv => lhs; rw [Heq] + +end Ex2 + +namespace Ex3 + /- Mutually recursive functions - first encoding (see Ex4 for a better encoding) -/ + open Primitives Fix + + /- Because we have mutually recursive functions, we use a sum for the inputs + and the output types: + - inputs: the sum allows to select the function to call in the recursive + calls (and the functions may not have the same input types) + - outputs: this case is degenerate because `even` and `odd` have the same + return type `Bool`, but generally speaking we need a sum type because + the functions in the mutually recursive group may have different + return types. 
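     Note that with this encoding every recursive call has to inject its argument into the
     sum and destruct the sum it gets back, with dummy `.fail .panic` branches for the
     cases that cannot happen. The indexed encoding of Ex4 (based on `FixI`) avoids this
     boilerplate by indexing the input/output types with `Fin 2` instead of using sums:
     ```
     -- Ex3 (sum encoding):     k : Int ⊕ Int → Result (Bool ⊕ Bool)
     -- Ex4 (indexed encoding): k : (i : Fin 2) → input_ty i → Result (output_ty i)
     ```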
+ -/ + variable (k : (Int ⊕ Int) → Result (Bool ⊕ Bool)) + + def is_even_is_odd_body (x : (Int ⊕ Int)) : Result (Bool ⊕ Bool) := + match x with + | .inl i => + -- Body of `is_even` + if i = 0 + then .ret (.inl true) -- We use .inl because this is `is_even` + else + do + let b ← + do + -- Call `odd`: we need to wrap the input value in `.inr`, then + -- extract the output value + let r ← k (.inr (i- 1)) + match r with + | .inl _ => .fail .panic -- Invalid output + | .inr b => .ret b + -- Wrap the return value + .ret (.inl b) + | .inr i => + -- Body of `is_odd` + if i = 0 + then .ret (.inr false) -- We use .inr because this is `is_odd` + else + do + let b ← + do + -- Call `is_even`: we need to wrap the input value in .inr, then + -- extract the output value + let r ← k (.inl (i- 1)) + match r with + | .inl b => .ret b + | .inr _ => .fail .panic -- Invalid output + -- Wrap the return value + .ret (.inr b) + + theorem is_even_is_odd_body_is_valid: + ∀ k x, is_valid_p k (λ k => is_even_is_odd_body k x) := by + intro k x + simp [is_even_is_odd_body] + split <;> simp <;> split <;> simp + apply is_valid_p_bind; simp + intros; split <;> simp + apply is_valid_p_bind; simp + intros; split <;> simp + + noncomputable + def is_even (i : Int): Result Bool := + do + let r ← fix is_even_is_odd_body (.inl i) + match r with + | .inl b => .ret b + | .inr _ => .fail .panic + + noncomputable + def is_odd (i : Int): Result Bool := + do + let r ← fix is_even_is_odd_body (.inr i) + match r with + | .inl _ => .fail .panic + | .inr b => .ret b + + -- The unfolding equation for `is_even` - diverges if `i < 0` + theorem is_even_eq (i : Int) : + is_even i = (if i = 0 then .ret true else is_odd (i - 1)) + := by + have Heq := is_valid_fix_fixed_eq is_even_is_odd_body_is_valid + simp [is_even, is_odd] + conv => lhs; rw [Heq]; simp; rw [is_even_is_odd_body]; simp + -- Very annoying: we need to swap the matches + -- Doing this with rewriting lemmas is hard generally speaking + -- (especially as we may have to generate lemmas for user-defined + -- inductives on the fly). 
+ -- The simplest is to repeatedly split then simplify (we identify + -- the outer match or monadic let-binding, and split on its scrutinee) + split <;> simp + cases H0 : fix is_even_is_odd_body (Sum.inr (i - 1)) <;> simp + rename_i v + split <;> simp + + -- The unfolding equation for `is_odd` - diverges if `i < 0` + theorem is_odd_eq (i : Int) : + is_odd i = (if i = 0 then .ret false else is_even (i - 1)) + := by + have Heq := is_valid_fix_fixed_eq is_even_is_odd_body_is_valid + simp [is_even, is_odd] + conv => lhs; rw [Heq]; simp; rw [is_even_is_odd_body]; simp + -- Same remark as for `even` + split <;> simp + cases H0 : fix is_even_is_odd_body (Sum.inl (i - 1)) <;> simp + rename_i v + split <;> simp + +end Ex3 + +namespace Ex4 + /- Mutually recursive functions - 2nd encoding -/ + open Primitives FixI + + attribute [local simp] List.get + + /- We make the input type and output types dependent on a parameter -/ + @[simp] def input_ty (i : Fin 2) : Type := + [Int, Int].get i + + @[simp] def output_ty (i : Fin 2) : Type := + [Bool, Bool].get i + + /- The continuation -/ + variable (k : (i : Fin 2) → input_ty i → Result (output_ty i)) + + /- The bodies are more natural -/ + def is_even_body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i : Int) : Result Bool := + if i = 0 + then .ret true + else do + let b ← k 1 (i - 1) + .ret b + + def is_odd_body (i : Int) : Result Bool := + if i = 0 + then .ret false + else do + let b ← k 0 (i - 1) + .ret b + + @[simp] def bodies : + Funs (Fin 2) input_ty output_ty [Int, Int] [Bool, Bool] := + Funs.Cons (is_even_body) (Funs.Cons (is_odd_body) Funs.Nil) + + def body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i: Fin 2) : + input_ty i → Result (output_ty i) := get_fun bodies i k + + theorem body_is_valid : is_valid body := by + -- Split the proof into proofs of validity of the individual bodies + rw [is_valid] + simp only [body] + intro k + apply (Funs.is_valid_p_is_valid_p [Int, Int] [Bool, Bool]) + simp [Funs.is_valid_p] + (repeat (apply And.intro)) <;> intro x <;> simp at x <;> + simp only [is_even_body, is_odd_body] + -- Prove the validity of the individual bodies + . split <;> simp + apply is_valid_p_bind <;> simp + . 
split <;> simp + apply is_valid_p_bind <;> simp + + theorem body_fix_eq : fix body = body (fix body) := + is_valid_fix_fixed_eq body_is_valid + + noncomputable def is_even (i : Int) : Result Bool := fix body 0 i + noncomputable def is_odd (i : Int) : Result Bool := fix body 1 i + + theorem is_even_eq (i : Int) : is_even i = + (if i = 0 + then .ret true + else do + let b ← is_odd (i - 1) + .ret b) := by + simp [is_even, is_odd]; + conv => lhs; rw [body_fix_eq] + + theorem is_odd_eq (i : Int) : is_odd i = + (if i = 0 + then .ret false + else do + let b ← is_even (i - 1) + .ret b) := by + simp [is_even, is_odd]; + conv => lhs; rw [body_fix_eq] + +end Ex4 + +namespace Ex5 + /- Higher-order example -/ + open Primitives Fix + + variable {a b : Type} + + /- An auxiliary function, which doesn't require the fixed-point -/ + def map (f : a → Result b) (ls : List a) : Result (List b) := + match ls with + | [] => .ret [] + | hd :: tl => + do + let hd ← f hd + let tl ← map f tl + .ret (hd :: tl) + + /- The validity theorem for `map`, generic in `f` -/ + theorem map_is_valid + {{f : (a → Result b) → a → Result c}} + (Hfvalid : ∀ k x, is_valid_p k (λ k => f k x)) + (k : (a → Result b) → a → Result b) + (ls : List a) : + is_valid_p k (λ k => map (f k) ls) := by + induction ls <;> simp [map] + apply is_valid_p_bind <;> simp_all + intros + apply is_valid_p_bind <;> simp_all + + /- An example which uses map -/ + inductive Tree (a : Type) := + | leaf (x : a) + | node (tl : List (Tree a)) + + def id_body (k : Tree a → Result (Tree a)) (t : Tree a) : Result (Tree a) := + match t with + | .leaf x => .ret (.leaf x) + | .node tl => + do + let tl ← map k tl + .ret (.node tl) + + theorem id_body_is_valid : + ∀ k x, is_valid_p k (λ k => @id_body a k x) := by + intro k x + simp only [id_body] + split <;> simp + apply is_valid_p_bind <;> simp [*] + -- We have to show that `map k tl` is valid + apply map_is_valid; + -- Remark: if we don't do the intro, then the last step is expensive: + -- "typeclass inference of Nonempty took 119ms" + intro k x + simp only [is_valid_p_same, is_valid_p_rec] + + noncomputable def id (t : Tree a) := fix id_body t + + -- The unfolding equation + theorem id_eq (t : Tree a) : + (id t = + match t with + | .leaf x => .ret (.leaf x) + | .node tl => + do + let tl ← map id tl + .ret (.node tl)) + := by + have Heq := is_valid_fix_fixed_eq (@id_body_is_valid a) + simp [id] + conv => lhs; rw [Heq]; simp; rw [id_body] + +end Ex5 diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean new file mode 100644 index 00000000..313c5a79 --- /dev/null +++ b/backends/lean/Base/Diverge/Elab.lean @@ -0,0 +1,182 @@ +import Lean +import Lean.Meta.Tactic.Simp +import Init.Data.List.Basic +import Mathlib.Tactic.RunCmd +import Base.Diverge.Base +import Base.Diverge.ElabBase + +namespace Diverge + +/- Automating the generation of the encoding and the proofs so as to use nice + syntactic sugar. -/ + +syntax (name := divergentDef) + declModifiers "divergent" "def" declId ppIndent(optDeclSig) declVal : command + +open Lean Elab Term Meta Primitives + +initialize registerTraceClass `Diverge.divRecursion (inherited := true) + +set_option trace.Diverge.divRecursion true + +/- The following was copied from the `wfRecursion` function. 
-/ + +open WF in +def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do + let msg := toMessageData <| preDefs.map fun pd => (pd.declName, pd.levelParams, pd.type, pd.value) + logInfo ("divRecursion: defs: " ++ msg) + + -- CHANGE HERE This function should add definitions with these names/types/values ^^ + -- Temporarily add the predefinitions as axioms + for preDef in preDefs do + addAsAxiom preDef + + -- TODO: what is this? + for preDef in preDefs do + applyAttributesOf #[preDef] AttributeApplicationTime.afterCompilation + + -- Process the definitions + addAndCompilePartialRec preDefs + +-- The following function is copy&pasted from Lean.Elab.PreDefinition.Main +-- This is the only part where we make actual changes and hook into the equation compiler. +-- (I've removed all the well-founded stuff to make it easier to read though.) + +open private ensureNoUnassignedMVarsAtPreDef betaReduceLetRecApps partitionPreDefs + addAndCompilePartial addAsAxioms from Lean.Elab.PreDefinition.Main + +def addPreDefinitions (preDefs : Array PreDefinition) : TermElabM Unit := withLCtx {} {} do + for preDef in preDefs do + trace[Elab.definition.body] "{preDef.declName} : {preDef.type} :=\n{preDef.value}" + let preDefs ← preDefs.mapM ensureNoUnassignedMVarsAtPreDef + let preDefs ← betaReduceLetRecApps preDefs + let cliques := partitionPreDefs preDefs + let mut hasErrors := false + for preDefs in cliques do + trace[Elab.definition.scc] "{preDefs.map (·.declName)}" + try + logInfo "calling divRecursion" + withRef (preDefs[0]!.ref) do + divRecursion preDefs + logInfo "divRecursion succeeded" + catch ex => + -- If it failed, we + logInfo "divRecursion failed" + hasErrors := true + logException ex + let s ← saveState + try + if preDefs.all fun preDef => preDef.kind == DefKind.def || + preDefs.all fun preDef => preDef.kind == DefKind.abbrev then + -- try to add as partial definition + try + addAndCompilePartial preDefs (useSorry := true) + catch _ => + -- Compilation failed try again just as axiom + s.restore + addAsAxioms preDefs + else return () + catch _ => s.restore + +-- The following two functions are copy&pasted from Lean.Elab.MutualDef + +open private elabHeaders levelMVarToParamHeaders getAllUserLevelNames withFunLocalDecls elabFunValues + instantiateMVarsAtHeader instantiateMVarsAtLetRecToLift checkLetRecsToLiftTypes withUsed from Lean.Elab.MutualDef + +def Term.elabMutualDef (vars : Array Expr) (views : Array DefView) : TermElabM Unit := do + let scopeLevelNames ← getLevelNames + let headers ← elabHeaders views + let headers ← levelMVarToParamHeaders views headers + let allUserLevelNames := getAllUserLevelNames headers + withFunLocalDecls headers fun funFVars => do + for view in views, funFVar in funFVars do + addLocalVarInfo view.declId funFVar + let values ← + try + let values ← elabFunValues headers + Term.synthesizeSyntheticMVarsNoPostponing + values.mapM (instantiateMVars ·) + catch ex => + logException ex + headers.mapM fun header => mkSorry header.type (synthetic := true) + let headers ← headers.mapM instantiateMVarsAtHeader + let letRecsToLift ← getLetRecsToLift + let letRecsToLift ← letRecsToLift.mapM instantiateMVarsAtLetRecToLift + checkLetRecsToLiftTypes funFVars letRecsToLift + withUsed vars headers values letRecsToLift fun vars => do + let preDefs ← MutualClosure.main vars headers funFVars values letRecsToLift + for preDef in preDefs do + trace[Elab.definition] "{preDef.declName} : {preDef.type} :=\n{preDef.value}" + let preDefs ← withLevelNames allUserLevelNames <| 
levelMVarToParamPreDecls preDefs + let preDefs ← instantiateMVarsAtPreDecls preDefs + let preDefs ← fixLevelParams preDefs scopeLevelNames allUserLevelNames + for preDef in preDefs do + trace[Elab.definition] "after eraseAuxDiscr, {preDef.declName} : {preDef.type} :=\n{preDef.value}" + checkForHiddenUnivLevels allUserLevelNames preDefs + addPreDefinitions preDefs + +open Command in +def Command.elabMutualDef (ds : Array Syntax) : CommandElabM Unit := do + let views ← ds.mapM fun d => do + let `($mods:declModifiers divergent def $id:declId $sig:optDeclSig $val:declVal) := d + | throwUnsupportedSyntax + let modifiers ← elabModifiers mods + let (binders, type) := expandOptDeclSig sig + let deriving? := none + pure { ref := d, kind := DefKind.def, modifiers, + declId := id, binders, type? := type, value := val, deriving? } + runTermElabM fun vars => Term.elabMutualDef vars views + +-- Special command so that we don't fall back to the built-in mutual when we produce an error. +local syntax "_divergent" Parser.Command.mutual : command +elab_rules : command | `(_divergent mutual $decls* end) => Command.elabMutualDef decls + +macro_rules + | `(mutual $decls* end) => do + unless !decls.isEmpty && decls.all (·.1.getKind == ``divergentDef) do + Macro.throwUnsupported + `(command| _divergent mutual $decls* end) + +open private setDeclIdName from Lean.Elab.Declaration +elab_rules : command + | `($mods:declModifiers divergent%$tk def $id:declId $sig:optDeclSig $val:declVal) => do + let (name, _) := expandDeclIdCore id + if (`_root_).isPrefixOf name then throwUnsupportedSyntax + let view := extractMacroScopes name + let .str ns shortName := view.name | throwUnsupportedSyntax + let shortName' := { view with name := shortName }.review + let cmd ← `(mutual $mods:declModifiers divergent%$tk def $(⟨setDeclIdName id shortName'⟩):declId $sig:optDeclSig $val:declVal end) + if ns matches .anonymous then + Command.elabCommand cmd + else + Command.elabCommand <| ← `(namespace $(mkIdentFrom id ns) $cmd end $(mkIdentFrom id ns)) + +mutual + divergent def is_even (i : Int) : Result Bool := + if i = 0 then return true else return (← is_odd (i - 1)) + + divergent def is_odd (i : Int) : Result Bool := + if i = 0 then return false else return (← is_even (i - 1)) +end + +example (i : Int) : is_even i = .ret (i % 2 = 0) ∧ is_odd i = .ret (i % 2 ≠ 0) := by + induction i + unfold is_even + sorry + +divergent def list_nth {a: Type} (ls : List a) (i : Int) : Result a := + match ls with + | [] => .fail .panic + | x :: ls => + if i = 0 then return x + else return (← list_nth ls (i - 1)) + +mutual + divergent def foo (i : Int) : Result Nat := + if i > 10 then return (← foo (i / 10)) + (← bar i) else bar 10 + + divergent def bar (i : Int) : Result Nat := + if i > 20 then foo (i / 20) else .ret 42 +end + +end Diverge diff --git a/backends/lean/Base/Diverge/ElabBase.lean b/backends/lean/Base/Diverge/ElabBase.lean new file mode 100644 index 00000000..e693dce2 --- /dev/null +++ b/backends/lean/Base/Diverge/ElabBase.lean @@ -0,0 +1,9 @@ +import Lean + +namespace Diverge + +open Lean + +initialize registerTraceClass `Diverge.divRecursion (inherited := true) + +end Diverge -- cgit v1.2.3 From a6de153f3bfda7feb27d16fcdf2131d37f99c7a3 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 29 Jun 2023 11:22:32 +0200 Subject: Start working on Elab.lean --- backends/lean/Base/Diverge/Base.lean | 3 + backends/lean/Base/Diverge/Elab.lean | 138 ++++++++++++++++++++++++++++--- backends/lean/Base/Diverge/ElabBase.lean | 75 ++++++++++++++++- 3 files 
changed, 203 insertions(+), 13 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean index 0f92e682..2e60f6e8 100644 --- a/backends/lean/Base/Diverge/Base.lean +++ b/backends/lean/Base/Diverge/Base.lean @@ -4,6 +4,9 @@ import Init.Data.List.Basic import Mathlib.Tactic.RunCmd import Mathlib.Tactic.Linarith +-- For debugging +import Base.Diverge.ElabBase + /- TODO: - we want an easier to use cases: diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index 313c5a79..22e0039f 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -15,16 +15,53 @@ syntax (name := divergentDef) open Lean Elab Term Meta Primitives -initialize registerTraceClass `Diverge.divRecursion (inherited := true) - -set_option trace.Diverge.divRecursion true +set_option trace.Diverge.def true /- The following was copied from the `wfRecursion` function. -/ open WF in + + + +-- Replace the recursive calls by a call to the continuation +-- def replace_rec_calls + +#check Lean.Meta.forallTelescope +#check Expr +#check withRef +#check MonadRef.withRef +#check Nat +#check Array +#check Lean.Meta.inferType +#check Nat +#check Int + +#check (0, 1) +#check Prod +#check () +#check Unit +#check Sigma + +-- print_decl is_even_body +#check instOfNatNat +#check OfNat.ofNat -- @OfNat.ofNat ℕ 2 ... +#check OfNat.ofNat -- @OfNat.ofNat (Fin 2) 1 ... +#check Fin.instOfNatFinHAddNatInstHAddInstAddNatOfNat + + +-- TODO: is there already such a utility somewhere? +-- TODO: change to mkSigmas +def mkProds (tys : List Expr) : MetaM Expr := + match tys with + | [] => do return (Expr.const ``PUnit.unit []) + | [ty] => do return ty + | ty :: tys => do + let pty ← mkProds tys + mkAppM ``Prod.mk #[ty, pty] + def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do let msg := toMessageData <| preDefs.map fun pd => (pd.declName, pd.levelParams, pd.type, pd.value) - logInfo ("divRecursion: defs: " ++ msg) + trace[Diverge.def] ("divRecursion: defs: " ++ msg) -- CHANGE HERE This function should add definitions with these names/types/values ^^ -- Temporarily add the predefinitions as axioms @@ -35,6 +72,85 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do for preDef in preDefs do applyAttributesOf #[preDef] AttributeApplicationTime.afterCompilation + -- Retrieve the name of the first definition, that we will use as the namespace + -- for the definitions common to the group + let def0 := preDefs[0]! + let grName := def0.declName + trace[Diverge.def] "group name: {grName}" + + /- Compute the type of the continuation. + + We do the following + - we make sure all the definitions have the same universe parameters + (we can make this more general later) + - we group all the type parameters together, make sure all the + definitions have the same type parameters, and enforce + a uniform polymorphism (we can also lift this later). + This would require generalizing a bit our indexed fixed point to + make the output type parametric in the input. 
+ - we group all the non-type parameters: we parameterize the continuation + by those + -/ + let grLvlParams := def0.levelParams + trace[Diverge.def] "def0 type: {def0.type}" + + -- Small utility: compute the list of type parameters + let getTypeParams (ty: Expr) : MetaM (List Expr × List Expr × Expr) := + Lean.Meta.forallTelescope ty fun tys out_ty => do + trace[Diverge.def] "types: {tys}" +/- let (_, params) ← StateT.run (do + for x in tys do + let ty ← Lean.Meta.inferType x + match ty with + | .sort _ => do + let st ← StateT.get + StateT.set (ty :: st) + | _ => do break + ) ([] : List Expr) + let params := params.reverse + trace[Diverge.def] " type parameters {params}" + return params -/ + let rec get_params (ls : List Expr) : MetaM (List Expr × List Expr) := + match ls with + | x :: tl => do + let ty ← Lean.Meta.inferType x + match ty with + | .sort _ => do + let (ty_params, params) ← get_params tl + return (x :: ty_params, params) + | _ => do return ([], ls) + | _ => do return ([], []) + let (ty_params, params) ← get_params tys.toList + trace[Diverge.def] " parameters: {ty_params}; {params}" + return (ty_params, params, out_ty) + let (grTyParams, _, _) ← do + getTypeParams def0.type + + -- Compute the input types and the output types + let all_tys ← preDefs.mapM fun preDef => do + let (tyParams, params, ret_ty) ← getTypeParams preDef.type + -- TODO: this is not complete, there are more checks to perform + if tyParams.length ≠ grTyParams.length then + throwError "Non-uniform polymorphism" + return (params, ret_ty) + + -- TODO: I think there are issues with the free variables + let (input_tys, output_tys) := List.unzip all_tys.toList + let input_tys : List Expr ← liftM (List.mapM mkProds input_tys) + + trace[Diverge.def] " in/out tys: {input_tys}; {output_tys}" + + -- Compute the names set + let names := preDefs.map PreDefinition.declName + let names := HashSet.empty.insertMany names + + -- + for preDef in preDefs do + trace[Diverge.def] "about to explore: {preDef.declName}" + explore_term "" preDef.value + + -- Compute the bodies + -- Process the definitions addAndCompilePartialRec preDefs @@ -47,21 +163,21 @@ open private ensureNoUnassignedMVarsAtPreDef betaReduceLetRecApps partitionPreDe def addPreDefinitions (preDefs : Array PreDefinition) : TermElabM Unit := withLCtx {} {} do for preDef in preDefs do - trace[Elab.definition.body] "{preDef.declName} : {preDef.type} :=\n{preDef.value}" + trace[Diverge.elab] "{preDef.declName} : {preDef.type} :=\n{preDef.value}" let preDefs ← preDefs.mapM ensureNoUnassignedMVarsAtPreDef let preDefs ← betaReduceLetRecApps preDefs let cliques := partitionPreDefs preDefs let mut hasErrors := false for preDefs in cliques do - trace[Elab.definition.scc] "{preDefs.map (·.declName)}" + trace[Diverge.elab] "{preDefs.map (·.declName)}" try - logInfo "calling divRecursion" + trace[Diverge.elab] "calling divRecursion" withRef (preDefs[0]!.ref) do divRecursion preDefs - logInfo "divRecursion succeeded" + trace[Diverge.elab] "divRecursion succeeded" catch ex => -- If it failed, we - logInfo "divRecursion failed" + trace[Diverge.elab] "divRecursion failed" hasErrors := true logException ex let s ← saveState @@ -106,12 +222,12 @@ def Term.elabMutualDef (vars : Array Expr) (views : Array DefView) : TermElabM U withUsed vars headers values letRecsToLift fun vars => do let preDefs ← MutualClosure.main vars headers funFVars values letRecsToLift for preDef in preDefs do - trace[Elab.definition] "{preDef.declName} : {preDef.type} :=\n{preDef.value}" + trace[Diverge.elab] 
"{preDef.declName} : {preDef.type} :=\n{preDef.value}" let preDefs ← withLevelNames allUserLevelNames <| levelMVarToParamPreDecls preDefs let preDefs ← instantiateMVarsAtPreDecls preDefs let preDefs ← fixLevelParams preDefs scopeLevelNames allUserLevelNames for preDef in preDefs do - trace[Elab.definition] "after eraseAuxDiscr, {preDef.declName} : {preDef.type} :=\n{preDef.value}" + trace[Diverge.elab] "after eraseAuxDiscr, {preDef.declName} : {preDef.type} :=\n{preDef.value}" checkForHiddenUnivLevels allUserLevelNames preDefs addPreDefinitions preDefs diff --git a/backends/lean/Base/Diverge/ElabBase.lean b/backends/lean/Base/Diverge/ElabBase.lean index e693dce2..84b73a30 100644 --- a/backends/lean/Base/Diverge/ElabBase.lean +++ b/backends/lean/Base/Diverge/ElabBase.lean @@ -2,8 +2,79 @@ import Lean namespace Diverge -open Lean +open Lean Elab Term Meta -initialize registerTraceClass `Diverge.divRecursion (inherited := true) +initialize registerTraceClass `Diverge.elab (inherited := true) +initialize registerTraceClass `Diverge.def (inherited := true) + +-- TODO: move +-- TODO: small helper +def explore_term (incr : String) (e : Expr) : TermElabM Unit := + match e with + | .bvar _ => do logInfo m!"{incr}bvar: {e}"; return () + | .fvar _ => do logInfo m!"{incr}fvar: {e}"; return () + | .mvar _ => do logInfo m!"{incr}mvar: {e}"; return () + | .sort _ => do logInfo m!"{incr}sort: {e}"; return () + | .const _ _ => do logInfo m!"{incr}const: {e}"; return () + | .app fn arg => do + logInfo m!"{incr}app: {e}" + explore_term (incr ++ " ") fn + explore_term (incr ++ " ") arg + | .lam _bName bTy body _binfo => do + logInfo m!"{incr}lam: {e}" + explore_term (incr ++ " ") bTy + explore_term (incr ++ " ") body + | .forallE _bName bTy body _bInfo => do + logInfo m!"{incr}forallE: {e}" + explore_term (incr ++ " ") bTy + explore_term (incr ++ " ") body + | .letE _dName ty val body _nonDep => do + logInfo m!"{incr}letE: {e}" + explore_term (incr ++ " ") ty + explore_term (incr ++ " ") val + explore_term (incr ++ " ") body + | .lit _ => do logInfo m!"{incr}lit: {e}"; return () + | .mdata _ e => do + logInfo m!"{incr}mdata: {e}" + explore_term (incr ++ " ") e + | .proj _ _ struct => do + logInfo m!"{incr}proj: {e}" + explore_term (incr ++ " ") struct + +def explore_decl (n : Name) : TermElabM Unit := do + logInfo m!"Name: {n}" + let env ← getEnv + let decl := env.constants.find! n + match decl with + | .defnInfo val => + logInfo m!"About to explore defn: {decl.name}" + logInfo m!"# Type:" + explore_term "" val.type + logInfo m!"# Value:" + explore_term "" val.value + | .axiomInfo _ => throwError m!"axiom: {n}" + | .thmInfo _ => throwError m!"thm: {n}" + | .opaqueInfo _ => throwError m!"opaque: {n}" + | .quotInfo _ => throwError m!"quot: {n}" + | .inductInfo _ => throwError m!"induct: {n}" + | .ctorInfo _ => throwError m!"ctor: {n}" + | .recInfo _ => throwError m!"rec: {n}" + +syntax (name := printDecl) "print_decl " ident : command + +open Lean.Elab.Command + +@[command_elab printDecl] def elabPrintDecl : CommandElab := fun stx => do + liftTermElabM do + let id := stx[1] + addCompletionInfo <| CompletionInfo.id id id.getId (danglingDot := false) {} none + let cs ← resolveGlobalConstWithInfos id + explore_decl cs[0]! 
+ +private def test1 : Nat := 0 +private def test2 (x : Nat) : Nat := x + +print_decl test1 +print_decl test2 end Diverge -- cgit v1.2.3 From 0cee49de70bec6d3ec2221b64a532d19ad71e5e0 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 29 Jun 2023 14:51:53 +0200 Subject: Generalize a bit FixI and add an example --- backends/lean/Base/Diverge/Base.lean | 260 ++++++++++++++++++++--------------- 1 file changed, 151 insertions(+), 109 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean index 2e60f6e8..630c0bf6 100644 --- a/backends/lean/Base/Diverge/Base.lean +++ b/backends/lean/Base/Diverge/Base.lean @@ -57,7 +57,7 @@ deriving Repr, BEq open Result -def bind (x: Result α) (f: α -> Result β) : Result β := +def bind {α : Type u} {β : Type v} (x: Result α) (f: α -> Result β) : Result β := match x with | ret v => f v | fail v => fail v @@ -84,7 +84,7 @@ instance : Pure Result where @[simp] theorem bind_tc_div (f : α → Result β) : (do let y ← div; f y) = div := by simp [Bind.bind, bind] -def div? {α: Type} (r: Result α): Bool := +def div? {α: Type u} (r: Result α): Bool := match r with | div => true | ret _ | fail _ => false @@ -96,8 +96,8 @@ namespace Fix open Primitives open Result - variable {a : Type} {b : a → Type} - variable {c d : Type} + variable {a : Type u} {b : a → Type v} + variable {c d : Type w} -- TODO: why do we have to make them both : Type w? /-! # The least fixed point definition and its properties -/ @@ -334,7 +334,8 @@ namespace Fix (h : c → ((x:a) → Result (b x)) → Result d) : is_mono_p g → (∀ y, is_mono_p (h y)) → - @is_mono_p a b d (λ k => do let y ← g k; h y k) := by + @is_mono_p a b d (λ k => @Bind.bind Result _ c d (g k) (fun y => h y k)) := by +-- @is_mono_p a b d (λ k => do let (y : c) ← g k; h y k) := by intro hg hh simp [is_mono_p] intro fg fh Hrgh @@ -494,49 +495,49 @@ namespace FixI open Primitives Fix -- The index type - variable {id : Type} + variable {id : Type u} -- The input/output types - variable {a b : id → Type} + variable {a : id → Type v} {b : (i:id) → a i → Type w} -- Monotonicity relation over monadic arrows (i.e., Kleisli arrows) - def karrow_rel (k1 k2 : (i:id) → a i → Result (b i)) : Prop := + def karrow_rel (k1 k2 : (i:id) → (x:a i) → Result (b i x)) : Prop := ∀ i x, result_rel (k1 i x) (k2 i x) - def kk_to_gen (k : (i:id) → a i → Result (b i)) : - (x: (i:id) × a i) → Result (b x.fst) := + def kk_to_gen (k : (i:id) → (x:a i) → Result (b i x)) : + (x: (i:id) × a i) → Result (b x.fst x.snd) := λ ⟨ i, x ⟩ => k i x - def kk_of_gen (k : (x: (i:id) × a i) → Result (b x.fst)) : - (i:id) → a i → Result (b i) := + def kk_of_gen (k : (x: (i:id) × a i) → Result (b x.fst x.snd)) : + (i:id) → (x:a i) → Result (b i x) := λ i x => k ⟨ i, x ⟩ - def k_to_gen (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : - ((x: (i:id) × a i) → Result (b x.fst)) → (x: (i:id) × a i) → Result (b x.fst) := + def k_to_gen (k : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)) : + ((x: (i:id) × a i) → Result (b x.fst x.snd)) → (x: (i:id) × a i) → Result (b x.fst x.snd) := λ kk => kk_to_gen (k (kk_of_gen kk)) - def k_of_gen (k : ((x: (i:id) × a i) → Result (b x.fst)) → (x: (i:id) × a i) → Result (b x.fst)) : - ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i) := + def k_of_gen (k : ((x: (i:id) × a i) → Result (b x.fst x.snd)) → (x: (i:id) × a i) → Result (b x.fst x.snd)) : + ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x) := λ kk => kk_of_gen 
(k (kk_to_gen kk)) - def e_to_gen (e : ((i:id) → a i → Result (b i)) → Result c) : - ((x: (i:id) × a i) → Result (b x.fst)) → Result c := + def e_to_gen (e : ((i:id) → (x:a i) → Result (b i x)) → Result c) : + ((x: (i:id) × a i) → Result (b x.fst x.snd)) → Result c := λ k => e (kk_of_gen k) - def is_valid_p (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) - (e : ((i:id) → a i → Result (b i)) → Result c) : Prop := + def is_valid_p (k : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)) + (e : ((i:id) → (x:a i) → Result (b i x)) → Result c) : Prop := Fix.is_valid_p (k_to_gen k) (e_to_gen e) - def is_valid (f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : Prop := + def is_valid (f : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)) : Prop := ∀ k i x, is_valid_p k (λ k => f k i x) noncomputable def fix - (f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) : - (i:id) → a i → Result (b i) := + (f : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)) : + (i:id) → (x:a i) → Result (b i x) := kk_of_gen (Fix.fix (k_to_gen f)) theorem is_valid_fix_fixed_eq - {{f : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)}} + {{f : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)}} (Hvalid : is_valid f) : fix f = f (fix f) := by have Hvalid' : Fix.is_valid (k_to_gen f) := by @@ -553,57 +554,43 @@ namespace FixI /- Some utilities to define the mutually recursive functions -/ -- TODO: use more - @[simp] def kk_ty (id : Type) (a b : id → Type) := (i:id) → a i → Result (b i) - @[simp] def k_ty (id : Type) (a b : id → Type) := kk_ty id a b → kk_ty id a b + @[simp] def kk_ty (id : Type u) (a : id → Type v) (b : (i:id) → (x:a i) → Type w) := + (i:id) → (x:a i) → Result (b i x) + @[simp] def k_ty (id : Type u) (a : id → Type v) (b : (i:id) → (x:a i) → Type w) := + kk_ty id a b → kk_ty id a b + + def in_out_ty : Type (imax (u + 1) (v + 1)) := (in_ty : Type u) × ((x:in_ty) → Type v) + @[simp] def mk_in_out_ty (in_ty : Type u) (out_ty : in_ty → Type v) : + in_out_ty := + Sigma.mk in_ty out_ty -- Initially, we had left out the parameters id, a and b. 
-- However, by parameterizing Funs with those parameters, we can state -- and prove lemmas like Funs.is_valid_p_is_valid_p - inductive Funs (id : Type) (a b : id → Type) : - List (Type u) → List (Type u) → Type (u + 1) := - | Nil : Funs id a b [] [] - | Cons {ity oty : Type u} {itys otys : List (Type u)} - (f : kk_ty id a b → ity → Result oty) (tl : Funs id a b itys otys) : - Funs id a b (ity :: itys) (oty :: otys) - - theorem Funs.length_eq {itys otys : List (Type)} (fl : Funs id a b itys otys) : - otys.length = itys.length := - match fl with - | .Nil => by simp - | .Cons f tl => - have h:= Funs.length_eq tl - by simp [h] - - def fin_cast {n m : Nat} (h : m = n) (i : Fin n) : Fin m := - ⟨ i.val, by have h1:= i.isLt; simp_all ⟩ - - @[simp] def Funs.cast_fin {itys otys : List (Type)} - (fl : Funs id a b itys otys) (i : Fin itys.length) : Fin otys.length := - fin_cast (fl.length_eq) i - - def get_fun {itys otys : List (Type)} (fl : Funs id a b itys otys) : - (i : Fin itys.length) → kk_ty id a b → itys.get i → Result (otys.get (fl.cast_fin i)) := + inductive Funs (id : Type u) (a : id → Type v) (b : (i:id) → (x:a i) → Type w) : + List in_out_ty.{v, w} → Type (max (u + 1) (max (v + 1) (w + 1))) := + | Nil : Funs id a b [] + | Cons {ity : Type v} {oty : ity → Type w} {tys : List in_out_ty} + (f : kk_ty id a b → (x:ity) → Result (oty x)) (tl : Funs id a b tys) : + Funs id a b (⟨ ity, oty ⟩ :: tys) + + def get_fun {tys : List in_out_ty} (fl : Funs id a b tys) : + (i : Fin tys.length) → kk_ty id a b → (x : (tys.get i).fst) → + Result ((tys.get i).snd x) := match fl with | .Nil => λ i => by have h:= i.isLt; simp at h - | @Funs.Cons id a b ity oty itys1 otys1 f tl => - λ i => - if h: i.val = 0 then - Eq.mp (by cases i; simp_all [List.get]) f - else - let j := i.val - 1 - have Hj: j < itys1.length := by - have Hi := i.isLt - simp at Hi - revert Hi - cases Heq: i.val <;> simp_all + | @Funs.Cons id a b ity oty tys1 f tl => + λ ⟨ i, iLt ⟩ => + match i with + | 0 => + Eq.mp (by simp [List.get]) f + | .succ j => + have jLt: j < tys1.length := by + simp at iLt + revert iLt simp_arith - let j: Fin itys1.length := ⟨ j, Hj ⟩ - Eq.mp - (by - cases Heq: i; rename_i val isLt; - cases Heq': j; rename_i val' isLt; - cases val <;> simp_all [List.get, fin_cast]) - (get_fun tl j) + let j: Fin tys1.length := ⟨ j, jLt ⟩ + Eq.mp (by simp) (get_fun tl j) -- TODO: move theorem add_one_le_iff_le_ne (n m : Nat) (h1 : m ≤ n) (h2 : m ≠ n) : m + 1 ≤ n := by @@ -683,19 +670,19 @@ namespace FixI /- Automating the proofs -/ @[simp] theorem is_valid_p_same - (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) (x : Result c) : + (k : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)) (x : Result c) : is_valid_p k (λ _ => x) := by simp [is_valid_p, k_to_gen, e_to_gen] @[simp] theorem is_valid_p_rec - (k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)) (i : id) (x : a i) : + (k : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)) (i : id) (x : a i) : is_valid_p k (λ k => k i x) := by simp [is_valid_p, k_to_gen, e_to_gen, kk_to_gen, kk_of_gen] theorem is_valid_p_bind - {{k : ((i:id) → a i → Result (b i)) → (i:id) → a i → Result (b i)}} - {{g : ((i:id) → a i → Result (b i)) → Result c}} - {{h : c → ((i:id) → a i → Result (b i)) → Result d}} + {{k : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)}} + {{g : ((i:id) → (x:a i) → Result (b i x)) → Result c}} + {{h : c → ((i:id) → (x:a i) → Result (b i x)) → Result d}} (Hgvalid : is_valid_p k g) 
(Hhvalid : ∀ y, is_valid_p k (h y)) : is_valid_p k (λ k => do let y ← g k; h y k) := by @@ -705,7 +692,7 @@ namespace FixI def Funs.is_valid_p (k : k_ty id a b) - (fl : Funs id a b itys otys) : + (fl : Funs id a b tys) : Prop := match fl with | .Nil => True @@ -713,31 +700,29 @@ namespace FixI def Funs.is_valid_p_is_valid_p_aux {k : k_ty id a b} - {itys otys : List Type} - (Heq : List.length otys = List.length itys) - (fl : Funs id a b itys otys) (Hvalid : is_valid_p k fl) : - ∀ (i : Fin itys.length) (x : itys.get i), FixI.is_valid_p k (fun k => get_fun fl i k x) := by + {tys : List in_out_ty} + (fl : Funs id a b tys) (Hvalid : is_valid_p k fl) : + ∀ (i : Fin tys.length) (x : (tys.get i).fst), FixI.is_valid_p k (fun k => get_fun fl i k x) := by -- Prepare the induction - have ⟨ n, Hn ⟩ : { n : Nat // itys.length = n } := ⟨ itys.length, by rfl ⟩ - revert itys otys Heq fl Hvalid + have ⟨ n, Hn ⟩ : { n : Nat // tys.length = n } := ⟨ tys.length, by rfl ⟩ + revert tys fl Hvalid induction n -- case zero => - intro itys otys Heq fl Hvalid Hlen; - have Heq: itys = [] := by cases itys <;> simp_all - have Heq: otys = [] := by cases otys <;> simp_all + intro tys fl Hvalid Hlen; + have Heq: tys = [] := by cases tys <;> simp_all intro i x simp_all have Hi := i.isLt simp_all case succ n Hn => - intro itys otys Heq fl Hvalid Hlen i x; - cases fl <;> simp at Hlen i x Heq Hvalid - rename_i ity oty itys otys f fl + intro tys fl Hvalid Hlen i x; + cases fl <;> simp at Hlen i x Hvalid + rename_i ity oty tys f fl have ⟨ Hvf, Hvalid ⟩ := Hvalid have Hvf1: is_valid_p k fl := by simp [Hvalid, Funs.is_valid_p] - have Hn := @Hn itys otys (by simp[*]) fl Hvf1 (by simp [*]) + have Hn := @Hn tys fl Hvf1 (by simp [*]) -- Case disjunction on i match i with | ⟨ 0, _ ⟩ => @@ -747,19 +732,20 @@ namespace FixI | ⟨ .succ j, HiLt ⟩ => simp_arith at HiLt simp at x - let j : Fin (List.length itys) := ⟨ j, by simp_arith [HiLt] ⟩ + let j : Fin (List.length tys) := ⟨ j, by simp_arith [HiLt] ⟩ have Hn := Hn j x apply Hn def Funs.is_valid_p_is_valid_p - (itys otys : List (Type)) (Heq: otys.length = itys.length := by decide) - (k : k_ty (Fin (List.length itys)) (List.get itys) fun i => List.get otys (fin_cast Heq i)) - (fl : Funs (Fin itys.length) itys.get (λ i => otys.get (fin_cast Heq i)) itys otys) : + (tys : List in_out_ty) + (k : k_ty (Fin (List.length tys)) (λ i => (tys.get i).fst) (fun i x => (List.get tys i).snd x)) + (fl : Funs (Fin tys.length) (λ i => (tys.get i).fst) (λ i x => (tys.get i).snd x) tys) : fl.is_valid_p k → - ∀ (i : Fin itys.length) (x : itys.get i), FixI.is_valid_p k (fun k => get_fun fl i k x) + ∀ (i : Fin tys.length) (x : (tys.get i).fst), + FixI.is_valid_p k (fun k => get_fun fl i k x) := by intro Hvalid - apply is_valid_p_is_valid_p_aux <;> simp [*] + apply is_valid_p_is_valid_p_aux; simp [*] end FixI @@ -960,27 +946,21 @@ namespace Ex4 /- Mutually recursive functions - 2nd encoding -/ open Primitives FixI - attribute [local simp] List.get - /- We make the input type and output types dependent on a parameter -/ - @[simp] def input_ty (i : Fin 2) : Type := - [Int, Int].get i - - @[simp] def output_ty (i : Fin 2) : Type := - [Bool, Bool].get i - - /- The continuation -/ - variable (k : (i : Fin 2) → input_ty i → Result (output_ty i)) + @[simp] def tys : List in_out_ty := [mk_in_out_ty Int (λ _ => Bool), mk_in_out_ty Int (λ _ => Bool)] + @[simp] def input_ty (i : Fin 2) : Type := (tys.get i).fst + @[simp] def output_ty (i : Fin 2) (x : input_ty i) : Type := + (tys.get i).snd x /- The bodies are more natural 
-/ - def is_even_body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i : Int) : Result Bool := + def is_even_body (k : (i : Fin 2) → (x : input_ty i) → Result (output_ty i x)) (i : Int) : Result Bool := if i = 0 then .ret true else do let b ← k 1 (i - 1) .ret b - def is_odd_body (i : Int) : Result Bool := + def is_odd_body (k : (i : Fin 2) → (x : input_ty i) → Result (output_ty i x)) (i : Int) : Result Bool := if i = 0 then .ret false else do @@ -988,18 +968,19 @@ namespace Ex4 .ret b @[simp] def bodies : - Funs (Fin 2) input_ty output_ty [Int, Int] [Bool, Bool] := + Funs (Fin 2) input_ty output_ty + [mk_in_out_ty Int (λ _ => Bool), mk_in_out_ty Int (λ _ => Bool)] := Funs.Cons (is_even_body) (Funs.Cons (is_odd_body) Funs.Nil) - def body (k : (i : Fin 2) → input_ty i → Result (output_ty i)) (i: Fin 2) : - input_ty i → Result (output_ty i) := get_fun bodies i k + def body (k : (i : Fin 2) → (x : input_ty i) → Result (output_ty i x)) (i: Fin 2) : + (x : input_ty i) → Result (output_ty i x) := get_fun bodies i k theorem body_is_valid : is_valid body := by -- Split the proof into proofs of validity of the individual bodies rw [is_valid] simp only [body] intro k - apply (Funs.is_valid_p_is_valid_p [Int, Int] [Bool, Bool]) + apply (Funs.is_valid_p_is_valid_p tys) simp [Funs.is_valid_p] (repeat (apply And.intro)) <;> intro x <;> simp at x <;> simp only [is_even_body, is_odd_body] @@ -1106,3 +1087,64 @@ namespace Ex5 conv => lhs; rw [Heq]; simp; rw [id_body] end Ex5 + +namespace Ex6 + /- `list_nth` again, but this time we use FixI -/ + open Primitives FixI + + @[simp] def tys.{u} : List in_out_ty := + [mk_in_out_ty ((a:Type u) × (List a × Int)) (λ ⟨ a, _ ⟩ => a)] + + @[simp] def input_ty (i : Fin 1) := (tys.get i).fst + @[simp] def output_ty (i : Fin 1) (x : input_ty i) := + (tys.get i).snd x + + def list_nth_body.{u} (k : (i:Fin 1) → (x:input_ty i) → Result (output_ty i x)) + (x : (a : Type u) × List a × Int) : Result x.fst := + let ⟨ a, ls, i ⟩ := x + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else k 0 ⟨ a, tl, i - 1 ⟩ + + @[simp] def bodies : + Funs (Fin 1) input_ty output_ty tys := + Funs.Cons list_nth_body Funs.Nil + + def body (k : (i : Fin 1) → (x : input_ty i) → Result (output_ty i x)) (i: Fin 1) : + (x : input_ty i) → Result (output_ty i x) := get_fun bodies i k + + theorem list_nth_body_is_valid: is_valid body := by + -- Split the proof into proofs of validity of the individual bodies + rw [is_valid] + simp only [body] + intro k + apply (Funs.is_valid_p_is_valid_p tys) + simp [Funs.is_valid_p] + (repeat (apply And.intro)); intro x; simp at x + simp only [list_nth_body] + -- Prove the validity of the individual bodies + intro k x + simp [list_nth_body] + split <;> simp + split <;> simp + + noncomputable + def list_nth {a: Type u} (ls : List a) (i : Int) : Result a := + fix body 0 ⟨ a, ls , i ⟩ + + -- The unfolding equation - diverges if `i < 0` + theorem list_nth_eq (ls : List a) (i : Int) : + list_nth ls i = + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else list_nth tl (i - 1) + := by + have Heq := is_valid_fix_fixed_eq (@list_nth_body_is_valid a) + simp [list_nth] + conv => lhs; rw [Heq] + +end Ex6 -- cgit v1.2.3 From fdc8693772ecb1978873018c790061854f00a015 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 29 Jun 2023 23:15:20 +0200 Subject: Write function to compute the input/output types --- backends/lean/Base/Diverge/Base.lean | 3 +- backends/lean/Base/Diverge/Elab.lean | 154 
++++++++++++++++++++++++------- backends/lean/Base/Diverge/ElabBase.lean | 1 + 3 files changed, 126 insertions(+), 32 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean index 630c0bf6..22b59bd0 100644 --- a/backends/lean/Base/Diverge/Base.lean +++ b/backends/lean/Base/Diverge/Base.lean @@ -560,6 +560,7 @@ namespace FixI kk_ty id a b → kk_ty id a b def in_out_ty : Type (imax (u + 1) (v + 1)) := (in_ty : Type u) × ((x:in_ty) → Type v) + -- TODO: remove? @[simp] def mk_in_out_ty (in_ty : Type u) (out_ty : in_ty → Type v) : in_out_ty := Sigma.mk in_ty out_ty @@ -1143,7 +1144,7 @@ namespace Ex6 if i = 0 then .ret hd else list_nth tl (i - 1) := by - have Heq := is_valid_fix_fixed_eq (@list_nth_body_is_valid a) + have Heq := is_valid_fix_fixed_eq list_nth_body_is_valid simp [list_nth] conv => lhs; rw [Heq] diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index 22e0039f..116c5d8b 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -13,7 +13,7 @@ namespace Diverge syntax (name := divergentDef) declModifiers "divergent" "def" declId ppIndent(optDeclSig) declVal : command -open Lean Elab Term Meta Primitives +open Lean Elab Term Meta Primitives Lean.Meta set_option trace.Diverge.def true @@ -21,27 +21,9 @@ set_option trace.Diverge.def true open WF in - - -- Replace the recursive calls by a call to the continuation -- def replace_rec_calls -#check Lean.Meta.forallTelescope -#check Expr -#check withRef -#check MonadRef.withRef -#check Nat -#check Array -#check Lean.Meta.inferType -#check Nat -#check Int - -#check (0, 1) -#check Prod -#check () -#check Unit -#check Sigma - -- print_decl is_even_body #check instOfNatNat #check OfNat.ofNat -- @OfNat.ofNat ℕ 2 ... @@ -59,6 +41,100 @@ def mkProds (tys : List Expr) : MetaM Expr := let pty ← mkProds tys mkAppM ``Prod.mk #[ty, pty] +/- Generate the input type of a function body, which is a sigma type (i.e., a + dependent tuple) which groups all its inputs. + + Example: + - xl = [(a:Type), (ls:List a), (i:Int)] + + Generates: + `(a:Type) × (ls:List a) × (i:Int)` + + -/ +def mkSigmasTypesOfTypes (xl : List Expr) : MetaM Expr := + match xl with + | [] => do + trace[Diverge.def.sigmas] "mkSigmasOfTypes: []" + return (Expr.const ``PUnit.unit []) + | [x] => do + trace[Diverge.def.sigmas] "mkSigmasOfTypes: [{x}]" + let ty ← Lean.Meta.inferType x + return ty + | x :: xl => do + trace[Diverge.def.sigmas] "mkSigmasOfTypes: [{x}::{xl}]" + let alpha ← Lean.Meta.inferType x + let sty ← mkSigmasTypesOfTypes xl + trace[Diverge.def.sigmas] "mkSigmasOfTypes: [{x}::{xl}]: alpha={alpha}, sty={sty}" + let beta ← mkLambdaFVars #[x] sty + trace[Diverge.def.sigmas] "mkSigmasOfTypes: ({alpha}) ({beta})" + mkAppOptM ``Sigma #[some alpha, some beta] + +def mk_indexed_name (index : Nat) : Name := .num (.str .anonymous "_uniq") index + +/- Generate the out_ty of the body of a function, which from an input (a sigma + type generated by `mkSigmasTypesOfTypes`) gives the output type of the function. 
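+   In other words, it builds a function from the bundled input (the sigma type
+   produced by `mkSigmasTypesOfTypes`) to the output type of the function,
+   matching on the tuple so that the output type may refer to the earlier
+   components (typically the type parameter `a` in the example below).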
+ + Example: + - xl = `[a:Type, ls:List a, i:Int]` + - out_ty = `a` + - index = 0 -- For naming purposes: we use it to numerotate the "scrutinee" variables + + Generates: + ``` + match scrut0 with + | Sigma.mk x scrut1 => + match scrut1 with + | Sigma.mk ls i => + a + ``` +-/ +def mkSigmasOutType (xl : List Expr) (out_ty : Expr) (index : Nat := 0) : MetaM Expr := + match xl with + | [] => do + -- This would be unexpected + throwError "mkSigmasOutType: empyt list of input parameters" + | [x] => do + -- In the explanations above: inner match case + trace[Diverge.def.sigmas] "mkSigmasOutType: [{x}]" + mkLambdaFVars #[x] out_ty + | fst :: xl => do + -- In the explanations above: outer match case + -- Remark: for the naming purposes, we use the same convention as for the + -- fields and parameters in `Sigma.casesOn and `Sigma.mk + trace[Diverge.def.sigmas] "mkSigmasOutType: [{fst}::{xl}]" + let alpha ← Lean.Meta.inferType fst + let snd_ty ← mkSigmasTypesOfTypes xl + let beta ← mkLambdaFVars #[fst] snd_ty + let snd ← mkSigmasOutType xl out_ty (index + 1) + let scrut_ty ← mkSigmasTypesOfTypes (fst :: xl) + withLocalDeclD (mk_indexed_name index) scrut_ty fun scrut => do + let mk ← mkLambdaFVars #[fst] snd + trace[Diverge.def.sigmas] "mkSigmasOutType: scrut: ({scrut}) : ({← inferType scrut})" + let motive ← mkLambdaFVars #[scrut] (← inferType out_ty) + trace[Diverge.def.sigmas] "mkSigmasOutType:\n ({alpha})\n ({beta})\n ({motive})\n ({scrut})\n ({mk})" + let out ← mkAppOptM ``Sigma.casesOn #[some alpha, some beta, some motive, some scrut, some mk] + let out ← mkLambdaFVars #[scrut] out + trace[Diverge.def.sigmas] "mkSigmasOutType: out: {out}" + return out + +/- Small tests for list_nth: give a model of what `mkSigmasOutType` should generate -/ +private def list_nth_out_ty2 (a :Type) (scrut1: @Sigma (List a) (fun (_ls : List a) => Int)) := + @Sigma.casesOn (List a) + (fun (_ls : List a) => Int) + (fun (_scrut1:@Sigma (List a) (fun (_ls : List a) => Int)) => Type) + scrut1 + (fun (_ls : List a) (_i : Int) => Diverge.Primitives.Result a) + +private def list_nth_out_ty1 (scrut0 : @Sigma (Type) (fun (a:Type) => + @Sigma (List a) (fun (_ls : List a) => Int))) := + @Sigma.casesOn (Type) + (fun (a:Type) => @Sigma (List a) (fun (_ls : List a) => Int)) + (fun (_scrut0:@Sigma (Type) (fun (a:Type) => @Sigma (List a) (fun (_ls : List a) => Int))) => Type) + scrut0 + (fun (a : Type) (scrut1: @Sigma (List a) (fun (_ls : List a) => Int)) => + list_nth_out_ty2 a scrut1) +/- -/ + def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do let msg := toMessageData <| preDefs.map fun pd => (pd.declName, pd.levelParams, pd.type, pd.value) trace[Diverge.def] ("divRecursion: defs: " ++ msg) @@ -94,7 +170,23 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do let grLvlParams := def0.levelParams trace[Diverge.def] "def0 type: {def0.type}" - -- Small utility: compute the list of type parameters + -- Compute the list of pairs: (input type × output type) + let inOutTys : Array (Expr × Expr) ← + preDefs.mapM (fun preDef => do + -- Check the universe parameters - TODO: I'm not sure what the best thing + -- to do is. In practice, all the type parameters should be in Type 0, so + -- we shouldn't have universe issues. 
+ if preDef.levelParams ≠ grLvlParams then + throwError "Non-uniform polymorphism in the universes" + forallTelescope preDef.type (fun in_tys out_ty => do + let in_ty ← liftM (mkSigmasTypesOfTypes in_tys.toList) + let out_ty ← liftM (mkSigmasOutType in_tys.toList out_ty) + return (in_ty, out_ty) + ) + ) + trace[Diverge.def] "inOutTys: {inOutTys}" + +/- -- Small utility: compute the list of type parameters let getTypeParams (ty: Expr) : MetaM (List Expr × List Expr × Expr) := Lean.Meta.forallTelescope ty fun tys out_ty => do trace[Diverge.def] "types: {tys}" @@ -138,16 +230,16 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do let (input_tys, output_tys) := List.unzip all_tys.toList let input_tys : List Expr ← liftM (List.mapM mkProds input_tys) - trace[Diverge.def] " in/out tys: {input_tys}; {output_tys}" + trace[Diverge.def] " in/out tys: {input_tys}; {output_tys}" -/ -- Compute the names set let names := preDefs.map PreDefinition.declName let names := HashSet.empty.insertMany names -- - for preDef in preDefs do - trace[Diverge.def] "about to explore: {preDef.declName}" - explore_term "" preDef.value + -- for preDef in preDefs do + -- trace[Diverge.def] "about to explore: {preDef.declName}" + -- explore_term "" preDef.value -- Compute the bodies @@ -267,6 +359,13 @@ elab_rules : command else Command.elabCommand <| ← `(namespace $(mkIdentFrom id ns) $cmd end $(mkIdentFrom id ns)) +divergent def list_nth {a: Type} (ls : List a) (i : Int) : Result a := + match ls with + | [] => .fail .panic + | x :: ls => + if i = 0 then return x + else return (← list_nth ls (i - 1)) + mutual divergent def is_even (i : Int) : Result Bool := if i = 0 then return true else return (← is_odd (i - 1)) @@ -280,13 +379,6 @@ example (i : Int) : is_even i = .ret (i % 2 = 0) ∧ is_odd i = .ret (i % 2 ≠ unfold is_even sorry -divergent def list_nth {a: Type} (ls : List a) (i : Int) : Result a := - match ls with - | [] => .fail .panic - | x :: ls => - if i = 0 then return x - else return (← list_nth ls (i - 1)) - mutual divergent def foo (i : Int) : Result Nat := if i > 10 then return (← foo (i / 10)) + (← bar i) else bar 10 diff --git a/backends/lean/Base/Diverge/ElabBase.lean b/backends/lean/Base/Diverge/ElabBase.lean index 84b73a30..441b25f0 100644 --- a/backends/lean/Base/Diverge/ElabBase.lean +++ b/backends/lean/Base/Diverge/ElabBase.lean @@ -5,6 +5,7 @@ namespace Diverge open Lean Elab Term Meta initialize registerTraceClass `Diverge.elab (inherited := true) +initialize registerTraceClass `Diverge.def.sigmas (inherited := true) initialize registerTraceClass `Diverge.def (inherited := true) -- TODO: move -- cgit v1.2.3 From 1c9331ce92b68b9a83c601212149a6c24591708f Mon Sep 17 00:00:00 2001 From: Son Ho Date: Fri, 30 Jun 2023 15:53:39 +0200 Subject: Generate the fixed-point bodies in Elab.lean --- backends/lean/Base/Diverge/Base.lean | 8 +- backends/lean/Base/Diverge/Elab.lean | 451 +++++++++++++++++++++++-------- backends/lean/Base/Diverge/ElabBase.lean | 47 +++- 3 files changed, 391 insertions(+), 115 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean index 22b59bd0..aa0539ba 100644 --- a/backends/lean/Base/Diverge/Base.lean +++ b/backends/lean/Base/Diverge/Base.lean @@ -554,14 +554,14 @@ namespace FixI /- Some utilities to define the mutually recursive functions -/ -- TODO: use more - @[simp] def kk_ty (id : Type u) (a : id → Type v) (b : (i:id) → (x:a i) → Type w) := + abbrev kk_ty (id : Type u) (a : id → Type v) (b : 
(i:id) → (x:a i) → Type w) := (i:id) → (x:a i) → Result (b i x) - @[simp] def k_ty (id : Type u) (a : id → Type v) (b : (i:id) → (x:a i) → Type w) := + abbrev k_ty (id : Type u) (a : id → Type v) (b : (i:id) → (x:a i) → Type w) := kk_ty id a b → kk_ty id a b - def in_out_ty : Type (imax (u + 1) (v + 1)) := (in_ty : Type u) × ((x:in_ty) → Type v) + abbrev in_out_ty : Type (imax (u + 1) (v + 1)) := (in_ty : Type u) × ((x:in_ty) → Type v) -- TODO: remove? - @[simp] def mk_in_out_ty (in_ty : Type u) (out_ty : in_ty → Type v) : + abbrev mk_in_out_ty (in_ty : Type u) (out_ty : in_ty → Type v) : in_out_ty := Sigma.mk in_ty out_ty diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index 116c5d8b..f7de7518 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -16,31 +16,62 @@ syntax (name := divergentDef) open Lean Elab Term Meta Primitives Lean.Meta set_option trace.Diverge.def true +-- set_option trace.Diverge.def.sigmas true /- The following was copied from the `wfRecursion` function. -/ open WF in --- Replace the recursive calls by a call to the continuation --- def replace_rec_calls +def mkList (xl : List Expr) (ty : Expr) : MetaM Expr := + match xl with + | [] => + mkAppOptM ``List.nil #[some ty] + | x :: tl => do + let tl ← mkList tl ty + mkAppOptM ``List.cons #[some ty, some x, some tl] --- print_decl is_even_body -#check instOfNatNat -#check OfNat.ofNat -- @OfNat.ofNat ℕ 2 ... -#check OfNat.ofNat -- @OfNat.ofNat (Fin 2) 1 ... -#check Fin.instOfNatFinHAddNatInstHAddInstAddNatOfNat +def mkProd (x y : Expr) : MetaM Expr := + mkAppM ``Prod.mk #[x, y] +def mkInOutTy (x y : Expr) : MetaM Expr := + mkAppM ``FixI.mk_in_out_ty #[x, y] -- TODO: is there already such a utility somewhere? -- TODO: change to mkSigmas def mkProds (tys : List Expr) : MetaM Expr := match tys with - | [] => do return (Expr.const ``PUnit.unit []) - | [ty] => do return ty + | [] => do pure (Expr.const ``PUnit.unit []) + | [ty] => do pure ty | ty :: tys => do let pty ← mkProds tys mkAppM ``Prod.mk #[ty, pty] +-- Return the `a` in `Return a` +def get_result_ty (ty : Expr) : MetaM Expr := + ty.withApp fun f args => do + if ¬ f.isConstOf ``Result ∨ args.size ≠ 1 then + throwError "Invalid argument to get_result_ty: {ty}" + else + pure (args.get! 0) + +-- Group a list of expressions into a dependent tuple +def mkSigmas (xl : List Expr) : MetaM Expr := + match xl with + | [] => do + trace[Diverge.def.sigmas] "mkSigmas: []" + pure (Expr.const ``PUnit.unit []) + | [x] => do + trace[Diverge.def.sigmas] "mkSigmas: [{x}]" + pure x + | fst :: xl => do + trace[Diverge.def.sigmas] "mkSigmas: [{fst}::{xl}]" + let alpha ← Lean.Meta.inferType fst + let snd ← mkSigmas xl + let snd_ty ← inferType snd + let beta ← mkLambdaFVars #[fst] snd_ty + trace[Diverge.def.sigmas] "mkSigmas:\n{alpha}\n{beta}\n{fst}\n{snd}" + mkAppOptM ``Sigma.mk #[some alpha, some beta, some fst, some snd] + /- Generate the input type of a function body, which is a sigma type (i.e., a dependent tuple) which groups all its inputs. 
@@ -55,11 +86,11 @@ def mkSigmasTypesOfTypes (xl : List Expr) : MetaM Expr := match xl with | [] => do trace[Diverge.def.sigmas] "mkSigmasOfTypes: []" - return (Expr.const ``PUnit.unit []) + pure (Expr.const ``PUnit.unit []) | [x] => do trace[Diverge.def.sigmas] "mkSigmasOfTypes: [{x}]" let ty ← Lean.Meta.inferType x - return ty + pure ty | x :: xl => do trace[Diverge.def.sigmas] "mkSigmasOfTypes: [{x}::{xl}]" let alpha ← Lean.Meta.inferType x @@ -71,15 +102,26 @@ def mkSigmasTypesOfTypes (xl : List Expr) : MetaM Expr := def mk_indexed_name (index : Nat) : Name := .num (.str .anonymous "_uniq") index -/- Generate the out_ty of the body of a function, which from an input (a sigma - type generated by `mkSigmasTypesOfTypes`) gives the output type of the function. +/- Given a list of values `[x0:ty0, ..., xn:ty1]` where every `xi` might use the previous + `xj` (j < i) and a value `out` which uses `x0`, ..., `xn`, generate the following + expression: + ``` + fun x:((x0:ty0) × ... × (xn:tyn) => -- **Dependent** tuple + match x with + | (x0, ..., xn) => out + ``` + + The `index` parameter is used for naming purposes: we use it to numerotate the + bound variables that we introduce. Example: + ======== + More precisely: - xl = `[a:Type, ls:List a, i:Int]` - - out_ty = `a` - - index = 0 -- For naming purposes: we use it to numerotate the "scrutinee" variables + - out = `a` + - index = 0 - Generates: + generates: ``` match scrut0 with | Sigma.mk x scrut1 => @@ -88,36 +130,47 @@ def mk_indexed_name (index : Nat) : Name := .num (.str .anonymous "_uniq") index a ``` -/ -def mkSigmasOutType (xl : List Expr) (out_ty : Expr) (index : Nat := 0) : MetaM Expr := +partial def mkSigmasMatch (xl : List Expr) (out : Expr) (index : Nat := 0) : MetaM Expr := match xl with | [] => do -- This would be unexpected - throwError "mkSigmasOutType: empyt list of input parameters" + throwError "mkSigmasMatch: empyt list of input parameters" | [x] => do -- In the explanations above: inner match case - trace[Diverge.def.sigmas] "mkSigmasOutType: [{x}]" - mkLambdaFVars #[x] out_ty + trace[Diverge.def.sigmas] "mkSigmasMatch: [{x}]" + mkLambdaFVars #[x] out | fst :: xl => do -- In the explanations above: outer match case -- Remark: for the naming purposes, we use the same convention as for the -- fields and parameters in `Sigma.casesOn and `Sigma.mk - trace[Diverge.def.sigmas] "mkSigmasOutType: [{fst}::{xl}]" + trace[Diverge.def.sigmas] "mkSigmasMatch: [{fst}::{xl}]" let alpha ← Lean.Meta.inferType fst let snd_ty ← mkSigmasTypesOfTypes xl let beta ← mkLambdaFVars #[fst] snd_ty - let snd ← mkSigmasOutType xl out_ty (index + 1) + let snd ← mkSigmasMatch xl out (index + 1) let scrut_ty ← mkSigmasTypesOfTypes (fst :: xl) withLocalDeclD (mk_indexed_name index) scrut_ty fun scrut => do let mk ← mkLambdaFVars #[fst] snd - trace[Diverge.def.sigmas] "mkSigmasOutType: scrut: ({scrut}) : ({← inferType scrut})" - let motive ← mkLambdaFVars #[scrut] (← inferType out_ty) - trace[Diverge.def.sigmas] "mkSigmasOutType:\n ({alpha})\n ({beta})\n ({motive})\n ({scrut})\n ({mk})" - let out ← mkAppOptM ``Sigma.casesOn #[some alpha, some beta, some motive, some scrut, some mk] - let out ← mkLambdaFVars #[scrut] out - trace[Diverge.def.sigmas] "mkSigmasOutType: out: {out}" - return out - -/- Small tests for list_nth: give a model of what `mkSigmasOutType` should generate -/ + trace[Diverge.def.sigmas] "mkSigmasMatch: scrut: ({scrut}) : ({← inferType scrut})" + -- TODO: make the computation of the motive more efficient + let motive ← do + let out_ty ← 
inferType out + match out_ty with + | .sort _ | .lit _ | .const .. => + -- The type of the motive doesn't depend on the scrutinee + mkLambdaFVars #[scrut] out_ty + | _ => + -- The type of the motive *may* depend on the scrutinee + -- TODO: make this more efficient (we could change the output type of + -- mkSigmasMatch + mkSigmasMatch (fst :: xl) out_ty + trace[Diverge.def.sigmas] "mkSigmasMatch:\n ({alpha})\n ({beta})\n ({motive})\n ({scrut})\n ({mk})" + let sm ← mkAppOptM ``Sigma.casesOn #[some alpha, some beta, some motive, some scrut, some mk] + let sm ← mkLambdaFVars #[scrut] sm + trace[Diverge.def.sigmas] "mkSigmasMatch: sm: {sm}" + pure sm + +/- Small tests for list_nth: give a model of what `mkSigmasMatch` should generate -/ private def list_nth_out_ty2 (a :Type) (scrut1: @Sigma (List a) (fun (_ls : List a) => Int)) := @Sigma.casesOn (List a) (fun (_ls : List a) => Int) @@ -135,14 +188,199 @@ private def list_nth_out_ty1 (scrut0 : @Sigma (Type) (fun (a:Type) => list_nth_out_ty2 a scrut1) /- -/ +-- TODO: move +-- TODO: we can use Array.mapIdx +@[specialize] def mapiAux (i : Nat) (f : Nat → α → β) : List α → List β + | [] => [] + | a::as => f i a :: mapiAux (i+1) f as + +@[specialize] def mapi (f : Nat → α → β) : List α → List β := mapiAux 0 f + +#check Array.map +-- Return the expression: `Fin n` +-- TODO: use more +def mkFin (n : Nat) : Expr := + mkAppN (.const ``Fin []) #[.lit (.natVal n)] + +-- Return the expression: `i : Fin n` +def mkFinVal (n i : Nat) : MetaM Expr := do + let n_lit : Expr := .lit (.natVal (n - 1)) + let i_lit : Expr := .lit (.natVal i) + -- We could use `trySynthInstance`, but as we know the instance that we are + -- going to use, we can save the lookup + let ofNat ← mkAppOptM ``Fin.instOfNatFinHAddNatInstHAddInstAddNatOfNat #[n_lit, i_lit] + mkAppOptM ``OfNat.ofNat #[none, none, ofNat] + +-- TODO: remove? +def mkFinValOld (n i : Nat) : MetaM Expr := do + let finTy := mkFin n + let ofNat ← mkAppM ``OfNat #[finTy, .lit (.natVal i)] + match ← trySynthInstance ofNat with + | LOption.some x => + mkAppOptM ``OfNat.ofNat #[none, none, x] + | _ => throwError "mkFinVal: could not synthesize an instance of {ofNat} " + +/- Generate and declare as individual definitions the bodies for the individual funcions: + - replace the recursive calls with calls to the continutation `k` + - make those bodies take one single dependent tuple as input + + We name the declarations: "[original_name].body". + We return the new declarations. + -/ +def mkDeclareUnaryBodies (grLvlParams : List Name) (k_var : Expr) + (preDefs : Array PreDefinition) : + MetaM (Array Expr) := do + let grSize := preDefs.size + + -- Compute the map from name to index - the continuation has an indexed type: + -- we use the index (a finite number of type `Fin`) to control the function + -- we call at the recursive call + let nameToId : HashMap Name Nat := + let namesIds := mapi (fun i d => (d.declName, i)) preDefs.toList + HashMap.ofList namesIds + + trace[Diverge.def.genBody] "nameToId: {nameToId.toList}" + + -- Auxiliary function to explore the function bodies and replace the + -- recursive calls + let visit_e (e : Expr) : MetaM Expr := do + trace[Diverge.def.genBody] "visiting expression: {e}" + match e with + | .app .. => do + e.withApp fun f args => do + trace[Diverge.def.genBody] "this is an app: {f} {args}" + -- Check if this is a recursive call + if f.isConst then + let name := f.constName! + match nameToId.find? 
name with + | none => pure e + | some id => + -- This is a recursive call: replace it + -- Compute the index + let i ← mkFinVal grSize id + -- Put the arguments in one big dependent tuple + let args ← mkSigmas args.toList + mkAppM' k_var #[i, args] + else + -- Not a recursive call: do nothing + pure e + | .const name _ => + -- Sanity check: we eliminated all the recursive calls + if (nameToId.find? name).isSome then + throwError "mkUnaryBodies: a recursive call was not eliminated" + else pure e + | _ => pure e + + -- Explore the bodies + preDefs.mapM fun preDef => do + -- Replace the recursive calls + let body ← mapVisit visit_e preDef.value + + -- Change the type + lambdaLetTelescope body fun args body => do + let body ← mkSigmasMatch args.toList body 0 + + -- Add the declaration + let value ← mkLambdaFVars #[k_var] body + let name := preDef.declName.append "body" + let levelParams := grLvlParams + let decl := Declaration.defnDecl { + name := name + levelParams := levelParams + type := ← inferType value -- TODO: change the type + value := value + hints := ReducibilityHints.regular (getMaxHeight (← getEnv) value + 1) + safety := .safe + all := [name] + } + addDecl decl + trace[Diverge.def] "individual body of {preDef.declName}: {body}" + -- Return the constant + let body := Lean.mkConst name (levelParams.map .param) + -- let body ← mkAppM' body #[k_var] + trace[Diverge.def] "individual body (after decl): {body}" + pure body + +-- Generate a unique function body from the bodies of the mutually recursive group, +-- and add it as a declaration in the context +def mkDeclareMutualBody (grName : Name) (grLvlParams : List Name) + (i_var k_var : Expr) + (in_ty out_ty : Expr) (inOutTys : List (Expr × Expr)) + (bodies : Array Expr) : MetaM Expr := do + -- Generate the body + let grSize := bodies.size + let finTypeExpr := mkFin grSize + -- TODO: not very clean + let inOutTyType ← do + let (x, y) := inOutTys.get! 0 + inferType (← mkInOutTy x y) + let rec mkFuns (inOutTys : List (Expr × Expr)) (bl : List Expr) : MetaM Expr := + match inOutTys, bl with + | [], [] => + mkAppOptM ``FixI.Funs.Nil #[finTypeExpr, in_ty, out_ty] + | (ity, oty) :: inOutTys, b :: bl => do + -- Retrieving ity and oty - this is not very clean + let inOutTysExpr ← mkList (← inOutTys.mapM (λ (x, y) => mkInOutTy x y)) inOutTyType + let fl ← mkFuns inOutTys bl + mkAppOptM ``FixI.Funs.Cons #[finTypeExpr, in_ty, out_ty, ity, oty, inOutTysExpr, b, fl] + | _, _ => throwError "mkDeclareMutualBody: `tys` and `bodies` don't have the same length" + let bodyFuns ← mkFuns inOutTys bodies.toList + -- Wrap in `get_fun` + let body ← mkAppM ``FixI.get_fun #[bodyFuns, i_var, k_var] + -- Add the index `i` and the continuation `k` as a variables + let body ← mkLambdaFVars #[k_var, i_var] body + trace[Diverge.def] "mkDeclareMutualBody: body: {body}" + -- Add the declaration + let name := grName.append "mutrec_body" + let levelParams := grLvlParams + let decl := Declaration.defnDecl { + name := name + levelParams := levelParams + type := ← inferType body + value := body + hints := ReducibilityHints.regular (getMaxHeight (← getEnv) body + 1) + safety := .safe + all := [name] + } + addDecl decl + -- Return the constant + pure (Lean.mkConst name (levelParams.map .param)) + +-- Generate the final definions by using the mutual body and the fixed point operator. 
+def mkDeclareFixDefs (mutBody : Expr) (preDefs : Array PreDefinition) : + TermElabM Unit := do + let grSize := preDefs.size + let _ ← preDefs.mapIdxM fun idx preDef => do + lambdaLetTelescope preDef.value fun xs _ => do + -- Create the index + let idx ← mkFinVal grSize idx.val + -- Group the inputs into a dependent tuple + let input ← mkSigmas xs.toList + -- Apply the fixed point + let fixedBody ← mkAppM ``FixI.fix #[mutBody, idx, input] + let fixedBody ← mkLambdaFVars xs fixedBody + -- Create the declaration + let name := preDef.declName + let decl := Declaration.defnDecl { + name := name + levelParams := preDef.levelParams + type := preDef.type + value := fixedBody + hints := ReducibilityHints.regular (getMaxHeight (← getEnv) fixedBody + 1) + safety := .safe + all := [name] + } + addDecl decl + pure () + def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do let msg := toMessageData <| preDefs.map fun pd => (pd.declName, pd.levelParams, pd.type, pd.value) trace[Diverge.def] ("divRecursion: defs: " ++ msg) -- CHANGE HERE This function should add definitions with these names/types/values ^^ -- Temporarily add the predefinitions as axioms - for preDef in preDefs do - addAsAxiom preDef + -- for preDef in preDefs do + -- addAsAxiom preDef -- TODO: what is this? for preDef in preDefs do @@ -154,25 +392,14 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do let grName := def0.declName trace[Diverge.def] "group name: {grName}" - /- Compute the type of the continuation. - - We do the following - - we make sure all the definitions have the same universe parameters - (we can make this more general later) - - we group all the type parameters together, make sure all the - definitions have the same type parameters, and enforce - a uniform polymorphism (we can also lift this later). - This would require generalizing a bit our indexed fixed point to - make the output type parametric in the input. - - we group all the non-type parameters: we parameterize the continuation - by those - -/ + /- # Compute the input/output types of the continuation `k`. -/ let grLvlParams := def0.levelParams - trace[Diverge.def] "def0 type: {def0.type}" + trace[Diverge.def] "def0 universe levels: {def0.levelParams}" - -- Compute the list of pairs: (input type × output type) + -- We first compute the list of pairs: (input type × output type) let inOutTys : Array (Expr × Expr) ← preDefs.mapM (fun preDef => do + withRef preDef.ref do -- is the withRef useful? -- Check the universe parameters - TODO: I'm not sure what the best thing -- to do is. In practice, all the type parameters should be in Type 0, so -- we shouldn't have universe issues. 
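-- Rough hand-written model (not generated code) of what this elaboration is expected
-- to produce for the `list_nth` test further below: the inputs are grouped into the
-- type `(a : Type) × (_ls : List a) × Int`, the output type extracted from `Result a`
-- is `a`, and the final definition built by `mkDeclareFixDefs` should look roughly like
--
--   def list_nth {a : Type} (ls : List a) (i : Int) : Result a :=
--     FixI.fix list_nth.mutrec_body 0 ⟨ a, ls, i ⟩
--
-- where `list_nth.mutrec_body` is the mutually recursive body declared by
-- `mkDeclareMutualBody` (the names above are illustrative only).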
@@ -180,68 +407,74 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do throwError "Non-uniform polymorphism in the universes" forallTelescope preDef.type (fun in_tys out_ty => do let in_ty ← liftM (mkSigmasTypesOfTypes in_tys.toList) - let out_ty ← liftM (mkSigmasOutType in_tys.toList out_ty) - return (in_ty, out_ty) + -- Retrieve the type in the "Result" + let out_ty ← get_result_ty out_ty + let out_ty ← liftM (mkSigmasMatch in_tys.toList out_ty) + pure (in_ty, out_ty) ) ) trace[Diverge.def] "inOutTys: {inOutTys}" - -/- -- Small utility: compute the list of type parameters - let getTypeParams (ty: Expr) : MetaM (List Expr × List Expr × Expr) := - Lean.Meta.forallTelescope ty fun tys out_ty => do - trace[Diverge.def] "types: {tys}" -/- let (_, params) ← StateT.run (do - for x in tys do - let ty ← Lean.Meta.inferType x - match ty with - | .sort _ => do - let st ← StateT.get - StateT.set (ty :: st) - | _ => do break - ) ([] : List Expr) - let params := params.reverse - trace[Diverge.def] " type parameters {params}" - return params -/ - let rec get_params (ls : List Expr) : MetaM (List Expr × List Expr) := - match ls with - | x :: tl => do - let ty ← Lean.Meta.inferType x - match ty with - | .sort _ => do - let (ty_params, params) ← get_params tl - return (x :: ty_params, params) - | _ => do return ([], ls) - | _ => do return ([], []) - let (ty_params, params) ← get_params tys.toList - trace[Diverge.def] " parameters: {ty_params}; {params}" - return (ty_params, params, out_ty) - let (grTyParams, _, _) ← do - getTypeParams def0.type - - -- Compute the input types and the output types - let all_tys ← preDefs.mapM fun preDef => do - let (tyParams, params, ret_ty) ← getTypeParams preDef.type - -- TODO: this is not complete, there are more checks to perform - if tyParams.length ≠ grTyParams.length then - throwError "Non-uniform polymorphism" - return (params, ret_ty) - - -- TODO: I think there are issues with the free variables - let (input_tys, output_tys) := List.unzip all_tys.toList - let input_tys : List Expr ← liftM (List.mapM mkProds input_tys) - - trace[Diverge.def] " in/out tys: {input_tys}; {output_tys}" -/ - - -- Compute the names set - let names := preDefs.map PreDefinition.declName - let names := HashSet.empty.insertMany names - - -- - -- for preDef in preDefs do - -- trace[Diverge.def] "about to explore: {preDef.declName}" - -- explore_term "" preDef.value - - -- Compute the bodies + -- Turn the list of input/output type pairs into an expresion + let inOutTysExpr ← inOutTys.mapM (λ (x, y) => mkInOutTy x y) + let inOutTysExpr ← mkList inOutTysExpr.toList (← inferType (inOutTysExpr.get! 0)) + + -- From the list of pairs of input/output types, actually compute the + -- type of the continuation `k`. + -- We first introduce the index `i : Fin n` where `n` is the number of + -- functions in the group. 
+ let i_var_ty := mkFin preDefs.size + withLocalDeclD (.num (.str .anonymous "i") 0) i_var_ty fun i_var => do + let in_out_ty ← mkAppM ``List.get #[inOutTysExpr, i_var] + trace[Diverge.def] "in_out_ty := {in_out_ty} : {← inferType in_out_ty}" + -- Add an auxiliary definition for `in_out_ty` + let in_out_ty ← do + let value ← mkLambdaFVars #[i_var] in_out_ty + let name := grName.append "in_out_ty" + let levelParams := grLvlParams + let decl := Declaration.defnDecl { + name := name + levelParams := levelParams + type := ← inferType value + value := value + hints := .abbrev + safety := .safe + all := [name] + } + addDecl decl + -- Return the constant + let in_out_ty := Lean.mkConst name (levelParams.map .param) + mkAppM' in_out_ty #[i_var] + trace[Diverge.def] "in_out_ty (after decl) := {in_out_ty} : {← inferType in_out_ty}" + let in_ty ← mkAppM ``Sigma.fst #[in_out_ty] + trace[Diverge.def] "in_ty: {in_ty}" + withLocalDeclD (.num (.str .anonymous "x") 1) in_ty fun input => do + let out_ty ← mkAppM' (← mkAppM ``Sigma.snd #[in_out_ty]) #[input] + trace[Diverge.def] "out_ty: {out_ty}" + + -- Introduce the continuation `k` + let in_ty ← mkLambdaFVars #[i_var] in_ty + let out_ty ← mkLambdaFVars #[i_var, input] out_ty + let k_var_ty ← mkAppM ``FixI.kk_ty #[i_var_ty, in_ty, out_ty] -- + trace[Diverge.def] "k_var_ty: {k_var_ty}" + withLocalDeclD (.num (.str .anonymous "k") 2) k_var_ty fun k_var => do + trace[Diverge.def] "k_var: {k_var}" + + -- Replace the recursive calls in all the function bodies by calls to the + -- continuation `k` and and generate for those bodies declarations + let bodies ← mkDeclareUnaryBodies grLvlParams k_var preDefs + -- Generate the mutually recursive body + let body ← mkDeclareMutualBody grName grLvlParams i_var k_var in_ty out_ty inOutTys.toList bodies + trace[Diverge.def] "mut rec body (after decl): {body}" + + -- Prove that the mut rec body satisfies the validity criteria required by + -- our fixed-point + -- TODO + + -- Generate the final definitions + let defs ← mkDeclareFixDefs body preDefs + + -- Prove the unfolding equations + -- TODO -- Process the definitions addAndCompilePartialRec preDefs @@ -366,6 +599,10 @@ divergent def list_nth {a: Type} (ls : List a) (i : Int) : Result a := if i = 0 then return x else return (← list_nth ls (i - 1)) +#print list_nth.in_out_ty +#check list_nth.body +#print list_nth + mutual divergent def is_even (i : Int) : Result Bool := if i = 0 then return true else return (← is_odd (i - 1)) diff --git a/backends/lean/Base/Diverge/ElabBase.lean b/backends/lean/Base/Diverge/ElabBase.lean index 441b25f0..82f79f94 100644 --- a/backends/lean/Base/Diverge/ElabBase.lean +++ b/backends/lean/Base/Diverge/ElabBase.lean @@ -4,13 +4,14 @@ namespace Diverge open Lean Elab Term Meta -initialize registerTraceClass `Diverge.elab (inherited := true) -initialize registerTraceClass `Diverge.def.sigmas (inherited := true) -initialize registerTraceClass `Diverge.def (inherited := true) +initialize registerTraceClass `Diverge.elab +initialize registerTraceClass `Diverge.def +initialize registerTraceClass `Diverge.def.sigmas +initialize registerTraceClass `Diverge.def.genBody -- TODO: move -- TODO: small helper -def explore_term (incr : String) (e : Expr) : TermElabM Unit := +def explore_term (incr : String) (e : Expr) : MetaM Unit := match e with | .bvar _ => do logInfo m!"{incr}bvar: {e}"; return () | .fvar _ => do logInfo m!"{incr}fvar: {e}"; return () @@ -78,4 +79,42 @@ private def test2 (x : Nat) : Nat := x print_decl test1 print_decl test2 +-- We 
adapted this from AbstractNestedProofs.visit +-- A map visitor function for expressions +partial def mapVisit (k : Expr → MetaM Expr) (e : Expr) : MetaM Expr := do + let mapVisitBinders (xs : Array Expr) (k2 : MetaM Expr) : MetaM Expr := do + let localInstances ← getLocalInstances + let mut lctx ← getLCtx + for x in xs do + let xFVarId := x.fvarId! + let localDecl ← xFVarId.getDecl + let type ← mapVisit k localDecl.type + let localDecl := localDecl.setType type + let localDecl ← match localDecl.value? with + | some value => let value ← mapVisit k value; pure <| localDecl.setValue value + | none => pure localDecl + lctx :=lctx.modifyLocalDecl xFVarId fun _ => localDecl + withLCtx lctx localInstances k2 + -- TODO: use a cache? (Lean.checkCache) + -- Explore + let e ← k e + match e with + | .bvar _ + | .fvar _ + | .mvar _ + | .sort _ + | .lit _ + | .const _ _ => pure e + | .app .. => do e.withApp fun f args => return mkAppN f (← args.mapM (mapVisit k)) + | .lam .. => + lambdaLetTelescope e fun xs b => + mapVisitBinders xs do mkLambdaFVars xs (← mapVisit k b) (usedLetOnly := false) + | .forallE .. => do + forallTelescope e fun xs b => mapVisitBinders xs do mkForallFVars xs (← mapVisit k b) + | .letE .. => do + lambdaLetTelescope e fun xs b => mapVisitBinders xs do + mkLambdaFVars xs (← mapVisit k b) (usedLetOnly := false) + | .mdata _ b => return e.updateMData! (← mapVisit k b) + | .proj _ _ b => return e.updateProj! (← mapVisit k b) + end Diverge -- cgit v1.2.3 From 37e5d5501e024869037bf0ea1559229a8be62da7 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 3 Jul 2023 16:24:44 +0200 Subject: Generate the proofs of validity in Elab.lean --- backends/lean/Base/Diverge/Base.lean | 76 +++++- backends/lean/Base/Diverge/Elab.lean | 403 ++++++++++++++++++++++++++++--- backends/lean/Base/Diverge/ElabBase.lean | 1 + 3 files changed, 446 insertions(+), 34 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean index aa0539ba..89365d25 100644 --- a/backends/lean/Base/Diverge/Base.lean +++ b/backends/lean/Base/Diverge/Base.lean @@ -434,6 +434,23 @@ namespace Fix is_valid_p k (λ k => k x) := by simp_all [is_valid_p, is_mono_p_rec, is_cont_p_rec] + theorem is_valid_p_ite + (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (cond : Prop) [h : Decidable cond] + {e1 e2 : ((x:a) → Result (b x)) → Result c} + (he1: is_valid_p k e1) (he2 : is_valid_p k e2) : + is_valid_p k (ite cond e1 e2) := by + split <;> assumption + + theorem is_valid_p_dite + (k : ((x:a) → Result (b x)) → (x:a) → Result (b x)) + (cond : Prop) [h : Decidable cond] + {e1 : cond → ((x:a) → Result (b x)) → Result c} + {e2 : Not cond → ((x:a) → Result (b x)) → Result c} + (he1: ∀ x, is_valid_p k (e1 x)) (he2 : ∀ x, is_valid_p k (e2 x)) : + is_valid_p k (dite cond e1 e2) := by + split <;> simp [*] + -- Lean is good at unification: we can write a very general version -- (in particular, it will manage to figure out `g` and `h` when we -- apply the lemma) @@ -680,6 +697,24 @@ namespace FixI is_valid_p k (λ k => k i x) := by simp [is_valid_p, k_to_gen, e_to_gen, kk_to_gen, kk_of_gen] + theorem is_valid_p_ite + (k : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)) + (cond : Prop) [h : Decidable cond] + {e1 e2 : ((i:id) → (x:a i) → Result (b i x)) → Result c} + (he1: is_valid_p k e1) (he2 : is_valid_p k e2) : + is_valid_p k (λ k => ite cond (e1 k) (e2 k)) := by + split <;> assumption + + theorem is_valid_p_dite + (k : ((i:id) → (x:a i) → Result (b i x)) → 
(i:id) → (x:a i) → Result (b i x)) + (cond : Prop) [h : Decidable cond] + {e1 : ((i:id) → (x:a i) → Result (b i x)) → cond → Result c} + {e2 : ((i:id) → (x:a i) → Result (b i x)) → Not cond → Result c} + (he1: ∀ x, is_valid_p k (λ k => e1 k x)) + (he2 : ∀ x, is_valid_p k (λ k => e2 k x)) : + is_valid_p k (λ k => dite cond (e1 k) (e2 k)) := by + split <;> simp [*] + theorem is_valid_p_bind {{k : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)}} {{g : ((i:id) → (x:a i) → Result (b i x)) → Result c}} @@ -699,6 +734,9 @@ namespace FixI | .Nil => True | .Cons f fl => (∀ x, FixI.is_valid_p k (λ k => f k x)) ∧ fl.is_valid_p k + theorem Funs.is_valid_p_Nil (k : k_ty id a b) : + Funs.is_valid_p k Funs.Nil := by simp [Funs.is_valid_p] + def Funs.is_valid_p_is_valid_p_aux {k : k_ty id a b} {tys : List in_out_ty} @@ -1116,7 +1154,7 @@ namespace Ex6 def body (k : (i : Fin 1) → (x : input_ty i) → Result (output_ty i x)) (i: Fin 1) : (x : input_ty i) → Result (output_ty i x) := get_fun bodies i k - theorem list_nth_body_is_valid: is_valid body := by + theorem body_is_valid: is_valid body := by -- Split the proof into proofs of validity of the individual bodies rw [is_valid] simp only [body] @@ -1131,6 +1169,20 @@ namespace Ex6 split <;> simp split <;> simp + -- Writing the proof terms explicitly + theorem list_nth_body_is_valid' (k : k_ty (Fin 1) input_ty output_ty) + (x : (a : Type u) × List a × Int) : is_valid_p k (fun k => list_nth_body k x) := + let ⟨ a, ls, i ⟩ := x + match ls with + | [] => is_valid_p_same k (.fail .panic) + | hd :: tl => + is_valid_p_ite k (Eq i 0) (is_valid_p_same k (.ret hd)) (is_valid_p_rec k 0 ⟨a, tl, i-1⟩) + + theorem body_is_valid' : is_valid body := + fun k => + Funs.is_valid_p_is_valid_p tys k bodies + (And.intro (list_nth_body_is_valid' k) (Funs.is_valid_p_Nil k)) + noncomputable def list_nth {a: Type u} (ls : List a) (i : Int) : Result a := fix body 0 ⟨ a, ls , i ⟩ @@ -1144,8 +1196,28 @@ namespace Ex6 if i = 0 then .ret hd else list_nth tl (i - 1) := by - have Heq := is_valid_fix_fixed_eq list_nth_body_is_valid + have Heq := is_valid_fix_fixed_eq body_is_valid simp [list_nth] conv => lhs; rw [Heq] + -- Write the proof term explicitly: the generation of the proof term (without tactics) + -- is automatable, and the proof term is actually a lot simpler and smaller when we + -- don't use tactics. + theorem list_nth_eq'.{u} {a : Type u} (ls : List a) (i : Int) : + list_nth ls i = + match ls with + | [] => .fail .panic + | hd :: tl => + if i = 0 then .ret hd + else list_nth tl (i - 1) + := + -- Use the fixed-point equation + have Heq := is_valid_fix_fixed_eq body_is_valid.{u} + -- Add the index + have Heqi := congr_fun Heq 0 + -- Add the input + have Heqix := congr_fun Heqi { fst := a, snd := (ls, i) } + -- Done + Heqix + end Ex6 diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index f7de7518..cf40ea8f 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -16,6 +16,7 @@ syntax (name := divergentDef) open Lean Elab Term Meta Primitives Lean.Meta set_option trace.Diverge.def true +set_option trace.Diverge.def.valid true -- set_option trace.Diverge.def.sigmas true /- The following was copied from the `wfRecursion` function. 
-/ @@ -196,7 +197,6 @@ private def list_nth_out_ty1 (scrut0 : @Sigma (Type) (fun (a:Type) => @[specialize] def mapi (f : Nat → α → β) : List α → List β := mapiAux 0 f -#check Array.map -- Return the expression: `Fin n` -- TODO: use more def mkFin (n : Nat) : Expr := @@ -227,7 +227,7 @@ def mkFinValOld (n i : Nat) : MetaM Expr := do We name the declarations: "[original_name].body". We return the new declarations. -/ -def mkDeclareUnaryBodies (grLvlParams : List Name) (k_var : Expr) +def mkDeclareUnaryBodies (grLvlParams : List Name) (kk_var : Expr) (preDefs : Array PreDefinition) : MetaM (Array Expr) := do let grSize := preDefs.size @@ -260,7 +260,7 @@ def mkDeclareUnaryBodies (grLvlParams : List Name) (k_var : Expr) let i ← mkFinVal grSize id -- Put the arguments in one big dependent tuple let args ← mkSigmas args.toList - mkAppM' k_var #[i, args] + mkAppM' kk_var #[i, args] else -- Not a recursive call: do nothing pure e @@ -281,8 +281,8 @@ def mkDeclareUnaryBodies (grLvlParams : List Name) (k_var : Expr) let body ← mkSigmasMatch args.toList body 0 -- Add the declaration - let value ← mkLambdaFVars #[k_var] body - let name := preDef.declName.append "body" + let value ← mkLambdaFVars #[kk_var] body + let name := preDef.declName.append "sbody" let levelParams := grLvlParams let decl := Declaration.defnDecl { name := name @@ -297,16 +297,17 @@ def mkDeclareUnaryBodies (grLvlParams : List Name) (k_var : Expr) trace[Diverge.def] "individual body of {preDef.declName}: {body}" -- Return the constant let body := Lean.mkConst name (levelParams.map .param) - -- let body ← mkAppM' body #[k_var] + -- let body ← mkAppM' body #[kk_var] trace[Diverge.def] "individual body (after decl): {body}" pure body -- Generate a unique function body from the bodies of the mutually recursive group, --- and add it as a declaration in the context -def mkDeclareMutualBody (grName : Name) (grLvlParams : List Name) - (i_var k_var : Expr) +-- and add it as a declaration in the context. +-- We return the list of bodies (of type `Funs ...`) and the mutually recursive body. 
+def mkDeclareMutRecBody (grName : Name) (grLvlParams : List Name) + (kk_var i_var : Expr) (in_ty out_ty : Expr) (inOutTys : List (Expr × Expr)) - (bodies : Array Expr) : MetaM Expr := do + (bodies : Array Expr) : MetaM (Expr × Expr) := do -- Generate the body let grSize := bodies.size let finTypeExpr := mkFin grSize @@ -323,15 +324,15 @@ def mkDeclareMutualBody (grName : Name) (grLvlParams : List Name) let inOutTysExpr ← mkList (← inOutTys.mapM (λ (x, y) => mkInOutTy x y)) inOutTyType let fl ← mkFuns inOutTys bl mkAppOptM ``FixI.Funs.Cons #[finTypeExpr, in_ty, out_ty, ity, oty, inOutTysExpr, b, fl] - | _, _ => throwError "mkDeclareMutualBody: `tys` and `bodies` don't have the same length" + | _, _ => throwError "mkDeclareMutRecBody: `tys` and `bodies` don't have the same length" let bodyFuns ← mkFuns inOutTys bodies.toList -- Wrap in `get_fun` - let body ← mkAppM ``FixI.get_fun #[bodyFuns, i_var, k_var] + let body ← mkAppM ``FixI.get_fun #[bodyFuns, i_var, kk_var] -- Add the index `i` and the continuation `k` as a variables - let body ← mkLambdaFVars #[k_var, i_var] body - trace[Diverge.def] "mkDeclareMutualBody: body: {body}" + let body ← mkLambdaFVars #[kk_var, i_var] body + trace[Diverge.def] "mkDeclareMutRecBody: body: {body}" -- Add the declaration - let name := grName.append "mutrec_body" + let name := grName.append "mut_rec_body" let levelParams := grLvlParams let decl := Declaration.defnDecl { name := name @@ -344,10 +345,348 @@ def mkDeclareMutualBody (grName : Name) (grLvlParams : List Name) } addDecl decl -- Return the constant - pure (Lean.mkConst name (levelParams.map .param)) + pure (bodyFuns, Lean.mkConst name (levelParams.map .param)) + +def isCasesExpr (e : Expr) : MetaM Bool := do + let e := e.getAppFn + if e.isConst then + return isCasesOnRecursor (← getEnv) e.constName + else return false + +structure MatchInfo where + matcherName : Name + matcherLevels : Array Level + params : Array Expr + motive : Expr + scruts : Array Expr + branchesNumParams : Array Nat + branches : Array Expr + +instance : ToMessageData MatchInfo where + -- This is not a very clean formatting, but we don't need more + toMessageData := fun me => m!"\n- matcherName: {me.matcherName}\n- params: {me.params}\n- motive: {me.motive}\n- scruts: {me.scruts}\n- branchesNumParams: {me.branchesNumParams}\n- branches: {me.branches}" + +-- An expression which doesn't use the continuation kk is valid +def proveNoKExprIsValid (k_var : Expr) (e : Expr) : MetaM Expr := do + trace[Diverge.def.valid] "proveNoKExprIsValid: {e}" + let eIsValid ← mkAppM ``FixI.is_valid_p_same #[k_var, e] + trace[Diverge.def.valid] "proveNoKExprIsValid: result:\n{eIsValid}:\n{← inferType eIsValid}" + pure eIsValid + +mutual + +partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do + trace[Diverge.def.valid] "proveValid: {e}" + match e with + | .bvar _ + | .fvar _ + | .mvar _ + | .sort _ + | .lit _ + | .const _ _ => throwError "Unimplemented" + | .lam .. => throwError "Unimplemented" + | .forallE .. => throwError "Unreachable" -- Shouldn't get there + | .letE .. => throwError "TODO" + -- lambdaLetTelescope e fun xs b => mapVisitBinders xs do + -- mkLambdaFVars xs (← mapVisit k b) (usedLetOnly := false) + | .mdata _ b => proveExprIsValid k_var kk_var b + | .proj _ _ _ => + -- The projection shouldn't use the continuation + proveNoKExprIsValid k_var e + | .app .. 
=> + e.withApp fun f args => do + -- There are several cases: first, check if this is a match/if + -- The expression is a (dependent) if then else + let isIte := e.isIte + if isIte || e.isDIte then do + e.withApp fun f args => do + trace[Diverge.def.valid] "ite/dite: {f}:\n{args}" + if args.size ≠ 5 then + throwError "Wrong number of parameters for {f}: {args}" + let cond := args.get! 1 + let dec := args.get! 2 + -- Prove that the branches are valid + let br0 := args.get! 3 + let br1 := args.get! 4 + let proveBranchValid (br : Expr) : MetaM Expr := + if isIte then proveExprIsValid k_var kk_var br + else do + -- There is a lambda -- TODO: how do we remove exacly *one* lambda? + lambdaLetTelescope br fun xs br => do + let x := xs.get! 0 + let xs := xs.extract 1 xs.size + let br ← mkLambdaFVars xs br + let brValid ← proveExprIsValid k_var kk_var br + mkLambdaFVars #[x] brValid + let br0Valid ← proveBranchValid br0 + let br1Valid ← proveBranchValid br1 + let const := if isIte then ``FixI.is_valid_p_ite else ``FixI.is_valid_p_dite + let eIsValid ← mkAppOptM const #[none, none, none, none, some k_var, some cond, some dec, none, none, some br0Valid, some br1Valid] + trace[Diverge.def.valid] "ite/dite: result:\n{eIsValid}:\n{← inferType eIsValid}" + pure eIsValid + -- The expression is a match (this case is for when the elaborator + -- introduces auxiliary definitions to hide the match behind syntactic + -- sugar) + else if let some me := ← matchMatcherApp? e then do + trace[Diverge.def.valid] + "matcherApp: + - params: {me.params} + - motive: {me.motive} + - discrs: {me.discrs} + - altNumParams: {me.altNumParams} + - alts: {me.alts} + - remaining: {me.remaining}" + -- matchMatcherApp has already done the work for us + if me.remaining.size ≠ 0 then + throwError "MatcherApp: non empty remaining array: {me.remaining}" + let me : MatchInfo := { + matcherName := me.matcherName + matcherLevels := me.matcherLevels + params := me.params + motive := me.motive + scruts := me.discrs + branchesNumParams := me.altNumParams + branches := me.alts + } + proveMatchIsValid k_var kk_var me + -- The expression is a raw match (this case is for when the expression + -- is a direct call to the primitive `casesOn` function, without + -- syntactic sugar) + else if ← isCasesExpr f then do + trace[Diverge.def.valid] "rawMatch: {e}" + -- The casesOn definition is always of the following shape: + -- input parameters (implicit parameters), then motive (implicit), + -- scrutinee (explicit), branches (explicit). + let matcherName := f.constName! + let matcherLevels := f.constLevels!.toArray + -- Find the first explicit parameter: this is the scrutinee + forallTelescope (← inferType f) fun xs _ => do + let rec findFirstExplicit (i : Nat) : MetaM Nat := do + if i ≥ xs.size then throwError "Unexpected: could not find an explicit parameter" + else + let x := xs.get! i + let xFVarId := x.fvarId! + let localDecl ← xFVarId.getDecl + match localDecl.binderInfo with + | .default => pure i + | _ => findFirstExplicit (i + 1) + let scrutIdx ← findFirstExplicit 0 + -- Split the arguments + let params := args.extract 0 (scrutIdx - 1) + let motive := args.get! (scrutIdx - 1) + let scrut := args.get! scrutIdx + let branches := args.extract (scrutIdx + 1) args.size + -- Compute the number of parameters for the branches: for this we use + -- the type of the uninstantiated casesOn constant + let branchesNumParams : Array Nat ← do + let env ← getEnv + let decl := env.constants.find! 
matcherName + let ty := decl.type + forallTelescope ty fun xs _ => do + let xs := xs.extract (scrutIdx + 1) xs.size + xs.mapM fun x => do + let xty ← inferType x + forallTelescope xty fun ys _ => do + pure ys.size + let me : MatchInfo := { + matcherName, + matcherLevels, + params, + motive, + scruts := #[scrut], + branchesNumParams, + branches, + } + proveMatchIsValid k_var kk_var me + -- Monadic let-binding + else if f.isConstOf ``Bind.bind then do + trace[Diverge.def.valid] "bind:\n{args}" + let x := args.get! 4 + let y := args.get! 5 + -- Prove that the subexpressions are valid + let xValid ← proveExprIsValid k_var kk_var x + trace[Diverge.def.valid] "bind: xValid:\n{xValid}:\n{← inferType xValid}" + let yValid ← do + -- This is a lambda expression -- TODO: how do we remove exacly *one* lambda? + lambdaLetTelescope y fun xs y => do + let x := xs.get! 0 + let xs := xs.extract 1 xs.size + let y ← mkLambdaFVars xs y + trace[Diverge.def.valid] "bind: y: {y}" + let yValid ← proveExprIsValid k_var kk_var y + trace[Diverge.def.valid] "bind: yValid (no forall): {yValid}" + trace[Diverge.def.valid] "bind: yValid: x: {x}" + let yValid ← mkLambdaFVars #[x] yValid + trace[Diverge.def.valid] "bind: yValid (forall): {yValid}: {← inferType yValid}" + pure yValid + -- Put everything together + trace[Diverge.def.valid] "bind:\n- xValid: {xValid}: {← inferType xValid}\n- yValid: {yValid}: {← inferType yValid}" + mkAppM ``FixI.is_valid_p_bind #[xValid, yValid] + -- Recursive call + else if f.isFVarOf kk_var.fvarId! then do + trace[Diverge.def.valid] "rec: args: \n{args}" + if args.size ≠ 2 then throwError "Recursive call with invalid number of parameters: {args}" + let i_arg := args.get! 0 + let x_arg := args.get! 1 + let eIsValid ← mkAppM ``FixI.is_valid_p_rec #[k_var, i_arg, x_arg] + trace[Diverge.def.valid] "rec: result: \n{eIsValid}" + pure eIsValid + else do + -- Remaining case: normal application. + -- It shouldn't use the continuation + proveNoKExprIsValid k_var e + +partial def proveMatchIsValid (k_var kk_var : Expr) (me : MatchInfo) : MetaM Expr := do + trace[Diverge.def.valid] "proveMatchIsValid: {me}" + -- Prove the validity of the branch expressions + let branchesValid:Array Expr ← me.branches.mapIdxM fun idx br => do + -- Go inside the lambdas - note that we have to be careful: some of the + -- binders might come from the match, and some of the binders might come + -- from the fact that the expression in the match is a lambda expression: + -- we use the branchesNumParams field for this reason + lambdaLetTelescope br fun xs br => do + let numParams := me.branchesNumParams.get! idx + let xs_beg := xs.extract 0 numParams + let xs_end := xs.extract numParams xs.size + let br ← mkLambdaFVars xs_end br + -- Prove that the branch expression is valid + let brValid ← proveExprIsValid k_var kk_var br + -- Reconstruct the lambda expression + mkLambdaFVars xs_beg brValid + trace[Diverge.def.valid] "branchesValid:\n{branchesValid}" + -- Put together: compute the motive. + -- It must be of the shape: + -- ``` + -- λ scrut => is_valid_p k (λ k => match scrut with ...) 
+ -- ``` + let validMotive : Expr ← do + -- The motive is a function of the scrutinees (i.e., a lambda expression): + -- introduce binders for the scrutinees + let declInfos := me.scruts.mapIdx fun idx scrut => + let name : Name := (.num (.str .anonymous "scrut") idx) + let ty := λ (_ : Array Expr) => inferType scrut + (name, ty) + withLocalDeclsD declInfos fun scrutVars => do + -- Create a match expression but where the scrutinees have been replaced + -- by variables + let params : Array (Option Expr) := me.params.map some + let motive : Option Expr := some me.motive + let scruts : Array (Option Expr) := scrutVars.map some + let branches : Array (Option Expr) := me.branches.map some + let args := params ++ [motive] ++ scruts ++ branches + let matchE ← mkAppOptM me.matcherName args + -- let matchE ← mkLambdaFVars scrutVars (← mkAppOptM me.matcherName args) + -- Wrap in the `is_valid_p` predicate + let matchE ← mkLambdaFVars #[kk_var] matchE + let validMotive ← mkAppM ``FixI.is_valid_p #[k_var, matchE] + -- Abstract away the scrutinee variables + mkLambdaFVars scrutVars validMotive + trace[Diverge.def.valid] "valid motive: {validMotive}" + -- Put together + let valid ← do + let params : Array (Option Expr) := me.params.map (λ _ => none) + let motive := some validMotive + let scruts := me.scruts.map some + let branches := branchesValid.map some + let args := params ++ [motive] ++ scruts ++ branches + mkAppOptM me.matcherName args + trace[Diverge.def.valid] "proveMatchIsValid:\n{valid}:\n{← inferType valid}" + pure valid + +end + +-- Prove that a single body (in the mutually recursive group) is valid +partial def proveSingleBodyIsValid (k_var : Expr) (preDef : PreDefinition) (bodyConst : Expr) : + MetaM Expr := do + trace[Diverge.def.valid] "proveSingleBodyIsValid: bodyConst: {bodyConst}" + -- Lookup the definition (`bodyConst` is the definition of the body, we want + -- to retrieve the value itself to dive inside) + let name := bodyConst.constName! + let env ← getEnv + let body := (env.constants.find! name).value! + trace[Diverge.def.valid] "body: {body}" + lambdaLetTelescope body fun xs body => do + assert! xs.size = 2 + let kk_var := xs.get! 0 + let x_var := xs.get! 
1 + -- State the type of the theorem to prove + let thmTy ← mkAppM ``FixI.is_valid_p + #[k_var, ← mkLambdaFVars #[kk_var] (← mkAppM' bodyConst #[kk_var, x_var])] + trace[Diverge.def.valid] "thmTy: {thmTy}" + -- Prove that the body is valid + let proof ← proveExprIsValid k_var kk_var body + let proof ← mkLambdaFVars #[k_var, x_var] proof + trace[Diverge.def.valid] "proveSingleBodyIsValid: proof:\n{proof}:\n{← inferType proof}" + -- The target type (we don't have to do this: this is simply a sanity check, + -- and this allows a nicer debugging output) + let thmTy ← do + let body ← mkAppM' bodyConst #[kk_var, x_var] + let body ← mkLambdaFVars #[kk_var] body + let ty ← mkAppM ``FixI.is_valid_p #[k_var, body] + mkForallFVars #[k_var, x_var] ty + trace[Diverge.def.valid] "proveSingleBodyIsValid: thmTy\n{thmTy}:\n{← inferType thmTy}" + -- Save the theorem + let name := preDef.declName ++ "sbody_is_valid" + let decl := Declaration.thmDecl { + name + levelParams := preDef.levelParams + type := thmTy + value := proof + all := [name] + } + addDecl decl + trace[Diverge.def.valid] "proveSingleBodyIsValid: added thm: {name}" + -- Return the theorem + pure (Expr.const name (preDef.levelParams.map .param)) + +partial def proveFunsBodyIsValid (inOutTys: Expr) (bodyFuns : Expr) + (k_var : Expr) (bodiesValid : Array Expr) : MetaM Expr := do + -- Create the big "and" expression, which groups the validity proof of the individual bodies + let rec mkValidConj (i : Nat) : MetaM Expr := do + if i = bodiesValid.size then + -- We reached the end + mkAppM ``FixI.Funs.is_valid_p_Nil #[k_var] + else do + -- We haven't reached the end: introduce a conjunction + let valid := bodiesValid.get! i + let valid ← mkAppM' valid #[k_var] + mkAppM ``And.intro #[valid, ← mkValidConj (i + 1)] + let andExpr ← mkValidConj 0 + -- Wrap in the `is_valid_p_is_valid_p` theorem, and abstract the continuation + let isValid ← mkAppM ``FixI.Funs.is_valid_p_is_valid_p #[inOutTys, k_var, bodyFuns, andExpr] + mkLambdaFVars #[k_var] isValid + +-- Prove that the mut rec body is valid +-- TODO: maybe this function should introduce k_var itself +def proveMutRecIsValid + (grName : Name) (grLvlParams : List Name) + (inOutTys : Expr) (bodyFuns mutRecBodyConst : Expr) + (k_var : Expr) (preDefs : Array PreDefinition) + (bodies : Array Expr) : MetaM Expr := do + -- First prove that the individual bodies are valid + let bodiesValid ← + bodies.mapIdxM fun idx body => do + let preDef := preDefs.get! idx + proveSingleBodyIsValid k_var preDef body + -- Then prove that the mut rec body is valid + let isValid ← proveFunsBodyIsValid inOutTys bodyFuns k_var bodiesValid + -- Save the theorem + let thmTy ← mkAppM ``FixI.is_valid #[mutRecBodyConst] + let name := grName ++ "mut_rec_body_is_valid" + let decl := Declaration.thmDecl { + name + levelParams := grLvlParams + type := thmTy + value := isValid + all := [name] + } + addDecl decl + trace[Diverge.def.valid] "proveFunsBodyIsValid: added thm: {name}:\n{thmTy}" + -- Return the theorem + pure (Expr.const name (grLvlParams.map .param)) -- Generate the final definions by using the mutual body and the fixed point operator. 
-def mkDeclareFixDefs (mutBody : Expr) (preDefs : Array PreDefinition) : +def mkDeclareFixDefs (mutRecBody : Expr) (preDefs : Array PreDefinition) : TermElabM Unit := do let grSize := preDefs.size let _ ← preDefs.mapIdxM fun idx preDef => do @@ -357,7 +696,7 @@ def mkDeclareFixDefs (mutBody : Expr) (preDefs : Array PreDefinition) : -- Group the inputs into a dependent tuple let input ← mkSigmas xs.toList -- Apply the fixed point - let fixedBody ← mkAppM ``FixI.fix #[mutBody, idx, input] + let fixedBody ← mkAppM ``FixI.fix #[mutRecBody, idx, input] let fixedBody ← mkLambdaFVars xs fixedBody -- Create the declaration let name := preDef.declName @@ -454,24 +793,26 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do -- Introduce the continuation `k` let in_ty ← mkLambdaFVars #[i_var] in_ty let out_ty ← mkLambdaFVars #[i_var, input] out_ty - let k_var_ty ← mkAppM ``FixI.kk_ty #[i_var_ty, in_ty, out_ty] -- - trace[Diverge.def] "k_var_ty: {k_var_ty}" - withLocalDeclD (.num (.str .anonymous "k") 2) k_var_ty fun k_var => do - trace[Diverge.def] "k_var: {k_var}" + let kk_var_ty ← mkAppM ``FixI.kk_ty #[i_var_ty, in_ty, out_ty] + trace[Diverge.def] "kk_var_ty: {kk_var_ty}" + withLocalDeclD (.num (.str .anonymous "kk") 2) kk_var_ty fun kk_var => do + trace[Diverge.def] "kk_var: {kk_var}" -- Replace the recursive calls in all the function bodies by calls to the -- continuation `k` and and generate for those bodies declarations - let bodies ← mkDeclareUnaryBodies grLvlParams k_var preDefs + let bodies ← mkDeclareUnaryBodies grLvlParams kk_var preDefs -- Generate the mutually recursive body - let body ← mkDeclareMutualBody grName grLvlParams i_var k_var in_ty out_ty inOutTys.toList bodies - trace[Diverge.def] "mut rec body (after decl): {body}" + let (bodyFuns, mutRecBody) ← mkDeclareMutRecBody grName grLvlParams kk_var i_var in_ty out_ty inOutTys.toList bodies + trace[Diverge.def] "mut rec body (after decl): {mutRecBody}" -- Prove that the mut rec body satisfies the validity criteria required by -- our fixed-point - -- TODO + let k_var_ty ← mkAppM ``FixI.k_ty #[i_var_ty, in_ty, out_ty] + withLocalDeclD (.num (.str .anonymous "k") 3) k_var_ty fun k_var => do + let isValidThm ← proveMutRecIsValid grName grLvlParams inOutTysExpr bodyFuns mutRecBody k_var preDefs bodies -- Generate the final definitions - let defs ← mkDeclareFixDefs body preDefs + let defs ← mkDeclareFixDefs mutRecBody preDefs -- Prove the unfolding equations -- TODO @@ -496,13 +837,10 @@ def addPreDefinitions (preDefs : Array PreDefinition) : TermElabM Unit := withLC for preDefs in cliques do trace[Diverge.elab] "{preDefs.map (·.declName)}" try - trace[Diverge.elab] "calling divRecursion" withRef (preDefs[0]!.ref) do divRecursion preDefs - trace[Diverge.elab] "divRecursion succeeded" catch ex => - -- If it failed, we - trace[Diverge.elab] "divRecursion failed" + -- If it failed, we add the functions as partial functions hasErrors := true logException ex let s ← saveState @@ -600,7 +938,8 @@ divergent def list_nth {a: Type} (ls : List a) (i : Int) : Result a := else return (← list_nth ls (i - 1)) #print list_nth.in_out_ty -#check list_nth.body +#check list_nth.sbody +#check list_nth.mut_rec_body #print list_nth mutual diff --git a/backends/lean/Base/Diverge/ElabBase.lean b/backends/lean/Base/Diverge/ElabBase.lean index 82f79f94..281dbd6c 100644 --- a/backends/lean/Base/Diverge/ElabBase.lean +++ b/backends/lean/Base/Diverge/ElabBase.lean @@ -8,6 +8,7 @@ initialize registerTraceClass `Diverge.elab initialize 
registerTraceClass `Diverge.def initialize registerTraceClass `Diverge.def.sigmas initialize registerTraceClass `Diverge.def.genBody +initialize registerTraceClass `Diverge.def.valid -- TODO: move -- TODO: small helper -- cgit v1.2.3 From 7ceab6a725e5bd17c05bfd381753e453b15afaf7 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 3 Jul 2023 16:46:59 +0200 Subject: Add a missing case in the validity proofs --- backends/lean/Base/Diverge/Elab.lean | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index cf40ea8f..063480a2 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -378,17 +378,22 @@ mutual partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do trace[Diverge.def.valid] "proveValid: {e}" match e with + | .const _ _ => throwError "Unimplemented" -- Shouldn't get there? | .bvar _ | .fvar _ - | .mvar _ - | .sort _ | .lit _ - | .const _ _ => throwError "Unimplemented" + | .mvar _ + | .sort _ => throwError "Unreachable" | .lam .. => throwError "Unimplemented" | .forallE .. => throwError "Unreachable" -- Shouldn't get there - | .letE .. => throwError "TODO" - -- lambdaLetTelescope e fun xs b => mapVisitBinders xs do - -- mkLambdaFVars xs (← mapVisit k b) (usedLetOnly := false) + | .letE dName dTy dValue body _nonDep => do + -- Introduce a local declaration for the let-binding + withLetDecl dName dTy dValue fun decl => do + let isValid ← proveExprIsValid k_var kk_var body + -- Add the let-binding around (rem.: the let-binding should be + -- *inside* the `is_valid_p`, not outside, but because it reduces + -- in the end it doesn't matter) + mkLetFVars #[decl] isValid | .mdata _ b => proveExprIsValid k_var kk_var b | .proj _ _ _ => -- The projection shouldn't use the continuation @@ -963,4 +968,12 @@ mutual if i > 20 then foo (i / 20) else .ret 42 end +-- Testing dependent branching and let-bindings +-- TODO: why the linter warning? +divergent def is_non_zero (i : Int) : Result Bool := + if _h:i = 0 then return false + else + let b := true + return b + end Diverge -- cgit v1.2.3 From 9214484c471ad931924865855687f9a2ffe255dd Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 3 Jul 2023 18:02:52 +0200 Subject: Automate the proofs of the unfolding theorems for Diverge --- backends/lean/Base/Diverge/Elab.lean | 107 +++++++++++++++++++++++++------ backends/lean/Base/Diverge/ElabBase.lean | 1 + 2 files changed, 89 insertions(+), 19 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index 063480a2..91c51a31 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -16,8 +16,9 @@ syntax (name := divergentDef) open Lean Elab Term Meta Primitives Lean.Meta set_option trace.Diverge.def true -set_option trace.Diverge.def.valid true +-- set_option trace.Diverge.def.valid true -- set_option trace.Diverge.def.sigmas true +set_option trace.Diverge.def.unfold true /- The following was copied from the `wfRecursion` function. 
-/ @@ -390,9 +391,10 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do -- Introduce a local declaration for the let-binding withLetDecl dName dTy dValue fun decl => do let isValid ← proveExprIsValid k_var kk_var body - -- Add the let-binding around (rem.: the let-binding should be - -- *inside* the `is_valid_p`, not outside, but because it reduces - -- in the end it doesn't matter) + -- Add the let-binding around. + -- Rem.: the let-binding should be *inside* the `is_valid_p`, not outside, + -- but because it reduces in the end it doesn't matter. More precisely: + -- `P (let x := v in y)` and `let x := v in P y` reduce to the same expression. mkLetFVars #[decl] isValid | .mdata _ b => proveExprIsValid k_var kk_var b | .proj _ _ _ => @@ -692,9 +694,9 @@ def proveMutRecIsValid -- Generate the final definions by using the mutual body and the fixed point operator. def mkDeclareFixDefs (mutRecBody : Expr) (preDefs : Array PreDefinition) : - TermElabM Unit := do + TermElabM (Array Name) := do let grSize := preDefs.size - let _ ← preDefs.mapIdxM fun idx preDef => do + let defs ← preDefs.mapIdxM fun idx preDef => do lambdaLetTelescope preDef.value fun xs _ => do -- Create the index let idx ← mkFinVal grSize idx.val @@ -715,7 +717,58 @@ def mkDeclareFixDefs (mutRecBody : Expr) (preDefs : Array PreDefinition) : all := [name] } addDecl decl - pure () + pure name + pure defs + +-- Prove the equations that we will use as unfolding theorems +partial def proveUnfoldingThms (isValidThm : Expr) (preDefs : Array PreDefinition) + (decls : Array Name) : MetaM Unit := do + let grSize := preDefs.size + let proveIdx (i : Nat) : MetaM Unit := do + let preDef := preDefs.get! i + let defName := decls.get! i + -- Retrieve the arguments + lambdaLetTelescope preDef.value fun xs body => do + trace[Diverge.def.unfold] "proveUnfoldingThms: xs: {xs}" + trace[Diverge.def.unfold] "proveUnfoldingThms: body: {body}" + -- The theorem statement + let thmTy ← do + -- The equation: the declaration gives the lhs, the pre-def gives the rhs + let lhs ← mkAppOptM defName (xs.map some) + let rhs := body + let eq ← mkAppM ``Eq #[lhs, rhs] + mkForallFVars xs eq + trace[Diverge.def.unfold] "proveUnfoldingThms: thm statement: {thmTy}" + -- The proof + -- Use the fixed-point equation + let proof ← mkAppM ``FixI.is_valid_fix_fixed_eq #[isValidThm] + -- Add the index + let idx ← mkFinVal grSize i + let proof ← mkAppM ``congr_fun #[proof, idx] + -- Add the input argument + let arg ← mkSigmas xs.toList + let proof ← mkAppM ``congr_fun #[proof, arg] + -- Abstract the arguments away + let proof ← mkLambdaFVars xs proof + trace[Diverge.def.unfold] "proveUnfoldingThms: proof: {proof}:\n{← inferType proof}" + -- Declare the theorem + let name := preDef.declName ++ "unfold" + let decl := Declaration.thmDecl { + name + levelParams := preDef.levelParams + type := thmTy + value := proof + all := [name] + } + addDecl decl + trace[Diverge.def.unfold] "proveUnfoldingThms: added thm: {name}:\n{thmTy}" + let rec prove (i : Nat) : MetaM Unit := do + if i = preDefs.size then pure () + else do + proveIdx i + prove (i + 1) + -- + prove 0 def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do let msg := toMessageData <| preDefs.map fun pd => (pd.declName, pd.levelParams, pd.type, pd.value) @@ -817,12 +870,12 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do let isValidThm ← proveMutRecIsValid grName grLvlParams inOutTysExpr bodyFuns mutRecBody k_var preDefs bodies -- Generate the final 
definitions - let defs ← mkDeclareFixDefs mutRecBody preDefs + let decls ← mkDeclareFixDefs mutRecBody preDefs - -- Prove the unfolding equations - -- TODO + -- Prove the unfolding theorems + proveUnfoldingThms isValidThm preDefs decls - -- Process the definitions + -- Process the definitions - TODO addAndCompilePartialRec preDefs -- The following function is copy&pasted from Lean.Elab.PreDefinition.Main @@ -942,10 +995,23 @@ divergent def list_nth {a: Type} (ls : List a) (i : Int) : Result a := if i = 0 then return x else return (← list_nth ls (i - 1)) -#print list_nth.in_out_ty -#check list_nth.sbody -#check list_nth.mut_rec_body -#print list_nth +example {a: Type} (ls : List a) : + ∀ (i : Int), + 0 ≤ i → i < ls.length → + ∃ x, list_nth ls i = .ret x := by + induction ls + . intro i hpos h; simp at h; linarith + . rename_i hd tl ih + intro i hpos h + rw [list_nth.unfold]; simp + split <;> simp [*] + . tauto + . -- TODO: we shouldn't have to do that + have hneq : 0 < i := by cases i <;> rename_i a _ <;> simp_all; cases a <;> simp_all + simp at h + have ⟨ x, ih ⟩ := ih (i - 1) (by linarith) (by linarith) + simp [ih] + tauto mutual divergent def is_even (i : Int) : Result Bool := @@ -955,10 +1021,8 @@ mutual if i = 0 then return false else return (← is_even (i - 1)) end -example (i : Int) : is_even i = .ret (i % 2 = 0) ∧ is_odd i = .ret (i % 2 ≠ 0) := by - induction i - unfold is_even - sorry +#print is_even.unfold +#print is_odd.unfold mutual divergent def foo (i : Int) : Result Nat := @@ -968,6 +1032,9 @@ mutual if i > 20 then foo (i / 20) else .ret 42 end +#print foo.unfold +#print bar.unfold + -- Testing dependent branching and let-bindings -- TODO: why the linter warning? divergent def is_non_zero (i : Int) : Result Bool := @@ -976,4 +1043,6 @@ divergent def is_non_zero (i : Int) : Result Bool := let b := true return b +#print is_non_zero.unfold + end Diverge diff --git a/backends/lean/Base/Diverge/ElabBase.lean b/backends/lean/Base/Diverge/ElabBase.lean index 281dbd6c..fd95291e 100644 --- a/backends/lean/Base/Diverge/ElabBase.lean +++ b/backends/lean/Base/Diverge/ElabBase.lean @@ -9,6 +9,7 @@ initialize registerTraceClass `Diverge.def initialize registerTraceClass `Diverge.def.sigmas initialize registerTraceClass `Diverge.def.genBody initialize registerTraceClass `Diverge.def.valid +initialize registerTraceClass `Diverge.def.unfold -- TODO: move -- TODO: small helper -- cgit v1.2.3 From 75fae6384716f24fe137283d4a41836782b9aec7 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 3 Jul 2023 19:26:27 +0200 Subject: Cleanup a bit Diverge/Elab.lean --- backends/lean/Base/Diverge/Elab.lean | 366 +++++++++++++++++++---------------- 1 file changed, 197 insertions(+), 169 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index 91c51a31..cc580265 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -15,39 +15,16 @@ syntax (name := divergentDef) open Lean Elab Term Meta Primitives Lean.Meta -set_option trace.Diverge.def true --- set_option trace.Diverge.def.valid true --- set_option trace.Diverge.def.sigmas true -set_option trace.Diverge.def.unfold true - /- The following was copied from the `wfRecursion` function. 
-/ open WF in -def mkList (xl : List Expr) (ty : Expr) : MetaM Expr := - match xl with - | [] => - mkAppOptM ``List.nil #[some ty] - | x :: tl => do - let tl ← mkList tl ty - mkAppOptM ``List.cons #[some ty, some x, some tl] - def mkProd (x y : Expr) : MetaM Expr := mkAppM ``Prod.mk #[x, y] def mkInOutTy (x y : Expr) : MetaM Expr := mkAppM ``FixI.mk_in_out_ty #[x, y] --- TODO: is there already such a utility somewhere? --- TODO: change to mkSigmas -def mkProds (tys : List Expr) : MetaM Expr := - match tys with - | [] => do pure (Expr.const ``PUnit.unit []) - | [ty] => do pure ty - | ty :: tys => do - let pty ← mkProds tys - mkAppM ``Prod.mk #[ty, pty] - -- Return the `a` in `Return a` def get_result_ty (ty : Expr) : MetaM Expr := ty.withApp fun f args => do @@ -56,26 +33,31 @@ def get_result_ty (ty : Expr) : MetaM Expr := else pure (args.get! 0) --- Group a list of expressions into a dependent tuple -def mkSigmas (xl : List Expr) : MetaM Expr := +/- Group a list of expressions into a dependent tuple. + + Example: + xl = [`a : Type`, `ls : List a`] + returns: + `⟨ (a:Type), (ls: List a) ⟩` + -/ +def mkSigmasVal (xl : List Expr) : MetaM Expr := match xl with | [] => do - trace[Diverge.def.sigmas] "mkSigmas: []" + trace[Diverge.def.sigmas] "mkSigmasVal: []" pure (Expr.const ``PUnit.unit []) | [x] => do - trace[Diverge.def.sigmas] "mkSigmas: [{x}]" + trace[Diverge.def.sigmas] "mkSigmasVal: [{x}]" pure x | fst :: xl => do - trace[Diverge.def.sigmas] "mkSigmas: [{fst}::{xl}]" + trace[Diverge.def.sigmas] "mkSigmasVal: [{fst}::{xl}]" let alpha ← Lean.Meta.inferType fst - let snd ← mkSigmas xl + let snd ← mkSigmasVal xl let snd_ty ← inferType snd let beta ← mkLambdaFVars #[fst] snd_ty - trace[Diverge.def.sigmas] "mkSigmas:\n{alpha}\n{beta}\n{fst}\n{snd}" + trace[Diverge.def.sigmas] "mkSigmasVal:\n{alpha}\n{beta}\n{fst}\n{snd}" mkAppOptM ``Sigma.mk #[some alpha, some beta, some fst, some snd] -/- Generate the input type of a function body, which is a sigma type (i.e., a - dependent tuple) which groups all its inputs. +/- Generate a Sigma type from a list of expressions. 
Example: - xl = [(a:Type), (ls:List a), (i:Int)] @@ -84,7 +66,7 @@ def mkSigmas (xl : List Expr) : MetaM Expr := `(a:Type) × (ls:List a) × (i:Int)` -/ -def mkSigmasTypesOfTypes (xl : List Expr) : MetaM Expr := +def mkSigmasType (xl : List Expr) : MetaM Expr := match xl with | [] => do trace[Diverge.def.sigmas] "mkSigmasOfTypes: []" @@ -96,15 +78,16 @@ def mkSigmasTypesOfTypes (xl : List Expr) : MetaM Expr := | x :: xl => do trace[Diverge.def.sigmas] "mkSigmasOfTypes: [{x}::{xl}]" let alpha ← Lean.Meta.inferType x - let sty ← mkSigmasTypesOfTypes xl + let sty ← mkSigmasType xl trace[Diverge.def.sigmas] "mkSigmasOfTypes: [{x}::{xl}]: alpha={alpha}, sty={sty}" let beta ← mkLambdaFVars #[x] sty trace[Diverge.def.sigmas] "mkSigmasOfTypes: ({alpha}) ({beta})" mkAppOptM ``Sigma #[some alpha, some beta] -def mk_indexed_name (index : Nat) : Name := .num (.str .anonymous "_uniq") index +def mkAnonymous (s : String) (i : Nat) : Name := + .num (.str .anonymous s) i -/- Given a list of values `[x0:ty0, ..., xn:ty1]` where every `xi` might use the previous +/- Given a list of values `[x0:ty0, ..., xn:ty1]`, where every `xi` might use the previous `xj` (j < i) and a value `out` which uses `x0`, ..., `xn`, generate the following expression: ``` @@ -112,20 +95,22 @@ def mk_indexed_name (index : Nat) : Name := .num (.str .anonymous "_uniq") index match x with | (x0, ..., xn) => out ``` - + The `index` parameter is used for naming purposes: we use it to numerotate the bound variables that we introduce. + We use this function to currify functions (the function bodies given to the + fixed-point operator must be unary functions). + Example: ======== - More precisely: - xl = `[a:Type, ls:List a, i:Int]` - out = `a` - index = 0 - generates: + generates (getting rid of most of the syntactic sugar): ``` - match scrut0 with + λ scrut0 => match scrut0 with | Sigma.mk x scrut1 => match scrut1 with | Sigma.mk ls i => @@ -138,21 +123,30 @@ partial def mkSigmasMatch (xl : List Expr) (out : Expr) (index : Nat := 0) : Met -- This would be unexpected throwError "mkSigmasMatch: empyt list of input parameters" | [x] => do - -- In the explanations above: inner match case + -- In the example given for the explanations: this is the inner match case trace[Diverge.def.sigmas] "mkSigmasMatch: [{x}]" mkLambdaFVars #[x] out | fst :: xl => do - -- In the explanations above: outer match case + -- In the example given for the explanations: this is the outer match case -- Remark: for the naming purposes, we use the same convention as for the - -- fields and parameters in `Sigma.casesOn and `Sigma.mk + -- fields and parameters in `Sigma.casesOn` and `Sigma.mk` (looking at + -- those definitions might help) + -- + -- We want to build the match expression: + -- ``` + -- λ scrut => + -- match scrut with + -- | Sigma.mk x ... 
-- the hole is given by a recursive call on the tail + -- ``` trace[Diverge.def.sigmas] "mkSigmasMatch: [{fst}::{xl}]" let alpha ← Lean.Meta.inferType fst - let snd_ty ← mkSigmasTypesOfTypes xl + let snd_ty ← mkSigmasType xl let beta ← mkLambdaFVars #[fst] snd_ty let snd ← mkSigmasMatch xl out (index + 1) - let scrut_ty ← mkSigmasTypesOfTypes (fst :: xl) - withLocalDeclD (mk_indexed_name index) scrut_ty fun scrut => do let mk ← mkLambdaFVars #[fst] snd + -- Introduce the "scrut" variable + let scrut_ty ← mkSigmasType (fst :: xl) + withLocalDeclD (mkAnonymous "scrut" index) scrut_ty fun scrut => do trace[Diverge.def.sigmas] "mkSigmasMatch: scrut: ({scrut}) : ({← inferType scrut})" -- TODO: make the computation of the motive more efficient let motive ← do @@ -166,38 +160,32 @@ partial def mkSigmasMatch (xl : List Expr) (out : Expr) (index : Nat := 0) : Met -- TODO: make this more efficient (we could change the output type of -- mkSigmasMatch mkSigmasMatch (fst :: xl) out_ty + -- The final expression: putting everything together trace[Diverge.def.sigmas] "mkSigmasMatch:\n ({alpha})\n ({beta})\n ({motive})\n ({scrut})\n ({mk})" let sm ← mkAppOptM ``Sigma.casesOn #[some alpha, some beta, some motive, some scrut, some mk] + -- Abstracting the "scrut" variable let sm ← mkLambdaFVars #[scrut] sm trace[Diverge.def.sigmas] "mkSigmasMatch: sm: {sm}" pure sm /- Small tests for list_nth: give a model of what `mkSigmasMatch` should generate -/ -private def list_nth_out_ty2 (a :Type) (scrut1: @Sigma (List a) (fun (_ls : List a) => Int)) := +private def list_nth_out_ty_inner (a :Type) (scrut1: @Sigma (List a) (fun (_ls : List a) => Int)) := @Sigma.casesOn (List a) (fun (_ls : List a) => Int) (fun (_scrut1:@Sigma (List a) (fun (_ls : List a) => Int)) => Type) scrut1 (fun (_ls : List a) (_i : Int) => Diverge.Primitives.Result a) -private def list_nth_out_ty1 (scrut0 : @Sigma (Type) (fun (a:Type) => +private def list_nth_out_ty_outer (scrut0 : @Sigma (Type) (fun (a:Type) => @Sigma (List a) (fun (_ls : List a) => Int))) := @Sigma.casesOn (Type) (fun (a:Type) => @Sigma (List a) (fun (_ls : List a) => Int)) (fun (_scrut0:@Sigma (Type) (fun (a:Type) => @Sigma (List a) (fun (_ls : List a) => Int))) => Type) scrut0 (fun (a : Type) (scrut1: @Sigma (List a) (fun (_ls : List a) => Int)) => - list_nth_out_ty2 a scrut1) + list_nth_out_ty_inner a scrut1) /- -/ --- TODO: move --- TODO: we can use Array.mapIdx -@[specialize] def mapiAux (i : Nat) (f : Nat → α → β) : List α → List β - | [] => [] - | a::as => f i a :: mapiAux (i+1) f as - -@[specialize] def mapi (f : Nat → α → β) : List α → List β := mapiAux 0 f - -- Return the expression: `Fin n` -- TODO: use more def mkFin (n : Nat) : Expr := @@ -212,15 +200,6 @@ def mkFinVal (n i : Nat) : MetaM Expr := do let ofNat ← mkAppOptM ``Fin.instOfNatFinHAddNatInstHAddInstAddNatOfNat #[n_lit, i_lit] mkAppOptM ``OfNat.ofNat #[none, none, ofNat] --- TODO: remove? 
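/- For intuition: `mkFin n` builds the type `Fin n`, and `mkFinVal n i` builds the
   term `(i : Fin n)` through the `OfNat` instance. These finite indices are used
   below to select which function of a mutually recursive group a recursive call
   targets. A minimal sketch of the kind of term produced (the literal `1`
   elaborated at type `Fin 2`):
   ```
   example : Fin 2 := 1
   ```
 -/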
-def mkFinValOld (n i : Nat) : MetaM Expr := do - let finTy := mkFin n - let ofNat ← mkAppM ``OfNat #[finTy, .lit (.natVal i)] - match ← trySynthInstance ofNat with - | LOption.some x => - mkAppOptM ``OfNat.ofNat #[none, none, x] - | _ => throwError "mkFinVal: could not synthesize an instance of {ofNat} " - /- Generate and declare as individual definitions the bodies for the individual funcions: - replace the recursive calls with calls to the continutation `k` - make those bodies take one single dependent tuple as input @@ -234,11 +213,11 @@ def mkDeclareUnaryBodies (grLvlParams : List Name) (kk_var : Expr) let grSize := preDefs.size -- Compute the map from name to index - the continuation has an indexed type: - -- we use the index (a finite number of type `Fin`) to control the function - -- we call at the recursive call + -- we use the index (a finite number of type `Fin`) to control which function + -- we call at the recursive call site. let nameToId : HashMap Name Nat := - let namesIds := mapi (fun i d => (d.declName, i)) preDefs.toList - HashMap.ofList namesIds + let namesIds := preDefs.mapIdx (fun i d => (d.declName, i.val)) + HashMap.ofList namesIds.toList trace[Diverge.def.genBody] "nameToId: {nameToId.toList}" @@ -260,7 +239,7 @@ def mkDeclareUnaryBodies (grLvlParams : List Name) (kk_var : Expr) -- Compute the index let i ← mkFinVal grSize id -- Put the arguments in one big dependent tuple - let args ← mkSigmas args.toList + let args ← mkSigmasVal args.toList mkAppM' kk_var #[i, args] else -- Not a recursive call: do nothing @@ -277,13 +256,14 @@ def mkDeclareUnaryBodies (grLvlParams : List Name) (kk_var : Expr) -- Replace the recursive calls let body ← mapVisit visit_e preDef.value - -- Change the type + -- Currify the function by grouping the arguments into a dependent tuple + -- (over which we match to retrieve the individual arguments). lambdaLetTelescope body fun args body => do let body ← mkSigmasMatch args.toList body 0 -- Add the declaration let value ← mkLambdaFVars #[kk_var] body - let name := preDef.declName.append "sbody" + let name := preDef.declName.append "body" let levelParams := grLvlParams let decl := Declaration.defnDecl { name := name @@ -304,7 +284,7 @@ def mkDeclareUnaryBodies (grLvlParams : List Name) (kk_var : Expr) -- Generate a unique function body from the bodies of the mutually recursive group, -- and add it as a declaration in the context. --- We return the list of bodies (of type `Funs ...`) and the mutually recursive body. +-- We return the list of bodies (of type `FixI.Funs ...`) and the mutually recursive body. 
def mkDeclareMutRecBody (grName : Name) (grLvlParams : List Name) (kk_var i_var : Expr) (in_ty out_ty : Expr) (inOutTys : List (Expr × Expr)) @@ -322,7 +302,7 @@ def mkDeclareMutRecBody (grName : Name) (grLvlParams : List Name) mkAppOptM ``FixI.Funs.Nil #[finTypeExpr, in_ty, out_ty] | (ity, oty) :: inOutTys, b :: bl => do -- Retrieving ity and oty - this is not very clean - let inOutTysExpr ← mkList (← inOutTys.mapM (λ (x, y) => mkInOutTy x y)) inOutTyType + let inOutTysExpr ← mkListLit inOutTyType (← inOutTys.mapM (λ (x, y) => mkInOutTy x y)) let fl ← mkFuns inOutTys bl mkAppOptM ``FixI.Funs.Cons #[finTypeExpr, in_ty, out_ty, ity, oty, inOutTysExpr, b, fl] | _, _ => throwError "mkDeclareMutRecBody: `tys` and `bodies` don't have the same length" @@ -345,7 +325,7 @@ def mkDeclareMutRecBody (grName : Name) (grLvlParams : List Name) all := [name] } addDecl decl - -- Return the constant + -- Return the bodies and the constant pure (bodyFuns, Lean.mkConst name (levelParams.map .param)) def isCasesExpr (e : Expr) : MetaM Bool := do @@ -367,7 +347,8 @@ instance : ToMessageData MatchInfo where -- This is not a very clean formatting, but we don't need more toMessageData := fun me => m!"\n- matcherName: {me.matcherName}\n- params: {me.params}\n- motive: {me.motive}\n- scruts: {me.scruts}\n- branchesNumParams: {me.branchesNumParams}\n- branches: {me.branches}" --- An expression which doesn't use the continuation kk is valid +-- Small helper: prove that an expression which doesn't use the continuation `kk` +-- is valid, and return the proof. def proveNoKExprIsValid (k_var : Expr) (e : Expr) : MetaM Expr := do trace[Diverge.def.valid] "proveNoKExprIsValid: {e}" let eIsValid ← mkAppM ``FixI.is_valid_p_same #[k_var, e] @@ -376,6 +357,14 @@ def proveNoKExprIsValid (k_var : Expr) (e : Expr) : MetaM Expr := do mutual +/- Prove that an expression is valid, and return the proof. + + More precisely, if `e` is an expression which potentially uses the continution + `kk`, return an expression of type: + ``` + is_valid_p k (λ kk => e) + ``` + -/ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do trace[Diverge.def.valid] "proveValid: {e}" match e with @@ -403,7 +392,9 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do | .app .. => e.withApp fun f args => do -- There are several cases: first, check if this is a match/if - -- The expression is a (dependent) if then else + -- Check if the expression is a (dependent) if then else. + -- We treat the if then else expressions differently from the other matches, + -- and have dedicated theorems for them. let isIte := e.isIte if isIte || e.isDIte then do e.withApp fun f args => do @@ -431,9 +422,9 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do let eIsValid ← mkAppOptM const #[none, none, none, none, some k_var, some cond, some dec, none, none, some br0Valid, some br1Valid] trace[Diverge.def.valid] "ite/dite: result:\n{eIsValid}:\n{← inferType eIsValid}" pure eIsValid - -- The expression is a match (this case is for when the elaborator + -- Check if the expression is a match (this case is for when the elaborator -- introduces auxiliary definitions to hide the match behind syntactic - -- sugar) + -- sugar): else if let some me := ← matchMatcherApp? 
e then do trace[Diverge.def.valid] "matcherApp: @@ -443,7 +434,8 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do - altNumParams: {me.altNumParams} - alts: {me.alts} - remaining: {me.remaining}" - -- matchMatcherApp has already done the work for us + -- matchMatcherApp does all the work for us: we simply need to gather + -- the information and call the auxiliary helper `proveMatchIsValid` if me.remaining.size ≠ 0 then throwError "MatcherApp: non empty remaining array: {me.remaining}" let me : MatchInfo := { @@ -456,14 +448,21 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do branches := me.alts } proveMatchIsValid k_var kk_var me - -- The expression is a raw match (this case is for when the expression - -- is a direct call to the primitive `casesOn` function, without - -- syntactic sugar) + -- Check if the expression is a raw match (this case is for when the expression + -- is a direct call to the primitive `casesOn` function, without syntactic sugar). + -- We have to check this case because functions like `mkSigmasMatch`, which we + -- use to currify function bodies, introduce such raw matches. else if ← isCasesExpr f then do trace[Diverge.def.valid] "rawMatch: {e}" + -- Deconstruct the match, and call the auxiliary helper `proveMatchIsValid`. + -- -- The casesOn definition is always of the following shape: - -- input parameters (implicit parameters), then motive (implicit), - -- scrutinee (explicit), branches (explicit). + -- - input parameters (implicit parameters) + -- - motive (implicit), -- the motive gives the return type of the match + -- - scrutinee (explicit) + -- - branches (explicit). + -- In particular, we notice that the scrutinee is the first *explicit* + -- parameter - this is how we spot it. let matcherName := f.constName! let matcherLevels := f.constLevels!.toArray -- Find the first explicit parameter: this is the scrutinee @@ -484,7 +483,9 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do let scrut := args.get! scrutIdx let branches := args.extract (scrutIdx + 1) args.size -- Compute the number of parameters for the branches: for this we use - -- the type of the uninstantiated casesOn constant + -- the type of the uninstantiated casesOn constant (we can't just + -- destruct the lambdas in the branch expressions because the result + -- of a match might be a lambda expression). let branchesNumParams : Array Nat ← do let env ← getEnv let decl := env.constants.find! matcherName @@ -505,9 +506,11 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do branches, } proveMatchIsValid k_var kk_var me - -- Monadic let-binding + -- Check if this is a monadic let-binding else if f.isConstOf ``Bind.bind then do trace[Diverge.def.valid] "bind:\n{args}" + -- We simply need to prove that the subexpressions are valid, and call + -- the appropriate lemma. let x := args.get! 4 let y := args.get! 5 -- Prove that the subexpressions are valid @@ -529,7 +532,7 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do -- Put everything together trace[Diverge.def.valid] "bind:\n- xValid: {xValid}: {← inferType xValid}\n- yValid: {yValid}: {← inferType yValid}" mkAppM ``FixI.is_valid_p_bind #[xValid, yValid] - -- Recursive call + -- Check if this is a recursive call, i.e., a call to the continuation `kk` else if f.isFVarOf kk_var.fvarId! 
then do trace[Diverge.def.valid] "rec: args: \n{args}" if args.size ≠ 2 then throwError "Recursive call with invalid number of parameters: {args}" @@ -540,9 +543,10 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do pure eIsValid else do -- Remaining case: normal application. - -- It shouldn't use the continuation + -- It shouldn't use the continuation. proveNoKExprIsValid k_var e +-- Prove that a match expression is valid. partial def proveMatchIsValid (k_var kk_var : Expr) (me : MatchInfo) : MetaM Expr := do trace[Diverge.def.valid] "proveMatchIsValid: {me}" -- Prove the validity of the branch expressions @@ -561,16 +565,18 @@ partial def proveMatchIsValid (k_var kk_var : Expr) (me : MatchInfo) : MetaM Exp -- Reconstruct the lambda expression mkLambdaFVars xs_beg brValid trace[Diverge.def.valid] "branchesValid:\n{branchesValid}" - -- Put together: compute the motive. - -- It must be of the shape: + -- Compute the motive, which has the following shape: -- ``` -- λ scrut => is_valid_p k (λ k => match scrut with ...) + -- ^^^^^^^^^^^^^^^^^^^^ + -- this is the original match expression, with the + -- the difference that the scrutinee(s) is a variable -- ``` let validMotive : Expr ← do -- The motive is a function of the scrutinees (i.e., a lambda expression): -- introduce binders for the scrutinees let declInfos := me.scruts.mapIdx fun idx scrut => - let name : Name := (.num (.str .anonymous "scrut") idx) + let name : Name := mkAnonymous "scrut" idx let ty := λ (_ : Array Expr) => inferType scrut (name, ty) withLocalDeclsD declInfos fun scrutVars => do @@ -582,7 +588,6 @@ partial def proveMatchIsValid (k_var kk_var : Expr) (me : MatchInfo) : MetaM Exp let branches : Array (Option Expr) := me.branches.map some let args := params ++ [motive] ++ scruts ++ branches let matchE ← mkAppOptM me.matcherName args - -- let matchE ← mkLambdaFVars scrutVars (← mkAppOptM me.matcherName args) -- Wrap in the `is_valid_p` predicate let matchE ← mkLambdaFVars #[kk_var] matchE let validMotive ← mkAppM ``FixI.is_valid_p #[k_var, matchE] @@ -591,6 +596,7 @@ partial def proveMatchIsValid (k_var kk_var : Expr) (me : MatchInfo) : MetaM Exp trace[Diverge.def.valid] "valid motive: {validMotive}" -- Put together let valid ← do + -- We let Lean infer the parameters let params : Array (Option Expr) := me.params.map (λ _ => none) let motive := some validMotive let scruts := me.scruts.map some @@ -602,12 +608,16 @@ partial def proveMatchIsValid (k_var kk_var : Expr) (me : MatchInfo) : MetaM Exp end --- Prove that a single body (in the mutually recursive group) is valid -partial def proveSingleBodyIsValid (k_var : Expr) (preDef : PreDefinition) (bodyConst : Expr) : +-- Prove that a single body (in the mutually recursive group) is valid. +-- +-- For instance, if we define the mutually recursive group [`is_even`, `is_odd`], +-- we prove that `is_even.body` and `is_odd.body` are valid. +partial def proveSingleBodyIsValid + (k_var : Expr) (preDef : PreDefinition) (bodyConst : Expr) : MetaM Expr := do trace[Diverge.def.valid] "proveSingleBodyIsValid: bodyConst: {bodyConst}" - -- Lookup the definition (`bodyConst` is the definition of the body, we want - -- to retrieve the value itself to dive inside) + -- Lookup the definition (`bodyConst` is a const, we want to retrieve its + -- definition to dive inside) let name := bodyConst.constName! let env ← getEnv let body := (env.constants.find! name).value! 
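/- To make the goal of this proof concrete: for the `is_even`/`is_odd` test group
   defined further below, the theorem generated here states that the unary body is
   valid for every continuation and every input. A sketch of its statement, eliding
   the exact elaborated form and the dependent input/output types:
   ```
   -- sketch of the generated statement (not the exact elaborated form)
   theorem is_even.body_is_valid :
     ∀ k x, FixI.is_valid_p k (fun kk => is_even.body kk x)
   ```
 -/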
@@ -633,7 +643,7 @@ partial def proveSingleBodyIsValid (k_var : Expr) (preDef : PreDefinition) (body mkForallFVars #[k_var, x_var] ty trace[Diverge.def.valid] "proveSingleBodyIsValid: thmTy\n{thmTy}:\n{← inferType thmTy}" -- Save the theorem - let name := preDef.declName ++ "sbody_is_valid" + let name := preDef.declName ++ "body_is_valid" let decl := Declaration.thmDecl { name levelParams := preDef.levelParams @@ -646,6 +656,11 @@ partial def proveSingleBodyIsValid (k_var : Expr) (preDef : PreDefinition) (body -- Return the theorem pure (Expr.const name (preDef.levelParams.map .param)) +-- Prove that the list of bodies are valid. +-- +-- For instance, if we define the mutually recursive group [`is_even`, `is_odd`], +-- we prove that `Funs.Cons is_even.body (Funs.Cons is_odd.body Funs.Nil)` is +-- valid. partial def proveFunsBodyIsValid (inOutTys: Expr) (bodyFuns : Expr) (k_var : Expr) (bodiesValid : Array Expr) : MetaM Expr := do -- Create the big "and" expression, which groups the validity proof of the individual bodies @@ -663,7 +678,13 @@ partial def proveFunsBodyIsValid (inOutTys: Expr) (bodyFuns : Expr) let isValid ← mkAppM ``FixI.Funs.is_valid_p_is_valid_p #[inOutTys, k_var, bodyFuns, andExpr] mkLambdaFVars #[k_var] isValid --- Prove that the mut rec body is valid +-- Prove that the mut rec body (i.e., the unary body which groups the bodies +-- of all the functions in the mutually recursive group and on which we will +-- apply the fixed-point operator) is valid. +-- +-- We save the proof in the theorem "[GROUP_NAME]."mut_rec_body_is_valid", +-- which we return. +-- -- TODO: maybe this function should introduce k_var itself def proveMutRecIsValid (grName : Name) (grLvlParams : List Name) @@ -693,6 +714,12 @@ def proveMutRecIsValid pure (Expr.const name (grLvlParams.map .param)) -- Generate the final definions by using the mutual body and the fixed point operator. +-- +-- For instance: +-- ``` +-- def is_even (i : Int) : Result Bool := mut_rec_body 0 i +-- def is_odd (i : Int) : Result Bool := mut_rec_body 1 i +-- ``` def mkDeclareFixDefs (mutRecBody : Expr) (preDefs : Array PreDefinition) : TermElabM (Array Name) := do let grSize := preDefs.size @@ -701,7 +728,7 @@ def mkDeclareFixDefs (mutRecBody : Expr) (preDefs : Array PreDefinition) : -- Create the index let idx ← mkFinVal grSize idx.val -- Group the inputs into a dependent tuple - let input ← mkSigmas xs.toList + let input ← mkSigmasVal xs.toList -- Apply the fixed point let fixedBody ← mkAppM ``FixI.fix #[mutRecBody, idx, input] let fixedBody ← mkLambdaFVars xs fixedBody @@ -746,7 +773,7 @@ partial def proveUnfoldingThms (isValidThm : Expr) (preDefs : Array PreDefinitio let idx ← mkFinVal grSize i let proof ← mkAppM ``congr_fun #[proof, idx] -- Add the input argument - let arg ← mkSigmas xs.toList + let arg ← mkSigmasVal xs.toList let proof ← mkAppM ``congr_fun #[proof, arg] -- Abstract the arguments away let proof ← mkLambdaFVars xs proof @@ -774,11 +801,6 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do let msg := toMessageData <| preDefs.map fun pd => (pd.declName, pd.levelParams, pd.type, pd.value) trace[Diverge.def] ("divRecursion: defs: " ++ msg) - -- CHANGE HERE This function should add definitions with these names/types/values ^^ - -- Temporarily add the predefinitions as axioms - -- for preDef in preDefs do - -- addAsAxiom preDef - -- TODO: what is this? 
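/- Concretely, for the `is_even`/`is_odd` test group defined further below, the
   unfolding theorem generated by `proveUnfoldingThms` has roughly the following
   statement (a sketch, eliding the exact elaborated form):
   ```
   -- sketch of the generated statement
   theorem is_even.unfold (i : Int) :
     is_even i = (if i = 0 then return true else return (← is_odd (i - 1)))
   ```
   The proof applies `congr_fun` to the fixed-point equation obtained from the
   validity theorem (cf. `is_valid_fix_fixed_eq`), and the result is meant to be
   used with `rw`/`simp`, as in the `list_nth` example below.
 -/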
for preDef in preDefs do applyAttributesOf #[preDef] AttributeApplicationTime.afterCompilation @@ -803,7 +825,7 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do if preDef.levelParams ≠ grLvlParams then throwError "Non-uniform polymorphism in the universes" forallTelescope preDef.type (fun in_tys out_ty => do - let in_ty ← liftM (mkSigmasTypesOfTypes in_tys.toList) + let in_ty ← liftM (mkSigmasType in_tys.toList) -- Retrieve the type in the "Result" let out_ty ← get_result_ty out_ty let out_ty ← liftM (mkSigmasMatch in_tys.toList out_ty) @@ -813,14 +835,14 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do trace[Diverge.def] "inOutTys: {inOutTys}" -- Turn the list of input/output type pairs into an expresion let inOutTysExpr ← inOutTys.mapM (λ (x, y) => mkInOutTy x y) - let inOutTysExpr ← mkList inOutTysExpr.toList (← inferType (inOutTysExpr.get! 0)) + let inOutTysExpr ← mkListLit (← inferType (inOutTysExpr.get! 0)) inOutTysExpr.toList -- From the list of pairs of input/output types, actually compute the -- type of the continuation `k`. -- We first introduce the index `i : Fin n` where `n` is the number of -- functions in the group. let i_var_ty := mkFin preDefs.size - withLocalDeclD (.num (.str .anonymous "i") 0) i_var_ty fun i_var => do + withLocalDeclD (mkAnonymous "i" 0) i_var_ty fun i_var => do let in_out_ty ← mkAppM ``List.get #[inOutTysExpr, i_var] trace[Diverge.def] "in_out_ty := {in_out_ty} : {← inferType in_out_ty}" -- Add an auxiliary definition for `in_out_ty` @@ -844,7 +866,7 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do trace[Diverge.def] "in_out_ty (after decl) := {in_out_ty} : {← inferType in_out_ty}" let in_ty ← mkAppM ``Sigma.fst #[in_out_ty] trace[Diverge.def] "in_ty: {in_ty}" - withLocalDeclD (.num (.str .anonymous "x") 1) in_ty fun input => do + withLocalDeclD (mkAnonymous "x" 1) in_ty fun input => do let out_ty ← mkAppM' (← mkAppM ``Sigma.snd #[in_out_ty]) #[input] trace[Diverge.def] "out_ty: {out_ty}" @@ -853,7 +875,7 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do let out_ty ← mkLambdaFVars #[i_var, input] out_ty let kk_var_ty ← mkAppM ``FixI.kk_ty #[i_var_ty, in_ty, out_ty] trace[Diverge.def] "kk_var_ty: {kk_var_ty}" - withLocalDeclD (.num (.str .anonymous "kk") 2) kk_var_ty fun kk_var => do + withLocalDeclD (mkAnonymous "kk" 2) kk_var_ty fun kk_var => do trace[Diverge.def] "kk_var: {kk_var}" -- Replace the recursive calls in all the function bodies by calls to the @@ -866,7 +888,7 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do -- Prove that the mut rec body satisfies the validity criteria required by -- our fixed-point let k_var_ty ← mkAppM ``FixI.k_ty #[i_var_ty, in_ty, out_ty] - withLocalDeclD (.num (.str .anonymous "k") 3) k_var_ty fun k_var => do + withLocalDeclD (mkAnonymous "k" 3) k_var_ty fun k_var => do let isValidThm ← proveMutRecIsValid grName grLvlParams inOutTysExpr bodyFuns mutRecBody k_var preDefs bodies -- Generate the final definitions @@ -915,7 +937,7 @@ def addPreDefinitions (preDefs : Array PreDefinition) : TermElabM Unit := withLC else return () catch _ => s.restore --- The following two functions are copy&pasted from Lean.Elab.MutualDef +-- The following two functions are copy-pasted from Lean.Elab.MutualDef open private elabHeaders levelMVarToParamHeaders getAllUserLevelNames withFunLocalDecls elabFunValues instantiateMVarsAtHeader instantiateMVarsAtLetRecToLift checkLetRecsToLiftTypes withUsed from 
Lean.Elab.MutualDef @@ -988,61 +1010,67 @@ elab_rules : command else Command.elabCommand <| ← `(namespace $(mkIdentFrom id ns) $cmd end $(mkIdentFrom id ns)) -divergent def list_nth {a: Type} (ls : List a) (i : Int) : Result a := - match ls with - | [] => .fail .panic - | x :: ls => - if i = 0 then return x - else return (← list_nth ls (i - 1)) - -example {a: Type} (ls : List a) : - ∀ (i : Int), - 0 ≤ i → i < ls.length → - ∃ x, list_nth ls i = .ret x := by - induction ls - . intro i hpos h; simp at h; linarith - . rename_i hd tl ih - intro i hpos h - rw [list_nth.unfold]; simp - split <;> simp [*] - . tauto - . -- TODO: we shouldn't have to do that - have hneq : 0 < i := by cases i <;> rename_i a _ <;> simp_all; cases a <;> simp_all - simp at h - have ⟨ x, ih ⟩ := ih (i - 1) (by linarith) (by linarith) - simp [ih] - tauto - -mutual - divergent def is_even (i : Int) : Result Bool := - if i = 0 then return true else return (← is_odd (i - 1)) - - divergent def is_odd (i : Int) : Result Bool := - if i = 0 then return false else return (← is_even (i - 1)) -end - -#print is_even.unfold -#print is_odd.unfold - -mutual - divergent def foo (i : Int) : Result Nat := - if i > 10 then return (← foo (i / 10)) + (← bar i) else bar 10 - - divergent def bar (i : Int) : Result Nat := - if i > 20 then foo (i / 20) else .ret 42 -end - -#print foo.unfold -#print bar.unfold - --- Testing dependent branching and let-bindings --- TODO: why the linter warning? -divergent def is_non_zero (i : Int) : Result Bool := - if _h:i = 0 then return false - else - let b := true - return b +namespace Tests + /- Some examples of partial functions -/ + + divergent def list_nth {a: Type} (ls : List a) (i : Int) : Result a := + match ls with + | [] => .fail .panic + | x :: ls => + if i = 0 then return x + else return (← list_nth ls (i - 1)) + + #check list_nth.unfold + + example {a: Type} (ls : List a) : + ∀ (i : Int), + 0 ≤ i → i < ls.length → + ∃ x, list_nth ls i = .ret x := by + induction ls + . intro i hpos h; simp at h; linarith + . rename_i hd tl ih + intro i hpos h + rw [list_nth.unfold]; simp + split <;> simp [*] + . tauto + . -- TODO: we shouldn't have to do that + have hneq : 0 < i := by cases i <;> rename_i a _ <;> simp_all; cases a <;> simp_all + simp at h + have ⟨ x, ih ⟩ := ih (i - 1) (by linarith) (by linarith) + simp [ih] + tauto + + mutual + divergent def is_even (i : Int) : Result Bool := + if i = 0 then return true else return (← is_odd (i - 1)) + + divergent def is_odd (i : Int) : Result Bool := + if i = 0 then return false else return (← is_even (i - 1)) + end + + #check is_even.unfold + #check is_odd.unfold + + mutual + divergent def foo (i : Int) : Result Nat := + if i > 10 then return (← foo (i / 10)) + (← bar i) else bar 10 + + divergent def bar (i : Int) : Result Nat := + if i > 20 then foo (i / 20) else .ret 42 + end + + #check foo.unfold + #check bar.unfold + + -- Testing dependent branching and let-bindings + -- TODO: why the linter warning? 
+ divergent def is_non_zero (i : Int) : Result Bool := + if _h:i = 0 then return false + else + let b := true + return b -#print is_non_zero.unfold + #check is_non_zero.unfold +end Tests end Diverge -- cgit v1.2.3 From 40e21034fa9e955734351b78a8cc5f16315418bd Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 4 Jul 2023 12:13:09 +0200 Subject: Add an implemented_by attribute to fix --- backends/lean/Base/Diverge/Base.lean | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean index 89365d25..a8503107 100644 --- a/backends/lean/Base/Diverge/Base.lean +++ b/backends/lean/Base/Diverge/Base.lean @@ -57,6 +57,12 @@ deriving Repr, BEq open Result +instance Result_Inhabited (α : Type u) : Inhabited (Result α) := + Inhabited.mk (fail panic) + +instance Result_Nonempty (α : Type u) : Nonempty (Result α) := + Nonempty.intro div + def bind {α : Type u} {β : Type v} (x: Result α) (f: α -> Result β) : Result β := match x with | ret v => f v @@ -156,7 +162,14 @@ namespace Fix (x : a) (n : Nat) : Prop := fix_fuel_pred f x n - noncomputable + partial + def fixImpl (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : Result (b x) := + f (fixImpl f) x + + -- The fact that `fix` is implemented by `fixImpl` allows us to not mark the + -- functions defined with the fixed-point as noncomputable. One big advantage + -- is that it allows us to evaluate those functions, for instance with #eval. + @[implemented_by fixImpl] def fix (f : ((x:a) → Result (b x)) → (x:a) → Result (b x)) (x : a) : Result (b x) := fix_fuel (least (fix_fuel_P f x)) f x @@ -548,7 +561,7 @@ namespace FixI def is_valid (f : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)) : Prop := ∀ k i x, is_valid_p k (λ k => f k i x) - noncomputable def fix + def fix (f : ((i:id) → (x:a i) → Result (b i x)) → (i:id) → (x:a i) → Result (b i x)) : (i:id) → (x:a i) → Result (b i x) := kk_of_gen (Fix.fix (k_to_gen f)) @@ -808,7 +821,6 @@ namespace Ex1 split <;> simp split <;> simp - noncomputable def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) -- The unfolding equation - diverges if `i < 0` @@ -851,7 +863,6 @@ namespace Ex2 split <;> simp apply is_valid_p_bind <;> intros <;> simp_all - noncomputable def list_nth (ls : List a) (i : Int) : Result a := fix list_nth_body (ls, i) -- The unfolding equation - diverges if `i < 0` @@ -932,7 +943,6 @@ namespace Ex3 apply is_valid_p_bind; simp intros; split <;> simp - noncomputable def is_even (i : Int): Result Bool := do let r ← fix is_even_is_odd_body (.inl i) @@ -940,7 +950,6 @@ namespace Ex3 | .inl b => .ret b | .inr _ => .fail .panic - noncomputable def is_odd (i : Int): Result Bool := do let r ← fix is_even_is_odd_body (.inr i) @@ -1032,8 +1041,8 @@ namespace Ex4 theorem body_fix_eq : fix body = body (fix body) := is_valid_fix_fixed_eq body_is_valid - noncomputable def is_even (i : Int) : Result Bool := fix body 0 i - noncomputable def is_odd (i : Int) : Result Bool := fix body 1 i + def is_even (i : Int) : Result Bool := fix body 0 i + def is_odd (i : Int) : Result Bool := fix body 1 i theorem is_even_eq (i : Int) : is_even i = (if i = 0 @@ -1052,7 +1061,6 @@ namespace Ex4 .ret b) := by simp [is_even, is_odd]; conv => lhs; rw [body_fix_eq] - end Ex4 namespace Ex5 @@ -1109,7 +1117,7 @@ namespace Ex5 intro k x simp only [is_valid_p_same, is_valid_p_rec] - noncomputable def id (t : Tree a) := fix id_body t + def id (t 
: Tree a) := fix id_body t -- The unfolding equation theorem id_eq (t : Tree a) : @@ -1183,7 +1191,6 @@ namespace Ex6 Funs.is_valid_p_is_valid_p tys k bodies (And.intro (list_nth_body_is_valid' k) (Funs.is_valid_p_Nil k)) - noncomputable def list_nth {a: Type u} (ls : List a) (i : Int) : Result a := fix body 0 ⟨ a, ls , i ⟩ -- cgit v1.2.3 From 4fd17e4bb91eb46d4704643dfbfbbf0874837b07 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 4 Jul 2023 12:49:37 +0200 Subject: Make Diverge use Primitives --- backends/lean/Base/Diverge/Base.lean | 65 +------------------------------- backends/lean/Base/Diverge/Elab.lean | 2 +- backends/lean/Base/Diverge/ElabBase.lean | 8 ++-- backends/lean/Base/Primitives.lean | 25 ++---------- 4 files changed, 10 insertions(+), 90 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean index a8503107..e22eb914 100644 --- a/backends/lean/Base/Diverge/Base.lean +++ b/backends/lean/Base/Diverge/Base.lean @@ -4,8 +4,7 @@ import Init.Data.List.Basic import Mathlib.Tactic.RunCmd import Mathlib.Tactic.Linarith --- For debugging -import Base.Diverge.ElabBase +import Base.Primitives /- TODO: @@ -35,68 +34,6 @@ set_option profiler.threshold 100 namespace Diverge -namespace Primitives -/-! # Copy-pasting from Primitives to make the file self-contained -/ - -inductive Error where - | assertionFailure: Error - | integerOverflow: Error - | divisionByZero: Error - | arrayOutOfBounds: Error - | maximumSizeExceeded: Error - | panic: Error -deriving Repr, BEq - -open Error - -inductive Result (α : Type u) where - | ret (v: α): Result α - | fail (e: Error): Result α - | div -deriving Repr, BEq - -open Result - -instance Result_Inhabited (α : Type u) : Inhabited (Result α) := - Inhabited.mk (fail panic) - -instance Result_Nonempty (α : Type u) : Nonempty (Result α) := - Nonempty.intro div - -def bind {α : Type u} {β : Type v} (x: Result α) (f: α -> Result β) : Result β := - match x with - | ret v => f v - | fail v => fail v - | div => div - -@[simp] theorem bind_ret (x : α) (f : α → Result β) : bind (.ret x) f = f x := by simp [bind] -@[simp] theorem bind_fail (x : Error) (f : α → Result β) : bind (.fail x) f = .fail x := by simp [bind] -@[simp] theorem bind_div (f : α → Result β) : bind .div f = .div := by simp [bind] - --- Allows using Result in do-blocks -instance : Bind Result where - bind := bind - --- Allows using return x in do-blocks -instance : Pure Result where - pure := fun x => ret x - -@[simp] theorem bind_tc_ret (x : α) (f : α → Result β) : - (do let y ← .ret x; f y) = f x := by simp [Bind.bind, bind] - -@[simp] theorem bind_tc_fail (x : Error) (f : α → Result β) : - (do let y ← fail x; f y) = fail x := by simp [Bind.bind, bind] - -@[simp] theorem bind_tc_div (f : α → Result β) : - (do let y ← div; f y) = div := by simp [Bind.bind, bind] - -def div? 
{α: Type u} (r: Result α): Bool := - match r with - | div => true - | ret _ | fail _ => false - -end Primitives - namespace Fix open Primitives diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index cc580265..41209021 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -174,7 +174,7 @@ private def list_nth_out_ty_inner (a :Type) (scrut1: @Sigma (List a) (fun (_ls : (fun (_ls : List a) => Int) (fun (_scrut1:@Sigma (List a) (fun (_ls : List a) => Int)) => Type) scrut1 - (fun (_ls : List a) (_i : Int) => Diverge.Primitives.Result a) + (fun (_ls : List a) (_i : Int) => Primitives.Result a) private def list_nth_out_ty_outer (scrut0 : @Sigma (Type) (fun (a:Type) => @Sigma (List a) (fun (_ls : List a) => Int))) := diff --git a/backends/lean/Base/Diverge/ElabBase.lean b/backends/lean/Base/Diverge/ElabBase.lean index fd95291e..1c1062c0 100644 --- a/backends/lean/Base/Diverge/ElabBase.lean +++ b/backends/lean/Base/Diverge/ElabBase.lean @@ -4,6 +4,7 @@ namespace Diverge open Lean Elab Term Meta +-- We can't define and use trace classes in the same file initialize registerTraceClass `Diverge.elab initialize registerTraceClass `Diverge.def initialize registerTraceClass `Diverge.def.sigmas @@ -11,8 +12,8 @@ initialize registerTraceClass `Diverge.def.genBody initialize registerTraceClass `Diverge.def.valid initialize registerTraceClass `Diverge.def.unfold --- TODO: move --- TODO: small helper +-- Useful helper to explore definitions and figure out the variant +-- of their sub-expressions. def explore_term (incr : String) (e : Expr) : MetaM Unit := match e with | .bvar _ => do logInfo m!"{incr}bvar: {e}"; return () @@ -81,8 +82,7 @@ private def test2 (x : Nat) : Nat := x print_decl test1 print_decl test2 --- We adapted this from AbstractNestedProofs.visit --- A map visitor function for expressions +-- A map visitor function for expressions (adapted from `AbstractNestedProofs.visit`) partial def mapVisit (k : Expr → MetaM Expr) (e : Expr) : MetaM Expr := do let mapVisitBinders (xs : Array Expr) (k2 : MetaM Expr) : MetaM Expr := do let localInstances ← getLocalInstances diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index 1185a07d..117f76a2 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -60,12 +60,12 @@ instance Result_Nonempty (α : Type u) : Nonempty (Result α) := /- HELPERS -/ -def ret? {α: Type} (r: Result α): Bool := +def ret? {α: Type u} (r: Result α): Bool := match r with | ret _ => true | fail _ | div => false -def div? {α: Type} (r: Result α): Bool := +def div? {α: Type u} (r: Result α): Bool := match r with | div => true | ret _ | fail _ => false @@ -73,14 +73,14 @@ def div? {α: Type} (r: Result α): Bool := def massert (b:Bool) : Result Unit := if b then ret () else fail assertionFailure -def eval_global {α: Type} (x: Result α) (_: ret? x): α := +def eval_global {α: Type u} (x: Result α) (_: ret? x): α := match x with | fail _ | div => by contradiction | ret x => x /- DO-DSL SUPPORT -/ -def bind (x: Result α) (f: α -> Result β) : Result β := +def bind {α : Type u} {β : Type v} (x: Result α) (f: α -> Result β) : Result β := match x with | ret v => f v | fail v => fail v @@ -111,23 +111,6 @@ def Result.attach {α: Type} (o : Result α): Result { x : α // o = ret x } := | fail e => fail e | div => div -macro "let" e:term " ⟵ " f:term : doElem => - `(doElem| let ⟨$e, h⟩ ← Result.attach $f) - --- TODO: any way to factorize both definitions? 
-macro "let" e:term " <-- " f:term : doElem => - `(doElem| let ⟨$e, h⟩ ← Result.attach $f) - --- We call the hypothesis `h`, in effect making it unavailable to the user --- (because too much shadowing). But in practice, once can use the French single --- quote notation (input with f< and f>), where `‹ h ›` finds a suitable --- hypothesis in the context, this is equivalent to `have x: h := by assumption in x` -#eval do - let y <-- .ret (0: Nat) - let _: y = 0 := by cases ‹ ret 0 = ret y › ; decide - let r: { x: Nat // x = 0 } := ⟨ y, by assumption ⟩ - .ret r - @[simp] theorem bind_tc_ret (x : α) (f : α → Result β) : (do let y ← .ret x; f y) = f x := by simp [Bind.bind, bind] -- cgit v1.2.3 From 87d6f6c7c90bf7b427397d6bd2e2c70d610678e3 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 4 Jul 2023 14:57:51 +0200 Subject: Reorganize the Lean tests --- backends/lean/Base.lean | 1 - backends/lean/lake-manifest.json | 10 ++++++++-- backends/lean/lean-toolchain | 2 +- 3 files changed, 9 insertions(+), 4 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base.lean b/backends/lean/Base.lean index 6e9ff873..1f8cbc8e 100644 --- a/backends/lean/Base.lean +++ b/backends/lean/Base.lean @@ -1,4 +1,3 @@ import Base.Primitives import Base.Diverge -import Base.TestTactics import Base.Arith diff --git a/backends/lean/lake-manifest.json b/backends/lean/lake-manifest.json index e5d362fc..40eb1682 100644 --- a/backends/lean/lake-manifest.json +++ b/backends/lean/lake-manifest.json @@ -2,9 +2,15 @@ "packagesDir": "lake-packages", "packages": [{"git": + {"url": "https://github.com/EdAyers/ProofWidgets4", + "subDir?": null, + "rev": "c43db94a8f495dad37829e9d7ad65483d68c86b8", + "name": "proofwidgets", + "inputRev?": "v0.0.11"}}, + {"git": {"url": "https://github.com/leanprover-community/mathlib4.git", "subDir?": null, - "rev": "cdb1b898e4317567699181f27533182046ebc544", + "rev": "4f103b3696795c62e76fb89d177efb91c29afdf5", "name": "mathlib", "inputRev?": null}}, {"git": @@ -22,6 +28,6 @@ {"git": {"url": "https://github.com/leanprover/std4", "subDir?": null, - "rev": "6932c4ea52914dc6b0488944e367459ddc4d01a6", + "rev": "e68aa8f5fe47aad78987df45f99094afbcb5e936", "name": "std", "inputRev?": "main"}}]} diff --git a/backends/lean/lean-toolchain b/backends/lean/lean-toolchain index 1211e372..42e7d786 100644 --- a/backends/lean/lean-toolchain +++ b/backends/lean/lean-toolchain @@ -1 +1 @@ -leanprover/lean4:nightly-2023-05-31 \ No newline at end of file +leanprover/lean4:nightly-2023-06-20 \ No newline at end of file -- cgit v1.2.3 From bd873499f9a8d517cc948c6336a5c6ce856d846d Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 4 Jul 2023 17:30:35 +0200 Subject: Fix some issues with the extraction to Lean --- backends/lean/Base/Diverge/Elab.lean | 63 +++++++++++++++++++++++++++--------- 1 file changed, 47 insertions(+), 16 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index 41209021..4b08fe44 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -255,10 +255,11 @@ def mkDeclareUnaryBodies (grLvlParams : List Name) (kk_var : Expr) preDefs.mapM fun preDef => do -- Replace the recursive calls let body ← mapVisit visit_e preDef.value + trace[Diverge.def.genBody] "Body after replacement of the recursive calls: {body}" -- Currify the function by grouping the arguments into a dependent tuple -- (over which we match to retrieve the individual arguments). 
- lambdaLetTelescope body fun args body => do + lambdaTelescope body fun args body => do let body ← mkSigmasMatch args.toList body 0 -- Add the declaration @@ -376,15 +377,18 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do | .sort _ => throwError "Unreachable" | .lam .. => throwError "Unimplemented" | .forallE .. => throwError "Unreachable" -- Shouldn't get there - | .letE dName dTy dValue body _nonDep => do - -- Introduce a local declaration for the let-binding - withLetDecl dName dTy dValue fun decl => do + | .letE .. => do + -- Telescope all the let-bindings (remark: this also telescopes the lambdas) + lambdaLetTelescope e fun xs body => do + -- Note that we don't visit the bound values: there shouldn't be + -- recursive calls, lambda expressions, etc. inside + -- Prove that the body is valid let isValid ← proveExprIsValid k_var kk_var body - -- Add the let-binding around. + -- Add the let-bindings around. -- Rem.: the let-binding should be *inside* the `is_valid_p`, not outside, -- but because it reduces in the end it doesn't matter. More precisely: -- `P (let x := v in y)` and `let x := v in P y` reduce to the same expression. - mkLetFVars #[decl] isValid + mkLambdaFVars xs isValid (usedLetOnly := false) | .mdata _ b => proveExprIsValid k_var kk_var b | .proj _ _ _ => -- The projection shouldn't use the continuation @@ -410,7 +414,7 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do if isIte then proveExprIsValid k_var kk_var br else do -- There is a lambda -- TODO: how do we remove exacly *one* lambda? - lambdaLetTelescope br fun xs br => do + lambdaTelescope br fun xs br => do let x := xs.get! 0 let xs := xs.extract 1 xs.size let br ← mkLambdaFVars xs br @@ -518,7 +522,7 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do trace[Diverge.def.valid] "bind: xValid:\n{xValid}:\n{← inferType xValid}" let yValid ← do -- This is a lambda expression -- TODO: how do we remove exacly *one* lambda? - lambdaLetTelescope y fun xs y => do + lambdaTelescope y fun xs y => do let x := xs.get! 0 let xs := xs.extract 1 xs.size let y ← mkLambdaFVars xs y @@ -555,7 +559,7 @@ partial def proveMatchIsValid (k_var kk_var : Expr) (me : MatchInfo) : MetaM Exp -- binders might come from the match, and some of the binders might come -- from the fact that the expression in the match is a lambda expression: -- we use the branchesNumParams field for this reason - lambdaLetTelescope br fun xs br => do + lambdaTelescope br fun xs br => do let numParams := me.branchesNumParams.get! idx let xs_beg := xs.extract 0 numParams let xs_end := xs.extract numParams xs.size @@ -622,7 +626,7 @@ partial def proveSingleBodyIsValid let env ← getEnv let body := (env.constants.find! name).value! trace[Diverge.def.valid] "body: {body}" - lambdaLetTelescope body fun xs body => do + lambdaTelescope body fun xs body => do assert! xs.size = 2 let kk_var := xs.get! 0 let x_var := xs.get! 1 @@ -695,8 +699,10 @@ def proveMutRecIsValid let bodiesValid ← bodies.mapIdxM fun idx body => do let preDef := preDefs.get! 
idx + trace[Diverge.def.valid] "## Proving that the body {body} is valid" proveSingleBodyIsValid k_var preDef body -- Then prove that the mut rec body is valid + trace[Diverge.def.valid] "## Proving that the 'Funs' body is valid" let isValid ← proveFunsBodyIsValid inOutTys bodyFuns k_var bodiesValid -- Save the theorem let thmTy ← mkAppM ``FixI.is_valid #[mutRecBodyConst] @@ -724,7 +730,7 @@ def mkDeclareFixDefs (mutRecBody : Expr) (preDefs : Array PreDefinition) : TermElabM (Array Name) := do let grSize := preDefs.size let defs ← preDefs.mapIdxM fun idx preDef => do - lambdaLetTelescope preDef.value fun xs _ => do + lambdaTelescope preDef.value fun xs _ => do -- Create the index let idx ← mkFinVal grSize idx.val -- Group the inputs into a dependent tuple @@ -755,7 +761,7 @@ partial def proveUnfoldingThms (isValidThm : Expr) (preDefs : Array PreDefinitio let preDef := preDefs.get! i let defName := decls.get! i -- Retrieve the arguments - lambdaLetTelescope preDef.value fun xs body => do + lambdaTelescope preDef.value fun xs body => do trace[Diverge.def.unfold] "proveUnfoldingThms: xs: {xs}" trace[Diverge.def.unfold] "proveUnfoldingThms: body: {body}" -- The theorem statement @@ -799,7 +805,7 @@ partial def proveUnfoldingThms (isValidThm : Expr) (preDefs : Array PreDefinitio def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do let msg := toMessageData <| preDefs.map fun pd => (pd.declName, pd.levelParams, pd.type, pd.value) - trace[Diverge.def] ("divRecursion: defs: " ++ msg) + trace[Diverge.def] ("divRecursion: defs:\n" ++ msg) -- TODO: what is this? for preDef in preDefs do @@ -880,8 +886,11 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do -- Replace the recursive calls in all the function bodies by calls to the -- continuation `k` and and generate for those bodies declarations + trace[Diverge.def] "# Generating the unary bodies" let bodies ← mkDeclareUnaryBodies grLvlParams kk_var preDefs + trace[Diverge.def] "Unary bodies (after decl): {bodies}" -- Generate the mutually recursive body + trace[Diverge.def] "# Generating the mut rec body" let (bodyFuns, mutRecBody) ← mkDeclareMutRecBody grName grLvlParams kk_var i_var in_ty out_ty inOutTys.toList bodies trace[Diverge.def] "mut rec body (after decl): {mutRecBody}" @@ -889,15 +898,18 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do -- our fixed-point let k_var_ty ← mkAppM ``FixI.k_ty #[i_var_ty, in_ty, out_ty] withLocalDeclD (mkAnonymous "k" 3) k_var_ty fun k_var => do + trace[Diverge.def] "# Proving that the mut rec body is valid" let isValidThm ← proveMutRecIsValid grName grLvlParams inOutTysExpr bodyFuns mutRecBody k_var preDefs bodies -- Generate the final definitions + trace[Diverge.def] "# Generating the final definitions" let decls ← mkDeclareFixDefs mutRecBody preDefs -- Prove the unfolding theorems + trace[Diverge.def] "# Proving the unfolding theorems" proveUnfoldingThms isValidThm preDefs decls - -- Process the definitions - TODO + -- Generating code -- TODO addAndCompilePartialRec preDefs -- The following function is copy&pasted from Lean.Elab.PreDefinition.Main @@ -1064,13 +1076,32 @@ namespace Tests -- Testing dependent branching and let-bindings -- TODO: why the linter warning? 
- divergent def is_non_zero (i : Int) : Result Bool := + divergent def isNonZero (i : Int) : Result Bool := if _h:i = 0 then return false else let b := true return b - #check is_non_zero.unfold + #check isNonZero.unfold + + -- Testing let-bindings + divergent def iInBounds {a : Type} (ls : List a) (i : Int) : Result Bool := + let i0 := ls.length + if i < i0 + then Result.ret True + else Result.ret False + + #check iInBounds.unfold + + divergent def isCons + {a : Type} (ls : List a) : Result Bool := + let ls1 := ls + match ls1 with + | [] => Result.ret False + | x :: tl => Result.ret True + + #check isCons.unfold + end Tests end Diverge -- cgit v1.2.3 From 442caaf62e4a217b9a10116c4e529c49f83c4efd Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 4 Jul 2023 22:45:02 +0200 Subject: Fix an issue with mkSigmasVal --- backends/lean/Base/Diverge/Elab.lean | 228 +++++++++++++++++++------------ backends/lean/Base/Diverge/ElabBase.lean | 47 ++++--- backends/lean/lake-manifest.json | 2 +- 3 files changed, 170 insertions(+), 107 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index 4b08fe44..1af06fea 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -26,38 +26,42 @@ def mkInOutTy (x y : Expr) : MetaM Expr := mkAppM ``FixI.mk_in_out_ty #[x, y] -- Return the `a` in `Return a` -def get_result_ty (ty : Expr) : MetaM Expr := +def getResultTy (ty : Expr) : MetaM Expr := ty.withApp fun f args => do if ¬ f.isConstOf ``Result ∨ args.size ≠ 1 then - throwError "Invalid argument to get_result_ty: {ty}" + throwError "Invalid argument to getResultTy: {ty}" else pure (args.get! 0) -/- Group a list of expressions into a dependent tuple. +/- Deconstruct a sigma type. - Example: - xl = [`a : Type`, `ls : List a`] - returns: - `⟨ (a:Type), (ls: List a) ⟩` + For instance, deconstructs `(a : Type) × List a` into + `Type` and `λ a => List a`. -/ -def mkSigmasVal (xl : List Expr) : MetaM Expr := - match xl with - | [] => do - trace[Diverge.def.sigmas] "mkSigmasVal: []" - pure (Expr.const ``PUnit.unit []) - | [x] => do - trace[Diverge.def.sigmas] "mkSigmasVal: [{x}]" - pure x - | fst :: xl => do - trace[Diverge.def.sigmas] "mkSigmasVal: [{fst}::{xl}]" - let alpha ← Lean.Meta.inferType fst - let snd ← mkSigmasVal xl - let snd_ty ← inferType snd - let beta ← mkLambdaFVars #[fst] snd_ty - trace[Diverge.def.sigmas] "mkSigmasVal:\n{alpha}\n{beta}\n{fst}\n{snd}" - mkAppOptM ``Sigma.mk #[some alpha, some beta, some fst, some snd] - -/- Generate a Sigma type from a list of expressions. +def getSigmaTypes (ty : Expr) : MetaM (Expr × Expr) := do + ty.withApp fun f args => do + if ¬ f.isConstOf ``Sigma ∨ args.size ≠ 2 then + throwError "Invalid argument to getSigmaTypes: {ty}" + else + pure (args.get! 0, args.get! 1) + +/- Like `lambdaTelescopeN` but only destructs a fixed number of lambdas -/ +def lambdaTelescopeN (e : Expr) (n : Nat) (k : Array Expr → Expr → MetaM α) : MetaM α := + lambdaTelescope e fun xs body => do + if xs.size < n then throwError "lambdaTelescopeN: not enough lambdas"; + let xs := xs.extract 0 n + let ys := xs.extract n xs.size + let body ← mkLambdaFVars ys body + k xs body + +/- Like `lambdaTelescope`, but only destructs one lambda + TODO: is there an equivalent of this function somewhere in the + standard library? -/ +def lambdaOne (e : Expr) (k : Expr → Expr → MetaM α) : MetaM α := + lambdaTelescopeN e 1 λ xs b => k (xs.get! 
0) b + +/- Generate a Sigma type from a list of *variables* (all the expressions + must be variables). Example: - xl = [(a:Type), (ls:List a), (i:Int)] @@ -84,6 +88,53 @@ def mkSigmasType (xl : List Expr) : MetaM Expr := trace[Diverge.def.sigmas] "mkSigmasOfTypes: ({alpha}) ({beta})" mkAppOptM ``Sigma #[some alpha, some beta] +/- Apply a lambda expression to some arguments, simplifying the lambdas -/ +def applyLambdaToArgs (e : Expr) (xs : Array Expr) : MetaM Expr := do + lambdaTelescopeN e xs.size fun vars body => + -- Create the substitution + let s : HashMap FVarId Expr := HashMap.ofList (List.zip (vars.toList.map Expr.fvarId!) xs.toList) + -- Substitute in the body + pure (body.replace fun e => + match e with + | Expr.fvar fvarId => match s.find? fvarId with + | none => e + | some v => v + | _ => none) + +/- Group a list of expressions into a dependent tuple. + + Example: + xl = [`a : Type`, `ls : List a`] + returns: + `⟨ (a:Type), (ls: List a) ⟩` + + We need the type argument because as the elements in the tuple are + "concrete", we can't in all generality figure out the type of the tuple. + + Example: + `⟨ True, 3 ⟩ : (x : Bool) × (if x then Int else Unit)` + -/ +def mkSigmasVal (ty : Expr) (xl : List Expr) : MetaM Expr := + match xl with + | [] => do + trace[Diverge.def.sigmas] "mkSigmasVal: []" + pure (Expr.const ``PUnit.unit []) + | [x] => do + trace[Diverge.def.sigmas] "mkSigmasVal: [{x}]" + pure x + | fst :: xl => do + trace[Diverge.def.sigmas] "mkSigmasVal: [{fst}::{xl}]" + -- Deconstruct the type + let (alpha, beta) ← getSigmaTypes ty + -- Compute the "second" field + -- Specialize beta for fst + let nty ← applyLambdaToArgs beta #[fst] + -- Recursive call + let snd ← mkSigmasVal nty xl + -- Put everything together + trace[Diverge.def.sigmas] "mkSigmasVal:\n{alpha}\n{beta}\n{fst}\n{snd}" + mkAppOptM ``Sigma.mk #[some alpha, some beta, some fst, some snd] + def mkAnonymous (s : String) (i : Nat) : Name := .num (.str .anonymous s) i @@ -208,52 +259,57 @@ def mkFinVal (n i : Nat) : MetaM Expr := do We return the new declarations. -/ def mkDeclareUnaryBodies (grLvlParams : List Name) (kk_var : Expr) - (preDefs : Array PreDefinition) : + (inOutTys : Array (Expr × Expr)) (preDefs : Array PreDefinition) : MetaM (Array Expr) := do let grSize := preDefs.size - -- Compute the map from name to index - the continuation has an indexed type: - -- we use the index (a finite number of type `Fin`) to control which function - -- we call at the recursive call site. - let nameToId : HashMap Name Nat := - let namesIds := preDefs.mapIdx (fun i d => (d.declName, i.val)) - HashMap.ofList namesIds.toList + -- Compute the map from name to (index × input type). + -- Remark: the continuation has an indexed type; we use the index (a finite number of + -- type `Fin`) to control which function we call at the recursive call site. + let nameToInfo : HashMap Name (Nat × Expr) := + let bl := preDefs.mapIdx fun i d => (d.declName, (i.val, (inOutTys.get! i.val).fst)) + HashMap.ofList bl.toList - trace[Diverge.def.genBody] "nameToId: {nameToId.toList}" + trace[Diverge.def.genBody] "nameToId: {nameToInfo.toList}" -- Auxiliary function to explore the function bodies and replace the -- recursive calls - let visit_e (e : Expr) : MetaM Expr := do - trace[Diverge.def.genBody] "visiting expression: {e}" - match e with - | .app .. => do - e.withApp fun f args => do - trace[Diverge.def.genBody] "this is an app: {f} {args}" - -- Check if this is a recursive call - if f.isConst then - let name := f.constName! 
- match nameToId.find? name with - | none => pure e - | some id => - -- This is a recursive call: replace it - -- Compute the index - let i ← mkFinVal grSize id - -- Put the arguments in one big dependent tuple - let args ← mkSigmasVal args.toList - mkAppM' kk_var #[i, args] - else - -- Not a recursive call: do nothing - pure e - | .const name _ => - -- Sanity check: we eliminated all the recursive calls - if (nameToId.find? name).isSome then - throwError "mkUnaryBodies: a recursive call was not eliminated" - else pure e - | _ => pure e + let visit_e (i : Nat) (e : Expr) : MetaM Expr := do + trace[Diverge.def.genBody] "visiting expression (dept: {i}): {e}" + let ne ← do + match e with + | .app .. => do + e.withApp fun f args => do + trace[Diverge.def.genBody] "this is an app: {f} {args}" + -- Check if this is a recursive call + if f.isConst then + let name := f.constName! + match nameToInfo.find? name with + | none => pure e + | some (id, in_ty) => + trace[Diverge.def.genBody] "this is a recursive call" + -- This is a recursive call: replace it + -- Compute the index + let i ← mkFinVal grSize id + -- Put the arguments in one big dependent tuple + let args ← mkSigmasVal in_ty args.toList + mkAppM' kk_var #[i, args] + else + -- Not a recursive call: do nothing + pure e + | .const name _ => + -- Sanity check: we eliminated all the recursive calls + if (nameToInfo.find? name).isSome then + throwError "mkUnaryBodies: a recursive call was not eliminated" + else pure e + | _ => pure e + trace[Diverge.def.genBody] "done with expression (depth: {i}): {e}" + pure ne -- Explore the bodies preDefs.mapM fun preDef => do -- Replace the recursive calls + trace[Diverge.def.genBody] "About to replace recursive calls in {preDef.declName}" let body ← mapVisit visit_e preDef.value trace[Diverge.def.genBody] "Body after replacement of the recursive calls: {body}" @@ -413,11 +469,8 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do let proveBranchValid (br : Expr) : MetaM Expr := if isIte then proveExprIsValid k_var kk_var br else do - -- There is a lambda -- TODO: how do we remove exacly *one* lambda? - lambdaTelescope br fun xs br => do - let x := xs.get! 0 - let xs := xs.extract 1 xs.size - let br ← mkLambdaFVars xs br + -- There is a lambda + lambdaOne br fun x br => do let brValid ← proveExprIsValid k_var kk_var br mkLambdaFVars #[x] brValid let br0Valid ← proveBranchValid br0 @@ -521,11 +574,8 @@ partial def proveExprIsValid (k_var kk_var : Expr) (e : Expr) : MetaM Expr := do let xValid ← proveExprIsValid k_var kk_var x trace[Diverge.def.valid] "bind: xValid:\n{xValid}:\n{← inferType xValid}" let yValid ← do - -- This is a lambda expression -- TODO: how do we remove exacly *one* lambda? - lambdaTelescope y fun xs y => do - let x := xs.get! 0 - let xs := xs.extract 1 xs.size - let y ← mkLambdaFVars xs y + -- This is a lambda expression + lambdaOne y fun x y => do trace[Diverge.def.valid] "bind: y: {y}" let yValid ← proveExprIsValid k_var kk_var y trace[Diverge.def.valid] "bind: yValid (no forall): {yValid}" @@ -559,15 +609,12 @@ partial def proveMatchIsValid (k_var kk_var : Expr) (me : MatchInfo) : MetaM Exp -- binders might come from the match, and some of the binders might come -- from the fact that the expression in the match is a lambda expression: -- we use the branchesNumParams field for this reason - lambdaTelescope br fun xs br => do let numParams := me.branchesNumParams.get! 
idx - let xs_beg := xs.extract 0 numParams - let xs_end := xs.extract numParams xs.size - let br ← mkLambdaFVars xs_end br + lambdaTelescopeN br numParams fun xs br => do -- Prove that the branch expression is valid let brValid ← proveExprIsValid k_var kk_var br -- Reconstruct the lambda expression - mkLambdaFVars xs_beg brValid + mkLambdaFVars xs brValid trace[Diverge.def.valid] "branchesValid:\n{branchesValid}" -- Compute the motive, which has the following shape: -- ``` @@ -726,15 +773,17 @@ def proveMutRecIsValid -- def is_even (i : Int) : Result Bool := mut_rec_body 0 i -- def is_odd (i : Int) : Result Bool := mut_rec_body 1 i -- ``` -def mkDeclareFixDefs (mutRecBody : Expr) (preDefs : Array PreDefinition) : +def mkDeclareFixDefs (mutRecBody : Expr) (inOutTys : Array (Expr × Expr)) (preDefs : Array PreDefinition) : TermElabM (Array Name) := do let grSize := preDefs.size let defs ← preDefs.mapIdxM fun idx preDef => do lambdaTelescope preDef.value fun xs _ => do + -- Retrieve the input type + let in_ty := (inOutTys.get! idx.val).fst -- Create the index let idx ← mkFinVal grSize idx.val -- Group the inputs into a dependent tuple - let input ← mkSigmasVal xs.toList + let input ← mkSigmasVal in_ty xs.toList -- Apply the fixed point let fixedBody ← mkAppM ``FixI.fix #[mutRecBody, idx, input] let fixedBody ← mkLambdaFVars xs fixedBody @@ -754,8 +803,8 @@ def mkDeclareFixDefs (mutRecBody : Expr) (preDefs : Array PreDefinition) : pure defs -- Prove the equations that we will use as unfolding theorems -partial def proveUnfoldingThms (isValidThm : Expr) (preDefs : Array PreDefinition) - (decls : Array Name) : MetaM Unit := do +partial def proveUnfoldingThms (isValidThm : Expr) (inOutTys : Array (Expr × Expr)) + (preDefs : Array PreDefinition) (decls : Array Name) : MetaM Unit := do let grSize := preDefs.size let proveIdx (i : Nat) : MetaM Unit := do let preDef := preDefs.get! i @@ -779,7 +828,7 @@ partial def proveUnfoldingThms (isValidThm : Expr) (preDefs : Array PreDefinitio let idx ← mkFinVal grSize i let proof ← mkAppM ``congr_fun #[proof, idx] -- Add the input argument - let arg ← mkSigmasVal xs.toList + let arg ← mkSigmasVal (inOutTys.get! 
i).fst xs.toList let proof ← mkAppM ``congr_fun #[proof, arg] -- Abstract the arguments away let proof ← mkLambdaFVars xs proof @@ -833,7 +882,7 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do forallTelescope preDef.type (fun in_tys out_ty => do let in_ty ← liftM (mkSigmasType in_tys.toList) -- Retrieve the type in the "Result" - let out_ty ← get_result_ty out_ty + let out_ty ← getResultTy out_ty let out_ty ← liftM (mkSigmasMatch in_tys.toList out_ty) pure (in_ty, out_ty) ) @@ -886,8 +935,8 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do -- Replace the recursive calls in all the function bodies by calls to the -- continuation `k` and and generate for those bodies declarations - trace[Diverge.def] "# Generating the unary bodies" - let bodies ← mkDeclareUnaryBodies grLvlParams kk_var preDefs + trace[Diverge.def] "# Generating the unary bodies" + let bodies ← mkDeclareUnaryBodies grLvlParams kk_var inOutTys preDefs trace[Diverge.def] "Unary bodies (after decl): {bodies}" -- Generate the mutually recursive body trace[Diverge.def] "# Generating the mut rec body" @@ -903,11 +952,11 @@ def divRecursion (preDefs : Array PreDefinition) : TermElabM Unit := do -- Generate the final definitions trace[Diverge.def] "# Generating the final definitions" - let decls ← mkDeclareFixDefs mutRecBody preDefs + let decls ← mkDeclareFixDefs mutRecBody inOutTys preDefs -- Prove the unfolding theorems trace[Diverge.def] "# Proving the unfolding theorems" - proveUnfoldingThms isValidThm preDefs decls + proveUnfoldingThms isValidThm inOutTys preDefs decls -- Generating code -- TODO addAndCompilePartialRec preDefs @@ -1102,6 +1151,15 @@ namespace Tests #check isCons.unfold + -- Testing what happens when we use concrete arguments in dependent tuples + divergent def test1 + (_ : Option Bool) (_ : Unit) : + Result Unit + := + test1 Option.none () + + #check test1.unfold + end Tests end Diverge diff --git a/backends/lean/Base/Diverge/ElabBase.lean b/backends/lean/Base/Diverge/ElabBase.lean index 1c1062c0..aaaea6f7 100644 --- a/backends/lean/Base/Diverge/ElabBase.lean +++ b/backends/lean/Base/Diverge/ElabBase.lean @@ -83,7 +83,10 @@ print_decl test1 print_decl test2 -- A map visitor function for expressions (adapted from `AbstractNestedProofs.visit`) -partial def mapVisit (k : Expr → MetaM Expr) (e : Expr) : MetaM Expr := do +-- The continuation takes as parameters: +-- - the current depth of the expression (useful for printing/debugging) +-- - the expression to explore +partial def mapVisit (k : Nat → Expr → MetaM Expr) (e : Expr) : MetaM Expr := do let mapVisitBinders (xs : Array Expr) (k2 : MetaM Expr) : MetaM Expr := do let localInstances ← getLocalInstances let mut lctx ← getLCtx @@ -98,25 +101,27 @@ partial def mapVisit (k : Expr → MetaM Expr) (e : Expr) : MetaM Expr := do lctx :=lctx.modifyLocalDecl xFVarId fun _ => localDecl withLCtx lctx localInstances k2 -- TODO: use a cache? (Lean.checkCache) - -- Explore - let e ← k e - match e with - | .bvar _ - | .fvar _ - | .mvar _ - | .sort _ - | .lit _ - | .const _ _ => pure e - | .app .. => do e.withApp fun f args => return mkAppN f (← args.mapM (mapVisit k)) - | .lam .. => - lambdaLetTelescope e fun xs b => - mapVisitBinders xs do mkLambdaFVars xs (← mapVisit k b) (usedLetOnly := false) - | .forallE .. => do - forallTelescope e fun xs b => mapVisitBinders xs do mkForallFVars xs (← mapVisit k b) - | .letE .. 
=> do - lambdaLetTelescope e fun xs b => mapVisitBinders xs do - mkLambdaFVars xs (← mapVisit k b) (usedLetOnly := false) - | .mdata _ b => return e.updateMData! (← mapVisit k b) - | .proj _ _ b => return e.updateProj! (← mapVisit k b) + let rec visit (i : Nat) (e : Expr) : MetaM Expr := do + -- Explore + let e ← k i e + match e with + | .bvar _ + | .fvar _ + | .mvar _ + | .sort _ + | .lit _ + | .const _ _ => pure e + | .app .. => do e.withApp fun f args => return mkAppN f (← args.mapM (visit (i + 1))) + | .lam .. => + lambdaLetTelescope e fun xs b => + mapVisitBinders xs do mkLambdaFVars xs (← visit (i + 1) b) (usedLetOnly := false) + | .forallE .. => do + forallTelescope e fun xs b => mapVisitBinders xs do mkForallFVars xs (← visit (i + 1) b) + | .letE .. => do + lambdaLetTelescope e fun xs b => mapVisitBinders xs do + mkLambdaFVars xs (← visit (i + 1) b) (usedLetOnly := false) + | .mdata _ b => return e.updateMData! (← visit (i + 1) b) + | .proj _ _ b => return e.updateProj! (← visit (i + 1) b) + visit 0 e end Diverge diff --git a/backends/lean/lake-manifest.json b/backends/lean/lake-manifest.json index 40eb1682..f4759ad3 100644 --- a/backends/lean/lake-manifest.json +++ b/backends/lean/lake-manifest.json @@ -10,7 +10,7 @@ {"git": {"url": "https://github.com/leanprover-community/mathlib4.git", "subDir?": null, - "rev": "4f103b3696795c62e76fb89d177efb91c29afdf5", + "rev": "cc5d11f24e1b92db65ec3389bb5142f4b2d7670e", "name": "mathlib", "inputRev?": null}}, {"git": -- cgit v1.2.3 From 5ca36bfc50083a01af2b7ae5f75993a520757ef5 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 5 Jul 2023 15:17:58 +0200 Subject: Simplify the names used in Primitives.lean --- backends/lean/Base/Primitives.lean | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index 117f76a2..808c1461 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -564,15 +564,17 @@ macro_rules def Vec (α : Type u) := { l : List α // List.length l ≤ Usize.max } -def vec_new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ +def Vec.new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ -def vec_len (α : Type u) (v : Vec α) : Usize := +def Vec.len (α : Type u) (v : Vec α) : Usize := let ⟨ v, l ⟩ := v Usize.ofIntCore (List.length v) (by simp [Scalar.min]) l -def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () +-- This shouldn't be used +def Vec.push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () -def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α) +-- This is actually the backward function +def Vec.push (α : Type u) (v : Vec α) (x : α) : Result (Vec α) := let nlen := List.length v.val + 1 if h : nlen ≤ U32.max || nlen ≤ Usize.max then @@ -588,13 +590,15 @@ def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α) else fail maximumSizeExceeded -def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := +-- This shouldn't be used +def Vec.insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := if i.val < List.length v.val then .ret () else .fail arrayOutOfBounds -def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := +-- This is actually the backward function +def Vec.insert (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := if i.val < List.length v.val then -- TODO: maybe we should redefine a list library 
which uses integers -- (instead of natural numbers) @@ -607,7 +611,7 @@ def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α else .fail arrayOutOfBounds -def vec_index_to_fin {α : Type u} {v: Vec α} {i: Usize} (h : i.val < List.length v.val) : +def Vec.index_to_fin {α : Type u} {v: Vec α} {i: Usize} (h : i.val < List.length v.val) : Fin (List.length v.val) := let j := i.val.toNat let h: j < List.length v.val := by @@ -616,29 +620,30 @@ def vec_index_to_fin {α : Type u} {v: Vec α} {i: Usize} (h : i.val < List.leng assumption ⟨j, h⟩ -def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α := +def Vec.index (α : Type u) (v: Vec α) (i: Usize): Result α := if h: i.val < List.length v.val then - let i := vec_index_to_fin h + let i := Vec.index_to_fin h .ret (List.get v.val i) else .fail arrayOutOfBounds -def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := +-- This shouldn't be used +def Vec.index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := if i.val < List.length v.val then .ret () else .fail arrayOutOfBounds -def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α := +def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize): Result α := if h: i.val < List.length v.val then - let i := vec_index_to_fin h + let i := Vec.index_to_fin h .ret (List.get v.val i) else .fail arrayOutOfBounds -def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := +def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := if h: i.val < List.length v.val then - let i := vec_index_to_fin h + let i := Vec.index_to_fin h .ret ⟨ List.set v.val i x, by have h: List.length v.val ≤ Usize.max := v.property simp [*] at * @@ -651,8 +656,8 @@ def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec -- MISC -- ---------- -@[simp] def mem_replace_fwd (a : Type) (x : a) (_ : a) : a := x -@[simp] def mem_replace_back (a : Type) (_ : a) (y : a) : a := y +@[simp] def mem.replace_fwd (a : Type) (x : a) (_ : a) : a := x +@[simp] def mem.replace_back (a : Type) (_ : a) (y : a) : a := y /-- Aeneas-translated function -- useful to reduce non-recursive definitions. Use with `simp [ aeneas ]` -/ -- cgit v1.2.3 From 7c95800cefc87fad894f8bf855cfc047e713b3a7 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 6 Jul 2023 12:20:28 +0200 Subject: Improve the generated comments --- backends/lean/Base/Primitives.lean | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'backends') diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index 808c1461..14f5971e 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -656,7 +656,7 @@ def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec -- MISC -- ---------- -@[simp] def mem.replace_fwd (a : Type) (x : a) (_ : a) : a := x +@[simp] def mem.replace (a : Type) (x : a) (_ : a) : a := x @[simp] def mem.replace_back (a : Type) (_ : a) (y : a) : a := y /-- Aeneas-translated function -- useful to reduce non-recursive definitions. 
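A quick illustration of the renamed vector API above (Vec.new, Vec.len, Vec.push, Vec.index, Vec.index_mut_back): the sketch below is client code written against the definitions shown in this patch, not part of the patch series itself. It assumes `open Primitives` and the `Result` constructors (`.ret`, `.fail`) used in the diffs above; `getOrDefault` is a hypothetical helper name chosen for the example, not a library function.

open Primitives

-- Hypothetical helper: read element `i` of `v`, falling back to `x` whenever
-- `Vec.index` does not return `.ret` (e.g. it fails with `arrayOutOfBounds`).
def getOrDefault {α : Type} (v : Vec α) (i : Usize) (x : α) : α :=
  match Vec.index α v i with
  | .ret y => y
  | _      => x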
-- cgit v1.2.3 From 2496a08691809683e256af7c479588a2fae8e3d7 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 6 Jul 2023 14:23:21 +0200 Subject: Register the unfolding theorems in the Lean equation compilers and solve a "unused variable" warning --- backends/lean/Base/Diverge/Elab.lean | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index 1af06fea..e5b39440 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -843,6 +843,8 @@ partial def proveUnfoldingThms (isValidThm : Expr) (inOutTys : Array (Expr × Ex all := [name] } addDecl decl + -- Add the unfolding theorem to the equation compiler + eqnsAttribute.add preDef.declName #[name] trace[Diverge.def.unfold] "proveUnfoldingThms: added thm: {name}:\n{thmTy}" let rec prove (i : Nat) : MetaM Unit := do if i = preDefs.size then pure () @@ -1011,6 +1013,13 @@ def Term.elabMutualDef (vars : Array Expr) (views : Array DefView) : TermElabM U withFunLocalDecls headers fun funFVars => do for view in views, funFVar in funFVars do addLocalVarInfo view.declId funFVar + -- Add fake use site to prevent "unused variable" warning (if the + -- function is actually not recursive, Lean would print this warning). + -- Remark: we could detect this case and encode the function without + -- using the fixed-point. In practice it shouldn't happen however: + -- we define non-recursive functions with the `divergent` keyword + -- only for testing purposes. + addTermInfo' view.declId funFVar let values ← try let values ← elabFunValues headers @@ -1091,7 +1100,8 @@ namespace Tests . intro i hpos h; simp at h; linarith . rename_i hd tl ih intro i hpos h - rw [list_nth.unfold]; simp + -- We can directly use `rw [list_nth]`! + rw [list_nth]; simp split <;> simp [*] . tauto . -- TODO: we shouldn't have to do that @@ -1147,7 +1157,7 @@ namespace Tests let ls1 := ls match ls1 with | [] => Result.ret False - | x :: tl => Result.ret True + | _ :: _ => Result.ret True #check isCons.unfold -- cgit v1.2.3 From 9515bbad5b58ed1c51ac6d9fc9d7a4e5884b6273 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 6 Jul 2023 15:23:53 +0200 Subject: Reorganize a bit the lean backend files --- backends/lean/Base/Diverge/Elab.lean | 2 + backends/lean/Base/Utils.lean | 119 +++++++++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+) create mode 100644 backends/lean/Base/Utils.lean (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index e5b39440..96f7abc0 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -2,6 +2,7 @@ import Lean import Lean.Meta.Tactic.Simp import Init.Data.List.Basic import Mathlib.Tactic.RunCmd +import Base.Utils import Base.Diverge.Base import Base.Diverge.ElabBase @@ -13,6 +14,7 @@ namespace Diverge syntax (name := divergentDef) declModifiers "divergent" "def" declId ppIndent(optDeclSig) declVal : command +open Utils open Lean Elab Term Meta Primitives Lean.Meta /- The following was copied from the `wfRecursion` function. -/ diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean new file mode 100644 index 00000000..161b9ddb --- /dev/null +++ b/backends/lean/Base/Utils.lean @@ -0,0 +1,119 @@ +import Lean + +namespace Utils + +open Lean Elab Term Meta + +-- Useful helper to explore definitions and figure out the variant +-- of their sub-expressions. 
+def explore_term (incr : String) (e : Expr) : MetaM Unit := + match e with + | .bvar _ => do logInfo m!"{incr}bvar: {e}"; return () + | .fvar _ => do logInfo m!"{incr}fvar: {e}"; return () + | .mvar _ => do logInfo m!"{incr}mvar: {e}"; return () + | .sort _ => do logInfo m!"{incr}sort: {e}"; return () + | .const _ _ => do logInfo m!"{incr}const: {e}"; return () + | .app fn arg => do + logInfo m!"{incr}app: {e}" + explore_term (incr ++ " ") fn + explore_term (incr ++ " ") arg + | .lam _bName bTy body _binfo => do + logInfo m!"{incr}lam: {e}" + explore_term (incr ++ " ") bTy + explore_term (incr ++ " ") body + | .forallE _bName bTy body _bInfo => do + logInfo m!"{incr}forallE: {e}" + explore_term (incr ++ " ") bTy + explore_term (incr ++ " ") body + | .letE _dName ty val body _nonDep => do + logInfo m!"{incr}letE: {e}" + explore_term (incr ++ " ") ty + explore_term (incr ++ " ") val + explore_term (incr ++ " ") body + | .lit _ => do logInfo m!"{incr}lit: {e}"; return () + | .mdata _ e => do + logInfo m!"{incr}mdata: {e}" + explore_term (incr ++ " ") e + | .proj _ _ struct => do + logInfo m!"{incr}proj: {e}" + explore_term (incr ++ " ") struct + +def explore_decl (n : Name) : TermElabM Unit := do + logInfo m!"Name: {n}" + let env ← getEnv + let decl := env.constants.find! n + match decl with + | .defnInfo val => + logInfo m!"About to explore defn: {decl.name}" + logInfo m!"# Type:" + explore_term "" val.type + logInfo m!"# Value:" + explore_term "" val.value + | .axiomInfo _ => throwError m!"axiom: {n}" + | .thmInfo _ => throwError m!"thm: {n}" + | .opaqueInfo _ => throwError m!"opaque: {n}" + | .quotInfo _ => throwError m!"quot: {n}" + | .inductInfo _ => throwError m!"induct: {n}" + | .ctorInfo _ => throwError m!"ctor: {n}" + | .recInfo _ => throwError m!"rec: {n}" + +syntax (name := printDecl) "print_decl " ident : command + +open Lean.Elab.Command + +@[command_elab printDecl] def elabPrintDecl : CommandElab := fun stx => do + liftTermElabM do + let id := stx[1] + addCompletionInfo <| CompletionInfo.id id id.getId (danglingDot := false) {} none + let cs ← resolveGlobalConstWithInfos id + explore_decl cs[0]! + +private def test1 : Nat := 0 +private def test2 (x : Nat) : Nat := x + +print_decl test1 +print_decl test2 + +-- A map visitor function for expressions (adapted from `AbstractNestedProofs.visit`) +-- The continuation takes as parameters: +-- - the current depth of the expression (useful for printing/debugging) +-- - the expression to explore +partial def mapVisit (k : Nat → Expr → MetaM Expr) (e : Expr) : MetaM Expr := do + let mapVisitBinders (xs : Array Expr) (k2 : MetaM Expr) : MetaM Expr := do + let localInstances ← getLocalInstances + let mut lctx ← getLCtx + for x in xs do + let xFVarId := x.fvarId! + let localDecl ← xFVarId.getDecl + let type ← mapVisit k localDecl.type + let localDecl := localDecl.setType type + let localDecl ← match localDecl.value? with + | some value => let value ← mapVisit k value; pure <| localDecl.setValue value + | none => pure localDecl + lctx :=lctx.modifyLocalDecl xFVarId fun _ => localDecl + withLCtx lctx localInstances k2 + -- TODO: use a cache? (Lean.checkCache) + let rec visit (i : Nat) (e : Expr) : MetaM Expr := do + -- Explore + let e ← k i e + match e with + | .bvar _ + | .fvar _ + | .mvar _ + | .sort _ + | .lit _ + | .const _ _ => pure e + | .app .. => do e.withApp fun f args => return mkAppN f (← args.mapM (visit (i + 1))) + | .lam .. 
=> + lambdaLetTelescope e fun xs b => + mapVisitBinders xs do mkLambdaFVars xs (← visit (i + 1) b) (usedLetOnly := false) + | .forallE .. => do + forallTelescope e fun xs b => mapVisitBinders xs do mkForallFVars xs (← visit (i + 1) b) + | .letE .. => do + lambdaLetTelescope e fun xs b => mapVisitBinders xs do + mkLambdaFVars xs (← visit (i + 1) b) (usedLetOnly := false) + | .mdata _ b => return e.updateMData! (← visit (i + 1) b) + | .proj _ _ b => return e.updateProj! (← visit (i + 1) b) + visit 0 e + +end Utils -- cgit v1.2.3 From 0d1ac53f88f947ae94f6afb57b2a7e18a77460a7 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Sun, 9 Jul 2023 10:11:13 +0200 Subject: Make progress on the int tactic --- backends/lean/Base/Arith.lean | 413 +++++++++++++++++++------------ backends/lean/Base/ArithBase.lean | 10 + backends/lean/Base/Diverge/ElabBase.lean | 112 --------- 3 files changed, 267 insertions(+), 268 deletions(-) create mode 100644 backends/lean/Base/ArithBase.lean (limited to 'backends') diff --git a/backends/lean/Base/Arith.lean b/backends/lean/Base/Arith.lean index bb776b55..df48a6a2 100644 --- a/backends/lean/Base/Arith.lean +++ b/backends/lean/Base/Arith.lean @@ -8,6 +8,7 @@ import Mathlib.Tactic.Linarith -- TODO: there is no Omega tactic for now - it seems it hasn't been ported yet --import Mathlib.Tactic.Omega import Base.Primitives +import Base.ArithBase /- Mathlib tactics: @@ -53,9 +54,24 @@ namespace Arith open Primitives ---set_option pp.explicit true ---set_option pp.notation false ---set_option pp.coercions false +-- TODO: move? +theorem ne_zero_is_lt_or_gt {x : Int} (hne : x ≠ 0) : x < 0 ∨ x > 0 := by + cases h: x <;> simp_all + . rename_i n; + cases n <;> simp_all + . apply Int.negSucc_lt_zero + +-- TODO: move? +theorem ne_is_lt_or_gt {x y : Int} (hne : x ≠ y) : x < y ∨ x > y := by + have hne : x - y ≠ 0 := by + simp + intro h + have: x = y := by linarith + simp_all + have h := ne_zero_is_lt_or_gt hne + match h with + | .inl _ => left; linarith + | .inr _ => right; linarith -- TODO: move instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val @@ -71,17 +87,9 @@ instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val -/ def Scalar.toInt {ty : ScalarTy} (x : Scalar ty) : Int := x.val --- We use this type-class to test if an expression is a scalar (if we manage --- to lookup an instance of this type-class, then it is) -class IsScalar (a : Type) where - -instance (ty : ScalarTy) : IsScalar (Scalar ty) where - -example (ty : ScalarTy) : IsScalar (Scalar ty) := inferInstance - -- Remark: I tried a version of the shape `HasProp {a : Type} (x : a)` -- but the lookup didn't work -class HasProp (a : Type) where +class HasProp (a : Sort u) where prop_ty : a → Prop prop : ∀ x:a, prop_ty x @@ -93,73 +101,50 @@ instance (a : Type) : HasProp (Vec a) where prop_ty := λ v => v.val.length ≤ Scalar.max ScalarTy.Usize prop := λ ⟨ _, l ⟩ => l -open Lean Lean.Elab Command Term Lean.Meta - --- Return true if the expression is a scalar expression -def isScalarExpr (e : Expr) : MetaM Bool := do - -- Try to convert the expression to a scalar - -- TODO: I tried to do it with Lean.Meta.mkAppM but it didn't work: how - -- do we allow Lean to perform (controlled) unfoldings for instantiation - -- purposes? - let r ← Lean.observing? 
do - let ty ← Lean.Meta.inferType e - let isScalar ← mkAppM `Arith.IsScalar #[ty] - let isScalar ← trySynthInstance isScalar - match isScalar with - | LOption.some x => some x - | _ => none - match r with - | .some _ => pure true - | _ => pure false +class PropHasImp (x : Prop) where + concl : Prop + prop : x → concl --- Return an instance of `HasProp` for `e` if it has some -def lookupHasProp (e : Expr) : MetaM (Option Expr) := do - logInfo f!"lookupHasProp" - -- TODO: do we need Lean.observing? - -- This actually eliminates the error messages - Lean.observing? do - logInfo f!"lookupHasProp: observing" - let ty ← Lean.Meta.inferType e - let hasProp ← mkAppM ``Arith.HasProp #[ty] - let hasPropInst ← trySynthInstance hasProp - match hasPropInst with - | LOption.some i => - logInfo m!"Found HasProp instance" - let i_prop ← mkProjection i `prop - some (← mkAppM' i_prop #[e]) - | _ => none +-- This also works for `x ≠ y` because this expression reduces to `¬ x = y` +-- and `Ne` is marked as `reducible` +instance (x y : Int) : PropHasImp (¬ x = y) where + concl := x < y ∨ x > y + prop := λ (h:x ≠ y) => ne_is_lt_or_gt h --- Auxiliary function for `collectHasPropInstances` -private partial def collectHasPropInstancesAux (hs : HashSet Expr) (e : Expr) : MetaM (HashSet Expr) := do - -- We do it in a very simpler manner: we deconstruct applications, - -- and recursively explore the sub-expressions. Note that we do - -- not go inside foralls and abstractions (should we?). - e.withApp fun f args => do - let hasPropInst ← lookupHasProp f - let hs := Option.getD (hasPropInst.map hs.insert) hs - let hs ← args.foldlM collectHasPropInstancesAux hs - pure hs +open Lean Lean.Elab Command Term Lean.Meta --- Explore a term and return the instances of `HasProp` found for the sub-expressions -def collectHasPropInstances (e : Expr) : MetaM (HashSet Expr) := - collectHasPropInstancesAux HashSet.empty e +-- Small utility: print all the declarations in the context +elab "print_all_decls" : tactic => do + let ctx ← Lean.MonadLCtx.getLCtx + for decl in ← ctx.getDecls do + let ty ← Lean.Meta.inferType decl.toExpr + logInfo m!"{decl.toExpr} : {ty}" + pure () --- Explore a term and return the set of scalar expressions found inside -partial def collectScalarExprsAux (hs : HashSet Expr) (e : Expr) : MetaM (HashSet Expr) := do +-- Explore a term by decomposing the applications (we explore the applied +-- functions and their arguments, but ignore lambdas, forall, etc. - +-- should we go inside?). +partial def foldTermApps (k : α → Expr → MetaM α) (s : α) (e : Expr) : MetaM α := do -- We do it in a very simpler manner: we deconstruct applications, -- and recursively explore the sub-expressions. Note that we do -- not go inside foralls and abstractions (should we?). e.withApp fun f args => do - let hs ← if ← isScalarExpr f then pure (hs.insert f) else pure hs - let hs ← args.foldlM collectScalarExprsAux hs - pure hs - --- Explore a term and return the list of scalar expressions found inside -def collectScalarExprs (e : Expr) : MetaM (HashSet Expr) := - collectScalarExprsAux HashSet.empty e - --- Collect the scalar expressions in the context -def getScalarExprsFromMainCtx : Tactic.TacticM (HashSet Expr) := do + let s ← k s f + args.foldlM (foldTermApps k) s + +-- Provided a function `k` which lookups type class instances on an expression, +-- collect all the instances lookuped by applying `k` on the sub-expressions of `e`. 
+def collectInstances + (k : Expr → MetaM (Option Expr)) (s : HashSet Expr) (e : Expr) : MetaM (HashSet Expr) := do + let k s e := do + match ← k e with + | none => pure s + | some i => pure (s.insert i) + foldTermApps k s e + +-- Similar to `collectInstances`, but explores all the local declarations in the +-- main context. +def collectInstancesFromMainCtx (k : Expr → MetaM (Option Expr)) : Tactic.TacticM (HashSet Expr) := do Lean.Elab.Tactic.withMainContext do -- Get the local context let ctx ← Lean.MonadLCtx.getLCtx @@ -169,40 +154,37 @@ def getScalarExprsFromMainCtx : Tactic.TacticM (HashSet Expr) := do let hs := HashSet.empty -- Explore the declarations let decls ← ctx.getDecls - let hs ← decls.foldlM (fun hs d => collectScalarExprsAux hs d.toExpr) hs - -- Return - pure hs + decls.foldlM (fun hs d => collectInstances k hs d.toExpr) hs + +-- Return an instance of `HasProp` for `e` if it has some +def lookupHasProp (e : Expr) : MetaM (Option Expr) := do + trace[Arith] "lookupHasProp" + -- TODO: do we need Lean.observing? + -- This actually eliminates the error messages + Lean.observing? do + trace[Arith] "lookupHasProp: observing" + let ty ← Lean.Meta.inferType e + let hasProp ← mkAppM ``HasProp #[ty] + let hasPropInst ← trySynthInstance hasProp + match hasPropInst with + | LOption.some i => + trace[Arith] "Found HasProp instance" + let i_prop ← mkProjection i (Name.mkSimple "prop") + some (← mkAppM' i_prop #[e]) + | _ => none -- Collect the instances of `HasProp` for the subexpressions in the context -def getHasPropInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do - Lean.Elab.Tactic.withMainContext do - -- Get the local context - let ctx ← Lean.MonadLCtx.getLCtx - -- Just a matter of precaution - let ctx ← instantiateLCtxMVars ctx - -- Initialize the hashset - let hs := HashSet.empty - -- Explore the declarations - let decls ← ctx.getDecls - let hs ← decls.foldlM (fun hs d => collectHasPropInstancesAux hs d.toExpr) hs - -- Return - pure hs +def collectHasPropInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do + collectInstancesFromMainCtx lookupHasProp -elab "list_scalar_exprs" : tactic => do - logInfo m!"Listing scalar expressions" - let hs ← getScalarExprsFromMainCtx +elab "display_has_prop_instances" : tactic => do + trace[Arith] "Displaying the HasProp instances" + let hs ← collectHasPropInstancesFromMainCtx hs.forM fun e => do - logInfo m!"+ Scalar expression: {e}" + trace[Arith] "+ HasProp instance: {e}" -example (x y : U32) (z : Usize) : True := by - list_scalar_exprs - simp -elab "display_has_prop_instances" : tactic => do - logInfo m!"Displaying the HasProp instances" - let hs ← getHasPropInstancesFromMainCtx - hs.forM fun e => do - logInfo m!"+ HasProp instance: {e}" +set_option trace.Arith true example (x : U32) : True := by let i : HasProp U32 := inferInstance @@ -211,34 +193,45 @@ example (x : U32) : True := by display_has_prop_instances simp -elab "list_local_decls_1" : tactic => - Lean.Elab.Tactic.withMainContext do - -- Get the local context - let ctx ← Lean.MonadLCtx.getLCtx - let ctx ← instantiateLCtxMVars ctx - let decls ← ctx.getDecls - -- Filter the scalar expressions - let decls ← decls.filterMapM fun decl: Lean.LocalDecl => do - let declExpr := decl.toExpr - let declName := decl.userName - let declType ← Lean.Meta.inferType declExpr - dbg_trace f!"+ local decl: name: {declName} | expr: {declExpr} | ty: {declType}" - -- Try to convert the expression to a scalar - -- TODO: I tried to do it with Lean.Meta.mkAppM but it didn't work: how - -- do we 
allow Lean to perform (controlled) unfoldings for instantiation - -- purposes? - let r ← Lean.observing? do - let isScalar ← mkAppM `Arith.IsScalar #[declType] - let isScalar ← trySynthInstance isScalar - match isScalar with - | LOption.some x => some x - | _ => none - match r with - | .some _ => dbg_trace f!" Scalar expression"; pure r - | _ => dbg_trace f!" Not a scalar"; pure .none - pure () +set_option trace.Arith false -def evalAddDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool := false) : Tactic.TacticM Unit := +-- Return an instance of `PropHasImp` for `e` if it has some +def lookupPropHasImp (e : Expr) : MetaM (Option Expr) := do + trace[Arith] "lookupPropHasImp" + -- TODO: do we need Lean.observing? + -- This actually eliminates the error messages + Lean.observing? do + trace[Arith] "lookupPropHasImp: observing" + let ty ← Lean.Meta.inferType e + trace[Arith] "lookupPropHasImp: ty: {ty}" + let cl ← mkAppM ``PropHasImp #[ty] + let inst ← trySynthInstance cl + match inst with + | LOption.some i => + trace[Arith] "Found PropHasImp instance" + let i_prop ← mkProjection i (Name.mkSimple "prop") + some (← mkAppM' i_prop #[e]) + | _ => none + +-- Collect the instances of `PropHasImp` for the subexpressions in the context +def collectPropHasImpInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do + collectInstancesFromMainCtx lookupPropHasImp + +elab "display_prop_has_imp_instances" : tactic => do + trace[Arith] "Displaying the PropHasImp instances" + let hs ← collectPropHasImpInstancesFromMainCtx + hs.forM fun e => do + trace[Arith] "+ PropHasImp instance: {e}" + +set_option trace.Arith true + +example (x y : Int) (h0 : x ≠ y) (h1 : ¬ x = y) : True := by + display_prop_has_imp_instances + simp + +set_option trace.Arith false + +def addDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool) (k : Expr → Tactic.TacticM Unit) : Tactic.TacticM Unit := -- I don't think we need that Lean.Elab.Tactic.withMainContext do -- Insert the new declaration @@ -248,8 +241,7 @@ def evalAddDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool := false) let lctx ← Lean.MonadLCtx.getLCtx let fid := nval.fvarId! let decl := lctx.get! fid - -- Remark: logInfo instantiates the mvars (contrary to dbg_trace): - logInfo m!" new decl: \"{decl.userName}\" ({nval}) : {decl.type} := {decl.value}" + trace[Arith] " new decl: \"{decl.userName}\" ({nval}) : {decl.type} := {decl.value}" -- -- Tranform the main goal `?m0` to `let x = nval in ?m1` let mvarId ← Tactic.getMainGoal @@ -260,17 +252,14 @@ def evalAddDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool := false) -- - asLet is false: ewVal is `λ $name => $newMVar` -- We need to apply it to `val` let newVal := if asLet then newVal else mkAppN newVal #[val] - -- Focus on the current goal - Tactic.focus do - -- Assign the main goal. - -- We must do this *after* we focused on the current goal, because - -- after we assigned the meta variable the goal is considered as solved + -- Assign the main goal and update the current goal mvarId.assign newVal - -- Replace the list of goals with the new goal - we can do this because - -- we focused on the current goal - Lean.Elab.Tactic.setGoals [newMVar.mvarId!] + let goals ← Tactic.getUnsolvedGoals + Lean.Elab.Tactic.setGoals (newMVar.mvarId! 
:: goals) + -- Call the continuation + k nval -def evalAddDeclSyntax (name : Name) (val : Syntax) (asLet : Bool := false) : Tactic.TacticM Unit := +def addDeclSyntax (name : Name) (val : Syntax) (asLet : Bool) (k : Expr → Tactic.TacticM Unit) : Tactic.TacticM Unit := -- I don't think we need that Lean.Elab.Tactic.withMainContext do -- @@ -281,13 +270,13 @@ def evalAddDeclSyntax (name : Name) (val : Syntax) (asLet : Bool := false) : Tac -- not choose): we force the instantiation of the meta-variable synthesizeSyntheticMVarsUsingDefault -- - evalAddDecl name val type asLet + addDecl name val type asLet k elab "custom_let " n:ident " := " v:term : tactic => - evalAddDeclSyntax n.getId v (asLet := true) + addDeclSyntax n.getId v (asLet := true) (λ _ => pure ()) elab "custom_have " n:ident " := " v:term : tactic => - evalAddDeclSyntax n.getId v (asLet := false) + addDeclSyntax n.getId v (asLet := false) (λ _ => pure ()) example : Nat := by custom_let x := 4 @@ -297,26 +286,32 @@ example : Nat := by example (x : Bool) : Nat := by cases x <;> custom_let x := 3 <;> apply x --- Lookup the instances of `HasProp for all the sub-expressions in the context, --- and introduce the corresponding assumptions -elab "intro_has_prop_instances" : tactic => do - logInfo m!"Introducing the HasProp instances" - let hs ← getHasPropInstancesFromMainCtx +-- Lookup instances in a context and introduce them with additional declarations. +def introInstances (declToUnfold : Name) (lookup : Expr → MetaM (Option Expr)) (k : Expr → Tactic.TacticM Unit) : Tactic.TacticM Unit := do + let hs ← collectInstancesFromMainCtx lookup hs.forM fun e => do let type ← inferType e - let name := `h - evalAddDecl name e type (asLet := false) - -- Simplify to unfold the `prop_ty` projector + let name ← mkFreshUserName `h + -- Add a declaration + addDecl name e type (asLet := false) λ nval => do + -- Simplify to unfold the declaration to unfold (i.e., the projector) let simpTheorems ← Tactic.simpOnlyBuiltins.foldlM (·.addConst ·) ({} : SimpTheorems) - -- Add the equational theorem for `HashProp'.prop_ty` - let simpTheorems ← simpTheorems.addDeclToUnfold ``HasProp.prop_ty + -- Add the equational theorem for the decl to unfold + let simpTheorems ← simpTheorems.addDeclToUnfold declToUnfold let congrTheorems ← getSimpCongrTheorems let ctx : Simp.Context := { simpTheorems := #[simpTheorems], congrTheorems } -- Where to apply the simplifier let loc := Tactic.Location.targets #[mkIdent name] false -- Apply the simplifier let _ ← Tactic.simpLocation ctx (discharge? 
:= .none) loc - pure () + -- Call the continuation + k nval + +-- Lookup the instances of `HasProp for all the sub-expressions in the context, +-- and introduce the corresponding assumptions +elab "intro_has_prop_instances" : tactic => do + trace[Arith] "Introducing the HasProp instances" + introInstances ``HasProp.prop_ty lookupHasProp (fun _ => pure ()) example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by intro_has_prop_instances @@ -326,23 +321,129 @@ example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by intro_has_prop_instances simp_all [Scalar.max, Scalar.min] --- A tactic to solve linear arithmetic goals -syntax "int_tac" : tactic +-- Tactic to split on a disjunction +def splitDisj (h : Expr) : Tactic.TacticM Unit := do + trace[Arith] "assumption on which to split: {h}" + -- Retrieve the main goal + Lean.Elab.Tactic.withMainContext do + let goalType ← Lean.Elab.Tactic.getMainTarget + -- Case disjunction + let hTy ← inferType h + hTy.withApp fun f xs => do + trace[Arith] "as app: {f} {xs}" + -- Sanity check + if ¬ (f.isConstOf ``Or ∧ xs.size = 2) then throwError "Invalid argument to splitDisj" + let a := xs.get! 0 + let b := xs.get! 1 + -- Introduce the new goals + -- Returns: + -- - the match branch + -- - a fresh new mvar id + let mkGoal (hTy : Expr) (nGoalName : String) : MetaM (Expr × MVarId) := do + -- Introduce a variable for the assumption (`a` or `b`) + let asmName ← mkFreshUserName `h + withLocalDeclD asmName hTy fun var => do + -- The new goal + let mgoal ← mkFreshExprSyntheticOpaqueMVar goalType (tag := Name.mkSimple nGoalName) + -- The branch expression + let branch ← mkLambdaFVars #[var] mgoal + pure (branch, mgoal.mvarId!) + let (inl, mleft) ← mkGoal a "left" + let (inr, mright) ← mkGoal b "right" + trace[Arith] "left: {inl}: {mleft}" + trace[Arith] "right: {inr}: {mright}" + -- Create the match expression + withLocalDeclD (← mkFreshUserName `h) hTy fun hVar => do + let motive ← mkLambdaFVars #[hVar] goalType + let casesExpr ← mkAppOptM ``Or.casesOn #[a, b, motive, h, inl, inr] + let mgoal ← Tactic.getMainGoal + trace[Arith] "goals: {← Tactic.getUnsolvedGoals}" + trace[Arith] "main goal: {mgoal}" + mgoal.assign casesExpr + let goals ← Tactic.getUnsolvedGoals + Tactic.setGoals (mleft :: mright :: goals) + trace[Arith] "new goals: {← Tactic.getUnsolvedGoals}" + +elab "split_disj " n:ident : tactic => do + Lean.Elab.Tactic.withMainContext do + let decl ← Lean.Meta.getLocalDeclFromUserName n.getId + let fvar := mkFVar decl.fvarId + splitDisj fvar + +example (x y : Int) (h0 : x ≤ y ∨ x ≥ y) : x ≤ y ∨ x ≥ y := by + split_disj h0 + . left; assumption + . right; assumption + +-- Lookup the instances of `PropHasImp for all the sub-expressions in the context, +-- and introduce the corresponding assumptions +elab "intro_prop_has_imp_instances" : tactic => do + trace[Arith] "Introducing the PropHasImp instances" + introInstances ``PropHasImp.concl lookupPropHasImp (fun _ => pure ()) + +example (x y : Int) (h0 : x ≤ y) (h1 : x ≠ y) : x < y := by + intro_prop_has_imp_instances + rename_i h + split_disj h + . linarith + . linarith + +syntax "int_tac_preprocess" : tactic + +/- Boosting a bit the linarith tac. + + We do the following: + - for all the assumptions of the shape `(x : Int) ≠ y` or `¬ (x = y), we + introduce two goals with the assumptions `x < y` and `x > y` + TODO: we could create a PR for mathlib. 
+ -/ +def intTacPreprocess : Tactic.TacticM Unit := do + Lean.Elab.Tactic.withMainContext do + -- Get the local context + let ctx ← Lean.MonadLCtx.getLCtx + -- Just a matter of precaution + let ctx ← instantiateLCtxMVars ctx + -- Explore the declarations - Remark: we don't explore the subexpressions + -- (we could, but it is rarely necessary, because terms of the shape `x ≠ y` + -- are often introduced because of branchings in the code, and using `split` + -- introduces exactly the assumptions `x = y` and `x ≠ y`). + let decls ← ctx.getDecls + for decl in decls do + let ty ← Lean.Meta.inferType decl.toExpr + ty.withApp fun f args => do + trace[Arith] "decl: {f} {args}" + -- Check if this is an inequality between integers + pure () + +-- TODO: int_tac + +elab "int_tac_preprocess" : tactic => + intTacPreprocess + +example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by + int_tac_preprocess + simp_all + int_tac_preprocess + +-- A tactic to solve linear arithmetic goals in the presence of scalars +syntax "scalar_tac" : tactic macro_rules - | `(tactic| int_tac) => + | `(tactic| scalar_tac) => `(tactic| intro_has_prop_instances; have := Scalar.cMin_bound ScalarTy.Usize; have := Scalar.cMin_bound ScalarTy.Isize; have := Scalar.cMax_bound ScalarTy.Usize; have := Scalar.cMax_bound ScalarTy.Isize; + -- TODO: not too sure about that simp only [*, Scalar.max, Scalar.min, Scalar.cMin, Scalar.cMax] at *; + -- TODO: use int_tac linarith) example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by - int_tac + scalar_tac example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by - int_tac + scalar_tac end Arith diff --git a/backends/lean/Base/ArithBase.lean b/backends/lean/Base/ArithBase.lean new file mode 100644 index 00000000..ddd2dc24 --- /dev/null +++ b/backends/lean/Base/ArithBase.lean @@ -0,0 +1,10 @@ +import Lean + +namespace Arith + +open Lean Elab Term Meta + +-- We can't define and use trace classes in the same file +initialize registerTraceClass `Arith + +end Arith diff --git a/backends/lean/Base/Diverge/ElabBase.lean b/backends/lean/Base/Diverge/ElabBase.lean index aaaea6f7..fedb1c74 100644 --- a/backends/lean/Base/Diverge/ElabBase.lean +++ b/backends/lean/Base/Diverge/ElabBase.lean @@ -12,116 +12,4 @@ initialize registerTraceClass `Diverge.def.genBody initialize registerTraceClass `Diverge.def.valid initialize registerTraceClass `Diverge.def.unfold --- Useful helper to explore definitions and figure out the variant --- of their sub-expressions. 
-def explore_term (incr : String) (e : Expr) : MetaM Unit := - match e with - | .bvar _ => do logInfo m!"{incr}bvar: {e}"; return () - | .fvar _ => do logInfo m!"{incr}fvar: {e}"; return () - | .mvar _ => do logInfo m!"{incr}mvar: {e}"; return () - | .sort _ => do logInfo m!"{incr}sort: {e}"; return () - | .const _ _ => do logInfo m!"{incr}const: {e}"; return () - | .app fn arg => do - logInfo m!"{incr}app: {e}" - explore_term (incr ++ " ") fn - explore_term (incr ++ " ") arg - | .lam _bName bTy body _binfo => do - logInfo m!"{incr}lam: {e}" - explore_term (incr ++ " ") bTy - explore_term (incr ++ " ") body - | .forallE _bName bTy body _bInfo => do - logInfo m!"{incr}forallE: {e}" - explore_term (incr ++ " ") bTy - explore_term (incr ++ " ") body - | .letE _dName ty val body _nonDep => do - logInfo m!"{incr}letE: {e}" - explore_term (incr ++ " ") ty - explore_term (incr ++ " ") val - explore_term (incr ++ " ") body - | .lit _ => do logInfo m!"{incr}lit: {e}"; return () - | .mdata _ e => do - logInfo m!"{incr}mdata: {e}" - explore_term (incr ++ " ") e - | .proj _ _ struct => do - logInfo m!"{incr}proj: {e}" - explore_term (incr ++ " ") struct - -def explore_decl (n : Name) : TermElabM Unit := do - logInfo m!"Name: {n}" - let env ← getEnv - let decl := env.constants.find! n - match decl with - | .defnInfo val => - logInfo m!"About to explore defn: {decl.name}" - logInfo m!"# Type:" - explore_term "" val.type - logInfo m!"# Value:" - explore_term "" val.value - | .axiomInfo _ => throwError m!"axiom: {n}" - | .thmInfo _ => throwError m!"thm: {n}" - | .opaqueInfo _ => throwError m!"opaque: {n}" - | .quotInfo _ => throwError m!"quot: {n}" - | .inductInfo _ => throwError m!"induct: {n}" - | .ctorInfo _ => throwError m!"ctor: {n}" - | .recInfo _ => throwError m!"rec: {n}" - -syntax (name := printDecl) "print_decl " ident : command - -open Lean.Elab.Command - -@[command_elab printDecl] def elabPrintDecl : CommandElab := fun stx => do - liftTermElabM do - let id := stx[1] - addCompletionInfo <| CompletionInfo.id id id.getId (danglingDot := false) {} none - let cs ← resolveGlobalConstWithInfos id - explore_decl cs[0]! - -private def test1 : Nat := 0 -private def test2 (x : Nat) : Nat := x - -print_decl test1 -print_decl test2 - --- A map visitor function for expressions (adapted from `AbstractNestedProofs.visit`) --- The continuation takes as parameters: --- - the current depth of the expression (useful for printing/debugging) --- - the expression to explore -partial def mapVisit (k : Nat → Expr → MetaM Expr) (e : Expr) : MetaM Expr := do - let mapVisitBinders (xs : Array Expr) (k2 : MetaM Expr) : MetaM Expr := do - let localInstances ← getLocalInstances - let mut lctx ← getLCtx - for x in xs do - let xFVarId := x.fvarId! - let localDecl ← xFVarId.getDecl - let type ← mapVisit k localDecl.type - let localDecl := localDecl.setType type - let localDecl ← match localDecl.value? with - | some value => let value ← mapVisit k value; pure <| localDecl.setValue value - | none => pure localDecl - lctx :=lctx.modifyLocalDecl xFVarId fun _ => localDecl - withLCtx lctx localInstances k2 - -- TODO: use a cache? (Lean.checkCache) - let rec visit (i : Nat) (e : Expr) : MetaM Expr := do - -- Explore - let e ← k i e - match e with - | .bvar _ - | .fvar _ - | .mvar _ - | .sort _ - | .lit _ - | .const _ _ => pure e - | .app .. => do e.withApp fun f args => return mkAppN f (← args.mapM (visit (i + 1))) - | .lam .. 
=> - lambdaLetTelescope e fun xs b => - mapVisitBinders xs do mkLambdaFVars xs (← visit (i + 1) b) (usedLetOnly := false) - | .forallE .. => do - forallTelescope e fun xs b => mapVisitBinders xs do mkForallFVars xs (← visit (i + 1) b) - | .letE .. => do - lambdaLetTelescope e fun xs b => mapVisitBinders xs do - mkLambdaFVars xs (← visit (i + 1) b) (usedLetOnly := false) - | .mdata _ b => return e.updateMData! (← visit (i + 1) b) - | .proj _ _ b => return e.updateProj! (← visit (i + 1) b) - visit 0 e - end Diverge -- cgit v1.2.3 From 1c251c13b1e6698f3c7c974ea88c2c8a28777cc1 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Sun, 9 Jul 2023 10:50:50 +0200 Subject: Implement a first working version of int_tac --- backends/lean/Base/Arith.lean | 96 ++++++++++++++++++++++++++----------------- 1 file changed, 58 insertions(+), 38 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith.lean b/backends/lean/Base/Arith.lean index df48a6a2..8cdf75a3 100644 --- a/backends/lean/Base/Arith.lean +++ b/backends/lean/Base/Arith.lean @@ -231,7 +231,7 @@ example (x y : Int) (h0 : x ≠ y) (h1 : ¬ x = y) : True := by set_option trace.Arith false -def addDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool) (k : Expr → Tactic.TacticM Unit) : Tactic.TacticM Unit := +def addDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool) : Tactic.TacticM Expr := -- I don't think we need that Lean.Elab.Tactic.withMainContext do -- Insert the new declaration @@ -256,10 +256,11 @@ def addDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool) (k : Expr mvarId.assign newVal let goals ← Tactic.getUnsolvedGoals Lean.Elab.Tactic.setGoals (newMVar.mvarId! :: goals) - -- Call the continuation - k nval + -- Return the new value - note: we are in the *new* context, created + -- after the declaration was added, so it will persist + pure nval -def addDeclSyntax (name : Name) (val : Syntax) (asLet : Bool) (k : Expr → Tactic.TacticM Unit) : Tactic.TacticM Unit := +def addDeclSyntax (name : Name) (val : Syntax) (asLet : Bool) : Tactic.TacticM Unit := -- I don't think we need that Lean.Elab.Tactic.withMainContext do -- @@ -270,13 +271,13 @@ def addDeclSyntax (name : Name) (val : Syntax) (asLet : Bool) (k : Expr → Tact -- not choose): we force the instantiation of the meta-variable synthesizeSyntheticMVarsUsingDefault -- - addDecl name val type asLet k + let _ ← addDecl name val type asLet -elab "custom_let " n:ident " := " v:term : tactic => - addDeclSyntax n.getId v (asLet := true) (λ _ => pure ()) +elab "custom_let " n:ident " := " v:term : tactic => do + addDeclSyntax n.getId v (asLet := true) elab "custom_have " n:ident " := " v:term : tactic => - addDeclSyntax n.getId v (asLet := false) (λ _ => pure ()) + addDeclSyntax n.getId v (asLet := false) example : Nat := by custom_let x := 4 @@ -287,13 +288,13 @@ example (x : Bool) : Nat := by cases x <;> custom_let x := 3 <;> apply x -- Lookup instances in a context and introduce them with additional declarations. 
-def introInstances (declToUnfold : Name) (lookup : Expr → MetaM (Option Expr)) (k : Expr → Tactic.TacticM Unit) : Tactic.TacticM Unit := do +def introInstances (declToUnfold : Name) (lookup : Expr → MetaM (Option Expr)) : Tactic.TacticM (Array Expr) := do let hs ← collectInstancesFromMainCtx lookup - hs.forM fun e => do + hs.toArray.mapM fun e => do let type ← inferType e let name ← mkFreshUserName `h -- Add a declaration - addDecl name e type (asLet := false) λ nval => do + let nval ← addDecl name e type (asLet := false) -- Simplify to unfold the declaration to unfold (i.e., the projector) let simpTheorems ← Tactic.simpOnlyBuiltins.foldlM (·.addConst ·) ({} : SimpTheorems) -- Add the equational theorem for the decl to unfold @@ -304,14 +305,14 @@ def introInstances (declToUnfold : Name) (lookup : Expr → MetaM (Option Expr)) let loc := Tactic.Location.targets #[mkIdent name] false -- Apply the simplifier let _ ← Tactic.simpLocation ctx (discharge? := .none) loc - -- Call the continuation - k nval + -- Return the new value + pure nval -- Lookup the instances of `HasProp for all the sub-expressions in the context, -- and introduce the corresponding assumptions elab "intro_has_prop_instances" : tactic => do trace[Arith] "Introducing the HasProp instances" - introInstances ``HasProp.prop_ty lookupHasProp (fun _ => pure ()) + let _ ← introInstances ``HasProp.prop_ty lookupHasProp example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by intro_has_prop_instances @@ -322,7 +323,7 @@ example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by simp_all [Scalar.max, Scalar.min] -- Tactic to split on a disjunction -def splitDisj (h : Expr) : Tactic.TacticM Unit := do +def splitDisj (h : Expr) (kleft kright : Tactic.TacticM Unit) : Tactic.TacticM Unit := do trace[Arith] "assumption on which to split: {h}" -- Retrieve the main goal Lean.Elab.Tactic.withMainContext do @@ -361,14 +362,23 @@ def splitDisj (h : Expr) : Tactic.TacticM Unit := do trace[Arith] "main goal: {mgoal}" mgoal.assign casesExpr let goals ← Tactic.getUnsolvedGoals - Tactic.setGoals (mleft :: mright :: goals) + -- Focus on the left + Tactic.setGoals [mleft] + kleft + let leftGoals ← Tactic.getUnsolvedGoals + -- Focus on the right + Tactic.setGoals [mright] + kright + let rightGoals ← Tactic.getUnsolvedGoals + -- Put all the goals back + Tactic.setGoals (leftGoals ++ rightGoals ++ goals) trace[Arith] "new goals: {← Tactic.getUnsolvedGoals}" elab "split_disj " n:ident : tactic => do Lean.Elab.Tactic.withMainContext do let decl ← Lean.Meta.getLocalDeclFromUserName n.getId let fvar := mkFVar decl.fvarId - splitDisj fvar + splitDisj fvar (fun _ => pure ()) (fun _ => pure ()) example (x y : Int) (h0 : x ≤ y ∨ x ≥ y) : x ≤ y ∨ x ≥ y := by split_disj h0 @@ -379,7 +389,7 @@ example (x y : Int) (h0 : x ≤ y ∨ x ≥ y) : x ≤ y ∨ x ≥ y := by -- and introduce the corresponding assumptions elab "intro_prop_has_imp_instances" : tactic => do trace[Arith] "Introducing the PropHasImp instances" - introInstances ``PropHasImp.concl lookupPropHasImp (fun _ => pure ()) + let _ ← introInstances ``PropHasImp.concl lookupPropHasImp example (x y : Int) (h0 : x ≤ y) (h1 : x ≠ y) : x < y := by intro_prop_has_imp_instances @@ -388,7 +398,7 @@ example (x y : Int) (h0 : x ≤ y) (h1 : x ≠ y) : x < y := by . linarith . linarith -syntax "int_tac_preprocess" : tactic +--syntax "int_tac_preprocess" : tactic /- Boosting a bit the linarith tac. 
@@ -399,31 +409,41 @@ syntax "int_tac_preprocess" : tactic -/ def intTacPreprocess : Tactic.TacticM Unit := do Lean.Elab.Tactic.withMainContext do - -- Get the local context - let ctx ← Lean.MonadLCtx.getLCtx - -- Just a matter of precaution - let ctx ← instantiateLCtxMVars ctx - -- Explore the declarations - Remark: we don't explore the subexpressions - -- (we could, but it is rarely necessary, because terms of the shape `x ≠ y` - -- are often introduced because of branchings in the code, and using `split` - -- introduces exactly the assumptions `x = y` and `x ≠ y`). - let decls ← ctx.getDecls - for decl in decls do - let ty ← Lean.Meta.inferType decl.toExpr - ty.withApp fun f args => do - trace[Arith] "decl: {f} {args}" - -- Check if this is an inequality between integers - pure () - --- TODO: int_tac + -- Lookup the instances of PropHasImp (this is how we detect assumptions + -- of the proper shape), introduce assumptions in the context and split + -- on those + -- TODO: get rid of the assumption that we split + let rec splitOnAsms (asms : List Expr) : Tactic.TacticM Unit := + match asms with + | [] => pure () + | asm :: asms => + let k := splitOnAsms asms + splitDisj asm k k + -- Introduce + let asms ← introInstances ``PropHasImp.concl lookupPropHasImp + -- Split + splitOnAsms asms.toList elab "int_tac_preprocess" : tactic => intTacPreprocess example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by int_tac_preprocess - simp_all - int_tac_preprocess + linarith + linarith + +syntax "int_tac" : tactic +macro_rules + | `(tactic| int_tac) => + `(tactic| + int_tac_preprocess <;> + linarith) + +example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by int_tac + +-- Checking that things append correctly when there are several disjunctions +example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y := by + int_tac_preprocess <;> apply And.intro <;> linarith -- A tactic to solve linear arithmetic goals in the presence of scalars syntax "scalar_tac" : tactic -- cgit v1.2.3 From d9a11b312ef0df13795d9a1982ca1cd2eba0e124 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Sun, 9 Jul 2023 22:27:44 +0200 Subject: Improve int_tac --- backends/lean/Base/Arith.lean | 41 ++++++++++++++++++----------------------- 1 file changed, 18 insertions(+), 23 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith.lean b/backends/lean/Base/Arith.lean index 8cdf75a3..364447ed 100644 --- a/backends/lean/Base/Arith.lean +++ b/backends/lean/Base/Arith.lean @@ -183,9 +183,6 @@ elab "display_has_prop_instances" : tactic => do hs.forM fun e => do trace[Arith] "+ HasProp instance: {e}" - -set_option trace.Arith true - example (x : U32) : True := by let i : HasProp U32 := inferInstance have p := @HasProp.prop _ i x @@ -193,8 +190,6 @@ example (x : U32) : True := by display_has_prop_instances simp -set_option trace.Arith false - -- Return an instance of `PropHasImp` for `e` if it has some def lookupPropHasImp (e : Expr) : MetaM (Option Expr) := do trace[Arith] "lookupPropHasImp" @@ -223,14 +218,10 @@ elab "display_prop_has_imp_instances" : tactic => do hs.forM fun e => do trace[Arith] "+ PropHasImp instance: {e}" -set_option trace.Arith true - -example (x y : Int) (h0 : x ≠ y) (h1 : ¬ x = y) : True := by +example (x y : Int) (_ : x ≠ y) (_ : ¬ x = y) : True := by display_prop_has_imp_instances simp -set_option trace.Arith false - def addDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool) : Tactic.TacticM Expr := -- I don't think we need that Lean.Elab.Tactic.withMainContext do @@ 
-322,12 +313,15 @@ example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by intro_has_prop_instances simp_all [Scalar.max, Scalar.min] --- Tactic to split on a disjunction +-- Tactic to split on a disjunction. +-- The expression `h` should be an fvar. def splitDisj (h : Expr) (kleft kright : Tactic.TacticM Unit) : Tactic.TacticM Unit := do trace[Arith] "assumption on which to split: {h}" -- Retrieve the main goal Lean.Elab.Tactic.withMainContext do let goalType ← Lean.Elab.Tactic.getMainTarget + let hDecl := (← getLCtx).get! h.fvarId! + let hName := hDecl.userName -- Case disjunction let hTy ← inferType h hTy.withApp fun f xs => do @@ -341,14 +335,16 @@ def splitDisj (h : Expr) (kleft kright : Tactic.TacticM Unit) : Tactic.TacticM U -- - the match branch -- - a fresh new mvar id let mkGoal (hTy : Expr) (nGoalName : String) : MetaM (Expr × MVarId) := do - -- Introduce a variable for the assumption (`a` or `b`) - let asmName ← mkFreshUserName `h - withLocalDeclD asmName hTy fun var => do + -- Introduce a variable for the assumption (`a` or `b`). Note that we reuse + -- the name of the assumption we split. + withLocalDeclD hName hTy fun var => do -- The new goal let mgoal ← mkFreshExprSyntheticOpaqueMVar goalType (tag := Name.mkSimple nGoalName) + -- Clear the assumption that we split + let mgoal ← mgoal.mvarId!.tryClearMany #[h.fvarId!] -- The branch expression - let branch ← mkLambdaFVars #[var] mgoal - pure (branch, mgoal.mvarId!) + let branch ← mkLambdaFVars #[var] (mkMVar mgoal) + pure (branch, mgoal) let (inl, mleft) ← mkGoal a "left" let (inr, mright) ← mkGoal b "right" trace[Arith] "left: {inl}: {mleft}" @@ -398,8 +394,6 @@ example (x y : Int) (h0 : x ≤ y) (h1 : x ≠ y) : x < y := by . linarith . linarith ---syntax "int_tac_preprocess" : tactic - /- Boosting a bit the linarith tac. 
We do the following: @@ -412,7 +406,7 @@ def intTacPreprocess : Tactic.TacticM Unit := do -- Lookup the instances of PropHasImp (this is how we detect assumptions -- of the proper shape), introduce assumptions in the context and split -- on those - -- TODO: get rid of the assumption that we split + -- TODO: get rid of the assumptions that we split let rec splitOnAsms (asms : List Expr) : Tactic.TacticM Unit := match asms with | [] => pure () @@ -436,14 +430,16 @@ syntax "int_tac" : tactic macro_rules | `(tactic| int_tac) => `(tactic| + (repeat (apply And.intro)) <;> -- TODO: improve this int_tac_preprocess <;> linarith) -example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by int_tac +example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by + int_tac -- Checking that things append correctly when there are several disjunctions example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y := by - int_tac_preprocess <;> apply And.intro <;> linarith + int_tac -- A tactic to solve linear arithmetic goals in the presence of scalars syntax "scalar_tac" : tactic @@ -457,8 +453,7 @@ macro_rules have := Scalar.cMax_bound ScalarTy.Isize; -- TODO: not too sure about that simp only [*, Scalar.max, Scalar.min, Scalar.cMin, Scalar.cMax] at *; - -- TODO: use int_tac - linarith) + int_tac) example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by scalar_tac -- cgit v1.2.3 From 7206b48a73d6204baea99f4f4675be2518a8f8c2 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 10 Jul 2023 15:06:12 +0200 Subject: Start working on the progress tactic --- backends/lean/Base.lean | 2 + backends/lean/Base/Arith.lean | 464 ----------------------------------- backends/lean/Base/Arith/Arith.lean | 409 ++++++++++++++++++++++++++++++ backends/lean/Base/Arith/Base.lean | 10 + backends/lean/Base/ArithBase.lean | 10 - backends/lean/Base/Diverge/Elab.lean | 17 +- backends/lean/Base/Utils.lean | 95 +++++++ 7 files changed, 517 insertions(+), 490 deletions(-) delete mode 100644 backends/lean/Base/Arith.lean create mode 100644 backends/lean/Base/Arith/Arith.lean create mode 100644 backends/lean/Base/Arith/Base.lean delete mode 100644 backends/lean/Base/ArithBase.lean (limited to 'backends') diff --git a/backends/lean/Base.lean b/backends/lean/Base.lean index 1f8cbc8e..51211704 100644 --- a/backends/lean/Base.lean +++ b/backends/lean/Base.lean @@ -1,3 +1,5 @@ +import Base.Utils import Base.Primitives import Base.Diverge import Base.Arith +import Base.Progress diff --git a/backends/lean/Base/Arith.lean b/backends/lean/Base/Arith.lean deleted file mode 100644 index 364447ed..00000000 --- a/backends/lean/Base/Arith.lean +++ /dev/null @@ -1,464 +0,0 @@ -/- This file contains tactics to solve arithmetic goals -/ - -import Lean -import Lean.Meta.Tactic.Simp -import Init.Data.List.Basic -import Mathlib.Tactic.RunCmd -import Mathlib.Tactic.Linarith --- TODO: there is no Omega tactic for now - it seems it hasn't been ported yet ---import Mathlib.Tactic.Omega -import Base.Primitives -import Base.ArithBase - -/- -Mathlib tactics: -- rcases: https://leanprover-community.github.io/mathlib_docs/tactics.html#rcases -- split_ifs: https://leanprover-community.github.io/mathlib_docs/tactics.html#split_ifs -- norm_num: https://leanprover-community.github.io/mathlib_docs/tactics.html#norm_num -- should we use linarith or omega? 
-- hint: https://leanprover-community.github.io/mathlib_docs/tactics.html#hint -- classical: https://leanprover-community.github.io/mathlib_docs/tactics.html#classical --/ - -namespace List - - -- TODO: I could not find this function?? - @[simp] def flatten {a : Type u} : List (List a) → List a - | [] => [] - | x :: ls => x ++ flatten ls - -end List - -namespace Lean - -namespace LocalContext - - open Lean Lean.Elab Command Term Lean.Meta - - -- Small utility: return the list of declarations in the context, from - -- the last to the first. - def getAllDecls (lctx : Lean.LocalContext) : MetaM (List Lean.LocalDecl) := - lctx.foldrM (fun d ls => do let d ← instantiateLocalDeclMVars d; pure (d :: ls)) [] - - -- Return the list of declarations in the context, but filter the - -- declarations which are considered as implementation details - def getDecls (lctx : Lean.LocalContext) : MetaM (List Lean.LocalDecl) := do - let ls ← lctx.getAllDecls - pure (ls.filter (fun d => not d.isImplementationDetail)) - -end LocalContext - -end Lean - -namespace Arith - -open Primitives - --- TODO: move? -theorem ne_zero_is_lt_or_gt {x : Int} (hne : x ≠ 0) : x < 0 ∨ x > 0 := by - cases h: x <;> simp_all - . rename_i n; - cases n <;> simp_all - . apply Int.negSucc_lt_zero - --- TODO: move? -theorem ne_is_lt_or_gt {x y : Int} (hne : x ≠ y) : x < y ∨ x > y := by - have hne : x - y ≠ 0 := by - simp - intro h - have: x = y := by linarith - simp_all - have h := ne_zero_is_lt_or_gt hne - match h with - | .inl _ => left; linarith - | .inr _ => right; linarith - --- TODO: move -instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val - --- TODO: move -/- Remark: we can't write the following instance because of restrictions about - the type class parameters (`ty` doesn't appear in the return type, which is - forbidden): - - ``` - instance Scalar.cast (ty : ScalarTy) : Coe (Scalar ty) Int where coe := λ v => v.val - ``` - -/ -def Scalar.toInt {ty : ScalarTy} (x : Scalar ty) : Int := x.val - --- Remark: I tried a version of the shape `HasProp {a : Type} (x : a)` --- but the lookup didn't work -class HasProp (a : Sort u) where - prop_ty : a → Prop - prop : ∀ x:a, prop_ty x - -instance (ty : ScalarTy) : HasProp (Scalar ty) where - -- prop_ty is inferred - prop := λ x => And.intro x.hmin x.hmax - -instance (a : Type) : HasProp (Vec a) where - prop_ty := λ v => v.val.length ≤ Scalar.max ScalarTy.Usize - prop := λ ⟨ _, l ⟩ => l - -class PropHasImp (x : Prop) where - concl : Prop - prop : x → concl - --- This also works for `x ≠ y` because this expression reduces to `¬ x = y` --- and `Ne` is marked as `reducible` -instance (x y : Int) : PropHasImp (¬ x = y) where - concl := x < y ∨ x > y - prop := λ (h:x ≠ y) => ne_is_lt_or_gt h - -open Lean Lean.Elab Command Term Lean.Meta - --- Small utility: print all the declarations in the context -elab "print_all_decls" : tactic => do - let ctx ← Lean.MonadLCtx.getLCtx - for decl in ← ctx.getDecls do - let ty ← Lean.Meta.inferType decl.toExpr - logInfo m!"{decl.toExpr} : {ty}" - pure () - --- Explore a term by decomposing the applications (we explore the applied --- functions and their arguments, but ignore lambdas, forall, etc. - --- should we go inside?). -partial def foldTermApps (k : α → Expr → MetaM α) (s : α) (e : Expr) : MetaM α := do - -- We do it in a very simpler manner: we deconstruct applications, - -- and recursively explore the sub-expressions. Note that we do - -- not go inside foralls and abstractions (should we?). 
- e.withApp fun f args => do - let s ← k s f - args.foldlM (foldTermApps k) s - --- Provided a function `k` which lookups type class instances on an expression, --- collect all the instances lookuped by applying `k` on the sub-expressions of `e`. -def collectInstances - (k : Expr → MetaM (Option Expr)) (s : HashSet Expr) (e : Expr) : MetaM (HashSet Expr) := do - let k s e := do - match ← k e with - | none => pure s - | some i => pure (s.insert i) - foldTermApps k s e - --- Similar to `collectInstances`, but explores all the local declarations in the --- main context. -def collectInstancesFromMainCtx (k : Expr → MetaM (Option Expr)) : Tactic.TacticM (HashSet Expr) := do - Lean.Elab.Tactic.withMainContext do - -- Get the local context - let ctx ← Lean.MonadLCtx.getLCtx - -- Just a matter of precaution - let ctx ← instantiateLCtxMVars ctx - -- Initialize the hashset - let hs := HashSet.empty - -- Explore the declarations - let decls ← ctx.getDecls - decls.foldlM (fun hs d => collectInstances k hs d.toExpr) hs - --- Return an instance of `HasProp` for `e` if it has some -def lookupHasProp (e : Expr) : MetaM (Option Expr) := do - trace[Arith] "lookupHasProp" - -- TODO: do we need Lean.observing? - -- This actually eliminates the error messages - Lean.observing? do - trace[Arith] "lookupHasProp: observing" - let ty ← Lean.Meta.inferType e - let hasProp ← mkAppM ``HasProp #[ty] - let hasPropInst ← trySynthInstance hasProp - match hasPropInst with - | LOption.some i => - trace[Arith] "Found HasProp instance" - let i_prop ← mkProjection i (Name.mkSimple "prop") - some (← mkAppM' i_prop #[e]) - | _ => none - --- Collect the instances of `HasProp` for the subexpressions in the context -def collectHasPropInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do - collectInstancesFromMainCtx lookupHasProp - -elab "display_has_prop_instances" : tactic => do - trace[Arith] "Displaying the HasProp instances" - let hs ← collectHasPropInstancesFromMainCtx - hs.forM fun e => do - trace[Arith] "+ HasProp instance: {e}" - -example (x : U32) : True := by - let i : HasProp U32 := inferInstance - have p := @HasProp.prop _ i x - simp only [HasProp.prop_ty] at p - display_has_prop_instances - simp - --- Return an instance of `PropHasImp` for `e` if it has some -def lookupPropHasImp (e : Expr) : MetaM (Option Expr) := do - trace[Arith] "lookupPropHasImp" - -- TODO: do we need Lean.observing? - -- This actually eliminates the error messages - Lean.observing? 
do - trace[Arith] "lookupPropHasImp: observing" - let ty ← Lean.Meta.inferType e - trace[Arith] "lookupPropHasImp: ty: {ty}" - let cl ← mkAppM ``PropHasImp #[ty] - let inst ← trySynthInstance cl - match inst with - | LOption.some i => - trace[Arith] "Found PropHasImp instance" - let i_prop ← mkProjection i (Name.mkSimple "prop") - some (← mkAppM' i_prop #[e]) - | _ => none - --- Collect the instances of `PropHasImp` for the subexpressions in the context -def collectPropHasImpInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do - collectInstancesFromMainCtx lookupPropHasImp - -elab "display_prop_has_imp_instances" : tactic => do - trace[Arith] "Displaying the PropHasImp instances" - let hs ← collectPropHasImpInstancesFromMainCtx - hs.forM fun e => do - trace[Arith] "+ PropHasImp instance: {e}" - -example (x y : Int) (_ : x ≠ y) (_ : ¬ x = y) : True := by - display_prop_has_imp_instances - simp - -def addDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool) : Tactic.TacticM Expr := - -- I don't think we need that - Lean.Elab.Tactic.withMainContext do - -- Insert the new declaration - let withDecl := if asLet then withLetDecl name type val else withLocalDeclD name type - withDecl fun nval => do - -- For debugging - let lctx ← Lean.MonadLCtx.getLCtx - let fid := nval.fvarId! - let decl := lctx.get! fid - trace[Arith] " new decl: \"{decl.userName}\" ({nval}) : {decl.type} := {decl.value}" - -- - -- Tranform the main goal `?m0` to `let x = nval in ?m1` - let mvarId ← Tactic.getMainGoal - let newMVar ← mkFreshExprSyntheticOpaqueMVar (← mvarId.getType) - let newVal ← mkLetFVars #[nval] newMVar - -- There are two cases: - -- - asLet is true: newVal is `let $name := $val in $newMVar` - -- - asLet is false: ewVal is `λ $name => $newMVar` - -- We need to apply it to `val` - let newVal := if asLet then newVal else mkAppN newVal #[val] - -- Assign the main goal and update the current goal - mvarId.assign newVal - let goals ← Tactic.getUnsolvedGoals - Lean.Elab.Tactic.setGoals (newMVar.mvarId! :: goals) - -- Return the new value - note: we are in the *new* context, created - -- after the declaration was added, so it will persist - pure nval - -def addDeclSyntax (name : Name) (val : Syntax) (asLet : Bool) : Tactic.TacticM Unit := - -- I don't think we need that - Lean.Elab.Tactic.withMainContext do - -- - let val ← elabTerm val .none - let type ← inferType val - -- In some situations, the type will be left as a metavariable (for instance, - -- if the term is `3`, Lean has the choice between `Nat` and `Int` and will - -- not choose): we force the instantiation of the meta-variable - synthesizeSyntheticMVarsUsingDefault - -- - let _ ← addDecl name val type asLet - -elab "custom_let " n:ident " := " v:term : tactic => do - addDeclSyntax n.getId v (asLet := true) - -elab "custom_have " n:ident " := " v:term : tactic => - addDeclSyntax n.getId v (asLet := false) - -example : Nat := by - custom_let x := 4 - custom_have y := 4 - apply y - -example (x : Bool) : Nat := by - cases x <;> custom_let x := 3 <;> apply x - --- Lookup instances in a context and introduce them with additional declarations. 
-def introInstances (declToUnfold : Name) (lookup : Expr → MetaM (Option Expr)) : Tactic.TacticM (Array Expr) := do - let hs ← collectInstancesFromMainCtx lookup - hs.toArray.mapM fun e => do - let type ← inferType e - let name ← mkFreshUserName `h - -- Add a declaration - let nval ← addDecl name e type (asLet := false) - -- Simplify to unfold the declaration to unfold (i.e., the projector) - let simpTheorems ← Tactic.simpOnlyBuiltins.foldlM (·.addConst ·) ({} : SimpTheorems) - -- Add the equational theorem for the decl to unfold - let simpTheorems ← simpTheorems.addDeclToUnfold declToUnfold - let congrTheorems ← getSimpCongrTheorems - let ctx : Simp.Context := { simpTheorems := #[simpTheorems], congrTheorems } - -- Where to apply the simplifier - let loc := Tactic.Location.targets #[mkIdent name] false - -- Apply the simplifier - let _ ← Tactic.simpLocation ctx (discharge? := .none) loc - -- Return the new value - pure nval - --- Lookup the instances of `HasProp for all the sub-expressions in the context, --- and introduce the corresponding assumptions -elab "intro_has_prop_instances" : tactic => do - trace[Arith] "Introducing the HasProp instances" - let _ ← introInstances ``HasProp.prop_ty lookupHasProp - -example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by - intro_has_prop_instances - simp [*] - -example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by - intro_has_prop_instances - simp_all [Scalar.max, Scalar.min] - --- Tactic to split on a disjunction. --- The expression `h` should be an fvar. -def splitDisj (h : Expr) (kleft kright : Tactic.TacticM Unit) : Tactic.TacticM Unit := do - trace[Arith] "assumption on which to split: {h}" - -- Retrieve the main goal - Lean.Elab.Tactic.withMainContext do - let goalType ← Lean.Elab.Tactic.getMainTarget - let hDecl := (← getLCtx).get! h.fvarId! - let hName := hDecl.userName - -- Case disjunction - let hTy ← inferType h - hTy.withApp fun f xs => do - trace[Arith] "as app: {f} {xs}" - -- Sanity check - if ¬ (f.isConstOf ``Or ∧ xs.size = 2) then throwError "Invalid argument to splitDisj" - let a := xs.get! 0 - let b := xs.get! 1 - -- Introduce the new goals - -- Returns: - -- - the match branch - -- - a fresh new mvar id - let mkGoal (hTy : Expr) (nGoalName : String) : MetaM (Expr × MVarId) := do - -- Introduce a variable for the assumption (`a` or `b`). Note that we reuse - -- the name of the assumption we split. - withLocalDeclD hName hTy fun var => do - -- The new goal - let mgoal ← mkFreshExprSyntheticOpaqueMVar goalType (tag := Name.mkSimple nGoalName) - -- Clear the assumption that we split - let mgoal ← mgoal.mvarId!.tryClearMany #[h.fvarId!] 
- -- The branch expression - let branch ← mkLambdaFVars #[var] (mkMVar mgoal) - pure (branch, mgoal) - let (inl, mleft) ← mkGoal a "left" - let (inr, mright) ← mkGoal b "right" - trace[Arith] "left: {inl}: {mleft}" - trace[Arith] "right: {inr}: {mright}" - -- Create the match expression - withLocalDeclD (← mkFreshUserName `h) hTy fun hVar => do - let motive ← mkLambdaFVars #[hVar] goalType - let casesExpr ← mkAppOptM ``Or.casesOn #[a, b, motive, h, inl, inr] - let mgoal ← Tactic.getMainGoal - trace[Arith] "goals: {← Tactic.getUnsolvedGoals}" - trace[Arith] "main goal: {mgoal}" - mgoal.assign casesExpr - let goals ← Tactic.getUnsolvedGoals - -- Focus on the left - Tactic.setGoals [mleft] - kleft - let leftGoals ← Tactic.getUnsolvedGoals - -- Focus on the right - Tactic.setGoals [mright] - kright - let rightGoals ← Tactic.getUnsolvedGoals - -- Put all the goals back - Tactic.setGoals (leftGoals ++ rightGoals ++ goals) - trace[Arith] "new goals: {← Tactic.getUnsolvedGoals}" - -elab "split_disj " n:ident : tactic => do - Lean.Elab.Tactic.withMainContext do - let decl ← Lean.Meta.getLocalDeclFromUserName n.getId - let fvar := mkFVar decl.fvarId - splitDisj fvar (fun _ => pure ()) (fun _ => pure ()) - -example (x y : Int) (h0 : x ≤ y ∨ x ≥ y) : x ≤ y ∨ x ≥ y := by - split_disj h0 - . left; assumption - . right; assumption - --- Lookup the instances of `PropHasImp for all the sub-expressions in the context, --- and introduce the corresponding assumptions -elab "intro_prop_has_imp_instances" : tactic => do - trace[Arith] "Introducing the PropHasImp instances" - let _ ← introInstances ``PropHasImp.concl lookupPropHasImp - -example (x y : Int) (h0 : x ≤ y) (h1 : x ≠ y) : x < y := by - intro_prop_has_imp_instances - rename_i h - split_disj h - . linarith - . linarith - -/- Boosting a bit the linarith tac. - - We do the following: - - for all the assumptions of the shape `(x : Int) ≠ y` or `¬ (x = y), we - introduce two goals with the assumptions `x < y` and `x > y` - TODO: we could create a PR for mathlib. 
- -/ -def intTacPreprocess : Tactic.TacticM Unit := do - Lean.Elab.Tactic.withMainContext do - -- Lookup the instances of PropHasImp (this is how we detect assumptions - -- of the proper shape), introduce assumptions in the context and split - -- on those - -- TODO: get rid of the assumptions that we split - let rec splitOnAsms (asms : List Expr) : Tactic.TacticM Unit := - match asms with - | [] => pure () - | asm :: asms => - let k := splitOnAsms asms - splitDisj asm k k - -- Introduce - let asms ← introInstances ``PropHasImp.concl lookupPropHasImp - -- Split - splitOnAsms asms.toList - -elab "int_tac_preprocess" : tactic => - intTacPreprocess - -example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by - int_tac_preprocess - linarith - linarith - -syntax "int_tac" : tactic -macro_rules - | `(tactic| int_tac) => - `(tactic| - (repeat (apply And.intro)) <;> -- TODO: improve this - int_tac_preprocess <;> - linarith) - -example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by - int_tac - --- Checking that things append correctly when there are several disjunctions -example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y := by - int_tac - --- A tactic to solve linear arithmetic goals in the presence of scalars -syntax "scalar_tac" : tactic -macro_rules - | `(tactic| scalar_tac) => - `(tactic| - intro_has_prop_instances; - have := Scalar.cMin_bound ScalarTy.Usize; - have := Scalar.cMin_bound ScalarTy.Isize; - have := Scalar.cMax_bound ScalarTy.Usize; - have := Scalar.cMax_bound ScalarTy.Isize; - -- TODO: not too sure about that - simp only [*, Scalar.max, Scalar.min, Scalar.cMin, Scalar.cMax] at *; - int_tac) - -example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by - scalar_tac - -example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by - scalar_tac - -end Arith diff --git a/backends/lean/Base/Arith/Arith.lean b/backends/lean/Base/Arith/Arith.lean new file mode 100644 index 00000000..0ba73d18 --- /dev/null +++ b/backends/lean/Base/Arith/Arith.lean @@ -0,0 +1,409 @@ +/- This file contains tactics to solve arithmetic goals -/ + +import Lean +import Lean.Meta.Tactic.Simp +import Init.Data.List.Basic +import Mathlib.Tactic.RunCmd +import Mathlib.Tactic.Linarith +-- TODO: there is no Omega tactic for now - it seems it hasn't been ported yet +--import Mathlib.Tactic.Omega +import Base.Primitives +import Base.Utils +import Base.Arith.Base + +/- +Mathlib tactics: +- rcases: https://leanprover-community.github.io/mathlib_docs/tactics.html#rcases +- split_ifs: https://leanprover-community.github.io/mathlib_docs/tactics.html#split_ifs +- norm_num: https://leanprover-community.github.io/mathlib_docs/tactics.html#norm_num +- should we use linarith or omega? +- hint: https://leanprover-community.github.io/mathlib_docs/tactics.html#hint +- classical: https://leanprover-community.github.io/mathlib_docs/tactics.html#classical +-/ + +namespace List + + -- TODO: I could not find this function?? + @[simp] def flatten {a : Type u} : List (List a) → List a + | [] => [] + | x :: ls => x ++ flatten ls + +end List + +namespace Lean + +namespace LocalContext + + open Lean Lean.Elab Command Term Lean.Meta + + -- Small utility: return the list of declarations in the context, from + -- the last to the first. 
+ def getAllDecls (lctx : Lean.LocalContext) : MetaM (List Lean.LocalDecl) := + lctx.foldrM (fun d ls => do let d ← instantiateLocalDeclMVars d; pure (d :: ls)) [] + + -- Return the list of declarations in the context, but filter the + -- declarations which are considered as implementation details + def getDecls (lctx : Lean.LocalContext) : MetaM (List Lean.LocalDecl) := do + let ls ← lctx.getAllDecls + pure (ls.filter (fun d => not d.isImplementationDetail)) + +end LocalContext + +end Lean + +namespace Arith + +open Primitives + +-- TODO: move? +theorem ne_zero_is_lt_or_gt {x : Int} (hne : x ≠ 0) : x < 0 ∨ x > 0 := by + cases h: x <;> simp_all + . rename_i n; + cases n <;> simp_all + . apply Int.negSucc_lt_zero + +-- TODO: move? +theorem ne_is_lt_or_gt {x y : Int} (hne : x ≠ y) : x < y ∨ x > y := by + have hne : x - y ≠ 0 := by + simp + intro h + have: x = y := by linarith + simp_all + have h := ne_zero_is_lt_or_gt hne + match h with + | .inl _ => left; linarith + | .inr _ => right; linarith + +-- TODO: move +instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val + +-- TODO: move +/- Remark: we can't write the following instance because of restrictions about + the type class parameters (`ty` doesn't appear in the return type, which is + forbidden): + + ``` + instance Scalar.cast (ty : ScalarTy) : Coe (Scalar ty) Int where coe := λ v => v.val + ``` + -/ +def Scalar.toInt {ty : ScalarTy} (x : Scalar ty) : Int := x.val + +-- Remark: I tried a version of the shape `HasProp {a : Type} (x : a)` +-- but the lookup didn't work +class HasProp (a : Sort u) where + prop_ty : a → Prop + prop : ∀ x:a, prop_ty x + +instance (ty : ScalarTy) : HasProp (Scalar ty) where + -- prop_ty is inferred + prop := λ x => And.intro x.hmin x.hmax + +instance (a : Type) : HasProp (Vec a) where + prop_ty := λ v => v.val.length ≤ Scalar.max ScalarTy.Usize + prop := λ ⟨ _, l ⟩ => l + +class PropHasImp (x : Prop) where + concl : Prop + prop : x → concl + +-- This also works for `x ≠ y` because this expression reduces to `¬ x = y` +-- and `Ne` is marked as `reducible` +instance (x y : Int) : PropHasImp (¬ x = y) where + concl := x < y ∨ x > y + prop := λ (h:x ≠ y) => ne_is_lt_or_gt h + +open Lean Lean.Elab Command Term Lean.Meta + +-- Small utility: print all the declarations in the context +elab "print_all_decls" : tactic => do + let ctx ← Lean.MonadLCtx.getLCtx + for decl in ← ctx.getDecls do + let ty ← Lean.Meta.inferType decl.toExpr + logInfo m!"{decl.toExpr} : {ty}" + pure () + +-- Explore a term by decomposing the applications (we explore the applied +-- functions and their arguments, but ignore lambdas, forall, etc. - +-- should we go inside?). +partial def foldTermApps (k : α → Expr → MetaM α) (s : α) (e : Expr) : MetaM α := do + -- We do it in a very simpler manner: we deconstruct applications, + -- and recursively explore the sub-expressions. Note that we do + -- not go inside foralls and abstractions (should we?). + e.withApp fun f args => do + let s ← k s f + args.foldlM (foldTermApps k) s + +-- Provided a function `k` which lookups type class instances on an expression, +-- collect all the instances lookuped by applying `k` on the sub-expressions of `e`. +def collectInstances + (k : Expr → MetaM (Option Expr)) (s : HashSet Expr) (e : Expr) : MetaM (HashSet Expr) := do + let k s e := do + match ← k e with + | none => pure s + | some i => pure (s.insert i) + foldTermApps k s e + +-- Similar to `collectInstances`, but explores all the local declarations in the +-- main context. 
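-- Illustration only (a sketch, not from the patch): a user-defined type can plug
-- into the machinery above by providing its own `HasProp` instance, in the same
-- way as the `Scalar` and `Vec` instances. The `OrderedPair` structure below is
-- hypothetical and only serves as an example.
structure OrderedPair where
  lo : Int
  hi : Int
  h : lo ≤ hi

instance : HasProp OrderedPair where
  prop_ty := fun p => p.lo ≤ p.hi
  prop := fun p => p.h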
+def collectInstancesFromMainCtx (k : Expr → MetaM (Option Expr)) : Tactic.TacticM (HashSet Expr) := do + Lean.Elab.Tactic.withMainContext do + -- Get the local context + let ctx ← Lean.MonadLCtx.getLCtx + -- Just a matter of precaution + let ctx ← instantiateLCtxMVars ctx + -- Initialize the hashset + let hs := HashSet.empty + -- Explore the declarations + let decls ← ctx.getDecls + decls.foldlM (fun hs d => collectInstances k hs d.toExpr) hs + +-- Return an instance of `HasProp` for `e` if it has some +def lookupHasProp (e : Expr) : MetaM (Option Expr) := do + trace[Arith] "lookupHasProp" + -- TODO: do we need Lean.observing? + -- This actually eliminates the error messages + Lean.observing? do + trace[Arith] "lookupHasProp: observing" + let ty ← Lean.Meta.inferType e + let hasProp ← mkAppM ``HasProp #[ty] + let hasPropInst ← trySynthInstance hasProp + match hasPropInst with + | LOption.some i => + trace[Arith] "Found HasProp instance" + let i_prop ← mkProjection i (Name.mkSimple "prop") + some (← mkAppM' i_prop #[e]) + | _ => none + +-- Collect the instances of `HasProp` for the subexpressions in the context +def collectHasPropInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do + collectInstancesFromMainCtx lookupHasProp + +elab "display_has_prop_instances" : tactic => do + trace[Arith] "Displaying the HasProp instances" + let hs ← collectHasPropInstancesFromMainCtx + hs.forM fun e => do + trace[Arith] "+ HasProp instance: {e}" + +example (x : U32) : True := by + let i : HasProp U32 := inferInstance + have p := @HasProp.prop _ i x + simp only [HasProp.prop_ty] at p + display_has_prop_instances + simp + +-- Return an instance of `PropHasImp` for `e` if it has some +def lookupPropHasImp (e : Expr) : MetaM (Option Expr) := do + trace[Arith] "lookupPropHasImp" + -- TODO: do we need Lean.observing? + -- This actually eliminates the error messages + Lean.observing? do + trace[Arith] "lookupPropHasImp: observing" + let ty ← Lean.Meta.inferType e + trace[Arith] "lookupPropHasImp: ty: {ty}" + let cl ← mkAppM ``PropHasImp #[ty] + let inst ← trySynthInstance cl + match inst with + | LOption.some i => + trace[Arith] "Found PropHasImp instance" + let i_prop ← mkProjection i (Name.mkSimple "prop") + some (← mkAppM' i_prop #[e]) + | _ => none + +-- Collect the instances of `PropHasImp` for the subexpressions in the context +def collectPropHasImpInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do + collectInstancesFromMainCtx lookupPropHasImp + +elab "display_prop_has_imp_instances" : tactic => do + trace[Arith] "Displaying the PropHasImp instances" + let hs ← collectPropHasImpInstancesFromMainCtx + hs.forM fun e => do + trace[Arith] "+ PropHasImp instance: {e}" + +example (x y : Int) (_ : x ≠ y) (_ : ¬ x = y) : True := by + display_prop_has_imp_instances + simp + +-- Lookup instances in a context and introduce them with additional declarations. 
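-- For instance, looking up `HasProp` instances and unfolding `HasProp.prop_ty`
-- (see `intro_has_prop_instances` below) roughly introduces, for every variable
-- `x : U32` in the local context, an assumption
-- `Scalar.min ScalarTy.U32 ≤ x.val ∧ x.val ≤ Scalar.max ScalarTy.U32`.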
+def introInstances (declToUnfold : Name) (lookup : Expr → MetaM (Option Expr)) : Tactic.TacticM (Array Expr) := do + let hs ← collectInstancesFromMainCtx lookup + hs.toArray.mapM fun e => do + let type ← inferType e + let name ← mkFreshUserName `h + -- Add a declaration + let nval ← Utils.addDecl name e type (asLet := false) + -- Simplify to unfold the declaration to unfold (i.e., the projector) + let simpTheorems ← Tactic.simpOnlyBuiltins.foldlM (·.addConst ·) ({} : SimpTheorems) + -- Add the equational theorem for the decl to unfold + let simpTheorems ← simpTheorems.addDeclToUnfold declToUnfold + let congrTheorems ← getSimpCongrTheorems + let ctx : Simp.Context := { simpTheorems := #[simpTheorems], congrTheorems } + -- Where to apply the simplifier + let loc := Tactic.Location.targets #[mkIdent name] false + -- Apply the simplifier + let _ ← Tactic.simpLocation ctx (discharge? := .none) loc + -- Return the new value + pure nval + +-- Lookup the instances of `HasProp for all the sub-expressions in the context, +-- and introduce the corresponding assumptions +elab "intro_has_prop_instances" : tactic => do + trace[Arith] "Introducing the HasProp instances" + let _ ← introInstances ``HasProp.prop_ty lookupHasProp + +example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by + intro_has_prop_instances + simp [*] + +example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by + intro_has_prop_instances + simp_all [Scalar.max, Scalar.min] + +-- Tactic to split on a disjunction. +-- The expression `h` should be an fvar. +def splitDisj (h : Expr) (kleft kright : Tactic.TacticM Unit) : Tactic.TacticM Unit := do + trace[Arith] "assumption on which to split: {h}" + -- Retrieve the main goal + Lean.Elab.Tactic.withMainContext do + let goalType ← Lean.Elab.Tactic.getMainTarget + let hDecl := (← getLCtx).get! h.fvarId! + let hName := hDecl.userName + -- Case disjunction + let hTy ← inferType h + hTy.withApp fun f xs => do + trace[Arith] "as app: {f} {xs}" + -- Sanity check + if ¬ (f.isConstOf ``Or ∧ xs.size = 2) then throwError "Invalid argument to splitDisj" + let a := xs.get! 0 + let b := xs.get! 1 + -- Introduce the new goals + -- Returns: + -- - the match branch + -- - a fresh new mvar id + let mkGoal (hTy : Expr) (nGoalName : String) : MetaM (Expr × MVarId) := do + -- Introduce a variable for the assumption (`a` or `b`). Note that we reuse + -- the name of the assumption we split. + withLocalDeclD hName hTy fun var => do + -- The new goal + let mgoal ← mkFreshExprSyntheticOpaqueMVar goalType (tag := Name.mkSimple nGoalName) + -- Clear the assumption that we split + let mgoal ← mgoal.mvarId!.tryClearMany #[h.fvarId!] 
+ -- The branch expression + let branch ← mkLambdaFVars #[var] (mkMVar mgoal) + pure (branch, mgoal) + let (inl, mleft) ← mkGoal a "left" + let (inr, mright) ← mkGoal b "right" + trace[Arith] "left: {inl}: {mleft}" + trace[Arith] "right: {inr}: {mright}" + -- Create the match expression + withLocalDeclD (← mkFreshUserName `h) hTy fun hVar => do + let motive ← mkLambdaFVars #[hVar] goalType + let casesExpr ← mkAppOptM ``Or.casesOn #[a, b, motive, h, inl, inr] + let mgoal ← Tactic.getMainGoal + trace[Arith] "goals: {← Tactic.getUnsolvedGoals}" + trace[Arith] "main goal: {mgoal}" + mgoal.assign casesExpr + let goals ← Tactic.getUnsolvedGoals + -- Focus on the left + Tactic.setGoals [mleft] + kleft + let leftGoals ← Tactic.getUnsolvedGoals + -- Focus on the right + Tactic.setGoals [mright] + kright + let rightGoals ← Tactic.getUnsolvedGoals + -- Put all the goals back + Tactic.setGoals (leftGoals ++ rightGoals ++ goals) + trace[Arith] "new goals: {← Tactic.getUnsolvedGoals}" + +elab "split_disj " n:ident : tactic => do + Lean.Elab.Tactic.withMainContext do + let decl ← Lean.Meta.getLocalDeclFromUserName n.getId + let fvar := mkFVar decl.fvarId + splitDisj fvar (fun _ => pure ()) (fun _ => pure ()) + +example (x y : Int) (h0 : x ≤ y ∨ x ≥ y) : x ≤ y ∨ x ≥ y := by + split_disj h0 + . left; assumption + . right; assumption + +-- Lookup the instances of `PropHasImp for all the sub-expressions in the context, +-- and introduce the corresponding assumptions +elab "intro_prop_has_imp_instances" : tactic => do + trace[Arith] "Introducing the PropHasImp instances" + let _ ← introInstances ``PropHasImp.concl lookupPropHasImp + +example (x y : Int) (h0 : x ≤ y) (h1 : x ≠ y) : x < y := by + intro_prop_has_imp_instances + rename_i h + split_disj h + . linarith + . linarith + +/- Boosting a bit the linarith tac. + + We do the following: + - for all the assumptions of the shape `(x : Int) ≠ y` or `¬ (x = y), we + introduce two goals with the assumptions `x < y` and `x > y` + TODO: we could create a PR for mathlib. 
+ -/ +def intTacPreprocess : Tactic.TacticM Unit := do + Lean.Elab.Tactic.withMainContext do + -- Lookup the instances of PropHasImp (this is how we detect assumptions + -- of the proper shape), introduce assumptions in the context and split + -- on those + -- TODO: get rid of the assumptions that we split + let rec splitOnAsms (asms : List Expr) : Tactic.TacticM Unit := + match asms with + | [] => pure () + | asm :: asms => + let k := splitOnAsms asms + splitDisj asm k k + -- Introduce + let asms ← introInstances ``PropHasImp.concl lookupPropHasImp + -- Split + splitOnAsms asms.toList + +elab "int_tac_preprocess" : tactic => + intTacPreprocess + +example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by + int_tac_preprocess + linarith + linarith + +syntax "int_tac" : tactic +macro_rules + | `(tactic| int_tac) => + `(tactic| + (repeat (apply And.intro)) <;> -- TODO: improve this + int_tac_preprocess <;> + linarith) + +example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by + int_tac + +-- Checking that things append correctly when there are several disjunctions +example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y := by + int_tac + +-- A tactic to solve linear arithmetic goals in the presence of scalars +syntax "scalar_tac" : tactic +macro_rules + | `(tactic| scalar_tac) => + `(tactic| + intro_has_prop_instances; + have := Scalar.cMin_bound ScalarTy.Usize; + have := Scalar.cMin_bound ScalarTy.Isize; + have := Scalar.cMax_bound ScalarTy.Usize; + have := Scalar.cMax_bound ScalarTy.Isize; + -- TODO: not too sure about that + simp only [*, Scalar.max, Scalar.min, Scalar.cMin, Scalar.cMax] at *; + int_tac) + +example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by + scalar_tac + +example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by + scalar_tac + +end Arith diff --git a/backends/lean/Base/Arith/Base.lean b/backends/lean/Base/Arith/Base.lean new file mode 100644 index 00000000..ddd2dc24 --- /dev/null +++ b/backends/lean/Base/Arith/Base.lean @@ -0,0 +1,10 @@ +import Lean + +namespace Arith + +open Lean Elab Term Meta + +-- We can't define and use trace classes in the same file +initialize registerTraceClass `Arith + +end Arith diff --git a/backends/lean/Base/ArithBase.lean b/backends/lean/Base/ArithBase.lean deleted file mode 100644 index ddd2dc24..00000000 --- a/backends/lean/Base/ArithBase.lean +++ /dev/null @@ -1,10 +0,0 @@ -import Lean - -namespace Arith - -open Lean Elab Term Meta - --- We can't define and use trace classes in the same file -initialize registerTraceClass `Arith - -end Arith diff --git a/backends/lean/Base/Diverge/Elab.lean b/backends/lean/Base/Diverge/Elab.lean index 96f7abc0..f109e847 100644 --- a/backends/lean/Base/Diverge/Elab.lean +++ b/backends/lean/Base/Diverge/Elab.lean @@ -14,8 +14,8 @@ namespace Diverge syntax (name := divergentDef) declModifiers "divergent" "def" declId ppIndent(optDeclSig) declVal : command -open Utils open Lean Elab Term Meta Primitives Lean.Meta +open Utils /- The following was copied from the `wfRecursion` function. -/ @@ -47,21 +47,6 @@ def getSigmaTypes (ty : Expr) : MetaM (Expr × Expr) := do else pure (args.get! 0, args.get! 
1) -/- Like `lambdaTelescopeN` but only destructs a fixed number of lambdas -/ -def lambdaTelescopeN (e : Expr) (n : Nat) (k : Array Expr → Expr → MetaM α) : MetaM α := - lambdaTelescope e fun xs body => do - if xs.size < n then throwError "lambdaTelescopeN: not enough lambdas"; - let xs := xs.extract 0 n - let ys := xs.extract n xs.size - let body ← mkLambdaFVars ys body - k xs body - -/- Like `lambdaTelescope`, but only destructs one lambda - TODO: is there an equivalent of this function somewhere in the - standard library? -/ -def lambdaOne (e : Expr) (k : Expr → Expr → MetaM α) : MetaM α := - lambdaTelescopeN e 1 λ xs b => k (xs.get! 0) b - /- Generate a Sigma type from a list of *variables* (all the expressions must be variables). diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index 161b9ddb..2ce63620 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -116,4 +116,99 @@ partial def mapVisit (k : Nat → Expr → MetaM Expr) (e : Expr) : MetaM Expr : | .proj _ _ b => return e.updateProj! (← visit (i + 1) b) visit 0 e +section Methods + variable [MonadLiftT MetaM m] [MonadControlT MetaM m] [Monad m] [MonadError m] + variable {a : Type} + + /- Like `lambdaTelescopeN` but only destructs a fixed number of lambdas -/ + def lambdaTelescopeN (e : Expr) (n : Nat) (k : Array Expr → Expr → m a) : m a := + lambdaTelescope e fun xs body => do + if xs.size < n then throwError "lambdaTelescopeN: not enough lambdas" + let xs := xs.extract 0 n + let ys := xs.extract n xs.size + let body ← liftMetaM (mkLambdaFVars ys body) + k xs body + + /- Like `lambdaTelescope`, but only destructs one lambda + TODO: is there an equivalent of this function somewhere in the + standard library? -/ + def lambdaOne (e : Expr) (k : Expr → Expr → m a) : m a := + lambdaTelescopeN e 1 λ xs b => k (xs.get! 0) b + + def isExists (e : Expr) : Bool := e.getAppFn.isConstOf ``Exists ∧ e.getAppNumArgs = 2 + + -- Remark: Lean doesn't find the inhabited and nonempty instances if we don' + -- put them explicitely in the signature + partial def existsTelescopeProcess [Inhabited (m a)] [Nonempty (m a)] + (fvars : Array Expr) (e : Expr) (k : Array Expr → Expr → m a) : m a := do + -- Attempt to deconstruct an existential + if isExists e then do + let p := e.appArg! + lambdaOne p fun x ne => + existsTelescopeProcess (fvars.push x) ne k + else + -- No existential: call the continuation + k fvars e + + def existsTelescope [Inhabited (m a)] [Nonempty (m a)] (e : Expr) (k : Array Expr → Expr → m a) : m a := do + existsTelescopeProcess #[] e k + +end Methods + +def addDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool) : Tactic.TacticM Expr := + -- I don't think we need that + Lean.Elab.Tactic.withMainContext do + -- Insert the new declaration + let withDecl := if asLet then withLetDecl name type val else withLocalDeclD name type + withDecl fun nval => do + -- For debugging + let lctx ← Lean.MonadLCtx.getLCtx + let fid := nval.fvarId! + let decl := lctx.get! 
fid + trace[Arith] " new decl: \"{decl.userName}\" ({nval}) : {decl.type} := {decl.value}" + -- + -- Tranform the main goal `?m0` to `let x = nval in ?m1` + let mvarId ← Tactic.getMainGoal + let newMVar ← mkFreshExprSyntheticOpaqueMVar (← mvarId.getType) + let newVal ← mkLetFVars #[nval] newMVar + -- There are two cases: + -- - asLet is true: newVal is `let $name := $val in $newMVar` + -- - asLet is false: ewVal is `λ $name => $newMVar` + -- We need to apply it to `val` + let newVal := if asLet then newVal else mkAppN newVal #[val] + -- Assign the main goal and update the current goal + mvarId.assign newVal + let goals ← Tactic.getUnsolvedGoals + Lean.Elab.Tactic.setGoals (newMVar.mvarId! :: goals) + -- Return the new value - note: we are in the *new* context, created + -- after the declaration was added, so it will persist + pure nval + +def addDeclSyntax (name : Name) (val : Syntax) (asLet : Bool) : Tactic.TacticM Unit := + -- I don't think we need that + Lean.Elab.Tactic.withMainContext do + -- + let val ← elabTerm val .none + let type ← inferType val + -- In some situations, the type will be left as a metavariable (for instance, + -- if the term is `3`, Lean has the choice between `Nat` and `Int` and will + -- not choose): we force the instantiation of the meta-variable + synthesizeSyntheticMVarsUsingDefault + -- + let _ ← addDecl name val type asLet + +elab "custom_let " n:ident " := " v:term : tactic => do + addDeclSyntax n.getId v (asLet := true) + +elab "custom_have " n:ident " := " v:term : tactic => + addDeclSyntax n.getId v (asLet := false) + +example : Nat := by + custom_let x := 4 + custom_have y := 4 + apply y + +example (x : Bool) : Nat := by + cases x <;> custom_let x := 3 <;> apply x + end Utils -- cgit v1.2.3 From 6166c410a4b3353377e640acbae9f56e877a9118 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 11 Jul 2023 15:23:49 +0200 Subject: Work on the progress tactic --- backends/lean/Base/Arith.lean | 1 + backends/lean/Base/Arith/Arith.lean | 42 ++++--- backends/lean/Base/Progress.lean | 1 + backends/lean/Base/Progress/Base.lean | 175 ++++++++++++++++++++++++++++++ backends/lean/Base/Progress/Progress.lean | 112 +++++++++++++++++++ backends/lean/Base/Utils.lean | 28 +++++ 6 files changed, 346 insertions(+), 13 deletions(-) create mode 100644 backends/lean/Base/Arith.lean create mode 100644 backends/lean/Base/Progress.lean create mode 100644 backends/lean/Base/Progress/Base.lean create mode 100644 backends/lean/Base/Progress/Progress.lean (limited to 'backends') diff --git a/backends/lean/Base/Arith.lean b/backends/lean/Base/Arith.lean new file mode 100644 index 00000000..fd5698c5 --- /dev/null +++ b/backends/lean/Base/Arith.lean @@ -0,0 +1 @@ +import Base.Arith.Arith diff --git a/backends/lean/Base/Arith/Arith.lean b/backends/lean/Base/Arith/Arith.lean index 0ba73d18..ff628cf3 100644 --- a/backends/lean/Base/Arith/Arith.lean +++ b/backends/lean/Base/Arith/Arith.lean @@ -146,7 +146,7 @@ def collectInstances -- Similar to `collectInstances`, but explores all the local declarations in the -- main context. 
def collectInstancesFromMainCtx (k : Expr → MetaM (Option Expr)) : Tactic.TacticM (HashSet Expr) := do - Lean.Elab.Tactic.withMainContext do + Tactic.withMainContext do -- Get the local context let ctx ← Lean.MonadLCtx.getLCtx -- Just a matter of precaution @@ -263,8 +263,8 @@ example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by def splitDisj (h : Expr) (kleft kright : Tactic.TacticM Unit) : Tactic.TacticM Unit := do trace[Arith] "assumption on which to split: {h}" -- Retrieve the main goal - Lean.Elab.Tactic.withMainContext do - let goalType ← Lean.Elab.Tactic.getMainTarget + Tactic.withMainContext do + let goalType ← Tactic.getMainTarget let hDecl := (← getLCtx).get! h.fvarId! let hName := hDecl.userName -- Case disjunction @@ -316,7 +316,7 @@ def splitDisj (h : Expr) (kleft kright : Tactic.TacticM Unit) : Tactic.TacticM U trace[Arith] "new goals: {← Tactic.getUnsolvedGoals}" elab "split_disj " n:ident : tactic => do - Lean.Elab.Tactic.withMainContext do + Tactic.withMainContext do let decl ← Lean.Meta.getLocalDeclFromUserName n.getId let fvar := mkFVar decl.fvarId splitDisj fvar (fun _ => pure ()) (fun _ => pure ()) @@ -347,7 +347,7 @@ example (x y : Int) (h0 : x ≤ y) (h1 : x ≠ y) : x < y := by TODO: we could create a PR for mathlib. -/ def intTacPreprocess : Tactic.TacticM Unit := do - Lean.Elab.Tactic.withMainContext do + Tactic.withMainContext do -- Lookup the instances of PropHasImp (this is how we detect assumptions -- of the proper shape), introduce assumptions in the context and split -- on those @@ -366,19 +366,31 @@ def intTacPreprocess : Tactic.TacticM Unit := do elab "int_tac_preprocess" : tactic => intTacPreprocess +def intTac : Tactic.TacticM Unit := do + Tactic.withMainContext do + Tactic.focus do + -- Preprocess - wondering if we should do this before or after splitting + -- the goal. I think before leads to a smaller proof term? 
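      -- For instance, on the goal `0 < x` with `h0 : 0 ≤ x` and `h1 : x ≠ 0` in the
      -- context, the preprocessing introduces `x < 0 ∨ x > 0` and splits on it,
      -- after which `linarith` can close each of the two resulting goals.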
+ Tactic.allGoals intTacPreprocess + -- Split the conjunctions in the goal + Utils.repeatTac Utils.splitConjTarget + -- Call linarith + let linarith := + let cfg : Linarith.LinarithConfig := { + -- We do this with our custom preprocessing + splitNe := false + } + Tactic.liftMetaFinishingTactic <| Linarith.linarith false [] cfg + Tactic.allGoals linarith + +elab "int_tac" : tactic => + intTac + example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by int_tac_preprocess linarith linarith -syntax "int_tac" : tactic -macro_rules - | `(tactic| int_tac) => - `(tactic| - (repeat (apply And.intro)) <;> -- TODO: improve this - int_tac_preprocess <;> - linarith) - example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by int_tac @@ -386,6 +398,10 @@ example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y := by int_tac +-- Checking that things append correctly when there are several disjunctions +example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y ∧ x + y ≥ 2 := by + int_tac + -- A tactic to solve linear arithmetic goals in the presence of scalars syntax "scalar_tac" : tactic macro_rules diff --git a/backends/lean/Base/Progress.lean b/backends/lean/Base/Progress.lean new file mode 100644 index 00000000..d812b896 --- /dev/null +++ b/backends/lean/Base/Progress.lean @@ -0,0 +1 @@ +import Base.Progress.Progress diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean new file mode 100644 index 00000000..3f44f46c --- /dev/null +++ b/backends/lean/Base/Progress/Base.lean @@ -0,0 +1,175 @@ +import Lean +import Base.Utils +import Base.Primitives + +namespace Progress + +open Lean Elab Term Meta +open Utils + +-- We can't define and use trace classes in the same file +initialize registerTraceClass `Progress + +-- Return the first conjunct if the expression is a conjunction, or the +-- expression itself otherwise. Also return the second conjunct if it is a +-- conjunction. +def getFirstConj (e : Expr) : MetaM (Expr × Option Expr) := do + e.withApp fun f args => + if f.isConstOf ``And ∧ args.size = 2 then pure (args.get! 0, some (args.get! 1)) + else pure (e, none) + +-- Destruct an equaliy and return the two sides +def destEq (e : Expr) : MetaM (Expr × Expr) := do + e.withApp fun f args => + if f.isConstOf ``Eq ∧ args.size = 3 then pure (args.get! 1, args.get! 2) + else throwError "Not an equality: {e}" + +-- Return the set of FVarIds in the expression +partial def getFVarIds (e : Expr) (hs : HashSet FVarId := HashSet.empty) : MetaM (HashSet FVarId) := do + e.withApp fun body args => do + let hs := if body.isFVar then hs.insert body.fvarId! 
else hs + args.foldlM (fun hs arg => getFVarIds arg hs) hs + +/- # Progress tactic -/ + +structure PSpecDesc where + -- The universally quantified variables + fvars : Array Expr + -- The existentially quantified variables + evars : Array Expr + -- The function + fName : Name + -- The function arguments + fLevels : List Level + args : Array Expr + -- The universally quantified variables which appear in the function arguments + argsFVars : Array FVarId + -- The returned value + ret : Expr + -- The postcondition (if there is) + post : Option Expr + +section Methods + variable [MonadLiftT MetaM m] [MonadControlT MetaM m] [Monad m] [MonadOptions m] + variable [MonadTrace m] [MonadLiftT IO m] [MonadRef m] [AddMessageContext m] + variable [MonadError m] + variable {a : Type} + + /- Analyze a pspec theorem to decompose its arguments. + + PSpec theorems should be of the following shape: + ``` + ∀ x1 ... xn, H1 → ... Hn → ∃ y1 ... ym. f x1 ... xn = .ret ... ∧ Post1 ∧ ... ∧ Postk + ``` + + The continuation `k` receives the following inputs: + - universally quantified variables + - assumptions + - existentially quantified variables + - function name + - function arguments + - return + - postconditions + + TODO: generalize for when we do inductive proofs + -/ + partial + def withPSpec [Inhabited (m a)] [Nonempty (m a)] (th : Expr) (k : PSpecDesc → m a) + (sanityChecks : Bool := false) : + m a := do + trace[Progress] "Theorem: {th}" + -- Dive into the quantified variables and the assumptions + forallTelescope th fun fvars th => do + trace[Progress] "All argumens: {fvars}" + /- -- Filter the argumens which are not propositions + let rec getFirstPropIdx (i : Nat) : MetaM Nat := do + if i ≥ fargs.size then pure i + else do + let x := fargs.get! i + if ← Meta.isProp (← inferType x) then pure i + else getFirstPropIdx (i + 1) + let i ← getFirstPropIdx 0 + let fvars := fargs.extract 0 i + let hyps := fargs.extract i fargs.size + trace[Progress] "Quantified variables: {fvars}" + trace[Progress] "Assumptions: {hyps}" + -- Sanity check: all hypotheses are propositions (in particular, all the + -- quantified variables are at the beginning) + let hypsAreProp ← hyps.allM fun x => do Meta.isProp (← inferType x) + if ¬ hypsAreProp then + throwError "The theorem doesn't have the proper shape: all the quantified arguments should be at the beginning" + -/ + -- Dive into the existentials + existsTelescope th fun evars th => do + trace[Progress] "Existentials: {evars}" + -- Take the first conjunct + let (th, post) ← getFirstConj th + -- Destruct the equality + let (th, ret) ← destEq th + -- Destruct the application to get the name + th.withApp fun f args => do + if ¬ f.isConst then throwError "Not a constant: {f}" + -- Compute the set of universally quantified variables which appear in the function arguments + let allArgsFVars ← args.foldlM (fun hs arg => getFVarIds arg hs) HashSet.empty + -- Sanity check + if sanityChecks then + let fvarsSet : HashSet FVarId := HashSet.ofArray (fvars.map (fun x => x.fvarId!)) + let filtArgsFVars := allArgsFVars.toArray.filter (fun fvar => ¬ fvarsSet.contains fvar) + if ¬ filtArgsFVars.isEmpty then + let filtArgsFVars := filtArgsFVars.map (fun fvarId => Expr.fvar fvarId) + throwError "Some of the function inputs are not universally quantified: {filtArgsFVars}" + let argsFVars := fvars.map (fun x => x.fvarId!) 
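      -- Keep only the quantified variables which actually appear in the function
      -- arguments: for instance, for the `vec_index_test` theorem used as a test
      -- further below, this keeps `α`, `v` and `i` but drops the hypothesis `h`,
      -- since only the former occur in the arguments of `v.index α i`.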
+ let argsFVars := argsFVars.filter (fun fvar => allArgsFVars.contains fvar) + -- Return + trace[Progress] "Function: {f.constName!}"; + let thDesc := { + fvars := fvars + evars := evars + fName := f.constName! + fLevels := f.constLevels! + args := args + argsFVars + ret := ret + post := post + } + k thDesc +end Methods + + +def getPSpecFunName (th : Expr) : MetaM Name := + withPSpec th (fun d => do pure d.fName) true + +structure PSpecAttr where + attr : AttributeImpl + ext : MapDeclarationExtension Name + deriving Inhabited + +/- The persistent map from function to pspec theorems. -/ +initialize pspecAttr : PSpecAttr ← do + let ext ← mkMapDeclarationExtension `pspecMap + let attrImpl := { + name := `pspec + descr := "Marks theorems to use with the `progress` tactic" + add := fun thName stx attrKind => do + Attribute.Builtin.ensureNoArgs stx + -- TODO: use the attribute kind + unless attrKind == AttributeKind.global do + throwError "invalid attribute 'pspec', must be global" + -- Lookup the theorem + let env ← getEnv + let thDecl := env.constants.find! thName + let fName ← MetaM.run' (getPSpecFunName thDecl.type) + trace[Progress] "Registering spec theorem for {fName}" + let env := ext.addEntry env (fName, thName) + setEnv env + pure () + } + registerBuiltinAttribute attrImpl + pure { attr := attrImpl, ext := ext } + +def PSpecAttr.find? (s : PSpecAttr) (name : Name) : MetaM (Option Name) := do + return (s.ext.getState (← getEnv)).find? name + --return s.ext.find? (← getEnv) name + + +end Progress diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean new file mode 100644 index 00000000..1b9ee55c --- /dev/null +++ b/backends/lean/Base/Progress/Progress.lean @@ -0,0 +1,112 @@ +import Lean +import Base.Arith +import Base.Progress.Base + +namespace Progress + +open Lean Elab Term Meta Tactic +open Utils + +namespace Test + open Primitives + + set_option trace.Progress true + + @[pspec] + theorem vec_index_test (α : Type u) (v: Vec α) (i: Usize) (h: i.val < v.val.length) : + ∃ x, v.index α i = .ret x := by + apply + sorry + + #eval pspecAttr.find? ``Primitives.Vec.index +end Test + +#check isDefEq +#check allGoals + +def progressLookupTheorem (asmTac : TacticM Unit) : TacticM Unit := do + withMainContext do + -- Retrieve the goal + let mgoal ← Tactic.getMainGoal + let goalTy ← mgoal.getType + -- Dive into the goal to lookup the theorem + let (fName, fLevels, args) ← do + withPSpec goalTy fun desc => + -- TODO: check that no universally quantified variables in the arguments + pure (desc.fName, desc.fLevels, desc.args) + -- TODO: also try the assumptions + trace[Progress] "Function: {fName}" + -- TODO: use a list of theorems, and try them one by one? + let thName ← do + match ← pspecAttr.find? fName with + | none => throwError "Could not find a pspec theorem for {fName}" + | some thName => pure thName + trace[Progress] "Lookuped up: {thName}" + /- Apply the theorem + We try to match the theorem with the goal + In order to do so, we introduce meta-variables for all the parameters + (i.e., quantified variables and assumpions), and unify those with the goal. + Remark: we do not introduce meta-variables for the quantified variables + which don't appear in the function arguments (we want to let them + quantified). + We also make sure that all the meta variables which appear in the + function arguments have been instantiated + -/ + let env ← getEnv + let thDecl := env.constants.find! 
thName + let thTy := thDecl.type + -- TODO: the tactic fails if we uncomment withNewMCtxDepth + -- withNewMCtxDepth do + let (mvars, binders, thExBody) ← forallMetaTelescope thTy + -- Introduce the existentially quantified variables and the post-condition + -- in the context + let thBody ← + existsTelescope thExBody fun _evars thBody => do + let (thBody, _) ← destEq thBody + -- There shouldn't be any existential variables in thBody + pure thBody + -- Match the body with the target + let target := mkAppN (.const fName fLevels) args + trace[Progress] "mvars:\n{mvars.map Expr.mvarId!}" + trace[Progress] "thBody: {thBody}" + trace[Progress] "target: {target}" + let ok ← isDefEq thBody target + if ¬ ok then throwError "Could not unify the theorem with the target:\n- theorem: {thBody}\n- target: {target}" + postprocessAppMVars `progress mgoal mvars binders true true + Term.synthesizeSyntheticMVarsNoPostponing + let thBody ← instantiateMVars thBody + trace[Progress] "thBody (after instantiation): {thBody}" + -- Add the instantiated theorem to the assumptions (we apply it on the metavariables). + let th ← mkAppOptM thName (mvars.map some) + let asmName ← mkFreshUserName `h + let thTy ← inferType th + let thAsm ← Utils.addDecl asmName th thTy (asLet := false) + -- Update the set of goals + let curGoals ← getUnsolvedGoals + let newGoals := mvars.map Expr.mvarId! + let newGoals ← newGoals.filterM fun mvar => not <$> mvar.isAssigned + trace[Progress] "new goals: {newGoals}" + setGoals newGoals.toList + allGoals asmTac + let newGoals ← getUnsolvedGoals + setGoals (newGoals ++ curGoals) + -- + pure () + +elab "progress" : tactic => do + progressLookupTheorem (firstTac [assumptionTac, Arith.intTac]) + +namespace Test + open Primitives + + set_option trace.Progress true + + @[pspec] + theorem vec_index_test2 (α : Type u) (v: Vec α) (i: Usize) (h: i.val < v.val.length) : + ∃ x, v.index α i = .ret x := by + progress + tauto + +end Test + +end Progress diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index 2ce63620..1351f3d4 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -1,4 +1,5 @@ import Lean +import Mathlib.Tactic.Core namespace Utils @@ -211,4 +212,31 @@ example : Nat := by example (x : Bool) : Nat := by cases x <;> custom_let x := 3 <;> apply x +-- Repeatedly apply a tactic +partial def repeatTac (tac : Tactic.TacticM Unit) : Tactic.TacticM Unit := do + try + tac + Tactic.allGoals (Tactic.focus (repeatTac tac)) + -- TODO: does this restore the state? 
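    -- For instance, `repeatTac splitConjTarget` (defined below) turns a goal
    -- `A ∧ B ∧ C` into the three goals `A`, `B` and `C`; this is how `int_tac`
    -- splits conjunctions before calling `linarith`.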
+ catch _ => pure () + +def firstTac (tacl : List (Tactic.TacticM Unit)) : Tactic.TacticM Unit := do + match tacl with + | [] => pure () + | tac :: tacl => + try tac + catch _ => firstTac tacl + +-- Split the goal if it is a conjunction +def splitConjTarget : Tactic.TacticM Unit := do + Tactic.withMainContext do + let and_intro := Expr.const ``And.intro [] + let mvarIds' ← _root_.Lean.MVarId.apply (← Tactic.getMainGoal) and_intro + Term.synthesizeSyntheticMVarsNoPostponing + Tactic.replaceMainGoal mvarIds' + +-- Taken from Lean.Elab.Tactic.evalAssumption +def assumptionTac : Tactic.TacticM Unit := + Tactic.liftMetaTactic fun mvarId => do mvarId.assumption; pure [] + end Utils -- cgit v1.2.3 From a18d899a2c2b9bdd36f4a5a4b70472c12a835a96 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 12 Jul 2023 14:34:55 +0200 Subject: Finish a first version of the progress tactic --- backends/lean/Base/Arith/Arith.lean | 122 ++++----------- backends/lean/Base/Diverge/Base.lean | 2 +- backends/lean/Base/Primitives.lean | 89 ++++++++--- backends/lean/Base/Progress/Base.lean | 24 +-- backends/lean/Base/Progress/Progress.lean | 36 ++++- backends/lean/Base/Utils.lean | 247 +++++++++++++++++++++++++++--- 6 files changed, 349 insertions(+), 171 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Arith.lean b/backends/lean/Base/Arith/Arith.lean index ff628cf3..3557d350 100644 --- a/backends/lean/Base/Arith/Arith.lean +++ b/backends/lean/Base/Arith/Arith.lean @@ -230,25 +230,20 @@ def introInstances (declToUnfold : Name) (lookup : Expr → MetaM (Option Expr)) let type ← inferType e let name ← mkFreshUserName `h -- Add a declaration - let nval ← Utils.addDecl name e type (asLet := false) + let nval ← Utils.addDeclTac name e type (asLet := false) -- Simplify to unfold the declaration to unfold (i.e., the projector) - let simpTheorems ← Tactic.simpOnlyBuiltins.foldlM (·.addConst ·) ({} : SimpTheorems) - -- Add the equational theorem for the decl to unfold - let simpTheorems ← simpTheorems.addDeclToUnfold declToUnfold - let congrTheorems ← getSimpCongrTheorems - let ctx : Simp.Context := { simpTheorems := #[simpTheorems], congrTheorems } - -- Where to apply the simplifier - let loc := Tactic.Location.targets #[mkIdent name] false - -- Apply the simplifier - let _ ← Tactic.simpLocation ctx (discharge? := .none) loc + Utils.simpAt [declToUnfold] [] [] (Tactic.Location.targets #[mkIdent name] false) -- Return the new value pure nval +def introHasPropInstances : Tactic.TacticM (Array Expr) := do + trace[Arith] "Introducing the HasProp instances" + introInstances ``HasProp.prop_ty lookupHasProp + -- Lookup the instances of `HasProp for all the sub-expressions in the context, -- and introduce the corresponding assumptions elab "intro_has_prop_instances" : tactic => do - trace[Arith] "Introducing the HasProp instances" - let _ ← introInstances ``HasProp.prop_ty lookupHasProp + let _ ← introHasPropInstances example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by intro_has_prop_instances @@ -258,74 +253,6 @@ example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by intro_has_prop_instances simp_all [Scalar.max, Scalar.min] --- Tactic to split on a disjunction. --- The expression `h` should be an fvar. -def splitDisj (h : Expr) (kleft kright : Tactic.TacticM Unit) : Tactic.TacticM Unit := do - trace[Arith] "assumption on which to split: {h}" - -- Retrieve the main goal - Tactic.withMainContext do - let goalType ← Tactic.getMainTarget - let hDecl := (← getLCtx).get! h.fvarId! 
- let hName := hDecl.userName - -- Case disjunction - let hTy ← inferType h - hTy.withApp fun f xs => do - trace[Arith] "as app: {f} {xs}" - -- Sanity check - if ¬ (f.isConstOf ``Or ∧ xs.size = 2) then throwError "Invalid argument to splitDisj" - let a := xs.get! 0 - let b := xs.get! 1 - -- Introduce the new goals - -- Returns: - -- - the match branch - -- - a fresh new mvar id - let mkGoal (hTy : Expr) (nGoalName : String) : MetaM (Expr × MVarId) := do - -- Introduce a variable for the assumption (`a` or `b`). Note that we reuse - -- the name of the assumption we split. - withLocalDeclD hName hTy fun var => do - -- The new goal - let mgoal ← mkFreshExprSyntheticOpaqueMVar goalType (tag := Name.mkSimple nGoalName) - -- Clear the assumption that we split - let mgoal ← mgoal.mvarId!.tryClearMany #[h.fvarId!] - -- The branch expression - let branch ← mkLambdaFVars #[var] (mkMVar mgoal) - pure (branch, mgoal) - let (inl, mleft) ← mkGoal a "left" - let (inr, mright) ← mkGoal b "right" - trace[Arith] "left: {inl}: {mleft}" - trace[Arith] "right: {inr}: {mright}" - -- Create the match expression - withLocalDeclD (← mkFreshUserName `h) hTy fun hVar => do - let motive ← mkLambdaFVars #[hVar] goalType - let casesExpr ← mkAppOptM ``Or.casesOn #[a, b, motive, h, inl, inr] - let mgoal ← Tactic.getMainGoal - trace[Arith] "goals: {← Tactic.getUnsolvedGoals}" - trace[Arith] "main goal: {mgoal}" - mgoal.assign casesExpr - let goals ← Tactic.getUnsolvedGoals - -- Focus on the left - Tactic.setGoals [mleft] - kleft - let leftGoals ← Tactic.getUnsolvedGoals - -- Focus on the right - Tactic.setGoals [mright] - kright - let rightGoals ← Tactic.getUnsolvedGoals - -- Put all the goals back - Tactic.setGoals (leftGoals ++ rightGoals ++ goals) - trace[Arith] "new goals: {← Tactic.getUnsolvedGoals}" - -elab "split_disj " n:ident : tactic => do - Tactic.withMainContext do - let decl ← Lean.Meta.getLocalDeclFromUserName n.getId - let fvar := mkFVar decl.fvarId - splitDisj fvar (fun _ => pure ()) (fun _ => pure ()) - -example (x y : Int) (h0 : x ≤ y ∨ x ≥ y) : x ≤ y ∨ x ≥ y := by - split_disj h0 - . left; assumption - . 
right; assumption - -- Lookup the instances of `PropHasImp for all the sub-expressions in the context, -- and introduce the corresponding assumptions elab "intro_prop_has_imp_instances" : tactic => do @@ -357,7 +284,7 @@ def intTacPreprocess : Tactic.TacticM Unit := do | [] => pure () | asm :: asms => let k := splitOnAsms asms - splitDisj asm k k + Utils.splitDisjTac asm k k -- Introduce let asms ← introInstances ``PropHasImp.concl lookupPropHasImp -- Split @@ -403,18 +330,27 @@ example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : int_tac -- A tactic to solve linear arithmetic goals in the presence of scalars -syntax "scalar_tac" : tactic -macro_rules - | `(tactic| scalar_tac) => - `(tactic| - intro_has_prop_instances; - have := Scalar.cMin_bound ScalarTy.Usize; - have := Scalar.cMin_bound ScalarTy.Isize; - have := Scalar.cMax_bound ScalarTy.Usize; - have := Scalar.cMax_bound ScalarTy.Isize; - -- TODO: not too sure about that - simp only [*, Scalar.max, Scalar.min, Scalar.cMin, Scalar.cMax] at *; - int_tac) +def scalarTac : Tactic.TacticM Unit := do + Tactic.withMainContext do + -- Introduce the scalar bounds + let _ ← introHasPropInstances + Tactic.allGoals do + -- Inroduce the bounds for the isize/usize types + let add (e : Expr) : Tactic.TacticM Unit := do + let ty ← inferType e + let _ ← Utils.addDeclTac (← mkFreshUserName `h) e ty (asLet := false) + add (← mkAppM ``Scalar.cMin_bound #[.const ``ScalarTy.Usize []]) + add (← mkAppM ``Scalar.cMin_bound #[.const ``ScalarTy.Isize []]) + add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Usize []]) + add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Isize []]) + -- Reveal the concrete bounds - TODO: not too sure about that. + -- Maybe we should reveal the "concrete" bounds (after normalization) + Utils.simpAt [``Scalar.max, ``Scalar.min, ``Scalar.cMin, ``Scalar.cMax] [] [] .wildcard + -- Apply the integer tactic + intTac + +elab "scalar_tac" : tactic => + scalarTac example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by scalar_tac diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean index e22eb914..d2c91ff8 100644 --- a/backends/lean/Base/Diverge/Base.lean +++ b/backends/lean/Base/Diverge/Base.lean @@ -14,7 +14,7 @@ TODO: Actually, the cases from mathlib seems already quite powerful (https://leanprover-community.github.io/mathlib_docs/tactics.html#cases) For instance: cases h : e - Also: cases_matching + Also: **casesm** - better split tactic - we need conversions to operate on the head of applications. 
Actually, something like this works: diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index 14f5971e..6210688d 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -175,27 +175,28 @@ open System.Platform.getNumBits @[simp] def U128.min : Int := 0 @[simp] def U128.max : Int := HPow.hPow 2 128 - 1 -#assert (I8.min == -128) -#assert (I8.max == 127) -#assert (I16.min == -32768) -#assert (I16.max == 32767) -#assert (I32.min == -2147483648) -#assert (I32.max == 2147483647) -#assert (I64.min == -9223372036854775808) -#assert (I64.max == 9223372036854775807) -#assert (I128.min == -170141183460469231731687303715884105728) -#assert (I128.max == 170141183460469231731687303715884105727) -#assert (U8.min == 0) -#assert (U8.max == 255) -#assert (U16.min == 0) -#assert (U16.max == 65535) -#assert (U32.min == 0) -#assert (U32.max == 4294967295) -#assert (U64.min == 0) -#assert (U64.max == 18446744073709551615) -#assert (U128.min == 0) -#assert (U128.max == 340282366920938463463374607431768211455) - +-- The normalized bounds +@[simp] def I8.norm_min := -128 +@[simp] def I8.norm_max := 127 +@[simp] def I16.norm_min := -32768 +@[simp] def I16.norm_max := 32767 +@[simp] def I32.norm_min := -2147483648 +@[simp] def I32.norm_max := 2147483647 +@[simp] def I64.norm_min := -9223372036854775808 +@[simp] def I64.norm_max := 9223372036854775807 +@[simp] def I128.norm_min := -170141183460469231731687303715884105728 +@[simp] def I128.norm_max := 170141183460469231731687303715884105727 +@[simp] def U8.norm_min := 0 +@[simp] def U8.norm_max := 255 +@[simp] def U16.norm_min := 0 +@[simp] def U16.norm_max := 65535 +@[simp] def U32.norm_min := 0 +@[simp] def U32.norm_max := 4294967295 +@[simp] def U64.norm_min := 0 +@[simp] def U64.norm_max := 18446744073709551615 +@[simp] def U128.norm_min := 0 +@[simp] def U128.norm_max := 340282366920938463463374607431768211455 + inductive ScalarTy := | Isize | I8 @@ -240,6 +241,46 @@ def Scalar.max (ty : ScalarTy) : Int := | .U64 => U64.max | .U128 => U128.max +@[simp] def Scalar.norm_min (ty : ScalarTy) : Int := + match ty with + -- We can't normalize the bounds for isize/usize + | .Isize => Isize.min + | .Usize => Usize.min + -- + | .I8 => I8.norm_min + | .I16 => I16.norm_min + | .I32 => I32.norm_min + | .I64 => I64.norm_min + | .I128 => I128.norm_min + | .U8 => U8.norm_min + | .U16 => U16.norm_min + | .U32 => U32.norm_min + | .U64 => U64.norm_min + | .U128 => U128.norm_min + +@[simp] def Scalar.norm_max (ty : ScalarTy) : Int := + match ty with + -- We can't normalize the bounds for isize/usize + | .Isize => Isize.max + | .Usize => Usize.max + -- + | .I8 => I8.norm_max + | .I16 => I16.norm_max + | .I32 => I32.norm_max + | .I64 => I64.norm_max + | .I128 => I128.norm_max + | .U8 => U8.norm_max + | .U16 => U16.norm_max + | .U32 => U32.norm_max + | .U64 => U64.norm_max + | .U128 => U128.norm_max + +def Scalar.norm_min_eq (ty : ScalarTy) : Scalar.min ty = Scalar.norm_min ty := by + cases ty <;> rfl + +def Scalar.norm_max_eq (ty : ScalarTy) : Scalar.max ty = Scalar.norm_max ty := by + cases ty <;> rfl + -- "Conservative" bounds -- We use those because we can't compare to the isize bounds (which can't -- reduce at compile-time). Whenever we perform an arithmetic operation like @@ -249,13 +290,13 @@ def Scalar.max (ty : ScalarTy) : Int := -- type-checking time. 
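-- [Editor's note, not part of the original patch] A minimal sketch of how these
-- conservative bounds are meant to be used, assuming the definitions just below
-- (`Scalar.cMin`/`Scalar.cMax`) together with the `Scalar.cMax_suffices` lemma from
-- this file: since `Usize.max` depends on `System.Platform.numBits` and does not
-- reduce, one proves the bound against the concrete `Scalar.cMax .Usize` (i.e.
-- `U32.max`) and then weakens it.
example (x : Int) (h : x ≤ Scalar.cMax .Usize) : x ≤ Scalar.max .Usize :=
  Scalar.cMax_suffices .Usize h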
def Scalar.cMin (ty : ScalarTy) : Int := match ty with - | .Isize => I32.min + | .Isize => Scalar.min .I32 | _ => Scalar.min ty def Scalar.cMax (ty : ScalarTy) : Int := match ty with - | .Isize => I32.max - | .Usize => U32.max + | .Isize => Scalar.max .I32 + | .Usize => Scalar.max .U32 | _ => Scalar.max ty theorem Scalar.cMin_bound ty : Scalar.min ty ≤ Scalar.cMin ty := by diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index 3f44f46c..613f38f8 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -10,26 +10,6 @@ open Utils -- We can't define and use trace classes in the same file initialize registerTraceClass `Progress --- Return the first conjunct if the expression is a conjunction, or the --- expression itself otherwise. Also return the second conjunct if it is a --- conjunction. -def getFirstConj (e : Expr) : MetaM (Expr × Option Expr) := do - e.withApp fun f args => - if f.isConstOf ``And ∧ args.size = 2 then pure (args.get! 0, some (args.get! 1)) - else pure (e, none) - --- Destruct an equaliy and return the two sides -def destEq (e : Expr) : MetaM (Expr × Expr) := do - e.withApp fun f args => - if f.isConstOf ``Eq ∧ args.size = 3 then pure (args.get! 1, args.get! 2) - else throwError "Not an equality: {e}" - --- Return the set of FVarIds in the expression -partial def getFVarIds (e : Expr) (hs : HashSet FVarId := HashSet.empty) : MetaM (HashSet FVarId) := do - e.withApp fun body args => do - let hs := if body.isFVar then hs.insert body.fvarId! else hs - args.foldlM (fun hs arg => getFVarIds arg hs) hs - /- # Progress tactic -/ structure PSpecDesc where @@ -103,7 +83,7 @@ section Methods existsTelescope th fun evars th => do trace[Progress] "Existentials: {evars}" -- Take the first conjunct - let (th, post) ← getFirstConj th + let (th, post) ← optSplitConj th -- Destruct the equality let (th, ret) ← destEq th -- Destruct the application to get the name @@ -169,7 +149,5 @@ initialize pspecAttr : PSpecAttr ← do def PSpecAttr.find? (s : PSpecAttr) (name : Name) : MetaM (Option Name) := do return (s.ext.getState (← getEnv)).find? name - --return s.ext.find? (← getEnv) name - end Progress diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 1b9ee55c..4c68b3bd 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -21,9 +21,6 @@ namespace Test #eval pspecAttr.find? ``Primitives.Vec.index end Test -#check isDefEq -#check allGoals - def progressLookupTheorem (asmTac : TacticM Unit) : TacticM Unit := do withMainContext do -- Retrieve the goal @@ -80,7 +77,28 @@ def progressLookupTheorem (asmTac : TacticM Unit) : TacticM Unit := do let th ← mkAppOptM thName (mvars.map some) let asmName ← mkFreshUserName `h let thTy ← inferType th - let thAsm ← Utils.addDecl asmName th thTy (asLet := false) + let thAsm ← Utils.addDeclTac asmName th thTy (asLet := false) + withMainContext do -- The context changed - TODO: remove once addDeclTac is updated + let ngoal ← getMainGoal + trace[Progress] "current goal: {ngoal}" + trace[Progress] "current goal: {← ngoal.isAssigned}" + -- The assumption should be of the shape: + -- `∃ x1 ... xn, f args = ... 
∧ ...` + -- We introduce the existentially quantified variables and split the top-most + -- conjunction if there is one + splitAllExistsTac thAsm fun h => do + -- Split the conjunction + let splitConj (k : Expr → TacticM Unit) : TacticM Unit := do + if ← isConj (← inferType h) then + splitConjTac h (fun h _ => k h) + else k h + -- Simplify the target by using the equality + splitConj fun h => do + simpAt [] [] [h.fvarId!] (.targets #[] true) + -- Clear the equality + let mgoal ← getMainGoal + let mgoal ← mgoal.tryClearMany #[h.fvarId!] + setGoals (mgoal :: (← getUnsolvedGoals)) -- Update the set of goals let curGoals ← getUnsolvedGoals let newGoals := mvars.map Expr.mvarId! @@ -94,7 +112,7 @@ def progressLookupTheorem (asmTac : TacticM Unit) : TacticM Unit := do pure () elab "progress" : tactic => do - progressLookupTheorem (firstTac [assumptionTac, Arith.intTac]) + progressLookupTheorem (firstTac [assumptionTac, Arith.scalarTac]) namespace Test open Primitives @@ -103,10 +121,12 @@ namespace Test @[pspec] theorem vec_index_test2 (α : Type u) (v: Vec α) (i: Usize) (h: i.val < v.val.length) : - ∃ x, v.index α i = .ret x := by + ∃ (x: α), v.index α i = .ret x := by progress - tauto - + simp + + set_option trace.Progress false + end Test end Progress diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index 1351f3d4..14feb567 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -1,9 +1,10 @@ import Lean import Mathlib.Tactic.Core +import Mathlib.Tactic.LeftRight namespace Utils -open Lean Elab Term Meta +open Lean Elab Term Meta Tactic -- Useful helper to explore definitions and figure out the variant -- of their sub-expressions. @@ -156,9 +157,10 @@ section Methods end Methods -def addDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool) : Tactic.TacticM Expr := +-- TODO: this should take a continuation +def addDeclTac (name : Name) (val : Expr) (type : Expr) (asLet : Bool) : TacticM Expr := -- I don't think we need that - Lean.Elab.Tactic.withMainContext do + withMainContext do -- Insert the new declaration let withDecl := if asLet then withLetDecl name type val else withLocalDeclD name type withDecl fun nval => do @@ -169,7 +171,7 @@ def addDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool) : Tactic.Tac trace[Arith] " new decl: \"{decl.userName}\" ({nval}) : {decl.type} := {decl.value}" -- -- Tranform the main goal `?m0` to `let x = nval in ?m1` - let mvarId ← Tactic.getMainGoal + let mvarId ← getMainGoal let newMVar ← mkFreshExprSyntheticOpaqueMVar (← mvarId.getType) let newVal ← mkLetFVars #[nval] newMVar -- There are two cases: @@ -179,30 +181,30 @@ def addDecl (name : Name) (val : Expr) (type : Expr) (asLet : Bool) : Tactic.Tac let newVal := if asLet then newVal else mkAppN newVal #[val] -- Assign the main goal and update the current goal mvarId.assign newVal - let goals ← Tactic.getUnsolvedGoals - Lean.Elab.Tactic.setGoals (newMVar.mvarId! :: goals) + let goals ← getUnsolvedGoals + setGoals (newMVar.mvarId! 
:: goals) -- Return the new value - note: we are in the *new* context, created -- after the declaration was added, so it will persist pure nval -def addDeclSyntax (name : Name) (val : Syntax) (asLet : Bool) : Tactic.TacticM Unit := +def addDeclTacSyntax (name : Name) (val : Syntax) (asLet : Bool) : TacticM Unit := -- I don't think we need that - Lean.Elab.Tactic.withMainContext do + withMainContext do -- - let val ← elabTerm val .none + let val ← Term.elabTerm val .none let type ← inferType val -- In some situations, the type will be left as a metavariable (for instance, -- if the term is `3`, Lean has the choice between `Nat` and `Int` and will -- not choose): we force the instantiation of the meta-variable synthesizeSyntheticMVarsUsingDefault -- - let _ ← addDecl name val type asLet + let _ ← addDeclTac name val type asLet elab "custom_let " n:ident " := " v:term : tactic => do - addDeclSyntax n.getId v (asLet := true) + addDeclTacSyntax n.getId v (asLet := true) elab "custom_have " n:ident " := " v:term : tactic => - addDeclSyntax n.getId v (asLet := false) + addDeclTacSyntax n.getId v (asLet := false) example : Nat := by custom_let x := 4 @@ -213,14 +215,14 @@ example (x : Bool) : Nat := by cases x <;> custom_let x := 3 <;> apply x -- Repeatedly apply a tactic -partial def repeatTac (tac : Tactic.TacticM Unit) : Tactic.TacticM Unit := do +partial def repeatTac (tac : TacticM Unit) : TacticM Unit := do try tac - Tactic.allGoals (Tactic.focus (repeatTac tac)) + allGoals (focus (repeatTac tac)) -- TODO: does this restore the state? catch _ => pure () -def firstTac (tacl : List (Tactic.TacticM Unit)) : Tactic.TacticM Unit := do +def firstTac (tacl : List (TacticM Unit)) : TacticM Unit := do match tacl with | [] => pure () | tac :: tacl => @@ -228,15 +230,216 @@ def firstTac (tacl : List (Tactic.TacticM Unit)) : Tactic.TacticM Unit := do catch _ => firstTac tacl -- Split the goal if it is a conjunction -def splitConjTarget : Tactic.TacticM Unit := do - Tactic.withMainContext do +def splitConjTarget : TacticM Unit := do + withMainContext do let and_intro := Expr.const ``And.intro [] - let mvarIds' ← _root_.Lean.MVarId.apply (← Tactic.getMainGoal) and_intro + let mvarIds' ← _root_.Lean.MVarId.apply (← getMainGoal) and_intro Term.synthesizeSyntheticMVarsNoPostponing - Tactic.replaceMainGoal mvarIds' + replaceMainGoal mvarIds' --- Taken from Lean.Elab.Tactic.evalAssumption -def assumptionTac : Tactic.TacticM Unit := - Tactic.liftMetaTactic fun mvarId => do mvarId.assumption; pure [] +-- Taken from Lean.Elab.evalAssumption +def assumptionTac : TacticM Unit := + liftMetaTactic fun mvarId => do mvarId.assumption; pure [] + +def isConj (e : Expr) : MetaM Bool := + e.withApp fun f args => pure (f.isConstOf ``And ∧ args.size = 2) + +-- Return the first conjunct if the expression is a conjunction, or the +-- expression itself otherwise. Also return the second conjunct if it is a +-- conjunction. +def optSplitConj (e : Expr) : MetaM (Expr × Option Expr) := do + e.withApp fun f args => + if f.isConstOf ``And ∧ args.size = 2 then pure (args.get! 0, some (args.get! 1)) + else pure (e, none) + +-- Destruct an equaliy and return the two sides +def destEq (e : Expr) : MetaM (Expr × Expr) := do + e.withApp fun f args => + if f.isConstOf ``Eq ∧ args.size = 3 then pure (args.get! 1, args.get! 
2) + else throwError "Not an equality: {e}" + +-- Return the set of FVarIds in the expression +partial def getFVarIds (e : Expr) (hs : HashSet FVarId := HashSet.empty) : MetaM (HashSet FVarId) := do + e.withApp fun body args => do + let hs := if body.isFVar then hs.insert body.fvarId! else hs + args.foldlM (fun hs arg => getFVarIds arg hs) hs + +-- Tactic to split on a disjunction. +-- The expression `h` should be an fvar. +-- TODO: there must be simpler. Use use _root_.Lean.MVarId.cases for instance +def splitDisjTac (h : Expr) (kleft kright : TacticM Unit) : TacticM Unit := do + trace[Arith] "assumption on which to split: {h}" + -- Retrieve the main goal + withMainContext do + let goalType ← getMainTarget + let hDecl := (← getLCtx).get! h.fvarId! + let hName := hDecl.userName + -- Case disjunction + let hTy ← inferType h + hTy.withApp fun f xs => do + trace[Arith] "as app: {f} {xs}" + -- Sanity check + if ¬ (f.isConstOf ``Or ∧ xs.size = 2) then throwError "Invalid argument to splitDisjTac" + let a := xs.get! 0 + let b := xs.get! 1 + -- Introduce the new goals + -- Returns: + -- - the match branch + -- - a fresh new mvar id + let mkGoal (hTy : Expr) (nGoalName : String) : MetaM (Expr × MVarId) := do + -- Introduce a variable for the assumption (`a` or `b`). Note that we reuse + -- the name of the assumption we split. + withLocalDeclD hName hTy fun var => do + -- The new goal + let mgoal ← mkFreshExprSyntheticOpaqueMVar goalType (tag := Name.mkSimple nGoalName) + -- Clear the assumption that we split + let mgoal ← mgoal.mvarId!.tryClearMany #[h.fvarId!] + -- The branch expression + let branch ← mkLambdaFVars #[var] (mkMVar mgoal) + pure (branch, mgoal) + let (inl, mleft) ← mkGoal a "left" + let (inr, mright) ← mkGoal b "right" + trace[Arith] "left: {inl}: {mleft}" + trace[Arith] "right: {inr}: {mright}" + -- Create the match expression + withLocalDeclD (← mkFreshUserName `h) hTy fun hVar => do + let motive ← mkLambdaFVars #[hVar] goalType + let casesExpr ← mkAppOptM ``Or.casesOn #[a, b, motive, h, inl, inr] + let mgoal ← getMainGoal + trace[Arith] "goals: {← getUnsolvedGoals}" + trace[Arith] "main goal: {mgoal}" + mgoal.assign casesExpr + let goals ← getUnsolvedGoals + -- Focus on the left + setGoals [mleft] + withMainContext kleft + let leftGoals ← getUnsolvedGoals + -- Focus on the right + setGoals [mright] + withMainContext kright + let rightGoals ← getUnsolvedGoals + -- Put all the goals back + setGoals (leftGoals ++ rightGoals ++ goals) + trace[Arith] "new goals: {← getUnsolvedGoals}" + +elab "split_disj " n:ident : tactic => do + withMainContext do + let decl ← Lean.Meta.getLocalDeclFromUserName n.getId + let fvar := mkFVar decl.fvarId + splitDisjTac fvar (fun _ => pure ()) (fun _ => pure ()) + +example (x y : Int) (h0 : x ≤ y ∨ x ≥ y) : x ≤ y ∨ x ≥ y := by + split_disj h0 + . left; assumption + . right; assumption + + +-- Tactic to split on an exists +def splitExistsTac (h : Expr) (k : Expr → Expr → TacticM α) : TacticM α := do + withMainContext do + let goal ← getMainGoal + let hTy ← inferType h + if isExists hTy then do + let newGoals ← goal.cases h.fvarId! #[] + -- There should be exactly one goal + match newGoals.toList with + | [ newGoal ] => + -- Set the new goal + let goals ← getUnsolvedGoals + setGoals (newGoal.mvarId :: goals) + -- There should be exactly two fields + let fields := newGoal.fields + withMainContext do + k (fields.get! 0) (fields.get! 
1) + | _ => + throwError "Unreachable" + else + throwError "Not a conjunction" + +partial def splitAllExistsTac [Inhabited α] (h : Expr) (k : Expr → TacticM α) : TacticM α := do + try + splitExistsTac h (fun _ body => splitAllExistsTac body k) + catch _ => k h + +-- Tactic to split on a conjunction. +def splitConjTac (h : Expr) (k : Expr → Expr → TacticM α) : TacticM α := do + withMainContext do + let goal ← getMainGoal + let hTy ← inferType h + if ← isConj hTy then do + let newGoals ← goal.cases h.fvarId! #[] + -- There should be exactly one goal + match newGoals.toList with + | [ newGoal ] => + -- Set the new goal + let goals ← getUnsolvedGoals + setGoals (newGoal.mvarId :: goals) + -- There should be exactly two fields + let fields := newGoal.fields + withMainContext do + k (fields.get! 0) (fields.get! 1) + | _ => + throwError "Unreachable" + else + throwError "Not a conjunction" + +elab "split_conj " n:ident : tactic => do + withMainContext do + let decl ← Lean.Meta.getLocalDeclFromUserName n.getId + let fvar := mkFVar decl.fvarId + splitConjTac fvar (fun _ _ => pure ()) + +elab "split_all_exists " n:ident : tactic => do + withMainContext do + let decl ← Lean.Meta.getLocalDeclFromUserName n.getId + let fvar := mkFVar decl.fvarId + splitAllExistsTac fvar (fun _ => pure ()) + +example (h : a ∧ b) : a := by + split_all_exists h + split_conj h + assumption + +example (h : ∃ x y z, x + y + z ≥ 0) : ∃ x, x ≥ 0 := by + split_all_exists h + rename_i x y z h + exists x + y + z + +/- Call the simp tactic. + The initialization of the context is adapted from Tactic.elabSimpArgs. + Something very annoying is that there is no function which allows to + initialize a simp context without doing an elaboration - as a consequence + we write our own here. -/ +def simpAt (declsToUnfold : List Name) (thms : List Name) (hypsToUse : List FVarId) + (loc : Tactic.Location) : + Tactic.TacticM Unit := do + -- Initialize with the builtin simp theorems + let simpThms ← Tactic.simpOnlyBuiltins.foldlM (·.addConst ·) ({} : SimpTheorems) + -- Add the equational theorem for the declarations to unfold + let simpThms ← + declsToUnfold.foldlM (fun thms decl => thms.addDeclToUnfold decl) simpThms + -- Add the hypotheses and the rewriting theorems + let simpThms ← + hypsToUse.foldlM (fun thms fvarId => + -- post: TODO: don't know what that is + -- inv: invert the equality + thms.add (.fvar fvarId) #[] (mkFVar fvarId) (post := false) (inv := false) + -- thms.eraseCore (.fvar fvar) + ) simpThms + -- Add the rewriting theorems to use + let simpThms ← + thms.foldlM (fun thms thmName => do + let info ← getConstInfo thmName + if (← isProp info.type) then + -- post: TODO: don't know what that is + -- inv: invert the equality + thms.addConst thmName (post := false) (inv := false) + else + throwError "Not a proposition: {thmName}" + ) simpThms + let congrTheorems ← getSimpCongrTheorems + let ctx : Simp.Context := { simpTheorems := #[simpThms], congrTheorems } + -- Apply the simplifier + let _ ← Tactic.simpLocation ctx (discharge? 
:= .none) loc end Utils -- cgit v1.2.3 From 59e4a06480b5365f48dc68de80f44841f94094ed Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 12 Jul 2023 15:41:29 +0200 Subject: Improve the handling of arithmetic bounds --- backends/lean/Base/Arith/Arith.lean | 8 +- backends/lean/Base/Primitives.lean | 228 +++++++++++++++--------------- backends/lean/Base/Progress/Progress.lean | 5 +- 3 files changed, 126 insertions(+), 115 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Arith.lean b/backends/lean/Base/Arith/Arith.lean index 3557d350..20420f36 100644 --- a/backends/lean/Base/Arith/Arith.lean +++ b/backends/lean/Base/Arith/Arith.lean @@ -339,13 +339,17 @@ def scalarTac : Tactic.TacticM Unit := do let add (e : Expr) : Tactic.TacticM Unit := do let ty ← inferType e let _ ← Utils.addDeclTac (← mkFreshUserName `h) e ty (asLet := false) - add (← mkAppM ``Scalar.cMin_bound #[.const ``ScalarTy.Usize []]) add (← mkAppM ``Scalar.cMin_bound #[.const ``ScalarTy.Isize []]) add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Usize []]) add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Isize []]) -- Reveal the concrete bounds - TODO: not too sure about that. -- Maybe we should reveal the "concrete" bounds (after normalization) - Utils.simpAt [``Scalar.max, ``Scalar.min, ``Scalar.cMin, ``Scalar.cMax] [] [] .wildcard + Utils.simpAt [``Scalar.min, ``Scalar.max, ``Scalar.cMin, ``Scalar.cMax, + ``I8.min, ``I16.min, ``I32.min, ``I64.min, ``I128.min, + ``I8.max, ``I16.max, ``I32.max, ``I64.max, ``I128.max, + ``U8.min, ``U16.min, ``U32.min, ``U64.min, ``U128.min, + ``U8.max, ``U16.max, ``U32.max, ``U64.max, ``U128.max + ] [] [] .wildcard -- Apply the integer tactic intTac diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index 6210688d..37abdede 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -149,54 +149,78 @@ open System.Platform.getNumBits @[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val -- Remark: Lean seems to use < for the comparisons with the upper bounds by convention. --- We keep the F* convention for now. 
-@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1)) -@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1 -@[simp] def I8.min : Int := - (HPow.hPow 2 7) -@[simp] def I8.max : Int := HPow.hPow 2 7 - 1 -@[simp] def I16.min : Int := - (HPow.hPow 2 15) -@[simp] def I16.max : Int := HPow.hPow 2 15 - 1 -@[simp] def I32.min : Int := -(HPow.hPow 2 31) -@[simp] def I32.max : Int := HPow.hPow 2 31 - 1 -@[simp] def I64.min : Int := -(HPow.hPow 2 63) -@[simp] def I64.max : Int := HPow.hPow 2 63 - 1 -@[simp] def I128.min : Int := -(HPow.hPow 2 127) -@[simp] def I128.max : Int := HPow.hPow 2 127 - 1 -@[simp] def Usize.min : Int := 0 -@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1 -@[simp] def U8.min : Int := 0 -@[simp] def U8.max : Int := HPow.hPow 2 8 - 1 -@[simp] def U16.min : Int := 0 -@[simp] def U16.max : Int := HPow.hPow 2 16 - 1 -@[simp] def U32.min : Int := 0 -@[simp] def U32.max : Int := HPow.hPow 2 32 - 1 -@[simp] def U64.min : Int := 0 -@[simp] def U64.max : Int := HPow.hPow 2 64 - 1 -@[simp] def U128.min : Int := 0 -@[simp] def U128.max : Int := HPow.hPow 2 128 - 1 - --- The normalized bounds -@[simp] def I8.norm_min := -128 -@[simp] def I8.norm_max := 127 -@[simp] def I16.norm_min := -32768 -@[simp] def I16.norm_max := 32767 -@[simp] def I32.norm_min := -2147483648 -@[simp] def I32.norm_max := 2147483647 -@[simp] def I64.norm_min := -9223372036854775808 -@[simp] def I64.norm_max := 9223372036854775807 -@[simp] def I128.norm_min := -170141183460469231731687303715884105728 -@[simp] def I128.norm_max := 170141183460469231731687303715884105727 -@[simp] def U8.norm_min := 0 -@[simp] def U8.norm_max := 255 -@[simp] def U16.norm_min := 0 -@[simp] def U16.norm_max := 65535 -@[simp] def U32.norm_min := 0 -@[simp] def U32.norm_max := 4294967295 -@[simp] def U64.norm_min := 0 -@[simp] def U64.norm_max := 18446744073709551615 -@[simp] def U128.norm_min := 0 -@[simp] def U128.norm_max := 340282366920938463463374607431768211455 - + +-- The "structured" bounds +def Isize.smin : Int := - (HPow.hPow 2 (size_num_bits - 1)) +def Isize.smax : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1 +def I8.smin : Int := - (HPow.hPow 2 7) +def I8.smax : Int := HPow.hPow 2 7 - 1 +def I16.smin : Int := - (HPow.hPow 2 15) +def I16.smax : Int := HPow.hPow 2 15 - 1 +def I32.smin : Int := -(HPow.hPow 2 31) +def I32.smax : Int := HPow.hPow 2 31 - 1 +def I64.smin : Int := -(HPow.hPow 2 63) +def I64.smax : Int := HPow.hPow 2 63 - 1 +def I128.smin : Int := -(HPow.hPow 2 127) +def I128.smax : Int := HPow.hPow 2 127 - 1 +def Usize.smin : Int := 0 +def Usize.smax : Int := HPow.hPow 2 size_num_bits - 1 +def U8.smin : Int := 0 +def U8.smax : Int := HPow.hPow 2 8 - 1 +def U16.smin : Int := 0 +def U16.smax : Int := HPow.hPow 2 16 - 1 +def U32.smin : Int := 0 +def U32.smax : Int := HPow.hPow 2 32 - 1 +def U64.smin : Int := 0 +def U64.smax : Int := HPow.hPow 2 64 - 1 +def U128.smin : Int := 0 +def U128.smax : Int := HPow.hPow 2 128 - 1 + +-- The "normalized" bounds, that we use in practice +def I8.min := -128 +def I8.max := 127 +def I16.min := -32768 +def I16.max := 32767 +def I32.min := -2147483648 +def I32.max := 2147483647 +def I64.min := -9223372036854775808 +def I64.max := 9223372036854775807 +def I128.min := -170141183460469231731687303715884105728 +def I128.max := 170141183460469231731687303715884105727 +@[simp] def U8.min := 0 +def U8.max := 255 +@[simp] def U16.min := 0 +def U16.max := 65535 +@[simp] def U32.min := 0 +def U32.max := 4294967295 +@[simp] def U64.min := 0 +def U64.max := 
18446744073709551615 +@[simp] def U128.min := 0 +def U128.max := 340282366920938463463374607431768211455 +@[simp] def Usize.min := 0 + +def Isize.refined_min : { n:Int // n = I32.min ∨ n = I64.min } := + ⟨ Isize.smin, by + simp [Isize.smin] + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> simp [*] ⟩ + +def Isize.refined_max : { n:Int // n = I32.max ∨ n = I64.max } := + ⟨ Isize.smax, by + simp [Isize.smax] + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> simp [*] ⟩ + +def Usize.refined_max : { n:Int // n = U32.max ∨ n = U64.max } := + ⟨ Usize.smax, by + simp [Usize.smax] + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> simp [*] ⟩ + +def Isize.min := Isize.refined_min.val +def Isize.max := Isize.refined_max.val +def Usize.max := Usize.refined_max.val + inductive ScalarTy := | Isize | I8 @@ -211,6 +235,36 @@ inductive ScalarTy := | U64 | U128 +def Scalar.smin (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.smin + | .I8 => I8.smin + | .I16 => I16.smin + | .I32 => I32.smin + | .I64 => I64.smin + | .I128 => I128.smin + | .Usize => Usize.smin + | .U8 => U8.smin + | .U16 => U16.smin + | .U32 => U32.smin + | .U64 => U64.smin + | .U128 => U128.smin + +def Scalar.smax (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.smax + | .I8 => I8.smax + | .I16 => I16.smax + | .I32 => I32.smax + | .I64 => I64.smax + | .I128 => I128.smax + | .Usize => Usize.smax + | .U8 => U8.smax + | .U16 => U16.smax + | .U32 => U32.smax + | .U64 => U64.smax + | .U128 => U128.smax + def Scalar.min (ty : ScalarTy) : Int := match ty with | .Isize => Isize.min @@ -241,44 +295,10 @@ def Scalar.max (ty : ScalarTy) : Int := | .U64 => U64.max | .U128 => U128.max -@[simp] def Scalar.norm_min (ty : ScalarTy) : Int := - match ty with - -- We can't normalize the bounds for isize/usize - | .Isize => Isize.min - | .Usize => Usize.min - -- - | .I8 => I8.norm_min - | .I16 => I16.norm_min - | .I32 => I32.norm_min - | .I64 => I64.norm_min - | .I128 => I128.norm_min - | .U8 => U8.norm_min - | .U16 => U16.norm_min - | .U32 => U32.norm_min - | .U64 => U64.norm_min - | .U128 => U128.norm_min - -@[simp] def Scalar.norm_max (ty : ScalarTy) : Int := - match ty with - -- We can't normalize the bounds for isize/usize - | .Isize => Isize.max - | .Usize => Usize.max - -- - | .I8 => I8.norm_max - | .I16 => I16.norm_max - | .I32 => I32.norm_max - | .I64 => I64.norm_max - | .I128 => I128.norm_max - | .U8 => U8.norm_max - | .U16 => U16.norm_max - | .U32 => U32.norm_max - | .U64 => U64.norm_max - | .U128 => U128.norm_max - -def Scalar.norm_min_eq (ty : ScalarTy) : Scalar.min ty = Scalar.norm_min ty := by +def Scalar.smin_eq (ty : ScalarTy) : Scalar.min ty = Scalar.smin ty := by cases ty <;> rfl -def Scalar.norm_max_eq (ty : ScalarTy) : Scalar.max ty = Scalar.norm_max ty := by +def Scalar.smax_eq (ty : ScalarTy) : Scalar.max ty = Scalar.smax ty := by cases ty <;> rfl -- "Conservative" bounds @@ -301,30 +321,22 @@ def Scalar.cMax (ty : ScalarTy) : Int := theorem Scalar.cMin_bound ty : Scalar.min ty ≤ Scalar.cMin ty := by cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> - simp [*] + have h := Isize.refined_min.property + cases h <;> simp [*, Isize.min] theorem Scalar.cMax_bound ty : Scalar.cMax ty ≤ Scalar.max ty := by - cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> - cases System.Platform.numBits_eq <;> - 
unfold System.Platform.numBits at * <;> - simp [*] + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * + . have h := Isize.refined_max.property + cases h <;> simp [*, Isize.max] + . have h := Usize.refined_max.property + cases h <;> simp [*, Usize.max] theorem Scalar.cMin_suffices ty (h : Scalar.cMin ty ≤ x) : Scalar.min ty ≤ x := by - cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> - simp [*] at * - -- TODO: I would have expected terms like `-(1 + 1) ^ 63` to be simplified + have := Scalar.cMin_bound ty linarith theorem Scalar.cMax_suffices ty (h : x ≤ Scalar.cMax ty) : x ≤ Scalar.max ty := by - cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * <;> - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> - simp [*] at * <;> - -- TODO: I would have expected terms like `-(1 + 1) ^ 63` to be simplified + have := Scalar.cMax_bound ty linarith structure Scalar (ty : ScalarTy) where @@ -609,7 +621,7 @@ def Vec.new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usiz def Vec.len (α : Type u) (v : Vec α) : Usize := let ⟨ v, l ⟩ := v - Usize.ofIntCore (List.length v) (by simp [Scalar.min]) l + Usize.ofIntCore (List.length v) (by simp [Scalar.min, Usize.min]) l -- This shouldn't be used def Vec.push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () @@ -620,13 +632,9 @@ def Vec.push (α : Type u) (v : Vec α) (x : α) : Result (Vec α) let nlen := List.length v.val + 1 if h : nlen ≤ U32.max || nlen ≤ Usize.max then have h : nlen ≤ Usize.max := by - simp at * - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> - simp [*] at * <;> - try assumption - cases h <;> - linarith + simp [Usize.max] at * + have hm := Usize.refined_max.property + cases h <;> cases hm <;> simp [U32.max, U64.max] at * <;> try linarith return ⟨ List.concat v.val x, by simp at *; assumption ⟩ else fail maximumSizeExceeded @@ -647,7 +655,6 @@ def Vec.insert (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := .ret ⟨ List.set v.val i x, by have h: List.length v.val ≤ Usize.max := v.property simp [*] at * - assumption ⟩ else .fail arrayOutOfBounds @@ -688,7 +695,6 @@ def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec .ret ⟨ List.set v.val i x, by have h: List.length v.val ≤ Usize.max := v.property simp [*] at * - assumption ⟩ else .fail arrayOutOfBounds diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 4c68b3bd..a4df5c96 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -92,9 +92,10 @@ def progressLookupTheorem (asmTac : TacticM Unit) : TacticM Unit := do if ← isConj (← inferType h) then splitConjTac h (fun h _ => k h) else k h - -- Simplify the target by using the equality + -- Simplify the target by using the equality and some monad simplifications splitConj fun h => do - simpAt [] [] [h.fvarId!] (.targets #[] true) + simpAt [] [``Primitives.bind_tc_ret, ``Primitives.bind_tc_fail, ``Primitives.bind_tc_div] + [h.fvarId!] (.targets #[] true) -- Clear the equality let mgoal ← getMainGoal let mgoal ← mgoal.tryClearMany #[h.fvarId!] 
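[Editor's note, not part of the patch series] The case-split pattern this commit adopts
for the platform-dependent bounds can be illustrated with a small standalone sketch; it
mirrors the `Vec.push` proof above and assumes only the definitions introduced here
(`Usize.max`, `U32.max`, `U64.max`, `Usize.refined_max`). The exact `simp`/`linarith`
script is indicative rather than definitive:

example (n : Int) (h : n ≤ U32.max) : n ≤ Usize.max := by
  -- Reveal `Usize.max` as the refined, platform-dependent value
  simp [Usize.max] at *
  -- Case-split on whether that value is the 32-bit or the 64-bit bound,
  -- then expose the concrete constants and let `linarith` conclude
  have hm := Usize.refined_max.property
  cases hm <;> simp [U32.max, U64.max] at * <;> linarith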
-- cgit v1.2.3 From e010c10fb9a1e2d88b52a4f6b4a0865448276013 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 12 Jul 2023 15:58:38 +0200 Subject: Make the `by inlit` implicit --- backends/lean/Base/Primitives.lean | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index 37abdede..0506f4c0 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -356,8 +356,14 @@ def Scalar.ofIntCore {ty : ScalarTy} (x : Int) (hmin : Scalar.min ty ≤ x) (hmax : x ≤ Scalar.max ty) : Scalar ty := { val := x, hmin := hmin, hmax := hmax } +-- Tactic to prove that integers are in bounds +-- TODO: use this: https://leanprover.zulipchat.com/#narrow/stream/270676-lean4/topic/instance.20with.20tactic.20autoparam +syntax "intlit" : tactic +macro_rules + | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices; decide) + def Scalar.ofInt {ty : ScalarTy} (x : Int) - (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty) : Scalar ty := + (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty := by intlit) : Scalar ty := -- Remark: we initially wrote: -- let ⟨ hmin, hmax ⟩ := h -- Scalar.ofIntCore x hmin hmax @@ -573,13 +579,6 @@ instance (ty : ScalarTy) : DecidableEq (Scalar ty) := def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val --- Tactic to prove that integers are in bounds --- TODO: use this: https://leanprover.zulipchat.com/#narrow/stream/270676-lean4/topic/instance.20with.20tactic.20autoparam -syntax "intlit" : tactic - -macro_rules - | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide) - -- -- We now define a type class that subsumes the various machine integer types, so -- -- as to write a concise definition for scalar_cast, rather than exhaustively -- -- enumerating all of the possible pairs. We remark that Rust has sane semantics -- cgit v1.2.3 From eb97bdb6761437e492bcf1a95b4fa43d2b69601b Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 12 Jul 2023 18:04:19 +0200 Subject: Improve progress to use assumptions and start working on a nice syntax --- backends/lean/Base/Arith/Arith.lean | 42 +----------- backends/lean/Base/Diverge/Base.lean | 22 ------ backends/lean/Base/Progress/Base.lean | 2 +- backends/lean/Base/Progress/Progress.lean | 107 +++++++++++++++++++++--------- backends/lean/Base/Utils.lean | 62 +++++++++++++++++ 5 files changed, 140 insertions(+), 95 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Arith.lean b/backends/lean/Base/Arith/Arith.lean index 20420f36..ab4fd182 100644 --- a/backends/lean/Base/Arith/Arith.lean +++ b/backends/lean/Base/Arith/Arith.lean @@ -11,49 +11,9 @@ import Base.Primitives import Base.Utils import Base.Arith.Base -/- -Mathlib tactics: -- rcases: https://leanprover-community.github.io/mathlib_docs/tactics.html#rcases -- split_ifs: https://leanprover-community.github.io/mathlib_docs/tactics.html#split_ifs -- norm_num: https://leanprover-community.github.io/mathlib_docs/tactics.html#norm_num -- should we use linarith or omega? -- hint: https://leanprover-community.github.io/mathlib_docs/tactics.html#hint -- classical: https://leanprover-community.github.io/mathlib_docs/tactics.html#classical --/ - -namespace List - - -- TODO: I could not find this function?? 
- @[simp] def flatten {a : Type u} : List (List a) → List a - | [] => [] - | x :: ls => x ++ flatten ls - -end List - -namespace Lean - -namespace LocalContext - - open Lean Lean.Elab Command Term Lean.Meta - - -- Small utility: return the list of declarations in the context, from - -- the last to the first. - def getAllDecls (lctx : Lean.LocalContext) : MetaM (List Lean.LocalDecl) := - lctx.foldrM (fun d ls => do let d ← instantiateLocalDeclMVars d; pure (d :: ls)) [] - - -- Return the list of declarations in the context, but filter the - -- declarations which are considered as implementation details - def getDecls (lctx : Lean.LocalContext) : MetaM (List Lean.LocalDecl) := do - let ls ← lctx.getAllDecls - pure (ls.filter (fun d => not d.isImplementationDetail)) - -end LocalContext - -end Lean - namespace Arith -open Primitives +open Primitives Utils -- TODO: move? theorem ne_zero_is_lt_or_gt {x : Int} (hne : x ≠ 0) : x < 0 ∨ x > 0 := by diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean index d2c91ff8..0a9ea4c4 100644 --- a/backends/lean/Base/Diverge/Base.lean +++ b/backends/lean/Base/Diverge/Base.lean @@ -6,28 +6,6 @@ import Mathlib.Tactic.Linarith import Base.Primitives -/- -TODO: -- we want an easier to use cases: - - keeps in the goal an equation of the shape: `t = case` - - if called on Prop terms, uses Classical.em - Actually, the cases from mathlib seems already quite powerful - (https://leanprover-community.github.io/mathlib_docs/tactics.html#cases) - For instance: cases h : e - Also: **casesm** -- better split tactic -- we need conversions to operate on the head of applications. - Actually, something like this works: - ``` - conv at Hl => - apply congr_fun - simp [fix_fuel_P] - ``` - Maybe we need a rpt ... ; focus? -- simplifier/rewriter have a strange behavior sometimes --/ - - /- TODO: this is very useful, but is there more? -/ set_option profiler true set_option profiler.threshold 100 diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index 613f38f8..a288d889 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -60,7 +60,7 @@ section Methods trace[Progress] "Theorem: {th}" -- Dive into the quantified variables and the assumptions forallTelescope th fun fvars th => do - trace[Progress] "All argumens: {fvars}" + trace[Progress] "All arguments: {fvars}" /- -- Filter the argumens which are not propositions let rec getFirstPropIdx (i : Nat) : MetaM Nat := do if i ≥ fargs.size then pure i diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index a4df5c96..b0db465d 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -21,24 +21,11 @@ namespace Test #eval pspecAttr.find? ``Primitives.Vec.index end Test -def progressLookupTheorem (asmTac : TacticM Unit) : TacticM Unit := do - withMainContext do - -- Retrieve the goal - let mgoal ← Tactic.getMainGoal - let goalTy ← mgoal.getType - -- Dive into the goal to lookup the theorem - let (fName, fLevels, args) ← do - withPSpec goalTy fun desc => - -- TODO: check that no universally quantified variables in the arguments - pure (desc.fName, desc.fLevels, desc.args) - -- TODO: also try the assumptions - trace[Progress] "Function: {fName}" - -- TODO: use a list of theorems, and try them one by one? - let thName ← do - match ← pspecAttr.find? 
fName with - | none => throwError "Could not find a pspec theorem for {fName}" - | some thName => pure thName - trace[Progress] "Lookuped up: {thName}" +inductive TheoremOrLocal where +| Theorem (thName : Name) +| Local (asm : LocalDecl) + +def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) (asmTac : TacticM Unit) : TacticM Unit := do /- Apply the theorem We try to match the theorem with the goal In order to do so, we introduce meta-variables for all the parameters @@ -48,10 +35,14 @@ def progressLookupTheorem (asmTac : TacticM Unit) : TacticM Unit := do quantified). We also make sure that all the meta variables which appear in the function arguments have been instantiated - -/ + -/ let env ← getEnv - let thDecl := env.constants.find! thName - let thTy := thDecl.type + let thTy ← do + match th with + | .Theorem thName => + let thDecl := env.constants.find! thName + pure thDecl.type + | .Local asmDecl => pure asmDecl.type -- TODO: the tactic fails if we uncomment withNewMCtxDepth -- withNewMCtxDepth do let (mvars, binders, thExBody) ← forallMetaTelescope thTy @@ -63,18 +54,19 @@ def progressLookupTheorem (asmTac : TacticM Unit) : TacticM Unit := do -- There shouldn't be any existential variables in thBody pure thBody -- Match the body with the target - let target := mkAppN (.const fName fLevels) args - trace[Progress] "mvars:\n{mvars.map Expr.mvarId!}" - trace[Progress] "thBody: {thBody}" - trace[Progress] "target: {target}" - let ok ← isDefEq thBody target - if ¬ ok then throwError "Could not unify the theorem with the target:\n- theorem: {thBody}\n- target: {target}" + trace[Progress] "Maching `{thBody}` with `{fnExpr}`" + let ok ← isDefEq thBody fnExpr + if ¬ ok then throwError "Could not unify the theorem with the target:\n- theorem: {thBody}\n- target: {fnExpr}" + let mgoal ← Tactic.getMainGoal postprocessAppMVars `progress mgoal mvars binders true true Term.synthesizeSyntheticMVarsNoPostponing let thBody ← instantiateMVars thBody trace[Progress] "thBody (after instantiation): {thBody}" -- Add the instantiated theorem to the assumptions (we apply it on the metavariables). 
- let th ← mkAppOptM thName (mvars.map some) + let th ← do + match th with + | .Theorem thName => mkAppOptM thName (mvars.map some) + | .Local decl => mkAppOptM' (mkFVar decl.fvarId) (mvars.map some) let asmName ← mkFreshUserName `h let thTy ← inferType th let thAsm ← Utils.addDeclTac asmName th thTy (asLet := false) @@ -112,18 +104,71 @@ def progressLookupTheorem (asmTac : TacticM Unit) : TacticM Unit := do -- pure () -elab "progress" : tactic => do - progressLookupTheorem (firstTac [assumptionTac, Arith.scalarTac]) +-- The array of ids are identifiers to use when introducing fresh variables +def progressAsmsOrLookupTheorem (ids : Array Name) (asmTac : TacticM Unit) : TacticM Unit := do + withMainContext do + -- Retrieve the goal + let mgoal ← Tactic.getMainGoal + let goalTy ← mgoal.getType + -- Dive into the goal to lookup the theorem + let (fName, fLevels, args) ← do + withPSpec goalTy fun desc => + -- TODO: check that no universally quantified variables in the arguments + pure (desc.fName, desc.fLevels, desc.args) + -- TODO: this should be in the pspec desc + let fnExpr := mkAppN (.const fName fLevels) args + trace[Progress] "Function: {fName}" + -- Try all the assumptions one by one and if it fails try to lookup a theorem + let ctx ← Lean.MonadLCtx.getLCtx + let decls ← ctx.getDecls + for decl in decls.reverse do + trace[Progress] "Trying assumption: {decl.userName} : {decl.type}" + try + progressWith fnExpr (.Local decl) ids asmTac + return () + catch _ => continue + -- It failed: try to lookup a theorem + -- TODO: use a list of theorems, and try them one by one? + trace[Progress] "No assumption succeeded: trying to lookup a theorem" + let thName ← do + match ← pspecAttr.find? fName with + | none => throwError "Could not find a pspec theorem for {fName}" + | some thName => pure thName + trace[Progress] "Lookuped up: {thName}" + -- Apply the theorem + progressWith fnExpr (.Theorem thName) ids asmTac + +#check Syntax +syntax progressArgs := ("as" " ⟨ " (ident)+ " ⟩")? + +def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do + let args := args.raw + -- Process the arguments to retrieve the identifiers to use + trace[Progress] "Progressing arguments: {args}" + let args := args.getArgs + let ids := + if args.size > 0 then + let args := (args.get! 0).getArgs + let args := (args.get! 2).getArgs + args.map Syntax.getId + else #[] + trace[Progress] "Ids: {ids}" + --if args[0] ≠ some "as" then throwError "Invalid syntax: should be: `progress as ⟨ ... ⟩`" + progressAsmsOrLookupTheorem ids (firstTac [assumptionTac, Arith.scalarTac]) + +elab "progress" args:progressArgs : tactic => + evalProgress args namespace Test open Primitives set_option trace.Progress true + set_option pp.rawOnError true @[pspec] theorem vec_index_test2 (α : Type u) (v: Vec α) (i: Usize) (h: i.val < v.val.length) : ∃ (x: α), v.index α i = .ret x := by - progress + progress as ⟨ x y z ⟩ simp set_option trace.Progress false diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index 14feb567..505412b9 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -2,6 +2,68 @@ import Lean import Mathlib.Tactic.Core import Mathlib.Tactic.LeftRight +/- +Mathlib tactics: +- rcases: https://leanprover-community.github.io/mathlib_docs/tactics.html#rcases +- split_ifs: https://leanprover-community.github.io/mathlib_docs/tactics.html#split_ifs +- norm_num: https://leanprover-community.github.io/mathlib_docs/tactics.html#norm_num +- should we use linarith or omega? 
+- hint: https://leanprover-community.github.io/mathlib_docs/tactics.html#hint +- classical: https://leanprover-community.github.io/mathlib_docs/tactics.html#classical +-/ + +/- +TODO: +- we want an easier to use cases: + - keeps in the goal an equation of the shape: `t = case` + - if called on Prop terms, uses Classical.em + Actually, the cases from mathlib seems already quite powerful + (https://leanprover-community.github.io/mathlib_docs/tactics.html#cases) + For instance: cases h : e + Also: **casesm** +- better split tactic +- we need conversions to operate on the head of applications. + Actually, something like this works: + ``` + conv at Hl => + apply congr_fun + simp [fix_fuel_P] + ``` + Maybe we need a rpt ... ; focus? +- simplifier/rewriter have a strange behavior sometimes +-/ + + +namespace List + + -- TODO: I could not find this function?? + @[simp] def flatten {a : Type u} : List (List a) → List a + | [] => [] + | x :: ls => x ++ flatten ls + +end List + +namespace Lean + +namespace LocalContext + + open Lean Lean.Elab Command Term Lean.Meta + + -- Small utility: return the list of declarations in the context, from + -- the last to the first. + def getAllDecls (lctx : Lean.LocalContext) : MetaM (List Lean.LocalDecl) := + lctx.foldrM (fun d ls => do let d ← instantiateLocalDeclMVars d; pure (d :: ls)) [] + + -- Return the list of declarations in the context, but filter the + -- declarations which are considered as implementation details + def getDecls (lctx : Lean.LocalContext) : MetaM (List Lean.LocalDecl) := do + let ls ← lctx.getAllDecls + pure (ls.filter (fun d => not d.isImplementationDetail)) + +end LocalContext + +end Lean + namespace Utils open Lean Elab Term Meta Tactic -- cgit v1.2.3 From 6cc0279045d40231f1cce83f0edb7aada1e59d92 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 13 Jul 2023 10:37:16 +0200 Subject: Finish implementing the syntax for `progress` --- backends/lean/Base/Progress/Progress.lean | 98 ++++++++++++++++++++++--------- backends/lean/Base/Utils.lean | 47 +++++++++++---- 2 files changed, 107 insertions(+), 38 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index b0db465d..835dc468 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -25,7 +25,17 @@ inductive TheoremOrLocal where | Theorem (thName : Name) | Local (asm : LocalDecl) -def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) (asmTac : TacticM Unit) : TacticM Unit := do +/- Type to propagate the errors of `progressWith`. + We need this because we use the exceptions to backtrack, when trying to + use the assumptions for instance. When there is actually an error we want + to propagate to the user, we return it. -/ +inductive ProgressError +| Ok +| Error (msg : MessageData) +deriving Inhabited + +def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) + (asmTac : TacticM Unit) : TacticM ProgressError := do /- Apply the theorem We try to match the theorem with the goal In order to do so, we introduce meta-variables for all the parameters @@ -77,32 +87,62 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) (asmTa -- The assumption should be of the shape: -- `∃ x1 ... xn, f args = ... 
∧ ...` -- We introduce the existentially quantified variables and split the top-most - -- conjunction if there is one - splitAllExistsTac thAsm fun h => do - -- Split the conjunction - let splitConj (k : Expr → TacticM Unit) : TacticM Unit := do - if ← isConj (← inferType h) then - splitConjTac h (fun h _ => k h) - else k h - -- Simplify the target by using the equality and some monad simplifications - splitConj fun h => do + -- conjunction if there is one. We use the provided `ids` list to name the + -- introduced variables. + let res ← splitAllExistsTac thAsm ids.toList fun h ids => do + -- Split the conjunctions. + -- For the conjunctions, we split according once to separate the equality `f ... = .ret ...` + -- from the postcondition, if there is, then continue to split the postcondition if there + -- are remaining ids. + let splitEqAndPost (k : Expr → Option Expr → List Name → TacticM ProgressError) : TacticM ProgressError := do + if ← isConj (← inferType h) then do + let hName := (← h.fvarId!.getDecl).userName + let (optId, ids) := listTryPopHead ids + let optIds := match optId with | none => none | some id => some (hName, id) + splitConjTac h optIds (fun hEq hPost => k hEq (some hPost) ids) + else k h none ids + -- Simplify the target by using the equality and some monad simplifications, + -- then continue splitting the post-condition + splitEqAndPost fun hEq hPost ids => do + trace[Progress] "eq and post:\n{hEq} : {← inferType hEq}\n{hPost}" simpAt [] [``Primitives.bind_tc_ret, ``Primitives.bind_tc_fail, ``Primitives.bind_tc_div] - [h.fvarId!] (.targets #[] true) + [hEq.fvarId!] (.targets #[] true) -- Clear the equality let mgoal ← getMainGoal - let mgoal ← mgoal.tryClearMany #[h.fvarId!] + let mgoal ← mgoal.tryClearMany #[hEq.fvarId!] setGoals (mgoal :: (← getUnsolvedGoals)) - -- Update the set of goals - let curGoals ← getUnsolvedGoals - let newGoals := mvars.map Expr.mvarId! - let newGoals ← newGoals.filterM fun mvar => not <$> mvar.isAssigned - trace[Progress] "new goals: {newGoals}" - setGoals newGoals.toList - allGoals asmTac - let newGoals ← getUnsolvedGoals - setGoals (newGoals ++ curGoals) - -- - pure () + trace[Progress] "Goal after splitting eq and post and simplifying the target: {mgoal}" + -- Continue splitting following the ids provided by the user + if ¬ ids.isEmpty then + let hPost ← + match hPost with + | none => do return (.Error m!"Too many ids provided ({ids}): there is no postcondition to split") + | some hPost => pure hPost + let curPostId := (← hPost.fvarId!.getDecl).userName + let rec splitPost (hPost : Expr) (ids : List Name) : TacticM ProgressError := do + match ids with + | [] => pure .Ok -- Stop + | nid :: ids => do + -- Split + if ← isConj hPost then + splitConjTac hPost (some (nid, curPostId)) (λ _ nhPost => splitPost nhPost ids) + else return (.Error m!"Too many ids provided ({nid :: ids}) not enough conjuncts to split in the postcondition") + splitPost hPost ids + else return .Ok + match res with + | .Error _ => return res -- Can we get there? We're using "return" + | .Ok => + -- Update the set of goals + let curGoals ← getUnsolvedGoals + let newGoals := mvars.map Expr.mvarId! 
+ let newGoals ← newGoals.filterM fun mvar => not <$> mvar.isAssigned + trace[Progress] "new goals: {newGoals}" + setGoals newGoals.toList + allGoals asmTac + let newGoals ← getUnsolvedGoals + setGoals (newGoals ++ curGoals) + -- + pure .Ok -- The array of ids are identifiers to use when introducing fresh variables def progressAsmsOrLookupTheorem (ids : Array Name) (asmTac : TacticM Unit) : TacticM Unit := do @@ -124,8 +164,9 @@ def progressAsmsOrLookupTheorem (ids : Array Name) (asmTac : TacticM Unit) : Tac for decl in decls.reverse do trace[Progress] "Trying assumption: {decl.userName} : {decl.type}" try - progressWith fnExpr (.Local decl) ids asmTac - return () + match ← progressWith fnExpr (.Local decl) ids asmTac with + | .Ok => return () + | .Error msg => throwError msg catch _ => continue -- It failed: try to lookup a theorem -- TODO: use a list of theorems, and try them one by one? @@ -136,9 +177,10 @@ def progressAsmsOrLookupTheorem (ids : Array Name) (asmTac : TacticM Unit) : Tac | some thName => pure thName trace[Progress] "Lookuped up: {thName}" -- Apply the theorem - progressWith fnExpr (.Theorem thName) ids asmTac + match ← progressWith fnExpr (.Theorem thName) ids asmTac with + | .Ok => return () + | .Error msg => throwError msg -#check Syntax syntax progressArgs := ("as" " ⟨ " (ident)+ " ⟩")? def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do @@ -168,7 +210,7 @@ namespace Test @[pspec] theorem vec_index_test2 (α : Type u) (v: Vec α) (i: Usize) (h: i.val < v.val.length) : ∃ (x: α), v.index α i = .ret x := by - progress as ⟨ x y z ⟩ + progress as ⟨ x ⟩ simp set_option trace.Progress false diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index 505412b9..599c3a9f 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -396,13 +396,21 @@ example (x y : Int) (h0 : x ≤ y ∨ x ≥ y) : x ≤ y ∨ x ≥ y := by . right; assumption --- Tactic to split on an exists -def splitExistsTac (h : Expr) (k : Expr → Expr → TacticM α) : TacticM α := do +-- Tactic to split on an exists. +-- `h` must be an FVar +def splitExistsTac (h : Expr) (optId : Option Name) (k : Expr → Expr → TacticM α) : TacticM α := do withMainContext do let goal ← getMainGoal let hTy ← inferType h if isExists hTy then do - let newGoals ← goal.cases h.fvarId! #[] + -- Try to use the user-provided names + let altVarNames ← + match optId with + | none => pure #[] + | some id => do + let hDecl ← h.fvarId!.getDecl + pure #[{ varNames := [id, hDecl.userName] }] + let newGoals ← goal.cases h.fvarId! altVarNames -- There should be exactly one goal match newGoals.toList with | [ newGoal ] => @@ -418,18 +426,37 @@ def splitExistsTac (h : Expr) (k : Expr → Expr → TacticM α) : TacticM α := else throwError "Not a conjunction" -partial def splitAllExistsTac [Inhabited α] (h : Expr) (k : Expr → TacticM α) : TacticM α := do +-- TODO: move +def listTryPopHead (ls : List α) : Option α × List α := + match ls with + | [] => (none, ls) + | hd :: tl => (some hd, tl) + +/- Destruct all the existentials appearing in `h`, and introduce them as variables + in the context. + + If `ids` is not empty, we use it to name the introduced variables. We + transmit the stripped expression and the remaining ids to the continuation. 
+ -/ +partial def splitAllExistsTac [Inhabited α] (h : Expr) (ids : List Name) (k : Expr → List Name → TacticM α) : TacticM α := do try - splitExistsTac h (fun _ body => splitAllExistsTac body k) - catch _ => k h + let (optId, ids) := listTryPopHead ids + splitExistsTac h optId (fun _ body => splitAllExistsTac body ids k) + catch _ => k h ids -- Tactic to split on a conjunction. -def splitConjTac (h : Expr) (k : Expr → Expr → TacticM α) : TacticM α := do +def splitConjTac (h : Expr) (optIds : Option (Name × Name)) (k : Expr → Expr → TacticM α) : TacticM α := do withMainContext do let goal ← getMainGoal let hTy ← inferType h if ← isConj hTy then do - let newGoals ← goal.cases h.fvarId! #[] + -- Try to use the user-provided names + let altVarNames ← + match optIds with + | none => pure #[] + | some (id0, id1) => do + pure #[{ varNames := [id0, id1] }] + let newGoals ← goal.cases h.fvarId! altVarNames -- There should be exactly one goal match newGoals.toList with | [ newGoal ] => @@ -449,13 +476,13 @@ elab "split_conj " n:ident : tactic => do withMainContext do let decl ← Lean.Meta.getLocalDeclFromUserName n.getId let fvar := mkFVar decl.fvarId - splitConjTac fvar (fun _ _ => pure ()) + splitConjTac fvar none (fun _ _ => pure ()) elab "split_all_exists " n:ident : tactic => do withMainContext do let decl ← Lean.Meta.getLocalDeclFromUserName n.getId let fvar := mkFVar decl.fvarId - splitAllExistsTac fvar (fun _ => pure ()) + splitAllExistsTac fvar [] (fun _ _ => pure ()) example (h : a ∧ b) : a := by split_all_exists h -- cgit v1.2.3 From 2dbd529b499c2bb9dae754df0e449cad577ac7a0 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 13 Jul 2023 14:00:11 +0200 Subject: Add IList.lean --- backends/hol4/ilistScript.sml | 3 + backends/lean/Base/Arith/Arith.lean | 136 +++++++++++++++++++----------------- backends/lean/Base/Arith/Base.lean | 40 +++++++++++ backends/lean/Base/IList.lean | 127 +++++++++++++++++++++++++++++++++ 4 files changed, 242 insertions(+), 64 deletions(-) create mode 100644 backends/lean/Base/IList.lean (limited to 'backends') diff --git a/backends/hol4/ilistScript.sml b/backends/hol4/ilistScript.sml index fb0c7688..2b465af3 100644 --- a/backends/hol4/ilistScript.sml +++ b/backends/hol4/ilistScript.sml @@ -23,6 +23,8 @@ val _ = BasicProvers.export_rewrites ["len_def"] Remark: we initially added the following case, so that we wouldn't need the premise [i < len ls] is [index_eq_EL]: “index (i : int) [] = EL (Num i) []” + + TODO: this can be simplified. See the Lean backend. *) val index_def = Define ‘ index (i : int) (x :: ls) = if i = 0 then x else (if 0 < i then index (i - 1) ls else ARB) @@ -44,6 +46,7 @@ Proof exfalso >> cooper_tac QED +(* TODO: this can be simplified. See the Lean backend. *) val update_def = Define ‘ update ([] : 'a list) (i : int) (y : 'a) : 'a list = [] ∧ diff --git a/backends/lean/Base/Arith/Arith.lean b/backends/lean/Base/Arith/Arith.lean index ab4fd182..2ff030fe 100644 --- a/backends/lean/Base/Arith/Arith.lean +++ b/backends/lean/Base/Arith/Arith.lean @@ -15,25 +15,6 @@ namespace Arith open Primitives Utils --- TODO: move? -theorem ne_zero_is_lt_or_gt {x : Int} (hne : x ≠ 0) : x < 0 ∨ x > 0 := by - cases h: x <;> simp_all - . rename_i n; - cases n <;> simp_all - . apply Int.negSucc_lt_zero - --- TODO: move? 
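-- (Editor's note, not part of the original patch: the two `TODO: move?` lemmas removed from
-- Arith/Arith.lean in this hunk are not dropped; the same commit re-adds them to the new
-- Base/Arith/Base.lean file further down.)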
-theorem ne_is_lt_or_gt {x y : Int} (hne : x ≠ y) : x < y ∨ x > y := by - have hne : x - y ≠ 0 := by - simp - intro h - have: x = y := by linarith - simp_all - have h := ne_zero_is_lt_or_gt hne - match h with - | .inl _ => left; linarith - | .inr _ => right; linarith - -- TODO: move instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val @@ -48,17 +29,21 @@ instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val -/ def Scalar.toInt {ty : ScalarTy} (x : Scalar ty) : Int := x.val --- Remark: I tried a version of the shape `HasProp {a : Type} (x : a)` +-- Remark: I tried a version of the shape `HasScalarProp {a : Type} (x : a)` -- but the lookup didn't work -class HasProp (a : Sort u) where +class HasScalarProp (a : Sort u) where + prop_ty : a → Prop + prop : ∀ x:a, prop_ty x + +class HasIntProp (a : Sort u) where prop_ty : a → Prop prop : ∀ x:a, prop_ty x -instance (ty : ScalarTy) : HasProp (Scalar ty) where +instance (ty : ScalarTy) : HasScalarProp (Scalar ty) where -- prop_ty is inferred prop := λ x => And.intro x.hmin x.hmax -instance (a : Type) : HasProp (Vec a) where +instance (a : Type) : HasScalarProp (Vec a) where prop_ty := λ v => v.val.length ≤ Scalar.max ScalarTy.Usize prop := λ ⟨ _, l ⟩ => l @@ -117,37 +102,49 @@ def collectInstancesFromMainCtx (k : Expr → MetaM (Option Expr)) : Tactic.Tact let decls ← ctx.getDecls decls.foldlM (fun hs d => collectInstances k hs d.toExpr) hs --- Return an instance of `HasProp` for `e` if it has some -def lookupHasProp (e : Expr) : MetaM (Option Expr) := do - trace[Arith] "lookupHasProp" +-- Helper +def lookupProp (fName : String) (className : Name) (e : Expr) : MetaM (Option Expr) := do + trace[Arith] fName -- TODO: do we need Lean.observing? -- This actually eliminates the error messages Lean.observing? 
do - trace[Arith] "lookupHasProp: observing" + trace[Arith] m!"{fName}: observing" let ty ← Lean.Meta.inferType e - let hasProp ← mkAppM ``HasProp #[ty] + let hasProp ← mkAppM className #[ty] let hasPropInst ← trySynthInstance hasProp match hasPropInst with | LOption.some i => - trace[Arith] "Found HasProp instance" + trace[Arith] "Found HasScalarProp instance" let i_prop ← mkProjection i (Name.mkSimple "prop") some (← mkAppM' i_prop #[e]) | _ => none --- Collect the instances of `HasProp` for the subexpressions in the context -def collectHasPropInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do - collectInstancesFromMainCtx lookupHasProp +-- Return an instance of `HasIntProp` for `e` if it has some +def lookupHasIntProp (e : Expr) : MetaM (Option Expr) := + lookupProp "lookupHasScalarProp" ``HasIntProp e + +-- Return an instance of `HasScalarProp` for `e` if it has some +def lookupHasScalarProp (e : Expr) : MetaM (Option Expr) := + lookupProp "lookupHasScalarProp" ``HasScalarProp e + +-- Collect the instances of `HasIntProp` for the subexpressions in the context +def collectHasIntPropInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do + collectInstancesFromMainCtx lookupHasIntProp + +-- Collect the instances of `HasScalarProp` for the subexpressions in the context +def collectHasScalarPropInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do + collectInstancesFromMainCtx lookupHasScalarProp elab "display_has_prop_instances" : tactic => do - trace[Arith] "Displaying the HasProp instances" - let hs ← collectHasPropInstancesFromMainCtx + trace[Arith] "Displaying the HasScalarProp instances" + let hs ← collectHasScalarPropInstancesFromMainCtx hs.forM fun e => do - trace[Arith] "+ HasProp instance: {e}" + trace[Arith] "+ HasScalarProp instance: {e}" example (x : U32) : True := by - let i : HasProp U32 := inferInstance - have p := @HasProp.prop _ i x - simp only [HasProp.prop_ty] at p + let i : HasScalarProp U32 := inferInstance + have p := @HasScalarProp.prop _ i x + simp only [HasScalarProp.prop_ty] at p display_has_prop_instances simp @@ -196,14 +193,18 @@ def introInstances (declToUnfold : Name) (lookup : Expr → MetaM (Option Expr)) -- Return the new value pure nval -def introHasPropInstances : Tactic.TacticM (Array Expr) := do - trace[Arith] "Introducing the HasProp instances" - introInstances ``HasProp.prop_ty lookupHasProp +def introHasIntPropInstances : Tactic.TacticM (Array Expr) := do + trace[Arith] "Introducing the HasIntProp instances" + introInstances ``HasIntProp.prop_ty lookupHasIntProp + +def introHasScalarPropInstances : Tactic.TacticM (Array Expr) := do + trace[Arith] "Introducing the HasScalarProp instances" + introInstances ``HasScalarProp.prop_ty lookupHasScalarProp --- Lookup the instances of `HasProp for all the sub-expressions in the context, +-- Lookup the instances of `HasScalarProp for all the sub-expressions in the context, -- and introduce the corresponding assumptions elab "intro_has_prop_instances" : tactic => do - let _ ← introHasPropInstances + let _ ← introHasScalarPropInstances example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by intro_has_prop_instances @@ -246,6 +247,7 @@ def intTacPreprocess : Tactic.TacticM Unit := do let k := splitOnAsms asms Utils.splitDisjTac asm k k -- Introduce + let _ ← introHasIntPropInstances let asms ← introInstances ``PropHasImp.concl lookupPropHasImp -- Split splitOnAsms asms.toList @@ -289,29 +291,35 @@ example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : example (x y : Int) (h0: 0 
≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y ∧ x + y ≥ 2 := by int_tac +def scalarTacPreprocess (tac : Tactic.TacticM Unit) : Tactic.TacticM Unit := do + Tactic.withMainContext do + -- Introduce the scalar bounds + let _ ← introHasScalarPropInstances + Tactic.allGoals do + -- Inroduce the bounds for the isize/usize types + let add (e : Expr) : Tactic.TacticM Unit := do + let ty ← inferType e + let _ ← Utils.addDeclTac (← mkFreshUserName `h) e ty (asLet := false) + add (← mkAppM ``Scalar.cMin_bound #[.const ``ScalarTy.Isize []]) + add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Usize []]) + add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Isize []]) + -- Reveal the concrete bounds - TODO: not too sure about that. + -- Maybe we should reveal the "concrete" bounds (after normalization) + Utils.simpAt [``Scalar.min, ``Scalar.max, ``Scalar.cMin, ``Scalar.cMax, + ``I8.min, ``I16.min, ``I32.min, ``I64.min, ``I128.min, + ``I8.max, ``I16.max, ``I32.max, ``I64.max, ``I128.max, + ``U8.min, ``U16.min, ``U32.min, ``U64.min, ``U128.min, + ``U8.max, ``U16.max, ``U32.max, ``U64.max, ``U128.max + ] [] [] .wildcard + -- Finish the proof + tac + +elab "scalar_tac_preprocess" : tactic => + scalarTacPreprocess intTacPreprocess + -- A tactic to solve linear arithmetic goals in the presence of scalars def scalarTac : Tactic.TacticM Unit := do - Tactic.withMainContext do - -- Introduce the scalar bounds - let _ ← introHasPropInstances - Tactic.allGoals do - -- Inroduce the bounds for the isize/usize types - let add (e : Expr) : Tactic.TacticM Unit := do - let ty ← inferType e - let _ ← Utils.addDeclTac (← mkFreshUserName `h) e ty (asLet := false) - add (← mkAppM ``Scalar.cMin_bound #[.const ``ScalarTy.Isize []]) - add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Usize []]) - add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Isize []]) - -- Reveal the concrete bounds - TODO: not too sure about that. - -- Maybe we should reveal the "concrete" bounds (after normalization) - Utils.simpAt [``Scalar.min, ``Scalar.max, ``Scalar.cMin, ``Scalar.cMax, - ``I8.min, ``I16.min, ``I32.min, ``I64.min, ``I128.min, - ``I8.max, ``I16.max, ``I32.max, ``I64.max, ``I128.max, - ``U8.min, ``U16.min, ``U32.min, ``U64.min, ``U128.min, - ``U8.max, ``U16.max, ``U32.max, ``U64.max, ``U128.max - ] [] [] .wildcard - -- Apply the integer tactic - intTac + scalarTacPreprocess intTac elab "scalar_tac" : tactic => scalarTac diff --git a/backends/lean/Base/Arith/Base.lean b/backends/lean/Base/Arith/Base.lean index ddd2dc24..a6e59b74 100644 --- a/backends/lean/Base/Arith/Base.lean +++ b/backends/lean/Base/Arith/Base.lean @@ -1,4 +1,6 @@ import Lean +import Std.Data.Int.Lemmas +import Mathlib.Tactic.Linarith namespace Arith @@ -7,4 +9,42 @@ open Lean Elab Term Meta -- We can't define and use trace classes in the same file initialize registerTraceClass `Arith +-- TODO: move? +theorem ne_zero_is_lt_or_gt {x : Int} (hne : x ≠ 0) : x < 0 ∨ x > 0 := by + cases h: x <;> simp_all + . rename_i n; + cases n <;> simp_all + . apply Int.negSucc_lt_zero + +-- TODO: move? 
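-- (Editor's illustration, not part of the original patch: a minimal sketch of how the lemma
-- `ne_zero_is_lt_or_gt` above is typically consumed, splitting a disequality with 0 into two
-- cases that `linarith` can close.)
example (x : Int) (h0 : 0 ≤ x) (h1 : x ≠ 0) : 0 < x := by
  cases ne_zero_is_lt_or_gt h1 with
  | inl hlt => linarith
  | inr hgt => linarith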
+theorem ne_is_lt_or_gt {x y : Int} (hne : x ≠ y) : x < y ∨ x > y := by + have hne : x - y ≠ 0 := by + simp + intro h + have: x = y := by linarith + simp_all + have h := ne_zero_is_lt_or_gt hne + match h with + | .inl _ => left; linarith + | .inr _ => right; linarith + + +/- Induction over positive integers -/ +-- TODO: move +theorem int_pos_ind (p : Int → Prop) : + (zero:p 0) → (pos:∀ i, 0 ≤ i → p i → p (i + 1)) → ∀ i, 0 ≤ i → p i := by + intro h0 hr i hpos +-- have heq : Int.toNat i = i := by +-- cases i <;> simp_all + have ⟨ n, heq ⟩ : {n:Nat // n = i } := ⟨ Int.toNat i, by cases i <;> simp_all ⟩ + revert i + induction n + . intro i hpos heq + cases i <;> simp_all + . rename_i n hi + intro i hpos heq + cases i <;> simp_all + rename_i m + cases m <;> simp_all + end Arith diff --git a/backends/lean/Base/IList.lean b/backends/lean/Base/IList.lean new file mode 100644 index 00000000..7e764d63 --- /dev/null +++ b/backends/lean/Base/IList.lean @@ -0,0 +1,127 @@ +/- Complementary list functions and lemmas which operate on integers rather + than natural numbers. -/ + +import Std.Data.Int.Lemmas +import Mathlib.Tactic.Linarith +import Base.Arith + +namespace List + +#check List.get +def len (ls : List α) : Int := + match ls with + | [] => 0 + | _ :: tl => 1 + len tl + +-- Remark: if i < 0, then the result is none +def optIndex (i : Int) (ls : List α) : Option α := + match ls with + | [] => none + | hd :: tl => if i = 0 then some hd else optIndex (i - 1) tl + +-- Remark: if i < 0, then the result is the defaul element +def index [Inhabited α] (i : Int) (ls : List α) : α := + match ls with + | [] => Inhabited.default + | x :: tl => + if i = 0 then x else index (i - 1) tl + +-- Remark: the list is unchanged if the index is not in bounds (in particular +-- if it is < 0) +def update (ls : List α) (i : Int) (y : α) : List α := + match ls with + | [] => [] + | x :: tl => if i = 0 then y :: tl else x :: update tl (i - 1) y + +-- Remark: the whole list is dropped if the index is not in bounds (in particular +-- if it is < 0) +def idrop (i : Int) (ls : List α) : List α := + match ls with + | [] => [] + | x :: tl => if i = 0 then x :: tl else idrop (i - 1) tl + +@[simp] theorem len_nil : len ([] : List α) = 0 := by simp [len] +@[simp] theorem len_cons : len ((x :: tl) : List α) = 1 + len tl := by simp [len] + +@[simp] theorem index_zero_cons [Inhabited α] : index 0 ((x :: tl) : List α) = x := by simp [index] +@[simp] theorem index_nzero_cons [Inhabited α] (hne : i ≠ 0) : index i ((x :: tl) : List α) = index (i - 1) tl := by simp [*, index] + +@[simp] theorem update_nil : update ([] : List α) i y = [] := by simp [update] +@[simp] theorem update_zero_cons : update ((x :: tl) : List α) 0 y = y :: tl := by simp [update] +@[simp] theorem update_nzero_cons (hne : i ≠ 0) : update ((x :: tl) : List α) i y = x :: update tl (i - 1) y := by simp [*, update] + +@[simp] theorem idrop_nil : idrop i ([] : List α) = [] := by simp [idrop] +@[simp] theorem idrop_zero : idrop 0 (ls : List α) = ls := by cases ls <;> simp [idrop] +@[simp] theorem idrop_nzero_cons (hne : i ≠ 0) : idrop i ((x :: tl) : List α) = idrop (i - 1) tl := by simp [*, idrop] + +theorem len_eq_length (ls : List α) : ls.len = ls.length := by + induction ls + . rfl + . 
simp [*, Int.ofNat_succ, Int.add_comm] + +theorem len_pos : 0 ≤ (ls : List α).len := by + induction ls <;> simp [*] + linarith + +instance (a : Type u) : Arith.HasIntProp (List a) where + prop_ty := λ ls => 0 ≤ ls.len + prop := λ ls => ls.len_pos + +@[simp] theorem len_append (l1 l2 : List α) : (l1 ++ l2).len = l1.len + l2.len := by + -- Remark: simp loops here because of the following rewritings: + -- @Nat.cast_add: ↑(List.length l1 + List.length l2) ==> ↑(List.length l1) + ↑(List.length l2) + -- Int.ofNat_add_ofNat: ↑(List.length l1) + ↑(List.length l2) ==> ↑(List.length l1 + List.length l2) + -- TODO: post an issue? + simp only [len_eq_length] + simp only [length_append] + simp only [Int.ofNat_add] + +theorem left_length_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l1.length = l1'.length) : + l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by + revert l1' + induction l1 + . intro l1'; cases l1' <;> simp [*] + . intro l1'; cases l1' <;> simp_all; tauto + +theorem right_length_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l2.length = l2'.length) : + l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by + have := left_length_eq_append_eq l1 l2 l1' l2' + constructor <;> intro heq2 <;> + have : l1.length + l2.length = l1'.length + l2'.length := by + have : (l1 ++ l2).length = (l1' ++ l2').length := by simp [*] + simp only [length_append] at this + apply this + . simp [heq] at this + tauto + . tauto + +theorem left_len_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l1.len = l1'.len) : + l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by + simp [len_eq_length] at heq + apply left_length_eq_append_eq + assumption + +theorem right_len_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l2.len = l2'.len) : + l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by + simp [len_eq_length] at heq + apply right_length_eq_append_eq + assumption + +open Arith in +theorem idrop_eq_nil_of_le (hineq : ls.len ≤ i) : idrop i ls = [] := by + revert i + induction ls <;> simp [*] + rename_i hd tl hi + intro i hineq + if heq: i = 0 then + simp [*] at * + have := tl.len_pos + linarith + else + simp at hineq + have : 0 < i := by int_tac + simp [*] + apply hi + linarith + +end List -- cgit v1.2.3 From a9a3376443e4c6d9a5257bdd310966a59aa9e716 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 13 Jul 2023 14:00:48 +0200 Subject: Update a comment --- backends/lean/Base/Arith/Arith.lean | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Arith.lean b/backends/lean/Base/Arith/Arith.lean index 2ff030fe..8bfad6ae 100644 --- a/backends/lean/Base/Arith/Arith.lean +++ b/backends/lean/Base/Arith/Arith.lean @@ -303,8 +303,7 @@ def scalarTacPreprocess (tac : Tactic.TacticM Unit) : Tactic.TacticM Unit := do add (← mkAppM ``Scalar.cMin_bound #[.const ``ScalarTy.Isize []]) add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Usize []]) add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Isize []]) - -- Reveal the concrete bounds - TODO: not too sure about that. 
- -- Maybe we should reveal the "concrete" bounds (after normalization) + -- Reveal the concrete bounds Utils.simpAt [``Scalar.min, ``Scalar.max, ``Scalar.cMin, ``Scalar.cMax, ``I8.min, ``I16.min, ``I32.min, ``I64.min, ``I128.min, ``I8.max, ``I16.max, ``I32.max, ``I64.max, ``I128.max, -- cgit v1.2.3 From 4f7ebc2358d78d31d63a609a32e5a732b82d468e Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 17 Jul 2023 12:12:34 +0200 Subject: Update the lean dependencies and update IList --- backends/lean/Base.lean | 1 + backends/lean/Base/IList.lean | 1 - backends/lean/Base/Progress/Progress.lean | 3 --- backends/lean/lake-manifest.json | 14 ++++++++++---- backends/lean/lean-toolchain | 2 +- 5 files changed, 12 insertions(+), 9 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base.lean b/backends/lean/Base.lean index 51211704..2077d410 100644 --- a/backends/lean/Base.lean +++ b/backends/lean/Base.lean @@ -3,3 +3,4 @@ import Base.Primitives import Base.Diverge import Base.Arith import Base.Progress +import Base.IList diff --git a/backends/lean/Base/IList.lean b/backends/lean/Base/IList.lean index 7e764d63..3db00cbb 100644 --- a/backends/lean/Base/IList.lean +++ b/backends/lean/Base/IList.lean @@ -7,7 +7,6 @@ import Base.Arith namespace List -#check List.get def len (ls : List α) : Int := match ls with | [] => 0 diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 835dc468..35a3c25a 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -15,7 +15,6 @@ namespace Test @[pspec] theorem vec_index_test (α : Type u) (v: Vec α) (i: Usize) (h: i.val < v.val.length) : ∃ x, v.index α i = .ret x := by - apply sorry #eval pspecAttr.find? ``Primitives.Vec.index @@ -195,7 +194,6 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do args.map Syntax.getId else #[] trace[Progress] "Ids: {ids}" - --if args[0] ≠ some "as" then throwError "Invalid syntax: should be: `progress as ⟨ ... 
⟩`" progressAsmsOrLookupTheorem ids (firstTac [assumptionTac, Arith.scalarTac]) elab "progress" args:progressArgs : tactic => @@ -205,7 +203,6 @@ namespace Test open Primitives set_option trace.Progress true - set_option pp.rawOnError true @[pspec] theorem vec_index_test2 (α : Type u) (v: Vec α) (i: Usize) (h: i.val < v.val.length) : diff --git a/backends/lean/lake-manifest.json b/backends/lean/lake-manifest.json index f4759ad3..5a089838 100644 --- a/backends/lean/lake-manifest.json +++ b/backends/lean/lake-manifest.json @@ -7,27 +7,33 @@ "rev": "c43db94a8f495dad37829e9d7ad65483d68c86b8", "name": "proofwidgets", "inputRev?": "v0.0.11"}}, + {"git": + {"url": "https://github.com/mhuisi/lean4-cli.git", + "subDir?": null, + "rev": "5a858c32963b6b19be0d477a30a1f4b6c120be7e", + "name": "Cli", + "inputRev?": "nightly"}}, {"git": {"url": "https://github.com/leanprover-community/mathlib4.git", "subDir?": null, - "rev": "cc5d11f24e1b92db65ec3389bb5142f4b2d7670e", + "rev": "fa05951a270fef2873666c46f138e90338cd48d6", "name": "mathlib", "inputRev?": null}}, {"git": {"url": "https://github.com/gebner/quote4", "subDir?": null, - "rev": "c71f94e34c1cda52eef5c93dc9da409ab2727420", + "rev": "c0d9516f44d07feee01c1103c8f2f7c24a822b55", "name": "Qq", "inputRev?": "master"}}, {"git": {"url": "https://github.com/JLimperg/aesop", "subDir?": null, - "rev": "ca73109cc40837bc61df8024c9016da4b4f99d4c", + "rev": "f04538ab6ad07642368cf11d2702acc0a9b4bcee", "name": "aesop", "inputRev?": "master"}}, {"git": {"url": "https://github.com/leanprover/std4", "subDir?": null, - "rev": "e68aa8f5fe47aad78987df45f99094afbcb5e936", + "rev": "dff883c55395438ae2a5c65ad5ddba084b600feb", "name": "std", "inputRev?": "main"}}]} diff --git a/backends/lean/lean-toolchain b/backends/lean/lean-toolchain index 42e7d786..334c5053 100644 --- a/backends/lean/lean-toolchain +++ b/backends/lean/lean-toolchain @@ -1 +1 @@ -leanprover/lean4:nightly-2023-06-20 \ No newline at end of file +leanprover/lean4:nightly-2023-07-12 \ No newline at end of file -- cgit v1.2.3 From d45c6ed9e8049b81170c3e6950043d08006ba9f2 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 17 Jul 2023 12:14:03 +0200 Subject: Move a definition --- backends/lean/Base/Arith/Arith.lean | 3 --- backends/lean/Base/Primitives.lean | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Arith.lean b/backends/lean/Base/Arith/Arith.lean index 8bfad6ae..da263e86 100644 --- a/backends/lean/Base/Arith/Arith.lean +++ b/backends/lean/Base/Arith/Arith.lean @@ -15,9 +15,6 @@ namespace Arith open Primitives Utils --- TODO: move -instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val - -- TODO: move /- Remark: we can't write the following instance because of restrictions about the type class parameters (`ty` doesn't appear in the return type, which is diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index 0506f4c0..1a0c665d 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -616,6 +616,9 @@ def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val def Vec (α : Type u) := { l : List α // List.length l ≤ Usize.max } +-- TODO: do we really need it? 
It should be with Subtype by default +instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val + def Vec.new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ def Vec.len (α : Type u) (v : Vec α) : Usize := -- cgit v1.2.3 From 3e8060b5501ec83940a4309389a68898df26ebd0 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 17 Jul 2023 23:37:31 +0200 Subject: Reorganize the Lean backend --- backends/lean/Base/Arith.lean | 3 +- backends/lean/Base/Arith/Arith.lean | 329 -------------- backends/lean/Base/Arith/Int.lean | 236 ++++++++++ backends/lean/Base/Arith/Scalar.lean | 48 ++ backends/lean/Base/IList.lean | 127 +----- backends/lean/Base/IList/IList.lean | 142 ++++++ backends/lean/Base/Primitives.lean | 718 +----------------------------- backends/lean/Base/Primitives/Base.lean | 130 ++++++ backends/lean/Base/Primitives/Scalar.lean | 507 +++++++++++++++++++++ backends/lean/Base/Primitives/Vec.lean | 113 +++++ backends/lean/Base/Progress/Progress.lean | 2 + 11 files changed, 1184 insertions(+), 1171 deletions(-) create mode 100644 backends/lean/Base/Arith/Int.lean create mode 100644 backends/lean/Base/Arith/Scalar.lean create mode 100644 backends/lean/Base/IList/IList.lean create mode 100644 backends/lean/Base/Primitives/Base.lean create mode 100644 backends/lean/Base/Primitives/Scalar.lean create mode 100644 backends/lean/Base/Primitives/Vec.lean (limited to 'backends') diff --git a/backends/lean/Base/Arith.lean b/backends/lean/Base/Arith.lean index fd5698c5..c0d09fd2 100644 --- a/backends/lean/Base/Arith.lean +++ b/backends/lean/Base/Arith.lean @@ -1 +1,2 @@ -import Base.Arith.Arith +import Base.Arith.Int +import Base.Arith.Scalar diff --git a/backends/lean/Base/Arith/Arith.lean b/backends/lean/Base/Arith/Arith.lean index da263e86..e69de29b 100644 --- a/backends/lean/Base/Arith/Arith.lean +++ b/backends/lean/Base/Arith/Arith.lean @@ -1,329 +0,0 @@ -/- This file contains tactics to solve arithmetic goals -/ - -import Lean -import Lean.Meta.Tactic.Simp -import Init.Data.List.Basic -import Mathlib.Tactic.RunCmd -import Mathlib.Tactic.Linarith --- TODO: there is no Omega tactic for now - it seems it hasn't been ported yet ---import Mathlib.Tactic.Omega -import Base.Primitives -import Base.Utils -import Base.Arith.Base - -namespace Arith - -open Primitives Utils - --- TODO: move -/- Remark: we can't write the following instance because of restrictions about - the type class parameters (`ty` doesn't appear in the return type, which is - forbidden): - - ``` - instance Scalar.cast (ty : ScalarTy) : Coe (Scalar ty) Int where coe := λ v => v.val - ``` - -/ -def Scalar.toInt {ty : ScalarTy} (x : Scalar ty) : Int := x.val - --- Remark: I tried a version of the shape `HasScalarProp {a : Type} (x : a)` --- but the lookup didn't work -class HasScalarProp (a : Sort u) where - prop_ty : a → Prop - prop : ∀ x:a, prop_ty x - -class HasIntProp (a : Sort u) where - prop_ty : a → Prop - prop : ∀ x:a, prop_ty x - -instance (ty : ScalarTy) : HasScalarProp (Scalar ty) where - -- prop_ty is inferred - prop := λ x => And.intro x.hmin x.hmax - -instance (a : Type) : HasScalarProp (Vec a) where - prop_ty := λ v => v.val.length ≤ Scalar.max ScalarTy.Usize - prop := λ ⟨ _, l ⟩ => l - -class PropHasImp (x : Prop) where - concl : Prop - prop : x → concl - --- This also works for `x ≠ y` because this expression reduces to `¬ x = y` --- and `Ne` is marked as `reducible` -instance (x y : Int) : PropHasImp (¬ x = y) where - concl := x < y ∨ x > y - prop := λ (h:x ≠ y) => ne_is_lt_or_gt h - 
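-- (Editor's note, not part of the original patch: this commit empties Arith/Arith.lean; most of
-- the definitions deleted in this hunk reappear in the new Base/Arith/Int.lean and
-- Base/Arith/Scalar.lean files created by the same commit and shown further down.)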
-open Lean Lean.Elab Command Term Lean.Meta - --- Small utility: print all the declarations in the context -elab "print_all_decls" : tactic => do - let ctx ← Lean.MonadLCtx.getLCtx - for decl in ← ctx.getDecls do - let ty ← Lean.Meta.inferType decl.toExpr - logInfo m!"{decl.toExpr} : {ty}" - pure () - --- Explore a term by decomposing the applications (we explore the applied --- functions and their arguments, but ignore lambdas, forall, etc. - --- should we go inside?). -partial def foldTermApps (k : α → Expr → MetaM α) (s : α) (e : Expr) : MetaM α := do - -- We do it in a very simpler manner: we deconstruct applications, - -- and recursively explore the sub-expressions. Note that we do - -- not go inside foralls and abstractions (should we?). - e.withApp fun f args => do - let s ← k s f - args.foldlM (foldTermApps k) s - --- Provided a function `k` which lookups type class instances on an expression, --- collect all the instances lookuped by applying `k` on the sub-expressions of `e`. -def collectInstances - (k : Expr → MetaM (Option Expr)) (s : HashSet Expr) (e : Expr) : MetaM (HashSet Expr) := do - let k s e := do - match ← k e with - | none => pure s - | some i => pure (s.insert i) - foldTermApps k s e - --- Similar to `collectInstances`, but explores all the local declarations in the --- main context. -def collectInstancesFromMainCtx (k : Expr → MetaM (Option Expr)) : Tactic.TacticM (HashSet Expr) := do - Tactic.withMainContext do - -- Get the local context - let ctx ← Lean.MonadLCtx.getLCtx - -- Just a matter of precaution - let ctx ← instantiateLCtxMVars ctx - -- Initialize the hashset - let hs := HashSet.empty - -- Explore the declarations - let decls ← ctx.getDecls - decls.foldlM (fun hs d => collectInstances k hs d.toExpr) hs - --- Helper -def lookupProp (fName : String) (className : Name) (e : Expr) : MetaM (Option Expr) := do - trace[Arith] fName - -- TODO: do we need Lean.observing? - -- This actually eliminates the error messages - Lean.observing? 
do - trace[Arith] m!"{fName}: observing" - let ty ← Lean.Meta.inferType e - let hasProp ← mkAppM className #[ty] - let hasPropInst ← trySynthInstance hasProp - match hasPropInst with - | LOption.some i => - trace[Arith] "Found HasScalarProp instance" - let i_prop ← mkProjection i (Name.mkSimple "prop") - some (← mkAppM' i_prop #[e]) - | _ => none - --- Return an instance of `HasIntProp` for `e` if it has some -def lookupHasIntProp (e : Expr) : MetaM (Option Expr) := - lookupProp "lookupHasScalarProp" ``HasIntProp e - --- Return an instance of `HasScalarProp` for `e` if it has some -def lookupHasScalarProp (e : Expr) : MetaM (Option Expr) := - lookupProp "lookupHasScalarProp" ``HasScalarProp e - --- Collect the instances of `HasIntProp` for the subexpressions in the context -def collectHasIntPropInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do - collectInstancesFromMainCtx lookupHasIntProp - --- Collect the instances of `HasScalarProp` for the subexpressions in the context -def collectHasScalarPropInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do - collectInstancesFromMainCtx lookupHasScalarProp - -elab "display_has_prop_instances" : tactic => do - trace[Arith] "Displaying the HasScalarProp instances" - let hs ← collectHasScalarPropInstancesFromMainCtx - hs.forM fun e => do - trace[Arith] "+ HasScalarProp instance: {e}" - -example (x : U32) : True := by - let i : HasScalarProp U32 := inferInstance - have p := @HasScalarProp.prop _ i x - simp only [HasScalarProp.prop_ty] at p - display_has_prop_instances - simp - --- Return an instance of `PropHasImp` for `e` if it has some -def lookupPropHasImp (e : Expr) : MetaM (Option Expr) := do - trace[Arith] "lookupPropHasImp" - -- TODO: do we need Lean.observing? - -- This actually eliminates the error messages - Lean.observing? do - trace[Arith] "lookupPropHasImp: observing" - let ty ← Lean.Meta.inferType e - trace[Arith] "lookupPropHasImp: ty: {ty}" - let cl ← mkAppM ``PropHasImp #[ty] - let inst ← trySynthInstance cl - match inst with - | LOption.some i => - trace[Arith] "Found PropHasImp instance" - let i_prop ← mkProjection i (Name.mkSimple "prop") - some (← mkAppM' i_prop #[e]) - | _ => none - --- Collect the instances of `PropHasImp` for the subexpressions in the context -def collectPropHasImpInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do - collectInstancesFromMainCtx lookupPropHasImp - -elab "display_prop_has_imp_instances" : tactic => do - trace[Arith] "Displaying the PropHasImp instances" - let hs ← collectPropHasImpInstancesFromMainCtx - hs.forM fun e => do - trace[Arith] "+ PropHasImp instance: {e}" - -example (x y : Int) (_ : x ≠ y) (_ : ¬ x = y) : True := by - display_prop_has_imp_instances - simp - --- Lookup instances in a context and introduce them with additional declarations. 
-def introInstances (declToUnfold : Name) (lookup : Expr → MetaM (Option Expr)) : Tactic.TacticM (Array Expr) := do - let hs ← collectInstancesFromMainCtx lookup - hs.toArray.mapM fun e => do - let type ← inferType e - let name ← mkFreshUserName `h - -- Add a declaration - let nval ← Utils.addDeclTac name e type (asLet := false) - -- Simplify to unfold the declaration to unfold (i.e., the projector) - Utils.simpAt [declToUnfold] [] [] (Tactic.Location.targets #[mkIdent name] false) - -- Return the new value - pure nval - -def introHasIntPropInstances : Tactic.TacticM (Array Expr) := do - trace[Arith] "Introducing the HasIntProp instances" - introInstances ``HasIntProp.prop_ty lookupHasIntProp - -def introHasScalarPropInstances : Tactic.TacticM (Array Expr) := do - trace[Arith] "Introducing the HasScalarProp instances" - introInstances ``HasScalarProp.prop_ty lookupHasScalarProp - --- Lookup the instances of `HasScalarProp for all the sub-expressions in the context, --- and introduce the corresponding assumptions -elab "intro_has_prop_instances" : tactic => do - let _ ← introHasScalarPropInstances - -example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by - intro_has_prop_instances - simp [*] - -example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by - intro_has_prop_instances - simp_all [Scalar.max, Scalar.min] - --- Lookup the instances of `PropHasImp for all the sub-expressions in the context, --- and introduce the corresponding assumptions -elab "intro_prop_has_imp_instances" : tactic => do - trace[Arith] "Introducing the PropHasImp instances" - let _ ← introInstances ``PropHasImp.concl lookupPropHasImp - -example (x y : Int) (h0 : x ≤ y) (h1 : x ≠ y) : x < y := by - intro_prop_has_imp_instances - rename_i h - split_disj h - . linarith - . linarith - -/- Boosting a bit the linarith tac. - - We do the following: - - for all the assumptions of the shape `(x : Int) ≠ y` or `¬ (x = y), we - introduce two goals with the assumptions `x < y` and `x > y` - TODO: we could create a PR for mathlib. - -/ -def intTacPreprocess : Tactic.TacticM Unit := do - Tactic.withMainContext do - -- Lookup the instances of PropHasImp (this is how we detect assumptions - -- of the proper shape), introduce assumptions in the context and split - -- on those - -- TODO: get rid of the assumptions that we split - let rec splitOnAsms (asms : List Expr) : Tactic.TacticM Unit := - match asms with - | [] => pure () - | asm :: asms => - let k := splitOnAsms asms - Utils.splitDisjTac asm k k - -- Introduce - let _ ← introHasIntPropInstances - let asms ← introInstances ``PropHasImp.concl lookupPropHasImp - -- Split - splitOnAsms asms.toList - -elab "int_tac_preprocess" : tactic => - intTacPreprocess - -def intTac : Tactic.TacticM Unit := do - Tactic.withMainContext do - Tactic.focus do - -- Preprocess - wondering if we should do this before or after splitting - -- the goal. I think before leads to a smaller proof term? 
- Tactic.allGoals intTacPreprocess - -- Split the conjunctions in the goal - Utils.repeatTac Utils.splitConjTarget - -- Call linarith - let linarith := - let cfg : Linarith.LinarithConfig := { - -- We do this with our custom preprocessing - splitNe := false - } - Tactic.liftMetaFinishingTactic <| Linarith.linarith false [] cfg - Tactic.allGoals linarith - -elab "int_tac" : tactic => - intTac - -example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by - int_tac_preprocess - linarith - linarith - -example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by - int_tac - --- Checking that things append correctly when there are several disjunctions -example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y := by - int_tac - --- Checking that things append correctly when there are several disjunctions -example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y ∧ x + y ≥ 2 := by - int_tac - -def scalarTacPreprocess (tac : Tactic.TacticM Unit) : Tactic.TacticM Unit := do - Tactic.withMainContext do - -- Introduce the scalar bounds - let _ ← introHasScalarPropInstances - Tactic.allGoals do - -- Inroduce the bounds for the isize/usize types - let add (e : Expr) : Tactic.TacticM Unit := do - let ty ← inferType e - let _ ← Utils.addDeclTac (← mkFreshUserName `h) e ty (asLet := false) - add (← mkAppM ``Scalar.cMin_bound #[.const ``ScalarTy.Isize []]) - add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Usize []]) - add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Isize []]) - -- Reveal the concrete bounds - Utils.simpAt [``Scalar.min, ``Scalar.max, ``Scalar.cMin, ``Scalar.cMax, - ``I8.min, ``I16.min, ``I32.min, ``I64.min, ``I128.min, - ``I8.max, ``I16.max, ``I32.max, ``I64.max, ``I128.max, - ``U8.min, ``U16.min, ``U32.min, ``U64.min, ``U128.min, - ``U8.max, ``U16.max, ``U32.max, ``U64.max, ``U128.max - ] [] [] .wildcard - -- Finish the proof - tac - -elab "scalar_tac_preprocess" : tactic => - scalarTacPreprocess intTacPreprocess - --- A tactic to solve linear arithmetic goals in the presence of scalars -def scalarTac : Tactic.TacticM Unit := do - scalarTacPreprocess intTac - -elab "scalar_tac" : tactic => - scalarTac - -example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by - scalar_tac - -example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by - scalar_tac - -end Arith diff --git a/backends/lean/Base/Arith/Int.lean b/backends/lean/Base/Arith/Int.lean new file mode 100644 index 00000000..5f00ab52 --- /dev/null +++ b/backends/lean/Base/Arith/Int.lean @@ -0,0 +1,236 @@ +/- This file contains tactics to solve arithmetic goals -/ + +import Lean +import Lean.Meta.Tactic.Simp +import Init.Data.List.Basic +import Mathlib.Tactic.RunCmd +import Mathlib.Tactic.Linarith +-- TODO: there is no Omega tactic for now - it seems it hasn't been ported yet +--import Mathlib.Tactic.Omega +import Base.Utils +import Base.Arith.Base + +namespace Arith + +open Utils + +-- Remark: I tried a version of the shape `HasScalarProp {a : Type} (x : a)` +-- but the lookup didn't work +class HasIntProp (a : Sort u) where + prop_ty : a → Prop + prop : ∀ x:a, prop_ty x + +class PropHasImp (x : Prop) where + concl : Prop + prop : x → concl + +-- This also works for `x ≠ y` because this expression reduces to `¬ x = y` +-- and `Ne` is marked as `reducible` +instance (x y : Int) : PropHasImp (¬ x = y) where + concl := x < y ∨ x > y + prop := λ (h:x ≠ y) => ne_is_lt_or_gt h + +open Lean Lean.Elab Lean.Meta + +-- Small utility: print all the declarations in 
the context +elab "print_all_decls" : tactic => do + let ctx ← Lean.MonadLCtx.getLCtx + for decl in ← ctx.getDecls do + let ty ← Lean.Meta.inferType decl.toExpr + logInfo m!"{decl.toExpr} : {ty}" + pure () + +-- Explore a term by decomposing the applications (we explore the applied +-- functions and their arguments, but ignore lambdas, forall, etc. - +-- should we go inside?). +partial def foldTermApps (k : α → Expr → MetaM α) (s : α) (e : Expr) : MetaM α := do + -- We do it in a very simpler manner: we deconstruct applications, + -- and recursively explore the sub-expressions. Note that we do + -- not go inside foralls and abstractions (should we?). + e.withApp fun f args => do + let s ← k s f + args.foldlM (foldTermApps k) s + +-- Provided a function `k` which lookups type class instances on an expression, +-- collect all the instances lookuped by applying `k` on the sub-expressions of `e`. +def collectInstances + (k : Expr → MetaM (Option Expr)) (s : HashSet Expr) (e : Expr) : MetaM (HashSet Expr) := do + let k s e := do + match ← k e with + | none => pure s + | some i => pure (s.insert i) + foldTermApps k s e + +-- Similar to `collectInstances`, but explores all the local declarations in the +-- main context. +def collectInstancesFromMainCtx (k : Expr → MetaM (Option Expr)) : Tactic.TacticM (HashSet Expr) := do + Tactic.withMainContext do + -- Get the local context + let ctx ← Lean.MonadLCtx.getLCtx + -- Just a matter of precaution + let ctx ← instantiateLCtxMVars ctx + -- Initialize the hashset + let hs := HashSet.empty + -- Explore the declarations + let decls ← ctx.getDecls + decls.foldlM (fun hs d => collectInstances k hs d.toExpr) hs + +-- Helper +def lookupProp (fName : String) (className : Name) (e : Expr) : MetaM (Option Expr) := do + trace[Arith] fName + -- TODO: do we need Lean.observing? + -- This actually eliminates the error messages + Lean.observing? do + trace[Arith] m!"{fName}: observing" + let ty ← Lean.Meta.inferType e + let hasProp ← mkAppM className #[ty] + let hasPropInst ← trySynthInstance hasProp + match hasPropInst with + | LOption.some i => + trace[Arith] "Found {fName} instance" + let i_prop ← mkProjection i (Name.mkSimple "prop") + some (← mkAppM' i_prop #[e]) + | _ => none + +-- Return an instance of `HasIntProp` for `e` if it has some +def lookupHasIntProp (e : Expr) : MetaM (Option Expr) := + lookupProp "lookupHasIntProp" ``HasIntProp e + +-- Collect the instances of `HasIntProp` for the subexpressions in the context +def collectHasIntPropInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do + collectInstancesFromMainCtx lookupHasIntProp + +-- Return an instance of `PropHasImp` for `e` if it has some +def lookupPropHasImp (e : Expr) : MetaM (Option Expr) := do + trace[Arith] "lookupPropHasImp" + -- TODO: do we need Lean.observing? + -- This actually eliminates the error messages + Lean.observing? 
do + trace[Arith] "lookupPropHasImp: observing" + let ty ← Lean.Meta.inferType e + trace[Arith] "lookupPropHasImp: ty: {ty}" + let cl ← mkAppM ``PropHasImp #[ty] + let inst ← trySynthInstance cl + match inst with + | LOption.some i => + trace[Arith] "Found PropHasImp instance" + let i_prop ← mkProjection i (Name.mkSimple "prop") + some (← mkAppM' i_prop #[e]) + | _ => none + +-- Collect the instances of `PropHasImp` for the subexpressions in the context +def collectPropHasImpInstancesFromMainCtx : Tactic.TacticM (HashSet Expr) := do + collectInstancesFromMainCtx lookupPropHasImp + +elab "display_prop_has_imp_instances" : tactic => do + trace[Arith] "Displaying the PropHasImp instances" + let hs ← collectPropHasImpInstancesFromMainCtx + hs.forM fun e => do + trace[Arith] "+ PropHasImp instance: {e}" + +example (x y : Int) (_ : x ≠ y) (_ : ¬ x = y) : True := by + display_prop_has_imp_instances + simp + +-- Lookup instances in a context and introduce them with additional declarations. +def introInstances (declToUnfold : Name) (lookup : Expr → MetaM (Option Expr)) : Tactic.TacticM (Array Expr) := do + let hs ← collectInstancesFromMainCtx lookup + hs.toArray.mapM fun e => do + let type ← inferType e + let name ← mkFreshUserName `h + -- Add a declaration + let nval ← Utils.addDeclTac name e type (asLet := false) + -- Simplify to unfold the declaration to unfold (i.e., the projector) + Utils.simpAt [declToUnfold] [] [] (Tactic.Location.targets #[mkIdent name] false) + -- Return the new value + pure nval + +def introHasIntPropInstances : Tactic.TacticM (Array Expr) := do + trace[Arith] "Introducing the HasIntProp instances" + introInstances ``HasIntProp.prop_ty lookupHasIntProp + +-- Lookup the instances of `HasIntProp for all the sub-expressions in the context, +-- and introduce the corresponding assumptions +elab "intro_has_int_prop_instances" : tactic => do + let _ ← introHasIntPropInstances + +-- Lookup the instances of `PropHasImp for all the sub-expressions in the context, +-- and introduce the corresponding assumptions +elab "intro_prop_has_imp_instances" : tactic => do + trace[Arith] "Introducing the PropHasImp instances" + let _ ← introInstances ``PropHasImp.concl lookupPropHasImp + +example (x y : Int) (h0 : x ≤ y) (h1 : x ≠ y) : x < y := by + intro_prop_has_imp_instances + rename_i h + split_disj h + . linarith + . linarith + +/- Boosting a bit the linarith tac. + + We do the following: + - for all the assumptions of the shape `(x : Int) ≠ y` or `¬ (x = y), we + introduce two goals with the assumptions `x < y` and `x > y` + TODO: we could create a PR for mathlib. 
+ -/
+def intTacPreprocess (extraPreprocess : Tactic.TacticM Unit) : Tactic.TacticM Unit := do
+  Tactic.withMainContext do
+  -- Lookup the instances of PropHasImp (this is how we detect assumptions
+  -- of the proper shape), introduce assumptions in the context and split
+  -- on those
+  -- TODO: get rid of the assumptions that we split
+  let rec splitOnAsms (asms : List Expr) : Tactic.TacticM Unit :=
+    match asms with
+    | [] => pure ()
+    | asm :: asms =>
+      let k := splitOnAsms asms
+      Utils.splitDisjTac asm k k
+  -- Introduce the `HasIntProp` instances
+  let _ ← introHasIntPropInstances
+  -- Extra preprocessing, before we split on the disjunctions
+  extraPreprocess
+  -- Split
+  let asms ← introInstances ``PropHasImp.concl lookupPropHasImp
+  splitOnAsms asms.toList
+
+elab "int_tac_preprocess" : tactic =>
+  intTacPreprocess (do pure ())
+
+def intTac (extraPreprocess : Tactic.TacticM Unit) : Tactic.TacticM Unit := do
+  Tactic.withMainContext do
+  Tactic.focus do
+  -- Preprocess - wondering if we should do this before or after splitting
+  -- the goal. I think before leads to a smaller proof term?
+  Tactic.allGoals (intTacPreprocess extraPreprocess)
+  -- Split the conjunctions in the goal
+  Tactic.allGoals (Utils.repeatTac Utils.splitConjTarget)
+  -- Call linarith
+  let linarith :=
+    let cfg : Linarith.LinarithConfig := {
+      -- We do this with our custom preprocessing
+      splitNe := false
+    }
+    Tactic.liftMetaFinishingTactic <| Linarith.linarith false [] cfg
+  Tactic.allGoals linarith
+
+elab "int_tac" : tactic =>
+  intTac (do pure ())
+
+example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by
+  int_tac_preprocess
+  linarith
+  linarith
+
+example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by
+  int_tac
+
+-- Checking that things work correctly when there are several disjunctions
+example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y := by
+  int_tac
+
+-- Checking that things work correctly when there are several disjunctions
+example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y ∧ x + y ≥ 2 := by
+  int_tac
+
+end Arith
diff --git a/backends/lean/Base/Arith/Scalar.lean b/backends/lean/Base/Arith/Scalar.lean
new file mode 100644
index 00000000..f8903ecf
--- /dev/null
+++ b/backends/lean/Base/Arith/Scalar.lean
@@ -0,0 +1,48 @@
+import Base.Arith.Int
+import Base.Primitives.Scalar
+
+/- Automation for scalars - TODO: not sure it is worth having two files (Int.lean and Scalar.lean) -/
+namespace Arith
+
+open Lean Lean.Elab Lean.Meta
+open Primitives
+
+def scalarTacExtraPreprocess : Tactic.TacticM Unit := do
+  Tactic.withMainContext do
+  -- Introduce the bounds for the isize/usize types
+  let add (e : Expr) : Tactic.TacticM Unit := do
+    let ty ← inferType e
+    let _ ← Utils.addDeclTac (← mkFreshUserName `h) e ty (asLet := false)
+  add (← mkAppM ``Scalar.cMin_bound #[.const ``ScalarTy.Isize []])
+  add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Usize []])
+  add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Isize []])
+  -- Reveal the concrete bounds
+  Utils.simpAt [``Scalar.min, ``Scalar.max, ``Scalar.cMin, ``Scalar.cMax,
+                ``I8.min, ``I16.min, ``I32.min, ``I64.min, ``I128.min,
+                ``I8.max, ``I16.max, ``I32.max, ``I64.max, ``I128.max,
+                ``U8.min, ``U16.min, ``U32.min, ``U64.min, ``U128.min,
+                ``U8.max, ``U16.max, ``U32.max, ``U64.max, ``U128.max
+               ] [] [] .wildcard
+
+elab "scalar_tac_preprocess" : tactic =>
+  intTacPreprocess scalarTacExtraPreprocess
+
+-- A tactic to solve linear arithmetic goals in the presence of scalars
+def
scalarTac : Tactic.TacticM Unit := do + intTac scalarTacExtraPreprocess + +elab "scalar_tac" : tactic => + scalarTac + +instance (ty : ScalarTy) : HasIntProp (Scalar ty) where + -- prop_ty is inferred + prop := λ x => And.intro x.hmin x.hmax + +example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by + intro_has_int_prop_instances + simp [*] + +example (x y : U32) : x.val ≤ Scalar.max ScalarTy.U32 := by + scalar_tac + +end Arith diff --git a/backends/lean/Base/IList.lean b/backends/lean/Base/IList.lean index 3db00cbb..31b66ffa 100644 --- a/backends/lean/Base/IList.lean +++ b/backends/lean/Base/IList.lean @@ -1,126 +1 @@ -/- Complementary list functions and lemmas which operate on integers rather - than natural numbers. -/ - -import Std.Data.Int.Lemmas -import Mathlib.Tactic.Linarith -import Base.Arith - -namespace List - -def len (ls : List α) : Int := - match ls with - | [] => 0 - | _ :: tl => 1 + len tl - --- Remark: if i < 0, then the result is none -def optIndex (i : Int) (ls : List α) : Option α := - match ls with - | [] => none - | hd :: tl => if i = 0 then some hd else optIndex (i - 1) tl - --- Remark: if i < 0, then the result is the defaul element -def index [Inhabited α] (i : Int) (ls : List α) : α := - match ls with - | [] => Inhabited.default - | x :: tl => - if i = 0 then x else index (i - 1) tl - --- Remark: the list is unchanged if the index is not in bounds (in particular --- if it is < 0) -def update (ls : List α) (i : Int) (y : α) : List α := - match ls with - | [] => [] - | x :: tl => if i = 0 then y :: tl else x :: update tl (i - 1) y - --- Remark: the whole list is dropped if the index is not in bounds (in particular --- if it is < 0) -def idrop (i : Int) (ls : List α) : List α := - match ls with - | [] => [] - | x :: tl => if i = 0 then x :: tl else idrop (i - 1) tl - -@[simp] theorem len_nil : len ([] : List α) = 0 := by simp [len] -@[simp] theorem len_cons : len ((x :: tl) : List α) = 1 + len tl := by simp [len] - -@[simp] theorem index_zero_cons [Inhabited α] : index 0 ((x :: tl) : List α) = x := by simp [index] -@[simp] theorem index_nzero_cons [Inhabited α] (hne : i ≠ 0) : index i ((x :: tl) : List α) = index (i - 1) tl := by simp [*, index] - -@[simp] theorem update_nil : update ([] : List α) i y = [] := by simp [update] -@[simp] theorem update_zero_cons : update ((x :: tl) : List α) 0 y = y :: tl := by simp [update] -@[simp] theorem update_nzero_cons (hne : i ≠ 0) : update ((x :: tl) : List α) i y = x :: update tl (i - 1) y := by simp [*, update] - -@[simp] theorem idrop_nil : idrop i ([] : List α) = [] := by simp [idrop] -@[simp] theorem idrop_zero : idrop 0 (ls : List α) = ls := by cases ls <;> simp [idrop] -@[simp] theorem idrop_nzero_cons (hne : i ≠ 0) : idrop i ((x :: tl) : List α) = idrop (i - 1) tl := by simp [*, idrop] - -theorem len_eq_length (ls : List α) : ls.len = ls.length := by - induction ls - . rfl - . simp [*, Int.ofNat_succ, Int.add_comm] - -theorem len_pos : 0 ≤ (ls : List α).len := by - induction ls <;> simp [*] - linarith - -instance (a : Type u) : Arith.HasIntProp (List a) where - prop_ty := λ ls => 0 ≤ ls.len - prop := λ ls => ls.len_pos - -@[simp] theorem len_append (l1 l2 : List α) : (l1 ++ l2).len = l1.len + l2.len := by - -- Remark: simp loops here because of the following rewritings: - -- @Nat.cast_add: ↑(List.length l1 + List.length l2) ==> ↑(List.length l1) + ↑(List.length l2) - -- Int.ofNat_add_ofNat: ↑(List.length l1) + ↑(List.length l2) ==> ↑(List.length l1 + List.length l2) - -- TODO: post an issue? 
- simp only [len_eq_length] - simp only [length_append] - simp only [Int.ofNat_add] - -theorem left_length_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l1.length = l1'.length) : - l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by - revert l1' - induction l1 - . intro l1'; cases l1' <;> simp [*] - . intro l1'; cases l1' <;> simp_all; tauto - -theorem right_length_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l2.length = l2'.length) : - l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by - have := left_length_eq_append_eq l1 l2 l1' l2' - constructor <;> intro heq2 <;> - have : l1.length + l2.length = l1'.length + l2'.length := by - have : (l1 ++ l2).length = (l1' ++ l2').length := by simp [*] - simp only [length_append] at this - apply this - . simp [heq] at this - tauto - . tauto - -theorem left_len_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l1.len = l1'.len) : - l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by - simp [len_eq_length] at heq - apply left_length_eq_append_eq - assumption - -theorem right_len_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l2.len = l2'.len) : - l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by - simp [len_eq_length] at heq - apply right_length_eq_append_eq - assumption - -open Arith in -theorem idrop_eq_nil_of_le (hineq : ls.len ≤ i) : idrop i ls = [] := by - revert i - induction ls <;> simp [*] - rename_i hd tl hi - intro i hineq - if heq: i = 0 then - simp [*] at * - have := tl.len_pos - linarith - else - simp at hineq - have : 0 < i := by int_tac - simp [*] - apply hi - linarith - -end List +import Base.IList.IList diff --git a/backends/lean/Base/IList/IList.lean b/backends/lean/Base/IList/IList.lean new file mode 100644 index 00000000..2a335cac --- /dev/null +++ b/backends/lean/Base/IList/IList.lean @@ -0,0 +1,142 @@ +/- Complementary list functions and lemmas which operate on integers rather + than natural numbers. 
-/ + +import Std.Data.Int.Lemmas +import Base.Arith + +namespace List + +def len (ls : List α) : Int := + match ls with + | [] => 0 + | _ :: tl => 1 + len tl + +-- Remark: if i < 0, then the result is none +def indexOpt (ls : List α) (i : Int) : Option α := + match ls with + | [] => none + | hd :: tl => if i = 0 then some hd else indexOpt tl (i - 1) + +-- Remark: if i < 0, then the result is the defaul element +def index [Inhabited α] (ls : List α) (i : Int) : α := + match ls with + | [] => Inhabited.default + | x :: tl => + if i = 0 then x else index tl (i - 1) + +-- Remark: the list is unchanged if the index is not in bounds (in particular +-- if it is < 0) +def update (ls : List α) (i : Int) (y : α) : List α := + match ls with + | [] => [] + | x :: tl => if i = 0 then y :: tl else x :: update tl (i - 1) y + +-- Remark: the whole list is dropped if the index is not in bounds (in particular +-- if it is < 0) +def idrop (i : Int) (ls : List α) : List α := + match ls with + | [] => [] + | x :: tl => if i = 0 then x :: tl else idrop (i - 1) tl + +section Lemmas + +variable {α : Type u} + +@[simp] theorem len_nil : len ([] : List α) = 0 := by simp [len] +@[simp] theorem len_cons : len ((x :: tl) : List α) = 1 + len tl := by simp [len] + +@[simp] theorem index_zero_cons [Inhabited α] : index ((x :: tl) : List α) 0 = x := by simp [index] +@[simp] theorem index_nzero_cons [Inhabited α] (hne : i ≠ 0) : index ((x :: tl) : List α) i = index tl (i - 1) := by simp [*, index] + +@[simp] theorem update_nil : update ([] : List α) i y = [] := by simp [update] +@[simp] theorem update_zero_cons : update ((x :: tl) : List α) 0 y = y :: tl := by simp [update] +@[simp] theorem update_nzero_cons (hne : i ≠ 0) : update ((x :: tl) : List α) i y = x :: update tl (i - 1) y := by simp [*, update] + +@[simp] theorem idrop_nil : idrop i ([] : List α) = [] := by simp [idrop] +@[simp] theorem idrop_zero : idrop 0 (ls : List α) = ls := by cases ls <;> simp [idrop] +@[simp] theorem idrop_nzero_cons (hne : i ≠ 0) : idrop i ((x :: tl) : List α) = idrop (i - 1) tl := by simp [*, idrop] + +theorem len_eq_length (ls : List α) : ls.len = ls.length := by + induction ls + . rfl + . simp [*, Int.ofNat_succ, Int.add_comm] + +@[simp] theorem len_append (l1 l2 : List α) : (l1 ++ l2).len = l1.len + l2.len := by + -- Remark: simp loops here because of the following rewritings: + -- @Nat.cast_add: ↑(List.length l1 + List.length l2) ==> ↑(List.length l1) + ↑(List.length l2) + -- Int.ofNat_add_ofNat: ↑(List.length l1) + ↑(List.length l2) ==> ↑(List.length l1 + List.length l2) + -- TODO: post an issue? + simp only [len_eq_length] + simp only [length_append] + simp only [Int.ofNat_add] + +@[simp] +theorem length_update (ls : List α) (i : Int) (x : α) : (ls.update i x).length = ls.length := by + revert i + induction ls <;> simp_all [length, update] + intro; split <;> simp [*] + +@[simp] +theorem len_update (ls : List α) (i : Int) (x : α) : (ls.update i x).len = ls.len := by + simp [len_eq_length] + + +theorem len_pos : 0 ≤ (ls : List α).len := by + induction ls <;> simp [*] + linarith + +instance (a : Type u) : Arith.HasIntProp (List a) where + prop_ty := λ ls => 0 ≤ ls.len + prop := λ ls => ls.len_pos + +theorem left_length_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l1.length = l1'.length) : + l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by + revert l1' + induction l1 + . intro l1'; cases l1' <;> simp [*] + . 
intro l1'; cases l1' <;> simp_all; tauto + +theorem right_length_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l2.length = l2'.length) : + l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by + have := left_length_eq_append_eq l1 l2 l1' l2' + constructor <;> intro heq2 <;> + have : l1.length + l2.length = l1'.length + l2'.length := by + have : (l1 ++ l2).length = (l1' ++ l2').length := by simp [*] + simp only [length_append] at this + apply this + . simp [heq] at this + tauto + . tauto + +theorem left_len_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l1.len = l1'.len) : + l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by + simp [len_eq_length] at heq + apply left_length_eq_append_eq + assumption + +theorem right_len_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l2.len = l2'.len) : + l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by + simp [len_eq_length] at heq + apply right_length_eq_append_eq + assumption + +open Arith in +theorem idrop_eq_nil_of_le (hineq : ls.len ≤ i) : idrop i ls = [] := by + revert i + induction ls <;> simp [*] + rename_i hd tl hi + intro i hineq + if heq: i = 0 then + simp [*] at * + have := tl.len_pos + linarith + else + simp at hineq + have : 0 < i := by int_tac + simp [*] + apply hi + linarith + +end Lemmas + +end List diff --git a/backends/lean/Base/Primitives.lean b/backends/lean/Base/Primitives.lean index 1a0c665d..91823cb6 100644 --- a/backends/lean/Base/Primitives.lean +++ b/backends/lean/Base/Primitives.lean @@ -1,715 +1,3 @@ -import Lean -import Lean.Meta.Tactic.Simp -import Init.Data.List.Basic -import Mathlib.Tactic.RunCmd -import Mathlib.Tactic.Linarith - -namespace Primitives - --------------------- --- ASSERT COMMAND --Std. --------------------- - -open Lean Elab Command Term Meta - -syntax (name := assert) "#assert" term: command - -@[command_elab assert] -unsafe -def assertImpl : CommandElab := fun (_stx: Syntax) => do - runTermElabM (fun _ => do - let r ← evalTerm Bool (mkConst ``Bool) _stx[1] - if not r then - logInfo ("Assertion failed for:\n" ++ _stx[1]) - throwError ("Expression reduced to false:\n" ++ _stx[1]) - pure ()) - -#eval 2 == 2 -#assert (2 == 2) - -------------- --- PRELUDE -- -------------- - --- Results & monadic combinators - -inductive Error where - | assertionFailure: Error - | integerOverflow: Error - | divisionByZero: Error - | arrayOutOfBounds: Error - | maximumSizeExceeded: Error - | panic: Error -deriving Repr, BEq - -open Error - -inductive Result (α : Type u) where - | ret (v: α): Result α - | fail (e: Error): Result α - | div -deriving Repr, BEq - -open Result - -instance Result_Inhabited (α : Type u) : Inhabited (Result α) := - Inhabited.mk (fail panic) - -instance Result_Nonempty (α : Type u) : Nonempty (Result α) := - Nonempty.intro div - -/- HELPERS -/ - -def ret? {α: Type u} (r: Result α): Bool := - match r with - | ret _ => true - | fail _ | div => false - -def div? {α: Type u} (r: Result α): Bool := - match r with - | div => true - | ret _ | fail _ => false - -def massert (b:Bool) : Result Unit := - if b then ret () else fail assertionFailure - -def eval_global {α: Type u} (x: Result α) (_: ret? 
x): α := - match x with - | fail _ | div => by contradiction - | ret x => x - -/- DO-DSL SUPPORT -/ - -def bind {α : Type u} {β : Type v} (x: Result α) (f: α -> Result β) : Result β := - match x with - | ret v => f v - | fail v => fail v - | div => div - --- Allows using Result in do-blocks -instance : Bind Result where - bind := bind - --- Allows using return x in do-blocks -instance : Pure Result where - pure := fun x => ret x - -@[simp] theorem bind_ret (x : α) (f : α → Result β) : bind (.ret x) f = f x := by simp [bind] -@[simp] theorem bind_fail (x : Error) (f : α → Result β) : bind (.fail x) f = .fail x := by simp [bind] -@[simp] theorem bind_div (f : α → Result β) : bind .div f = .div := by simp [bind] - -/- CUSTOM-DSL SUPPORT -/ - --- Let-binding the Result of a monadic operation is oftentimes not sufficient, --- because we may need a hypothesis for equational reasoning in the scope. We --- rely on subtype, and a custom let-binding operator, in effect recreating our --- own variant of the do-dsl - -def Result.attach {α: Type} (o : Result α): Result { x : α // o = ret x } := - match o with - | ret x => ret ⟨x, rfl⟩ - | fail e => fail e - | div => div - -@[simp] theorem bind_tc_ret (x : α) (f : α → Result β) : - (do let y ← .ret x; f y) = f x := by simp [Bind.bind, bind] - -@[simp] theorem bind_tc_fail (x : Error) (f : α → Result β) : - (do let y ← fail x; f y) = fail x := by simp [Bind.bind, bind] - -@[simp] theorem bind_tc_div (f : α → Result β) : - (do let y ← div; f y) = div := by simp [Bind.bind, bind] - ----------------------- --- MACHINE INTEGERS -- ----------------------- - --- We redefine our machine integers types. - --- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits` --- using the simplifier, meaning that proofs do not depend on the compile-time value of --- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at --- least officially, 16-bit microcontrollers, so this seems like a fine design decision --- for now.) - --- Note from Chris Bailey: "If there's more than one salient property of your --- definition then the subtyping strategy might get messy, and the property part --- of a subtype is less discoverable by the simplifier or tactics like --- library_search." So, we will not add refinements on the return values of the --- operations defined on Primitives, but will rather rely on custom lemmas to --- invert on possible return values of the primitive operations. - --- Machine integer constants, done via `ofNatCore`, which requires a proof that --- the `Nat` fits within the desired integer type. We provide a custom tactic. - -open System.Platform.getNumBits - --- TODO: is there a way of only importing System.Platform.getNumBits? --- -@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val - --- Remark: Lean seems to use < for the comparisons with the upper bounds by convention. 
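/- Editor's note: an illustration of the remark above, not part of the original
   patch. `Fin`/`UInt*` state their upper bound exclusively with `<`, whereas the
   `Scalar` bounds below are inclusive (`≤`); over `Int` the two styles are
   interchangeable, for instance: -/
example (x : Int) : x ≤ 255 ↔ x < 256 := by
  constructor <;> intro h <;> linarith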
- --- The "structured" bounds -def Isize.smin : Int := - (HPow.hPow 2 (size_num_bits - 1)) -def Isize.smax : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1 -def I8.smin : Int := - (HPow.hPow 2 7) -def I8.smax : Int := HPow.hPow 2 7 - 1 -def I16.smin : Int := - (HPow.hPow 2 15) -def I16.smax : Int := HPow.hPow 2 15 - 1 -def I32.smin : Int := -(HPow.hPow 2 31) -def I32.smax : Int := HPow.hPow 2 31 - 1 -def I64.smin : Int := -(HPow.hPow 2 63) -def I64.smax : Int := HPow.hPow 2 63 - 1 -def I128.smin : Int := -(HPow.hPow 2 127) -def I128.smax : Int := HPow.hPow 2 127 - 1 -def Usize.smin : Int := 0 -def Usize.smax : Int := HPow.hPow 2 size_num_bits - 1 -def U8.smin : Int := 0 -def U8.smax : Int := HPow.hPow 2 8 - 1 -def U16.smin : Int := 0 -def U16.smax : Int := HPow.hPow 2 16 - 1 -def U32.smin : Int := 0 -def U32.smax : Int := HPow.hPow 2 32 - 1 -def U64.smin : Int := 0 -def U64.smax : Int := HPow.hPow 2 64 - 1 -def U128.smin : Int := 0 -def U128.smax : Int := HPow.hPow 2 128 - 1 - --- The "normalized" bounds, that we use in practice -def I8.min := -128 -def I8.max := 127 -def I16.min := -32768 -def I16.max := 32767 -def I32.min := -2147483648 -def I32.max := 2147483647 -def I64.min := -9223372036854775808 -def I64.max := 9223372036854775807 -def I128.min := -170141183460469231731687303715884105728 -def I128.max := 170141183460469231731687303715884105727 -@[simp] def U8.min := 0 -def U8.max := 255 -@[simp] def U16.min := 0 -def U16.max := 65535 -@[simp] def U32.min := 0 -def U32.max := 4294967295 -@[simp] def U64.min := 0 -def U64.max := 18446744073709551615 -@[simp] def U128.min := 0 -def U128.max := 340282366920938463463374607431768211455 -@[simp] def Usize.min := 0 - -def Isize.refined_min : { n:Int // n = I32.min ∨ n = I64.min } := - ⟨ Isize.smin, by - simp [Isize.smin] - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> simp [*] ⟩ - -def Isize.refined_max : { n:Int // n = I32.max ∨ n = I64.max } := - ⟨ Isize.smax, by - simp [Isize.smax] - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> simp [*] ⟩ - -def Usize.refined_max : { n:Int // n = U32.max ∨ n = U64.max } := - ⟨ Usize.smax, by - simp [Usize.smax] - cases System.Platform.numBits_eq <;> - unfold System.Platform.numBits at * <;> simp [*] ⟩ - -def Isize.min := Isize.refined_min.val -def Isize.max := Isize.refined_max.val -def Usize.max := Usize.refined_max.val - -inductive ScalarTy := -| Isize -| I8 -| I16 -| I32 -| I64 -| I128 -| Usize -| U8 -| U16 -| U32 -| U64 -| U128 - -def Scalar.smin (ty : ScalarTy) : Int := - match ty with - | .Isize => Isize.smin - | .I8 => I8.smin - | .I16 => I16.smin - | .I32 => I32.smin - | .I64 => I64.smin - | .I128 => I128.smin - | .Usize => Usize.smin - | .U8 => U8.smin - | .U16 => U16.smin - | .U32 => U32.smin - | .U64 => U64.smin - | .U128 => U128.smin - -def Scalar.smax (ty : ScalarTy) : Int := - match ty with - | .Isize => Isize.smax - | .I8 => I8.smax - | .I16 => I16.smax - | .I32 => I32.smax - | .I64 => I64.smax - | .I128 => I128.smax - | .Usize => Usize.smax - | .U8 => U8.smax - | .U16 => U16.smax - | .U32 => U32.smax - | .U64 => U64.smax - | .U128 => U128.smax - -def Scalar.min (ty : ScalarTy) : Int := - match ty with - | .Isize => Isize.min - | .I8 => I8.min - | .I16 => I16.min - | .I32 => I32.min - | .I64 => I64.min - | .I128 => I128.min - | .Usize => Usize.min - | .U8 => U8.min - | .U16 => U16.min - | .U32 => U32.min - | .U64 => U64.min - | .U128 => U128.min - -def Scalar.max (ty : ScalarTy) : Int := - match ty with - | .Isize => Isize.max 
- | .I8 => I8.max - | .I16 => I16.max - | .I32 => I32.max - | .I64 => I64.max - | .I128 => I128.max - | .Usize => Usize.max - | .U8 => U8.max - | .U16 => U16.max - | .U32 => U32.max - | .U64 => U64.max - | .U128 => U128.max - -def Scalar.smin_eq (ty : ScalarTy) : Scalar.min ty = Scalar.smin ty := by - cases ty <;> rfl - -def Scalar.smax_eq (ty : ScalarTy) : Scalar.max ty = Scalar.smax ty := by - cases ty <;> rfl - --- "Conservative" bounds --- We use those because we can't compare to the isize bounds (which can't --- reduce at compile-time). Whenever we perform an arithmetic operation like --- addition we need to check that the result is in bounds: we first compare --- to the conservative bounds, which reduce, then compare to the real bounds. --- This is useful for the various #asserts that we want to reduce at --- type-checking time. -def Scalar.cMin (ty : ScalarTy) : Int := - match ty with - | .Isize => Scalar.min .I32 - | _ => Scalar.min ty - -def Scalar.cMax (ty : ScalarTy) : Int := - match ty with - | .Isize => Scalar.max .I32 - | .Usize => Scalar.max .U32 - | _ => Scalar.max ty - -theorem Scalar.cMin_bound ty : Scalar.min ty ≤ Scalar.cMin ty := by - cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * - have h := Isize.refined_min.property - cases h <;> simp [*, Isize.min] - -theorem Scalar.cMax_bound ty : Scalar.cMax ty ≤ Scalar.max ty := by - cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * - . have h := Isize.refined_max.property - cases h <;> simp [*, Isize.max] - . have h := Usize.refined_max.property - cases h <;> simp [*, Usize.max] - -theorem Scalar.cMin_suffices ty (h : Scalar.cMin ty ≤ x) : Scalar.min ty ≤ x := by - have := Scalar.cMin_bound ty - linarith - -theorem Scalar.cMax_suffices ty (h : x ≤ Scalar.cMax ty) : x ≤ Scalar.max ty := by - have := Scalar.cMax_bound ty - linarith - -structure Scalar (ty : ScalarTy) where - val : Int - hmin : Scalar.min ty ≤ val - hmax : val ≤ Scalar.max ty -deriving Repr - -theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) : - Scalar.cMin ty ≤ x ∧ x ≤ Scalar.cMax ty -> - Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty - := - λ h => by - apply And.intro <;> have hmin := Scalar.cMin_bound ty <;> have hmax := Scalar.cMax_bound ty <;> linarith - -def Scalar.ofIntCore {ty : ScalarTy} (x : Int) - (hmin : Scalar.min ty ≤ x) (hmax : x ≤ Scalar.max ty) : Scalar ty := - { val := x, hmin := hmin, hmax := hmax } - --- Tactic to prove that integers are in bounds --- TODO: use this: https://leanprover.zulipchat.com/#narrow/stream/270676-lean4/topic/instance.20with.20tactic.20autoparam -syntax "intlit" : tactic -macro_rules - | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices; decide) - -def Scalar.ofInt {ty : ScalarTy} (x : Int) - (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty := by intlit) : Scalar ty := - -- Remark: we initially wrote: - -- let ⟨ hmin, hmax ⟩ := h - -- Scalar.ofIntCore x hmin hmax - -- We updated to the line below because a similar pattern in `Scalar.tryMk` - -- made reduction block. Both versions seem to work for `Scalar.ofInt`, though. 
- -- TODO: investigate - Scalar.ofIntCore x h.left h.right - -@[simp] def Scalar.check_bounds (ty : ScalarTy) (x : Int) : Bool := - (Scalar.cMin ty ≤ x || Scalar.min ty ≤ x) ∧ (x ≤ Scalar.cMax ty || x ≤ Scalar.max ty) - -theorem Scalar.check_bounds_prop {ty : ScalarTy} {x : Int} (h: Scalar.check_bounds ty x) : - Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty := by - simp at * - have ⟨ hmin, hmax ⟩ := h - have hbmin := Scalar.cMin_bound ty - have hbmax := Scalar.cMax_bound ty - cases hmin <;> cases hmax <;> apply And.intro <;> linarith - --- Further thoughts: look at what has been done here: --- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean --- and --- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean --- which both contain a fair amount of reasoning already! -def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) := - if h:Scalar.check_bounds ty x then - -- If we do: - -- ``` - -- let ⟨ hmin, hmax ⟩ := (Scalar.check_bounds_prop h) - -- Scalar.ofIntCore x hmin hmax - -- ``` - -- then normalization blocks (for instance, some proofs which use reflexivity fail). - -- However, the version below doesn't block reduction (TODO: investigate): - return Scalar.ofInt x (Scalar.check_bounds_prop h) - else fail integerOverflow - -def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val) - -def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero - --- Our custom remainder operation, which satisfies the semantics of Rust --- TODO: is there a better way? -def scalar_rem (x y : Int) : Int := - if 0 ≤ x then |x| % |y| - else - (|x| % |y|) - --- Our custom division operation, which satisfies the semantics of Rust --- TODO: is there a better way? -def scalar_div (x y : Int) : Int := - if 0 ≤ x && 0 ≤ y then |x| / |y| - else if 0 ≤ x && y < 0 then - (|x| / |y|) - else if x < 0 && 0 ≤ y then - (|x| / |y|) - else |x| / |y| - --- Checking that the remainder operation is correct -#assert scalar_rem 1 2 = 1 -#assert scalar_rem (-1) 2 = -1 -#assert scalar_rem 1 (-2) = 1 -#assert scalar_rem (-1) (-2) = -1 -#assert scalar_rem 7 3 = (1:Int) -#assert scalar_rem (-7) 3 = -1 -#assert scalar_rem 7 (-3) = 1 -#assert scalar_rem (-7) (-3) = -1 - --- Checking that the division operation is correct -#assert scalar_div 3 2 = 1 -#assert scalar_div (-3) 2 = -1 -#assert scalar_div 3 (-2) = -1 -#assert scalar_div (-3) (-2) = 1 -#assert scalar_div 7 3 = 2 -#assert scalar_div (-7) 3 = -2 -#assert scalar_div 7 (-3) = -2 -#assert scalar_div (-7) (-3) = 2 - -def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero - -def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - Scalar.tryMk ty (x.val + y.val) - -def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - Scalar.tryMk ty (x.val - y.val) - -def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - Scalar.tryMk ty (x.val * y.val) - --- TODO: instances of +, -, * etc. 
for scalars - --- Cast an integer from a [src_ty] to a [tgt_ty] --- TODO: check the semantics of casts in Rust -def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) := - Scalar.tryMk tgt_ty x.val - --- The scalar types --- We declare the definitions as reducible so that Lean can unfold them (useful --- for type class resolution for instance). -@[reducible] def Isize := Scalar .Isize -@[reducible] def I8 := Scalar .I8 -@[reducible] def I16 := Scalar .I16 -@[reducible] def I32 := Scalar .I32 -@[reducible] def I64 := Scalar .I64 -@[reducible] def I128 := Scalar .I128 -@[reducible] def Usize := Scalar .Usize -@[reducible] def U8 := Scalar .U8 -@[reducible] def U16 := Scalar .U16 -@[reducible] def U32 := Scalar .U32 -@[reducible] def U64 := Scalar .U64 -@[reducible] def U128 := Scalar .U128 - --- TODO: below: not sure this is the best way. --- Should we rather overload operations like +, -, etc.? --- Also, it is possible to automate the generation of those definitions --- with macros (but would it be a good idea? It would be less easy to --- read the file, which is not supposed to change a lot) - --- Negation - -/-- -Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce -one here. - -The notation typeclass for heterogeneous addition. -This enables the notation `- a : β` where `a : α`. --/ -class HNeg (α : Type u) (β : outParam (Type v)) where - /-- `- a` computes the negation of `a`. - The meaning of this notation is type-dependent. -/ - hNeg : α → β - -prefix:75 "-" => HNeg.hNeg - -instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x -instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x -instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x -instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x -instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x -instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x - --- Addition -instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where - hAdd x y := Scalar.add x y - --- Substraction -instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where - hSub x y := Scalar.sub x y - --- Multiplication -instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where - hMul x y := Scalar.mul x y - --- Division -instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where - hDiv x y := Scalar.div x y - --- Remainder -instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where - hMod x y := Scalar.rem x y - --- ofIntCore --- TODO: typeclass? -def Isize.ofIntCore := @Scalar.ofIntCore .Isize -def I8.ofIntCore := @Scalar.ofIntCore .I8 -def I16.ofIntCore := @Scalar.ofIntCore .I16 -def I32.ofIntCore := @Scalar.ofIntCore .I32 -def I64.ofIntCore := @Scalar.ofIntCore .I64 -def I128.ofIntCore := @Scalar.ofIntCore .I128 -def Usize.ofIntCore := @Scalar.ofIntCore .Usize -def U8.ofIntCore := @Scalar.ofIntCore .U8 -def U16.ofIntCore := @Scalar.ofIntCore .U16 -def U32.ofIntCore := @Scalar.ofIntCore .U32 -def U64.ofIntCore := @Scalar.ofIntCore .U64 -def U128.ofIntCore := @Scalar.ofIntCore .U128 - --- ofInt --- TODO: typeclass? 
-def Isize.ofInt := @Scalar.ofInt .Isize -def I8.ofInt := @Scalar.ofInt .I8 -def I16.ofInt := @Scalar.ofInt .I16 -def I32.ofInt := @Scalar.ofInt .I32 -def I64.ofInt := @Scalar.ofInt .I64 -def I128.ofInt := @Scalar.ofInt .I128 -def Usize.ofInt := @Scalar.ofInt .Usize -def U8.ofInt := @Scalar.ofInt .U8 -def U16.ofInt := @Scalar.ofInt .U16 -def U32.ofInt := @Scalar.ofInt .U32 -def U64.ofInt := @Scalar.ofInt .U64 -def U128.ofInt := @Scalar.ofInt .U128 - --- Comparisons -instance {ty} : LT (Scalar ty) where - lt a b := LT.lt a.val b.val - -instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val - -instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt .. -instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe .. - -theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j - | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl - -theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val := - h ▸ rfl - -theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) := - fun h' => absurd (val_eq_of_eq h') h - -instance (ty : ScalarTy) : DecidableEq (Scalar ty) := - fun i j => - match decEq i.val j.val with - | isTrue h => isTrue (Scalar.eq_of_val_eq h) - | isFalse h => isFalse (Scalar.ne_of_val_ne h) - -def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val - --- -- We now define a type class that subsumes the various machine integer types, so --- -- as to write a concise definition for scalar_cast, rather than exhaustively --- -- enumerating all of the possible pairs. We remark that Rust has sane semantics --- -- and fails if a cast operation would involve a truncation or modulo. - --- class MachineInteger (t: Type) where --- size: Nat --- val: t -> Fin size --- ofNatCore: (n:Nat) -> LT.lt n size -> t - --- set_option hygiene false in --- run_cmd --- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do --- Lean.Elab.Command.elabCommand (← `( --- namespace $typeName --- instance: MachineInteger $typeName where --- size := size --- val := val --- ofNatCore := ofNatCore --- end $typeName --- )) - --- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on --- -- Lean to infer `src`. - --- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst := --- if h: MachineInteger.val x < MachineInteger.size dst then --- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h) --- else --- .fail integerOverflow - -------------- --- VECTORS -- -------------- - -def Vec (α : Type u) := { l : List α // List.length l ≤ Usize.max } - --- TODO: do we really need it? 
It should be with Subtype by default -instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val - -def Vec.new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ - -def Vec.len (α : Type u) (v : Vec α) : Usize := - let ⟨ v, l ⟩ := v - Usize.ofIntCore (List.length v) (by simp [Scalar.min, Usize.min]) l - --- This shouldn't be used -def Vec.push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () - --- This is actually the backward function -def Vec.push (α : Type u) (v : Vec α) (x : α) : Result (Vec α) - := - let nlen := List.length v.val + 1 - if h : nlen ≤ U32.max || nlen ≤ Usize.max then - have h : nlen ≤ Usize.max := by - simp [Usize.max] at * - have hm := Usize.refined_max.property - cases h <;> cases hm <;> simp [U32.max, U64.max] at * <;> try linarith - return ⟨ List.concat v.val x, by simp at *; assumption ⟩ - else - fail maximumSizeExceeded - --- This shouldn't be used -def Vec.insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := - if i.val < List.length v.val then - .ret () - else - .fail arrayOutOfBounds - --- This is actually the backward function -def Vec.insert (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := - if i.val < List.length v.val then - -- TODO: maybe we should redefine a list library which uses integers - -- (instead of natural numbers) - let i := i.val.toNat - .ret ⟨ List.set v.val i x, by - have h: List.length v.val ≤ Usize.max := v.property - simp [*] at * - ⟩ - else - .fail arrayOutOfBounds - -def Vec.index_to_fin {α : Type u} {v: Vec α} {i: Usize} (h : i.val < List.length v.val) : - Fin (List.length v.val) := - let j := i.val.toNat - let h: j < List.length v.val := by - have heq := @Int.toNat_lt (List.length v.val) i.val i.hmin - apply heq.mpr - assumption - ⟨j, h⟩ - -def Vec.index (α : Type u) (v: Vec α) (i: Usize): Result α := - if h: i.val < List.length v.val then - let i := Vec.index_to_fin h - .ret (List.get v.val i) - else - .fail arrayOutOfBounds - --- This shouldn't be used -def Vec.index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := - if i.val < List.length v.val then - .ret () - else - .fail arrayOutOfBounds - -def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize): Result α := - if h: i.val < List.length v.val then - let i := Vec.index_to_fin h - .ret (List.get v.val i) - else - .fail arrayOutOfBounds - -def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := - if h: i.val < List.length v.val then - let i := Vec.index_to_fin h - .ret ⟨ List.set v.val i x, by - have h: List.length v.val ≤ Usize.max := v.property - simp [*] at * - ⟩ - else - .fail arrayOutOfBounds - ----------- --- MISC -- ----------- - -@[simp] def mem.replace (a : Type) (x : a) (_ : a) : a := x -@[simp] def mem.replace_back (a : Type) (_ : a) (y : a) : a := y - -/-- Aeneas-translated function -- useful to reduce non-recursive definitions. - Use with `simp [ aeneas ]` -/ -register_simp_attr aeneas - -end Primitives +import Base.Primitives.Base +import Base.Primitives.Scalar +import Base.Primitives.Vec diff --git a/backends/lean/Base/Primitives/Base.lean b/backends/lean/Base/Primitives/Base.lean new file mode 100644 index 00000000..db462c38 --- /dev/null +++ b/backends/lean/Base/Primitives/Base.lean @@ -0,0 +1,130 @@ +import Lean + +namespace Primitives + +-------------------- +-- ASSERT COMMAND --Std. 
+-------------------- + +open Lean Elab Command Term Meta + +syntax (name := assert) "#assert" term: command + +@[command_elab assert] +unsafe +def assertImpl : CommandElab := fun (_stx: Syntax) => do + runTermElabM (fun _ => do + let r ← evalTerm Bool (mkConst ``Bool) _stx[1] + if not r then + logInfo ("Assertion failed for:\n" ++ _stx[1]) + throwError ("Expression reduced to false:\n" ++ _stx[1]) + pure ()) + +#eval 2 == 2 +#assert (2 == 2) + +------------- +-- PRELUDE -- +------------- + +-- Results & monadic combinators + +inductive Error where + | assertionFailure: Error + | integerOverflow: Error + | divisionByZero: Error + | arrayOutOfBounds: Error + | maximumSizeExceeded: Error + | panic: Error +deriving Repr, BEq + +open Error + +inductive Result (α : Type u) where + | ret (v: α): Result α + | fail (e: Error): Result α + | div +deriving Repr, BEq + +open Result + +instance Result_Inhabited (α : Type u) : Inhabited (Result α) := + Inhabited.mk (fail panic) + +instance Result_Nonempty (α : Type u) : Nonempty (Result α) := + Nonempty.intro div + +/- HELPERS -/ + +def ret? {α: Type u} (r: Result α): Bool := + match r with + | ret _ => true + | fail _ | div => false + +def div? {α: Type u} (r: Result α): Bool := + match r with + | div => true + | ret _ | fail _ => false + +def massert (b:Bool) : Result Unit := + if b then ret () else fail assertionFailure + +def eval_global {α: Type u} (x: Result α) (_: ret? x): α := + match x with + | fail _ | div => by contradiction + | ret x => x + +/- DO-DSL SUPPORT -/ + +def bind {α : Type u} {β : Type v} (x: Result α) (f: α -> Result β) : Result β := + match x with + | ret v => f v + | fail v => fail v + | div => div + +-- Allows using Result in do-blocks +instance : Bind Result where + bind := bind + +-- Allows using return x in do-blocks +instance : Pure Result where + pure := fun x => ret x + +@[simp] theorem bind_ret (x : α) (f : α → Result β) : bind (.ret x) f = f x := by simp [bind] +@[simp] theorem bind_fail (x : Error) (f : α → Result β) : bind (.fail x) f = .fail x := by simp [bind] +@[simp] theorem bind_div (f : α → Result β) : bind .div f = .div := by simp [bind] + +/- CUSTOM-DSL SUPPORT -/ + +-- Let-binding the Result of a monadic operation is oftentimes not sufficient, +-- because we may need a hypothesis for equational reasoning in the scope. We +-- rely on subtype, and a custom let-binding operator, in effect recreating our +-- own variant of the do-dsl + +def Result.attach {α: Type} (o : Result α): Result { x : α // o = ret x } := + match o with + | ret x => ret ⟨x, rfl⟩ + | fail e => fail e + | div => div + +@[simp] theorem bind_tc_ret (x : α) (f : α → Result β) : + (do let y ← .ret x; f y) = f x := by simp [Bind.bind, bind] + +@[simp] theorem bind_tc_fail (x : Error) (f : α → Result β) : + (do let y ← fail x; f y) = fail x := by simp [Bind.bind, bind] + +@[simp] theorem bind_tc_div (f : α → Result β) : + (do let y ← div; f y) = div := by simp [Bind.bind, bind] + +---------- +-- MISC -- +---------- + +@[simp] def mem.replace (a : Type) (x : a) (_ : a) : a := x +@[simp] def mem.replace_back (a : Type) (_ : a) (y : a) : a := y + +/-- Aeneas-translated function -- useful to reduce non-recursive definitions. 
+ Use with `simp [ aeneas ]` -/ +register_simp_attr aeneas + +end Primitives diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean new file mode 100644 index 00000000..241dfa07 --- /dev/null +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -0,0 +1,507 @@ +import Lean +import Lean.Meta.Tactic.Simp +import Mathlib.Tactic.Linarith +import Base.Primitives.Base + +namespace Primitives + +---------------------- +-- MACHINE INTEGERS -- +---------------------- + +-- We redefine our machine integers types. + +-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits` +-- using the simplifier, meaning that proofs do not depend on the compile-time value of +-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at +-- least officially, 16-bit microcontrollers, so this seems like a fine design decision +-- for now.) + +-- Note from Chris Bailey: "If there's more than one salient property of your +-- definition then the subtyping strategy might get messy, and the property part +-- of a subtype is less discoverable by the simplifier or tactics like +-- library_search." So, we will not add refinements on the return values of the +-- operations defined on Primitives, but will rather rely on custom lemmas to +-- invert on possible return values of the primitive operations. + +-- Machine integer constants, done via `ofNatCore`, which requires a proof that +-- the `Nat` fits within the desired integer type. We provide a custom tactic. + +open Result Error +open System.Platform.getNumBits + +-- TODO: is there a way of only importing System.Platform.getNumBits? +-- +@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val + +-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention. 
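/- Editor's note: an illustrative check, not part of the original patch. Since
   `getNumBits` cannot be reduced, the only way to reason about `size_num_bits`
   is to case-split on `System.Platform.numBits_eq`, which is the pattern used
   by the `Isize`/`Usize` proofs below (a sketch; tactic details may need tweaking): -/
example : size_num_bits = 32 ∨ size_num_bits = 64 := by
  cases System.Platform.numBits_eq <;>
  unfold System.Platform.numBits at * <;>
  simp [*]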
+ +-- The "structured" bounds +def Isize.smin : Int := - (HPow.hPow 2 (size_num_bits - 1)) +def Isize.smax : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1 +def I8.smin : Int := - (HPow.hPow 2 7) +def I8.smax : Int := HPow.hPow 2 7 - 1 +def I16.smin : Int := - (HPow.hPow 2 15) +def I16.smax : Int := HPow.hPow 2 15 - 1 +def I32.smin : Int := -(HPow.hPow 2 31) +def I32.smax : Int := HPow.hPow 2 31 - 1 +def I64.smin : Int := -(HPow.hPow 2 63) +def I64.smax : Int := HPow.hPow 2 63 - 1 +def I128.smin : Int := -(HPow.hPow 2 127) +def I128.smax : Int := HPow.hPow 2 127 - 1 +def Usize.smin : Int := 0 +def Usize.smax : Int := HPow.hPow 2 size_num_bits - 1 +def U8.smin : Int := 0 +def U8.smax : Int := HPow.hPow 2 8 - 1 +def U16.smin : Int := 0 +def U16.smax : Int := HPow.hPow 2 16 - 1 +def U32.smin : Int := 0 +def U32.smax : Int := HPow.hPow 2 32 - 1 +def U64.smin : Int := 0 +def U64.smax : Int := HPow.hPow 2 64 - 1 +def U128.smin : Int := 0 +def U128.smax : Int := HPow.hPow 2 128 - 1 + +-- The "normalized" bounds, that we use in practice +def I8.min := -128 +def I8.max := 127 +def I16.min := -32768 +def I16.max := 32767 +def I32.min := -2147483648 +def I32.max := 2147483647 +def I64.min := -9223372036854775808 +def I64.max := 9223372036854775807 +def I128.min := -170141183460469231731687303715884105728 +def I128.max := 170141183460469231731687303715884105727 +@[simp] def U8.min := 0 +def U8.max := 255 +@[simp] def U16.min := 0 +def U16.max := 65535 +@[simp] def U32.min := 0 +def U32.max := 4294967295 +@[simp] def U64.min := 0 +def U64.max := 18446744073709551615 +@[simp] def U128.min := 0 +def U128.max := 340282366920938463463374607431768211455 +@[simp] def Usize.min := 0 + +def Isize.refined_min : { n:Int // n = I32.min ∨ n = I64.min } := + ⟨ Isize.smin, by + simp [Isize.smin] + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> simp [*] ⟩ + +def Isize.refined_max : { n:Int // n = I32.max ∨ n = I64.max } := + ⟨ Isize.smax, by + simp [Isize.smax] + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> simp [*] ⟩ + +def Usize.refined_max : { n:Int // n = U32.max ∨ n = U64.max } := + ⟨ Usize.smax, by + simp [Usize.smax] + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> simp [*] ⟩ + +def Isize.min := Isize.refined_min.val +def Isize.max := Isize.refined_max.val +def Usize.max := Usize.refined_max.val + +inductive ScalarTy := +| Isize +| I8 +| I16 +| I32 +| I64 +| I128 +| Usize +| U8 +| U16 +| U32 +| U64 +| U128 + +def Scalar.smin (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.smin + | .I8 => I8.smin + | .I16 => I16.smin + | .I32 => I32.smin + | .I64 => I64.smin + | .I128 => I128.smin + | .Usize => Usize.smin + | .U8 => U8.smin + | .U16 => U16.smin + | .U32 => U32.smin + | .U64 => U64.smin + | .U128 => U128.smin + +def Scalar.smax (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.smax + | .I8 => I8.smax + | .I16 => I16.smax + | .I32 => I32.smax + | .I64 => I64.smax + | .I128 => I128.smax + | .Usize => Usize.smax + | .U8 => U8.smax + | .U16 => U16.smax + | .U32 => U32.smax + | .U64 => U64.smax + | .U128 => U128.smax + +def Scalar.min (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.min + | .I8 => I8.min + | .I16 => I16.min + | .I32 => I32.min + | .I64 => I64.min + | .I128 => I128.min + | .Usize => Usize.min + | .U8 => U8.min + | .U16 => U16.min + | .U32 => U32.min + | .U64 => U64.min + | .U128 => U128.min + +def Scalar.max (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.max 
+ | .I8 => I8.max + | .I16 => I16.max + | .I32 => I32.max + | .I64 => I64.max + | .I128 => I128.max + | .Usize => Usize.max + | .U8 => U8.max + | .U16 => U16.max + | .U32 => U32.max + | .U64 => U64.max + | .U128 => U128.max + +def Scalar.smin_eq (ty : ScalarTy) : Scalar.min ty = Scalar.smin ty := by + cases ty <;> rfl + +def Scalar.smax_eq (ty : ScalarTy) : Scalar.max ty = Scalar.smax ty := by + cases ty <;> rfl + +-- "Conservative" bounds +-- We use those because we can't compare to the isize bounds (which can't +-- reduce at compile-time). Whenever we perform an arithmetic operation like +-- addition we need to check that the result is in bounds: we first compare +-- to the conservative bounds, which reduce, then compare to the real bounds. +-- This is useful for the various #asserts that we want to reduce at +-- type-checking time. +def Scalar.cMin (ty : ScalarTy) : Int := + match ty with + | .Isize => Scalar.min .I32 + | _ => Scalar.min ty + +def Scalar.cMax (ty : ScalarTy) : Int := + match ty with + | .Isize => Scalar.max .I32 + | .Usize => Scalar.max .U32 + | _ => Scalar.max ty + +theorem Scalar.cMin_bound ty : Scalar.min ty ≤ Scalar.cMin ty := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * + have h := Isize.refined_min.property + cases h <;> simp [*, Isize.min] + +theorem Scalar.cMax_bound ty : Scalar.cMax ty ≤ Scalar.max ty := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * + . have h := Isize.refined_max.property + cases h <;> simp [*, Isize.max] + . have h := Usize.refined_max.property + cases h <;> simp [*, Usize.max] + +theorem Scalar.cMin_suffices ty (h : Scalar.cMin ty ≤ x) : Scalar.min ty ≤ x := by + have := Scalar.cMin_bound ty + linarith + +theorem Scalar.cMax_suffices ty (h : x ≤ Scalar.cMax ty) : x ≤ Scalar.max ty := by + have := Scalar.cMax_bound ty + linarith + +structure Scalar (ty : ScalarTy) where + val : Int + hmin : Scalar.min ty ≤ val + hmax : val ≤ Scalar.max ty +deriving Repr + +theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) : + Scalar.cMin ty ≤ x ∧ x ≤ Scalar.cMax ty -> + Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty + := + λ h => by + apply And.intro <;> have hmin := Scalar.cMin_bound ty <;> have hmax := Scalar.cMax_bound ty <;> linarith + +def Scalar.ofIntCore {ty : ScalarTy} (x : Int) + (hmin : Scalar.min ty ≤ x) (hmax : x ≤ Scalar.max ty) : Scalar ty := + { val := x, hmin := hmin, hmax := hmax } + +-- Tactic to prove that integers are in bounds +-- TODO: use this: https://leanprover.zulipchat.com/#narrow/stream/270676-lean4/topic/instance.20with.20tactic.20autoparam +syntax "intlit" : tactic +macro_rules + | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices; decide) + +def Scalar.ofInt {ty : ScalarTy} (x : Int) + (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty := by intlit) : Scalar ty := + -- Remark: we initially wrote: + -- let ⟨ hmin, hmax ⟩ := h + -- Scalar.ofIntCore x hmin hmax + -- We updated to the line below because a similar pattern in `Scalar.tryMk` + -- made reduction block. Both versions seem to work for `Scalar.ofInt`, though. 
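  -- (Editor's note, not in the original patch: the destructuring `let` elaborates
  -- to a `match` on `h`, so terms like `(Scalar.ofInt x h).val` only reduce once
  -- `h` is a literal conjunction, whereas the `h.left`/`h.right` projections below
  -- leave the `val` field reducible -- likely the difference observed in
  -- `Scalar.tryMk`.)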
+ -- TODO: investigate + Scalar.ofIntCore x h.left h.right + +@[simp] def Scalar.check_bounds (ty : ScalarTy) (x : Int) : Bool := + (Scalar.cMin ty ≤ x || Scalar.min ty ≤ x) ∧ (x ≤ Scalar.cMax ty || x ≤ Scalar.max ty) + +theorem Scalar.check_bounds_prop {ty : ScalarTy} {x : Int} (h: Scalar.check_bounds ty x) : + Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty := by + simp at * + have ⟨ hmin, hmax ⟩ := h + have hbmin := Scalar.cMin_bound ty + have hbmax := Scalar.cMax_bound ty + cases hmin <;> cases hmax <;> apply And.intro <;> linarith + +-- Further thoughts: look at what has been done here: +-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean +-- and +-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean +-- which both contain a fair amount of reasoning already! +def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) := + if h:Scalar.check_bounds ty x then + -- If we do: + -- ``` + -- let ⟨ hmin, hmax ⟩ := (Scalar.check_bounds_prop h) + -- Scalar.ofIntCore x hmin hmax + -- ``` + -- then normalization blocks (for instance, some proofs which use reflexivity fail). + -- However, the version below doesn't block reduction (TODO: investigate): + return Scalar.ofInt x (Scalar.check_bounds_prop h) + else fail integerOverflow + +def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val) + +def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero + +-- Our custom remainder operation, which satisfies the semantics of Rust +-- TODO: is there a better way? +def scalar_rem (x y : Int) : Int := + if 0 ≤ x then |x| % |y| + else - (|x| % |y|) + +-- Our custom division operation, which satisfies the semantics of Rust +-- TODO: is there a better way? +def scalar_div (x y : Int) : Int := + if 0 ≤ x && 0 ≤ y then |x| / |y| + else if 0 ≤ x && y < 0 then - (|x| / |y|) + else if x < 0 && 0 ≤ y then - (|x| / |y|) + else |x| / |y| + +-- Checking that the remainder operation is correct +#assert scalar_rem 1 2 = 1 +#assert scalar_rem (-1) 2 = -1 +#assert scalar_rem 1 (-2) = 1 +#assert scalar_rem (-1) (-2) = -1 +#assert scalar_rem 7 3 = (1:Int) +#assert scalar_rem (-7) 3 = -1 +#assert scalar_rem 7 (-3) = 1 +#assert scalar_rem (-7) (-3) = -1 + +-- Checking that the division operation is correct +#assert scalar_div 3 2 = 1 +#assert scalar_div (-3) 2 = -1 +#assert scalar_div 3 (-2) = -1 +#assert scalar_div (-3) (-2) = 1 +#assert scalar_div 7 3 = 2 +#assert scalar_div (-7) 3 = -2 +#assert scalar_div 7 (-3) = -2 +#assert scalar_div (-7) (-3) = 2 + +def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero + +def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + Scalar.tryMk ty (x.val + y.val) + +def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + Scalar.tryMk ty (x.val - y.val) + +def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + Scalar.tryMk ty (x.val * y.val) + +-- TODO: instances of +, -, * etc. 
for scalars
+
+-- Cast an integer from a [src_ty] to a [tgt_ty]
+-- TODO: check the semantics of casts in Rust
+def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) :=
+  Scalar.tryMk tgt_ty x.val
+
+-- The scalar types
+-- We declare the definitions as reducible so that Lean can unfold them (useful
+-- for type class resolution for instance).
+@[reducible] def Isize := Scalar .Isize
+@[reducible] def I8 := Scalar .I8
+@[reducible] def I16 := Scalar .I16
+@[reducible] def I32 := Scalar .I32
+@[reducible] def I64 := Scalar .I64
+@[reducible] def I128 := Scalar .I128
+@[reducible] def Usize := Scalar .Usize
+@[reducible] def U8 := Scalar .U8
+@[reducible] def U16 := Scalar .U16
+@[reducible] def U32 := Scalar .U32
+@[reducible] def U64 := Scalar .U64
+@[reducible] def U128 := Scalar .U128
+
+-- TODO: below: not sure this is the best way.
+-- Should we rather overload operations like +, -, etc.?
+-- Also, it is possible to automate the generation of those definitions
+-- with macros (but would it be a good idea? It would be less easy to
+-- read the file, which is not supposed to change a lot)
+
+-- Negation
+
+/--
+Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce
+one here.
+
+The notation typeclass for heterogeneous negation.
+This enables the notation `- a : β` where `a : α`.
+-/
+class HNeg (α : Type u) (β : outParam (Type v)) where
+  /-- `- a` computes the negation of `a`.
+  The meaning of this notation is type-dependent. -/
+  hNeg : α → β
+
+prefix:75 "-" => HNeg.hNeg
+
+instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x
+instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x
+instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x
+instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x
+instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x
+instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x
+
+-- Addition
+instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+  hAdd x y := Scalar.add x y
+
+-- Subtraction
+instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+  hSub x y := Scalar.sub x y
+
+-- Multiplication
+instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+  hMul x y := Scalar.mul x y
+
+-- Division
+instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+  hDiv x y := Scalar.div x y
+
+-- Remainder
+instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+  hMod x y := Scalar.rem x y
+
+-- ofIntCore
+-- TODO: typeclass?
+def Isize.ofIntCore := @Scalar.ofIntCore .Isize
+def I8.ofIntCore := @Scalar.ofIntCore .I8
+def I16.ofIntCore := @Scalar.ofIntCore .I16
+def I32.ofIntCore := @Scalar.ofIntCore .I32
+def I64.ofIntCore := @Scalar.ofIntCore .I64
+def I128.ofIntCore := @Scalar.ofIntCore .I128
+def Usize.ofIntCore := @Scalar.ofIntCore .Usize
+def U8.ofIntCore := @Scalar.ofIntCore .U8
+def U16.ofIntCore := @Scalar.ofIntCore .U16
+def U32.ofIntCore := @Scalar.ofIntCore .U32
+def U64.ofIntCore := @Scalar.ofIntCore .U64
+def U128.ofIntCore := @Scalar.ofIntCore .U128
+
+-- ofInt
+-- TODO: typeclass?
+def Isize.ofInt := @Scalar.ofInt .Isize +def I8.ofInt := @Scalar.ofInt .I8 +def I16.ofInt := @Scalar.ofInt .I16 +def I32.ofInt := @Scalar.ofInt .I32 +def I64.ofInt := @Scalar.ofInt .I64 +def I128.ofInt := @Scalar.ofInt .I128 +def Usize.ofInt := @Scalar.ofInt .Usize +def U8.ofInt := @Scalar.ofInt .U8 +def U16.ofInt := @Scalar.ofInt .U16 +def U32.ofInt := @Scalar.ofInt .U32 +def U64.ofInt := @Scalar.ofInt .U64 +def U128.ofInt := @Scalar.ofInt .U128 + +-- Comparisons +instance {ty} : LT (Scalar ty) where + lt a b := LT.lt a.val b.val + +instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val + +instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt .. +instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe .. + +theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j + | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl + +theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val := + h ▸ rfl + +theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) := + fun h' => absurd (val_eq_of_eq h') h + +instance (ty : ScalarTy) : DecidableEq (Scalar ty) := + fun i j => + match decEq i.val j.val with + | isTrue h => isTrue (Scalar.eq_of_val_eq h) + | isFalse h => isFalse (Scalar.ne_of_val_ne h) + +/- Remark: we can't write the following instance because of restrictions about + the type class parameters (`ty` doesn't appear in the return type, which is + forbidden): + + ``` + instance Scalar.cast (ty : ScalarTy) : Coe (Scalar ty) Int where coe := λ v => v.val + ``` + -/ +def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val + +-- -- We now define a type class that subsumes the various machine integer types, so +-- -- as to write a concise definition for scalar_cast, rather than exhaustively +-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics +-- -- and fails if a cast operation would involve a truncation or modulo. + +-- class MachineInteger (t: Type) where +-- size: Nat +-- val: t -> Fin size +-- ofNatCore: (n:Nat) -> LT.lt n size -> t + +-- set_option hygiene false in +-- run_cmd +-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do +-- Lean.Elab.Command.elabCommand (← `( +-- namespace $typeName +-- instance: MachineInteger $typeName where +-- size := size +-- val := val +-- ofNatCore := ofNatCore +-- end $typeName +-- )) + +-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on +-- -- Lean to infer `src`. + +-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst := +-- if h: MachineInteger.val x < MachineInteger.size dst then +-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h) +-- else +-- .fail integerOverflow + +end Primitives diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean new file mode 100644 index 00000000..7851a232 --- /dev/null +++ b/backends/lean/Base/Primitives/Vec.lean @@ -0,0 +1,113 @@ +import Lean +import Lean.Meta.Tactic.Simp +import Init.Data.List.Basic +import Mathlib.Tactic.RunCmd +import Mathlib.Tactic.Linarith +import Base.IList +import Base.Primitives.Scalar +import Base.Arith + +namespace Primitives + +open Result Error + +------------- +-- VECTORS -- +------------- + +def Vec (α : Type u) := { l : List α // List.length l ≤ Usize.max } + +-- TODO: do we really need it? 
It should be with Subtype by default +instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val + +instance (a : Type) : Arith.HasIntProp (Vec a) where + prop_ty := λ v => v.val.length ≤ Scalar.max ScalarTy.Usize + prop := λ ⟨ _, l ⟩ => l + +example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by + intro_has_int_prop_instances + simp_all [Scalar.max, Scalar.min] + +example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by + scalar_tac + +def Vec.new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ + +def Vec.len (α : Type u) (v : Vec α) : Usize := + let ⟨ v, l ⟩ := v + Usize.ofIntCore (List.length v) (by simp [Scalar.min, Usize.min]) l + +def Vec.length {α : Type u} (v : Vec α) : Int := v.val.len + +-- This shouldn't be used +def Vec.push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () + +-- This is actually the backward function +def Vec.push (α : Type u) (v : Vec α) (x : α) : Result (Vec α) + := + let nlen := List.length v.val + 1 + if h : nlen ≤ U32.max || nlen ≤ Usize.max then + have h : nlen ≤ Usize.max := by + simp [Usize.max] at * + have hm := Usize.refined_max.property + cases h <;> cases hm <;> simp [U32.max, U64.max] at * <;> try linarith + return ⟨ List.concat v.val x, by simp at *; assumption ⟩ + else + fail maximumSizeExceeded + +-- This shouldn't be used +def Vec.insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := + if i.val < List.length v.val then + .ret () + else + .fail arrayOutOfBounds + +-- This is actually the backward function +def Vec.insert (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := + if i.val < List.length v.val then + -- TODO: maybe we should redefine a list library which uses integers + -- (instead of natural numbers) + .ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ + else + .fail arrayOutOfBounds + +-- TODO: remove +def Vec.index_to_fin {α : Type u} {v: Vec α} {i: Usize} (h : i.val < List.length v.val) : + Fin (List.length v.val) := + let j := i.val.toNat + let h: j < List.length v.val := by + have heq := @Int.toNat_lt (List.length v.val) i.val i.hmin + apply heq.mpr + assumption + ⟨j, h⟩ + +def Vec.index (α : Type u) (v: Vec α) (i: Usize): Result α := + match v.val.indexOpt i.val with + | none => fail .arrayOutOfBounds + | some x => ret x + +-- This shouldn't be used +def Vec.index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := + if i.val < List.length v.val then + .ret () + else + .fail arrayOutOfBounds + +def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize): Result α := + if h: i.val < List.length v.val then + let i := Vec.index_to_fin h + .ret (List.get v.val i) + else + .fail arrayOutOfBounds + +def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := + if h: i.val < List.length v.val then + let i := Vec.index_to_fin h + .ret ⟨ List.set v.val i x, by + have h: List.length v.val ≤ Usize.max := v.property + simp [*] at * + ⟩ + else + .fail arrayOutOfBounds + +end Primitives diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 35a3c25a..af7b426a 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -7,6 +7,7 @@ namespace Progress open Lean Elab Term Meta Tactic open Utils +-- TODO: remove namespace Test open Primitives @@ -199,6 +200,7 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do elab "progress" args:progressArgs : tactic => evalProgress args 
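/- Editor's note: an illustrative sketch, not part of the original patch. `progress`
   consumes `pspec` theorems whose conclusion has the `∃ x, f args = ret x ∧ post`
   shape destructed by `progressWith` above. The toy function and theorem below are
   hypothetical and only meant to show that expected shape: -/
namespace IllustrationOfProgress
  open Primitives

  -- A trivial monadic function...
  def incr (x : Int) : Result Int := .ret (x + 1)

  -- ...and the kind of specification `progress` would look up for it.
  @[pspec]
  theorem incr_spec (x : Int) : ∃ y, incr x = .ret y ∧ y = x + 1 :=
    ⟨ x + 1, rfl, rfl ⟩
end IllustrationOfProgress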
+-- TODO: remove namespace Test open Primitives -- cgit v1.2.3 From 2fa3cb8ee04dd7ff4184e3e1000fdc025abc50a4 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 17 Jul 2023 23:37:48 +0200 Subject: Start proving theorems for primitive definitions --- backends/lean/Base/Diverge/Base.lean | 3 +- backends/lean/Base/IList/IList.lean | 66 ++++++++++++++++++----- backends/lean/Base/Primitives/Scalar.lean | 1 + backends/lean/Base/Primitives/Vec.lean | 89 +++++++++++++++++++------------ backends/lean/Base/Progress/Base.lean | 3 +- backends/lean/Base/Progress/Progress.lean | 4 ++ 6 files changed, 116 insertions(+), 50 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean index 0a9ea4c4..4ff1d923 100644 --- a/backends/lean/Base/Diverge/Base.lean +++ b/backends/lean/Base/Diverge/Base.lean @@ -3,8 +3,7 @@ import Lean.Meta.Tactic.Simp import Init.Data.List.Basic import Mathlib.Tactic.RunCmd import Mathlib.Tactic.Linarith - -import Base.Primitives +import Base.Primitives.Base /- TODO: this is very useful, but is there more? -/ set_option profiler true diff --git a/backends/lean/Base/IList/IList.lean b/backends/lean/Base/IList/IList.lean index 2a335cac..ddb10236 100644 --- a/backends/lean/Base/IList/IList.lean +++ b/backends/lean/Base/IList/IList.lean @@ -11,12 +11,27 @@ def len (ls : List α) : Int := | [] => 0 | _ :: tl => 1 + len tl +@[simp] theorem len_nil : len ([] : List α) = 0 := by simp [len] +@[simp] theorem len_cons : len ((x :: tl) : List α) = 1 + len tl := by simp [len] + +theorem len_pos : 0 ≤ (ls : List α).len := by + induction ls <;> simp [*] + linarith + +instance (a : Type u) : Arith.HasIntProp (List a) where + prop_ty := λ ls => 0 ≤ ls.len + prop := λ ls => ls.len_pos + -- Remark: if i < 0, then the result is none def indexOpt (ls : List α) (i : Int) : Option α := match ls with | [] => none | hd :: tl => if i = 0 then some hd else indexOpt tl (i - 1) +@[simp] theorem indexOpt_nil : indexOpt ([] : List α) i = none := by simp [indexOpt] +@[simp] theorem indexOpt_zero_cons : indexOpt ((x :: tl) : List α) 0 = some x := by simp [indexOpt] +@[simp] theorem indexOpt_nzero_cons (hne : i ≠ 0) : indexOpt ((x :: tl) : List α) i = indexOpt tl (i - 1) := by simp [*, indexOpt] + -- Remark: if i < 0, then the result is the defaul element def index [Inhabited α] (ls : List α) (i : Int) : α := match ls with @@ -24,6 +39,43 @@ def index [Inhabited α] (ls : List α) (i : Int) : α := | x :: tl => if i = 0 then x else index tl (i - 1) +@[simp] theorem index_zero_cons [Inhabited α] : index ((x :: tl) : List α) 0 = x := by simp [index] +@[simp] theorem index_nzero_cons [Inhabited α] (hne : i ≠ 0) : index ((x :: tl) : List α) i = index tl (i - 1) := by simp [*, index] + +theorem indexOpt_bounds (ls : List α) (i : Int) : + ls.indexOpt i = none ↔ i < 0 ∨ ls.len ≤ i := + match ls with + | [] => + have : ¬ (i < 0) → 0 ≤ i := by intro; linarith -- TODO: simplify (we could boost int_tac) + by simp; tauto + | _ :: tl => + have := indexOpt_bounds tl (i - 1) + if h: i = 0 then + by + simp [*]; + -- TODO: int_tac/scalar_tac should also explore the goal! 
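        -- (Editor's note: the fact missing here is `0 ≤ tl.len`, i.e. `tl.len_pos`,
        -- which the next two lines add by hand so that `linarith` can conclude.)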
+ have := tl.len_pos + linarith + else by + simp [*] + constructor <;> intros <;> + -- TODO: tactic to split all disjunctions + rename_i hor <;> cases hor <;> + first | left; int_tac | right; int_tac + +theorem indexOpt_eq_index [Inhabited α] (ls : List α) (i : Int) : + 0 ≤ i → + i < ls.len → + ls.indexOpt i = some (ls.index i) := + match ls with + | [] => by simp; intros; linarith + | hd :: tl => + if h: i = 0 then + by simp [*] + else + have hi := indexOpt_eq_index tl (i - 1) + by simp [*]; intros; apply hi <;> int_tac + -- Remark: the list is unchanged if the index is not in bounds (in particular -- if it is < 0) def update (ls : List α) (i : Int) (y : α) : List α := @@ -42,12 +94,6 @@ section Lemmas variable {α : Type u} -@[simp] theorem len_nil : len ([] : List α) = 0 := by simp [len] -@[simp] theorem len_cons : len ((x :: tl) : List α) = 1 + len tl := by simp [len] - -@[simp] theorem index_zero_cons [Inhabited α] : index ((x :: tl) : List α) 0 = x := by simp [index] -@[simp] theorem index_nzero_cons [Inhabited α] (hne : i ≠ 0) : index ((x :: tl) : List α) i = index tl (i - 1) := by simp [*, index] - @[simp] theorem update_nil : update ([] : List α) i y = [] := by simp [update] @[simp] theorem update_zero_cons : update ((x :: tl) : List α) 0 y = y :: tl := by simp [update] @[simp] theorem update_nzero_cons (hne : i ≠ 0) : update ((x :: tl) : List α) i y = x :: update tl (i - 1) y := by simp [*, update] @@ -81,14 +127,6 @@ theorem len_update (ls : List α) (i : Int) (x : α) : (ls.update i x).len = ls. simp [len_eq_length] -theorem len_pos : 0 ≤ (ls : List α).len := by - induction ls <;> simp [*] - linarith - -instance (a : Type u) : Arith.HasIntProp (List a) where - prop_ty := λ ls => 0 ≤ ls.len - prop := λ ls => ls.len_pos - theorem left_length_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l1.length = l1'.length) : l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by revert l1' diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean index 241dfa07..3f88caa2 100644 --- a/backends/lean/Base/Primitives/Scalar.lean +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -2,6 +2,7 @@ import Lean import Lean.Meta.Tactic.Simp import Mathlib.Tactic.Linarith import Base.Primitives.Base +import Base.Diverge.Base namespace Primitives diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index 7851a232..4ecfa28f 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -6,6 +6,7 @@ import Mathlib.Tactic.Linarith import Base.IList import Base.Primitives.Scalar import Base.Arith +import Base.Progress.Base namespace Primitives @@ -56,58 +57,80 @@ def Vec.push (α : Type u) (v : Vec α) (x : α) : Result (Vec α) fail maximumSizeExceeded -- This shouldn't be used -def Vec.insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := - if i.val < List.length v.val then +def Vec.insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α) : Result Unit := + if i.val < v.length then .ret () else .fail arrayOutOfBounds -- This is actually the backward function -def Vec.insert (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := - if i.val < List.length v.val then - -- TODO: maybe we should redefine a list library which uses integers - -- (instead of natural numbers) +def Vec.insert (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Vec α) := + if i.val < v.length then .ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ else .fail arrayOutOfBounds --- TODO: remove -def 
Vec.index_to_fin {α : Type u} {v: Vec α} {i: Usize} (h : i.val < List.length v.val) : - Fin (List.length v.val) := - let j := i.val.toNat - let h: j < List.length v.val := by - have heq := @Int.toNat_lt (List.length v.val) i.val i.hmin - apply heq.mpr - assumption - ⟨j, h⟩ - -def Vec.index (α : Type u) (v: Vec α) (i: Usize): Result α := +@[pspec] +theorem Vec.insert_spec {α : Type u} (v: Vec α) (i: Usize) (x: α) : + i.val < v.length → + ∃ nv, v.insert α i x = ret nv ∧ nv.val = v.val.update i.val x := by + intro h + simp [insert, *] + +def Vec.index (α : Type u) (v: Vec α) (i: Usize) : Result α := match v.val.indexOpt i.val with | none => fail .arrayOutOfBounds | some x => ret x +@[pspec] +theorem Vec.index_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : + i.val < v.length → + v.index α i = ret (v.val.index i.val) := by + intro + simp only [index] + -- TODO: dependent rewrite + have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp[length] at *; simp [*]) + simp only [*] + -- This shouldn't be used -def Vec.index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := +def Vec.index_back (α : Type u) (v: Vec α) (i: Usize) (_: α) : Result Unit := if i.val < List.length v.val then .ret () else .fail arrayOutOfBounds -def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize): Result α := - if h: i.val < List.length v.val then - let i := Vec.index_to_fin h - .ret (List.get v.val i) - else - .fail arrayOutOfBounds +def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize) : Result α := + match v.val.indexOpt i.val with + | none => fail .arrayOutOfBounds + | some x => ret x -def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := - if h: i.val < List.length v.val then - let i := Vec.index_to_fin h - .ret ⟨ List.set v.val i x, by - have h: List.length v.val ≤ Usize.max := v.property - simp [*] at * - ⟩ - else - .fail arrayOutOfBounds +@[pspec] +theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : + i.val < v.length → + v.index_mut α i = ret (v.val.index i.val) := by + intro + simp only [index_mut] + -- TODO: dependent rewrite + have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp[length] at *; simp [*]) + simp only [*] + +def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Vec α) := + match v.val.indexOpt i.val with + | none => fail .arrayOutOfBounds + | some _ => + .ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ + +@[pspec] +theorem Vec.index_mut_back_spec {α : Type u} (v: Vec α) (i: Usize) (x : α) : + i.val < v.length → + ∃ nv, v.index_mut_back α i x = ret nv ∧ + nv.val = v.val.update i.val x + := by + intro + simp only [index_mut_back] + have h := List.indexOpt_bounds v.val i.val + split + . simp_all [length]; cases h <;> scalar_tac + . 
simp_all end Primitives diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index a288d889..00b0a478 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -1,6 +1,7 @@ import Lean +import Std.Lean.HashSet import Base.Utils -import Base.Primitives +import Base.Primitives.Base namespace Progress diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index af7b426a..001967e5 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -7,6 +7,7 @@ namespace Progress open Lean Elab Term Meta Tactic open Utils +/- -- TODO: remove namespace Test open Primitives @@ -20,6 +21,7 @@ namespace Test #eval pspecAttr.find? ``Primitives.Vec.index end Test +-/ inductive TheoremOrLocal where | Theorem (thName : Name) @@ -200,6 +202,7 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do elab "progress" args:progressArgs : tactic => evalProgress args +/- -- TODO: remove namespace Test open Primitives @@ -215,5 +218,6 @@ namespace Test set_option trace.Progress false end Test +-/ end Progress -- cgit v1.2.3 From e07177ee2de3fd1346ab6b1fc09aefbcb0e24459 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 18 Jul 2023 12:22:59 +0200 Subject: Improve progress --- backends/lean/Base/Progress/Base.lean | 11 ++++++++--- backends/lean/Base/Progress/Progress.lean | 11 +++++++++-- backends/lean/Base/Utils.lean | 4 ++++ 3 files changed, 21 insertions(+), 5 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index 00b0a478..7eace667 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -58,10 +58,10 @@ section Methods def withPSpec [Inhabited (m a)] [Nonempty (m a)] (th : Expr) (k : PSpecDesc → m a) (sanityChecks : Bool := false) : m a := do - trace[Progress] "Theorem: {th}" + trace[Progress] "Proposition: {th}" -- Dive into the quantified variables and the assumptions forallTelescope th fun fvars th => do - trace[Progress] "All arguments: {fvars}" + trace[Progress] "Universally quantified arguments and assumptions: {fvars}" /- -- Filter the argumens which are not propositions let rec getFirstPropIdx (i : Nat) : MetaM Nat := do if i ≥ fargs.size then pure i @@ -83,12 +83,16 @@ section Methods -- Dive into the existentials existsTelescope th fun evars th => do trace[Progress] "Existentials: {evars}" + trace[Progress] "Proposition after stripping the quantifiers: {th}" -- Take the first conjunct let (th, post) ← optSplitConj th + trace[Progress] "After splitting the conjunction:\n- eq: {th}\n- post: {post}" -- Destruct the equality let (th, ret) ← destEq th + trace[Progress] "After splitting the equality:\n- lhs: {th}\n- rhs: {ret}" -- Destruct the application to get the name - th.withApp fun f args => do + th.consumeMData.withApp fun f args => do + trace[Progress] "After stripping the arguments:\n- f: {f}\n- args: {args}" if ¬ f.isConst then throwError "Not a constant: {f}" -- Compute the set of universally quantified variables which appear in the function arguments let allArgsFVars ← args.foldlM (fun hs arg => getFVarIds arg hs) HashSet.empty @@ -114,6 +118,7 @@ section Methods post := post } k thDesc + end Methods diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 001967e5..ace92f4f 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ 
b/backends/lean/Base/Progress/Progress.lean @@ -55,14 +55,20 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) let thDecl := env.constants.find! thName pure thDecl.type | .Local asmDecl => pure asmDecl.type + trace[Progress] "theorem/assumption type: {thTy}" -- TODO: the tactic fails if we uncomment withNewMCtxDepth -- withNewMCtxDepth do let (mvars, binders, thExBody) ← forallMetaTelescope thTy + trace[Progress] "After stripping foralls: {thExBody}" -- Introduce the existentially quantified variables and the post-condition -- in the context let thBody ← existsTelescope thExBody fun _evars thBody => do + trace[Progress] "After stripping existentials: {thBody}" + let (thBody, _) ← optSplitConj thBody + trace[Progress] "After splitting the conjunction: {thBody}" let (thBody, _) ← destEq thBody + trace[Progress] "After splitting equality: {thBody}" -- There shouldn't be any existential variables in thBody pure thBody -- Match the body with the target @@ -152,6 +158,7 @@ def progressAsmsOrLookupTheorem (ids : Array Name) (asmTac : TacticM Unit) : Tac -- Retrieve the goal let mgoal ← Tactic.getMainGoal let goalTy ← mgoal.getType + trace[Progress] "goal: {goalTy}" -- Dive into the goal to lookup the theorem let (fName, fLevels, args) ← do withPSpec goalTy fun desc => @@ -188,7 +195,7 @@ syntax progressArgs := ("as" " ⟨ " (ident)+ " ⟩")? def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do let args := args.raw -- Process the arguments to retrieve the identifiers to use - trace[Progress] "Progressing arguments: {args}" + trace[Progress] "Progress arguments: {args}" let args := args.getArgs let ids := if args.size > 0 then @@ -196,7 +203,7 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do let args := (args.get! 2).getArgs args.map Syntax.getId else #[] - trace[Progress] "Ids: {ids}" + trace[Progress] "User-provided ids: {ids}" progressAsmsOrLookupTheorem ids (firstTac [assumptionTac, Arith.scalarTac]) elab "progress" args:progressArgs : tactic => diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index 599c3a9f..9cd0db23 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -43,6 +43,10 @@ namespace List end List +-- TODO: move? 
+@[simp] +theorem neq_imp {α : Type u} {x y : α} (h : ¬ x = y) : ¬ y = x := by intro; simp_all + namespace Lean namespace LocalContext -- cgit v1.2.3 From 0f430c055c3a531ceab83635adc5df92f0015c6e Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 18 Jul 2023 16:55:27 +0200 Subject: Make modifications to Vec.lean --- backends/lean/Base/Primitives/Vec.lean | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index 4ecfa28f..be3a0e5b 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -38,7 +38,8 @@ def Vec.len (α : Type u) (v : Vec α) : Usize := let ⟨ v, l ⟩ := v Usize.ofIntCore (List.length v) (by simp [Scalar.min, Usize.min]) l -def Vec.length {α : Type u} (v : Vec α) : Int := v.val.len +@[simp] +abbrev Vec.length {α : Type u} (v : Vec α) : Int := v.val.len -- This shouldn't be used def Vec.push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () @@ -89,7 +90,7 @@ theorem Vec.index_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : intro simp only [index] -- TODO: dependent rewrite - have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp[length] at *; simp [*]) + have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) simp only [*] -- This shouldn't be used @@ -111,13 +112,14 @@ theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : intro simp only [index_mut] -- TODO: dependent rewrite - have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp[length] at *; simp [*]) + have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) simp only [*] def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Vec α) := match v.val.indexOpt i.val with | none => fail .arrayOutOfBounds | some _ => + -- TODO: int_tac: introduce the refinements in the context? .ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ @[pspec] -- cgit v1.2.3 From 0a8211041814b5eafac0b9e2dbcd956957a322b5 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 18 Jul 2023 18:02:03 +0200 Subject: Move an arithmetic lemma --- backends/lean/Base/Arith/Base.lean | 6 ++++++ backends/lean/Base/Diverge/Base.lean | 14 ++++---------- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Base.lean b/backends/lean/Base/Arith/Base.lean index a6e59b74..e008f7b9 100644 --- a/backends/lean/Base/Arith/Base.lean +++ b/backends/lean/Base/Arith/Base.lean @@ -28,6 +28,12 @@ theorem ne_is_lt_or_gt {x y : Int} (hne : x ≠ y) : x < y ∨ x > y := by | .inl _ => left; linarith | .inr _ => right; linarith +-- TODO: move? +theorem add_one_le_iff_le_ne (n m : Nat) (h1 : m ≤ n) (h2 : m ≠ n) : m + 1 ≤ n := by + -- Damn, those proofs on natural numbers are hard - I wish Omega was in mathlib4... + simp [Nat.add_one_le_iff] + simp [Nat.lt_iff_le_and_ne] + simp_all /- Induction over positive integers -/ -- TODO: move diff --git a/backends/lean/Base/Diverge/Base.lean b/backends/lean/Base/Diverge/Base.lean index 4ff1d923..1d548389 100644 --- a/backends/lean/Base/Diverge/Base.lean +++ b/backends/lean/Base/Diverge/Base.lean @@ -4,6 +4,7 @@ import Init.Data.List.Basic import Mathlib.Tactic.RunCmd import Mathlib.Tactic.Linarith import Base.Primitives.Base +import Base.Arith.Base /- TODO: this is very useful, but is there more? 
-/ set_option profiler true @@ -537,23 +538,16 @@ namespace FixI let j: Fin tys1.length := ⟨ j, jLt ⟩ Eq.mp (by simp) (get_fun tl j) - -- TODO: move - theorem add_one_le_iff_le_ne (n m : Nat) (h1 : m ≤ n) (h2 : m ≠ n) : m + 1 ≤ n := by - -- Damn, those proofs on natural numbers are hard - I wish Omega was in mathlib4... - simp [Nat.add_one_le_iff] - simp [Nat.lt_iff_le_and_ne] - simp_all - def for_all_fin_aux {n : Nat} (f : Fin n → Prop) (m : Nat) (h : m ≤ n) : Prop := if heq: m = n then True else f ⟨ m, by simp_all [Nat.lt_iff_le_and_ne] ⟩ ∧ - for_all_fin_aux f (m + 1) (by simp_all [add_one_le_iff_le_ne]) + for_all_fin_aux f (m + 1) (by simp_all [Arith.add_one_le_iff_le_ne]) termination_by for_all_fin_aux n _ m h => n - m decreasing_by simp_wf apply Nat.sub_add_lt_sub <;> simp - simp_all [add_one_le_iff_le_ne] + simp_all [Arith.add_one_le_iff_le_ne] def for_all_fin {n : Nat} (f : Fin n → Prop) := for_all_fin_aux f 0 (by simp) @@ -603,7 +597,7 @@ namespace FixI apply hi <;> simp_all . unfold for_all_fin_aux at hf simp_all - . simp_all [add_one_le_iff_le_ne] + . simp_all [Arith.add_one_le_iff_le_ne] -- TODO: this is not necessary anymore theorem for_all_fin_imp_forall (n : Nat) (f : Fin n → Prop) : -- cgit v1.2.3 From 204742bf2449c88abaea8ebd284c55d98b43488a Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 19 Jul 2023 14:48:08 +0200 Subject: Improve progress --- backends/lean/Base/Arith/Int.lean | 8 --- backends/lean/Base/Progress/Progress.lean | 115 +++++++++++++++++++++--------- backends/lean/Base/Utils.lean | 19 +++++ 3 files changed, 99 insertions(+), 43 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Int.lean b/backends/lean/Base/Arith/Int.lean index 5f00ab52..ac011998 100644 --- a/backends/lean/Base/Arith/Int.lean +++ b/backends/lean/Base/Arith/Int.lean @@ -32,14 +32,6 @@ instance (x y : Int) : PropHasImp (¬ x = y) where open Lean Lean.Elab Lean.Meta --- Small utility: print all the declarations in the context -elab "print_all_decls" : tactic => do - let ctx ← Lean.MonadLCtx.getLCtx - for decl in ← ctx.getDecls do - let ty ← Lean.Meta.inferType decl.toExpr - logInfo m!"{decl.toExpr} : {ty}" - pure () - -- Explore a term by decomposing the applications (we explore the applied -- functions and their arguments, but ignore lambdas, forall, etc. - -- should we go inside?). diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index ace92f4f..3b0248fe 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -55,7 +55,7 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) let thDecl := env.constants.find! 
thName pure thDecl.type | .Local asmDecl => pure asmDecl.type - trace[Progress] "theorem/assumption type: {thTy}" + trace[Progress] "Looked up theorem/assumption type: {thTy}" -- TODO: the tactic fails if we uncomment withNewMCtxDepth -- withNewMCtxDepth do let (mvars, binders, thExBody) ← forallMetaTelescope thTy @@ -84,7 +84,7 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) let th ← do match th with | .Theorem thName => mkAppOptM thName (mvars.map some) - | .Local decl => mkAppOptM' (mkFVar decl.fvarId) (mvars.map some) + | .Local decl => mkAppOptM' (mkFVar decl.fvarId) (mvars.map some) let asmName ← mkFreshUserName `h let thTy ← inferType th let thAsm ← Utils.addDeclTac asmName th thTy (asLet := false) @@ -153,7 +153,7 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) pure .Ok -- The array of ids are identifiers to use when introducing fresh variables -def progressAsmsOrLookupTheorem (ids : Array Name) (asmTac : TacticM Unit) : TacticM Unit := do +def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Name) (asmTac : TacticM Unit) : TacticM Unit := do withMainContext do -- Retrieve the goal let mgoal ← Tactic.getMainGoal @@ -167,44 +167,89 @@ def progressAsmsOrLookupTheorem (ids : Array Name) (asmTac : TacticM Unit) : Tac -- TODO: this should be in the pspec desc let fnExpr := mkAppN (.const fName fLevels) args trace[Progress] "Function: {fName}" - -- Try all the assumptions one by one and if it fails try to lookup a theorem - let ctx ← Lean.MonadLCtx.getLCtx - let decls ← ctx.getDecls - for decl in decls.reverse do - trace[Progress] "Trying assumption: {decl.userName} : {decl.type}" - try - match ← progressWith fnExpr (.Local decl) ids asmTac with + -- If the user provided a theorem/assumption: use it. + -- Otherwise, lookup one. + match withTh with + | some th => do + match ← progressWith fnExpr th ids asmTac with + | .Ok => return () + | .Error msg => throwError msg + | none => + -- Try all the assumptions one by one and if it fails try to lookup a theorem. + let ctx ← Lean.MonadLCtx.getLCtx + let decls ← ctx.getDecls + for decl in decls.reverse do + trace[Progress] "Trying assumption: {decl.userName} : {decl.type}" + let res ← do try progressWith fnExpr (.Local decl) ids asmTac catch _ => continue + match res with | .Ok => return () | .Error msg => throwError msg - catch _ => continue - -- It failed: try to lookup a theorem - -- TODO: use a list of theorems, and try them one by one? - trace[Progress] "No assumption succeeded: trying to lookup a theorem" - let thName ← do - match ← pspecAttr.find? fName with - | none => throwError "Could not find a pspec theorem for {fName}" - | some thName => pure thName - trace[Progress] "Lookuped up: {thName}" - -- Apply the theorem - match ← progressWith fnExpr (.Theorem thName) ids asmTac with - | .Ok => return () - | .Error msg => throwError msg - -syntax progressArgs := ("as" " ⟨ " (ident)+ " ⟩")? + -- It failed: try to lookup a theorem + -- TODO: use a list of theorems, and try them one by one? + trace[Progress] "No assumption succeeded: trying to lookup a theorem" + let thName ← do + match ← pspecAttr.find? 
fName with + | none => throwError "Could not find a pspec theorem for {fName}" + | some thName => pure thName + trace[Progress] "Lookuped up theorem: {thName}" + -- Apply the theorem + let res ← do + try + let res ← progressWith fnExpr (.Theorem thName) ids asmTac + pure (some res) + catch _ => none + match res with + | some .Ok => return () + | some (.Error msg) => throwError msg + | none => + -- Try a recursive call - we try the assumptions of kind "auxDecl" + let ctx ← Lean.MonadLCtx.getLCtx + let decls ← ctx.getAllDecls + let decls := decls.filter (λ decl => match decl.kind with + | .default | .implDetail => false | .auxDecl => true) + for decl in decls.reverse do + trace[Progress] "Trying recursive assumption: {decl.userName} : {decl.type}" + let res ← do try progressWith fnExpr (.Local decl) ids asmTac catch _ => continue + match res with + | .Ok => return () + | .Error msg => throwError msg + -- Nothing worked: failed + throwError "Progress failed" + +syntax progressArgs := ("with" ident)? ("as" " ⟨ " (ident)+ " ⟩")? +#check Environment +#check ConstMap def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do let args := args.raw -- Process the arguments to retrieve the identifiers to use trace[Progress] "Progress arguments: {args}" let args := args.getArgs - let ids := - if args.size > 0 then - let args := (args.get! 0).getArgs - let args := (args.get! 2).getArgs - args.map Syntax.getId - else #[] + let withArg := (args.get! 0).getArgs + let withArg ← do + if withArg.size > 0 then + let id := withArg.get! 1 + trace[Progress] "With arg: {id}" + -- Attempt to lookup a local declaration + match (← getLCtx).findFromUserName? id.getId with + | some decl => do + trace[Progress] "With arg: local decl" + pure (some (.Local decl)) + | none => do + -- Not a local declaration: should be a theorem + trace[Progress] "With arg: theorem" + addCompletionInfo <| CompletionInfo.id id id.getId (danglingDot := false) {} none + let cs ← resolveGlobalConstWithInfos id + match cs with + | [] => throwError "Could not find theorem {id}" + | id :: _ => + pure (some (.Theorem id)) + else pure none + let args := (args.get! 1).getArgs + let args := (args.get! 
2).getArgs + let ids := args.map Syntax.getId trace[Progress] "User-provided ids: {ids}" - progressAsmsOrLookupTheorem ids (firstTac [assumptionTac, Arith.scalarTac]) + progressAsmsOrLookupTheorem withArg ids (firstTac [assumptionTac, Arith.scalarTac]) elab "progress" args:progressArgs : tactic => evalProgress args @@ -215,16 +260,16 @@ namespace Test open Primitives set_option trace.Progress true + set_option pp.rawOnError true @[pspec] theorem vec_index_test2 (α : Type u) (v: Vec α) (i: Usize) (h: i.val < v.val.length) : ∃ (x: α), v.index α i = .ret x := by - progress as ⟨ x ⟩ + progress with vec_index_test as ⟨ x ⟩ simp set_option trace.Progress false -end Test --/ +end Test -/ end Progress diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index 9cd0db23..acaeb26a 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -142,6 +142,25 @@ private def test2 (x : Nat) : Nat := x print_decl test1 print_decl test2 +#check LocalDecl + +def printDecls (decls : List LocalDecl) : MetaM Unit := do + let decls ← decls.foldrM (λ decl msg => do + pure (m!"\n{decl.toExpr} : {← inferType decl.toExpr}" ++ msg)) m!"" + logInfo m!"# Ctx decls:{decls}" + +-- Small utility: print all the declarations in the context (including the "implementation details") +elab "print_all_ctx_decls" : tactic => do + let ctx ← Lean.MonadLCtx.getLCtx + let decls ← ctx.getAllDecls + printDecls decls + +-- Small utility: print all declarations in the context +elab "print_ctx_decls" : tactic => do + let ctx ← Lean.MonadLCtx.getLCtx + let decls ← ctx.getDecls + printDecls decls + -- A map visitor function for expressions (adapted from `AbstractNestedProofs.visit`) -- The continuation takes as parameters: -- - the current depth of the expression (useful for printing/debugging) -- cgit v1.2.3 From 753907aafc2502ced0cd8c3f9bc43fb1c4b30e93 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 19 Jul 2023 14:48:36 +0200 Subject: Cleanup a bit --- backends/lean/Base/Progress/Progress.lean | 2 -- 1 file changed, 2 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 3b0248fe..9e2461a2 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -218,8 +218,6 @@ def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Na syntax progressArgs := ("with" ident)? ("as" " ⟨ " (ident)+ " ⟩")? 
-#check Environment -#check ConstMap def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do let args := args.raw -- Process the arguments to retrieve the identifiers to use -- cgit v1.2.3 From 985fc7b1c08eaf027ec3a7c1e7ea635f53c00d72 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 19 Jul 2023 14:49:03 +0200 Subject: Cleanup more --- backends/lean/Base/Utils.lean | 3 --- 1 file changed, 3 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index acaeb26a..8aa76d8e 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -138,12 +138,9 @@ open Lean.Elab.Command private def test1 : Nat := 0 private def test2 (x : Nat) : Nat := x - print_decl test1 print_decl test2 -#check LocalDecl - def printDecls (decls : List LocalDecl) : MetaM Unit := do let decls ← decls.foldrM (λ decl msg => do pure (m!"\n{decl.toExpr} : {← inferType decl.toExpr}" ++ msg)) m!"" -- cgit v1.2.3 From 36258c9ba583f19b5ddcb3b90e6521f9845b8944 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 19 Jul 2023 16:41:22 +0200 Subject: Start implementing support for some type classes for progress --- backends/lean/Base/Progress/Base.lean | 64 ++++++++++++++++++++++++- backends/lean/Base/Progress/Progress.lean | 78 ++++++++++++++++++++++--------- 2 files changed, 117 insertions(+), 25 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index 7eace667..0032c33d 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -121,19 +121,42 @@ section Methods end Methods - def getPSpecFunName (th : Expr) : MetaM Name := withPSpec th (fun d => do pure d.fName) true +def getPSpecClassFunNames (th : Expr) : MetaM (Name × Name) := + withPSpec th (fun d => do + let arg0 := d.args.get! 0 + arg0.withApp fun f _ => do + if ¬ f.isConst then throwError "Not a constant: {f}" + pure (d.fName, f.constName) + ) true + +-- "Regular" pspec attribute structure PSpecAttr where attr : AttributeImpl ext : MapDeclarationExtension Name deriving Inhabited +/- pspec attribute for type classes: we use the name of the type class to + lookup another map. We use the *first* argument of the type class to lookup + into this second map. + + Example: + ======== + We use type classes for addition. For instance, the addition between two + U32 is written (without syntactic sugar) as `HAdd.add (Scalar ) x y`. As a consequence, + we store the theorem through the bindings: HAdd.add → Scalar → ... +-/ +structure PSpecClassAttr where + attr : AttributeImpl + ext : MapDeclarationExtension (NameMap Name) + deriving Inhabited + /- The persistent map from function to pspec theorems. 
-/ initialize pspecAttr : PSpecAttr ← do let ext ← mkMapDeclarationExtension `pspecMap - let attrImpl := { + let attrImpl : AttributeImpl := { name := `pspec descr := "Marks theorems to use with the `progress` tactic" add := fun thName stx attrKind => do @@ -153,7 +176,44 @@ initialize pspecAttr : PSpecAttr ← do registerBuiltinAttribute attrImpl pure { attr := attrImpl, ext := ext } +/- The persistent map from type classes to pspec theorems -/ +initialize pspecClassAttr : PSpecClassAttr ← do + let ext : MapDeclarationExtension (NameMap Name) ← mkMapDeclarationExtension `pspecClassMap + let attrImpl : AttributeImpl := { + name := `cpspec + descr := "Marks theorems to use for type classes with the `progress` tactic" + add := fun thName stx attrKind => do + Attribute.Builtin.ensureNoArgs stx + -- TODO: use the attribute kind + unless attrKind == AttributeKind.global do + throwError "invalid attribute 'cpspec', must be global" + -- Lookup the theorem + let env ← getEnv + let thDecl := env.constants.find! thName + let (fName, argName) ← MetaM.run' (getPSpecClassFunNames thDecl.type) + trace[Progress] "Registering class spec theorem for ({fName}, {argName})" + -- Update the entry if there is one, add an entry if there is none + let env := + match (ext.getState (← getEnv)).find? fName with + | none => + let m := RBMap.ofList [(argName, thName)] + ext.addEntry env (fName, m) + | some m => + let m := m.insert argName thName + ext.addEntry env (fName, m) + setEnv env + pure () + } + registerBuiltinAttribute attrImpl + pure { attr := attrImpl, ext := ext } + + def PSpecAttr.find? (s : PSpecAttr) (name : Name) : MetaM (Option Name) := do return (s.ext.getState (← getEnv)).find? name +def PSpecClassAttr.find? (s : PSpecClassAttr) (className argName : Name) : MetaM (Option Name) := do + match (s.ext.getState (← getEnv)).find? className with + | none => return none + | some map => return map.find? argName + end Progress diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 9e2461a2..64d1c14a 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -152,6 +152,15 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) -- pure .Ok +-- Small utility: if `args` is not empty, return the name of the app in the first +-- arg, if it is a const. +def getFirstArgAppName (args : Array Expr) : MetaM (Option Name) := do + if args.size = 0 then pure none + else + (args.get! 0).withApp fun f _ => do + if f.isConst then pure (some f.constName) + else pure none + -- The array of ids are identifiers to use when introducing fresh variables def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Name) (asmTac : TacticM Unit) : TacticM Unit := do withMainContext do @@ -187,34 +196,57 @@ def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Na -- It failed: try to lookup a theorem -- TODO: use a list of theorems, and try them one by one? trace[Progress] "No assumption succeeded: trying to lookup a theorem" - let thName ← do - match ← pspecAttr.find? fName with - | none => throwError "Could not find a pspec theorem for {fName}" - | some thName => pure thName - trace[Progress] "Lookuped up theorem: {thName}" - -- Apply the theorem - let res ← do - try - let res ← progressWith fnExpr (.Theorem thName) ids asmTac - pure (some res) - catch _ => none + let res ← + match ← pspecAttr.find? 
fName with + | some thName => + trace[Progress] "Lookuped up theorem: {thName}" + -- Apply the theorem + let res ← do + try + let res ← progressWith fnExpr (.Theorem thName) ids asmTac + pure (some res) + catch _ => none + | none => + trace[Progress] "Could not find a pspec theorem for {fName}" + throwError "TODO" match res with | some .Ok => return () | some (.Error msg) => throwError msg | none => - -- Try a recursive call - we try the assumptions of kind "auxDecl" - let ctx ← Lean.MonadLCtx.getLCtx - let decls ← ctx.getAllDecls - let decls := decls.filter (λ decl => match decl.kind with - | .default | .implDetail => false | .auxDecl => true) - for decl in decls.reverse do - trace[Progress] "Trying recursive assumption: {decl.userName} : {decl.type}" - let res ← do try progressWith fnExpr (.Local decl) ids asmTac catch _ => continue + -- It failed: try to lookup a *class* spec theorem + let res ← do + match ← getFirstArgAppName args with + | none => none + | some argName => do + match ← pspecClassAttr.find? fName argName with + | some thName => + trace[Progress] "Lookuped up class theorem: {thName}" + -- Apply the theorem + let res ← do + try + let res ← progressWith fnExpr (.Theorem thName) ids asmTac + pure (some res) + catch _ => none + | none => + trace[Progress] "Could not find a pspec theorem for {fName}" + pure none match res with - | .Ok => return () - | .Error msg => throwError msg - -- Nothing worked: failed - throwError "Progress failed" + | some .Ok => return () + | some (.Error msg) => throwError msg + | none => + -- Try a recursive call - we try the assumptions of kind "auxDecl" + let ctx ← Lean.MonadLCtx.getLCtx + let decls ← ctx.getAllDecls + let decls := decls.filter (λ decl => match decl.kind with + | .default | .implDetail => false | .auxDecl => true) + for decl in decls.reverse do + trace[Progress] "Trying recursive assumption: {decl.userName} : {decl.type}" + let res ← do try progressWith fnExpr (.Local decl) ids asmTac catch _ => continue + match res with + | .Ok => return () + | .Error msg => throwError msg + -- Nothing worked: failed + throwError "Progress failed" syntax progressArgs := ("with" ident)? ("as" " ⟨ " (ident)+ " ⟩")? 
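/- Illustrative sketch, not part of this patch series: the `progressArgs` grammar above
   accepts the following invocation forms. `foo_spec`, `y` and `hy` are hypothetical
   names used only to show the expected shape:

     progress                            -- fall back to assumptions and the registered
                                         -- `pspec`/`cpspec` theorems
     progress as ⟨ y hy ⟩                -- additionally name the existential witnesses
                                         -- and post-conditions the applied spec introduces
     progress with foo_spec as ⟨ y hy ⟩  -- bypass the lookup and apply `foo_spec`
                                         -- (a theorem or a local assumption) directly

   The commented-out `vec_index_test2` test in the `Test` namespace uses the last form:
   `progress with vec_index_test as ⟨ x ⟩`. -/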
-- cgit v1.2.3 From 3df0b36891975935c3d8035f56389ee6bbcbf251 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 19 Jul 2023 18:13:31 +0200 Subject: Add arithmetic spec lemmas --- backends/lean/Base/Primitives/Scalar.lean | 167 ++++++++++++++++++++++++++++-- 1 file changed, 161 insertions(+), 6 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean index 3f88caa2..aaa4027f 100644 --- a/backends/lean/Base/Primitives/Scalar.lean +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -3,6 +3,8 @@ import Lean.Meta.Tactic.Simp import Mathlib.Tactic.Linarith import Base.Primitives.Base import Base.Diverge.Base +import Base.Progress.Base +import Base.Arith.Int namespace Primitives @@ -122,6 +124,22 @@ inductive ScalarTy := | U64 | U128 +def ScalarTy.isSigned (ty : ScalarTy) : Bool := + match ty with + | Isize + | I8 + | I16 + | I32 + | I64 + | I128 => true + | Usize + | U8 + | U16 + | U32 + | U64 + | U128 => false + + def Scalar.smin (ty : ScalarTy) : Int := match ty with | .Isize => Isize.smin @@ -289,23 +307,30 @@ def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) := def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val) -def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero - -- Our custom remainder operation, which satisfies the semantics of Rust -- TODO: is there a better way? def scalar_rem (x y : Int) : Int := - if 0 ≤ x then |x| % |y| + if 0 ≤ x then x % y else - (|x| % |y|) +@[simp] +def scalar_rem_nonneg {x y : Int} (hx : 0 ≤ x) : scalar_rem x y = x % y := by + intros + simp [*, scalar_rem] + -- Our custom division operation, which satisfies the semantics of Rust -- TODO: is there a better way? def scalar_div (x y : Int) : Int := - if 0 ≤ x && 0 ≤ y then |x| / |y| + if 0 ≤ x && 0 ≤ y then x / y else if 0 ≤ x && y < 0 then - (|x| / |y|) else if x < 0 && 0 ≤ y then - (|x| / |y|) else |x| / |y| +@[simp] +def scalar_div_nonneg {x y : Int} (hx : 0 ≤ x) (hy : 0 ≤ y) : scalar_div x y = x / y := by + intros + simp [*, scalar_div] + -- Checking that the remainder operation is correct #assert scalar_rem 1 2 = 1 #assert scalar_rem (-1) 2 = -1 @@ -326,8 +351,11 @@ def scalar_div (x y : Int) : Int := #assert scalar_div 7 (-3) = -2 #assert scalar_div (-7) (-3) = 2 +def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + if y.val != 0 then Scalar.tryMk ty (scalar_div x.val y.val) else fail divisionByZero + def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero + if y.val != 0 then Scalar.tryMk ty (scalar_rem x.val y.val) else fail divisionByZero def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (x.val + y.val) @@ -410,6 +438,133 @@ instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where hMod x y := Scalar.rem x y +-- TODO: make progress work at a more fine grained level (see `Scalar.add_unsigned_spec`) +@[cpspec] +theorem Scalar.add_spec {ty} {x y : Scalar ty} + (hmin : Scalar.min ty ≤ x.val + y.val) + (hmax : x.val + y.val ≤ Scalar.max ty) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + simp [HAdd.hAdd, add, Add.add] + simp [tryMk] + split + . simp [pure] + rfl + . 
tauto + +theorem Scalar.add_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} + (hmax : x.val + y.val ≤ Scalar.max ty) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + have hmin : Scalar.min ty ≤ x.val + y.val := by + have hx := x.hmin + have hy := y.hmin + cases ty <;> simp [min] at * <;> linarith + apply add_spec <;> assumption + +-- TODO: make it finer grained +@[cpspec] +theorem Scalar.sub_spec {ty} {x y : Scalar ty} + (hmin : Scalar.min ty ≤ x.val - y.val) + (hmax : x.val - y.val ≤ Scalar.max ty) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + simp [HSub.hSub, sub, Sub.sub] + simp [tryMk] + split + . simp [pure] + rfl + . tauto + +theorem Scalar.sub_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} + (hmin : Scalar.min ty ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + have : x.val - y.val ≤ Scalar.max ty := by + have hx := x.hmin + have hxm := x.hmax + have hy := y.hmin + cases ty <;> simp [min, max] at * <;> linarith + intros + apply sub_spec <;> assumption + +-- TODO: make it finer grained +@[cpspec] +theorem Scalar.mul_spec {ty} {x y : Scalar ty} + (hmin : Scalar.min ty ≤ x.val * y.val) + (hmax : x.val * y.val ≤ Scalar.max ty) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + simp [HMul.hMul, mul, Mul.mul] + simp [tryMk] + split + . simp [pure] + rfl + . tauto + +theorem Scalar.mul_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} + (hmax : x.val * y.val ≤ Scalar.max ty) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + have : Scalar.min ty ≤ x.val * y.val := by + have hx := x.hmin + have hy := y.hmin + cases ty <;> simp at * <;> apply mul_nonneg hx hy + apply mul_spec <;> assumption + +-- TODO: make it finer grained +@[cpspec] +theorem Scalar.div_spec {ty} {x y : Scalar ty} + (hnz : y.val ≠ 0) + (hmin : Scalar.min ty ≤ scalar_div x.val y.val) + (hmax : scalar_div x.val y.val ≤ Scalar.max ty) : + ∃ z, x / y = ret z ∧ z.val = scalar_div x.val y.val := by + simp [HDiv.hDiv, div, Div.div] + simp [tryMk, *] + simp [pure] + rfl + +theorem Scalar.div_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : Scalar ty} + (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + have h : Scalar.min ty = 0 := by cases ty <;> simp at * + have hx := x.hmin + have hy := y.hmin + simp [h] at hx hy + have hmin : 0 ≤ x.val / y.val := Int.ediv_nonneg hx hy + have hmax : x.val / y.val ≤ Scalar.max ty := by + have := Int.ediv_le_self y.val hx + have := x.hmax + linarith + have hs := @div_spec ty x y hnz + simp [*] at hs + apply hs + +-- TODO: make it finer grained +@[cpspec] +theorem Scalar.rem_spec {ty} {x y : Scalar ty} + (hnz : y.val ≠ 0) + (hmin : Scalar.min ty ≤ scalar_rem x.val y.val) + (hmax : scalar_rem x.val y.val ≤ Scalar.max ty) : + ∃ z, x % y = ret z ∧ z.val = scalar_rem x.val y.val := by + simp [HMod.hMod, rem] + simp [tryMk, *] + simp [pure] + rfl + +theorem Scalar.rem_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : Scalar ty} + (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = scalar_rem x.val y.val := by + have h : Scalar.min ty = 0 := by cases ty <;> simp at * + have hx := x.hmin + have hy := y.hmin + simp [h] at hx hy + have hmin : 0 ≤ x.val % y.val := Int.emod_nonneg x.val hnz + have hmax : x.val % y.val ≤ Scalar.max ty := by + have h := @Int.ediv_emod_unique x.val y.val (x.val % y.val) (x.val / y.val) + simp at h + have : 0 < y.val := by int_tac + simp [*] at h + have := y.hmax + linarith + have hs := @rem_spec ty x y hnz + simp [*] at hs + simp [*] + -- ofIntCore -- TODO: typeclass? 
def Isize.ofIntCore := @Scalar.ofIntCore .Isize -- cgit v1.2.3 From abee28555eb9f95b1c548cc17b9fe746bc982b56 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 19 Jul 2023 18:50:19 +0200 Subject: Add some utilities for progress --- backends/lean/Base/Progress/Base.lean | 19 +++++++++++++++++++ backends/lean/Base/Progress/Progress.lean | 21 ++++++++++++++++----- 2 files changed, 35 insertions(+), 5 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index 0032c33d..785b9362 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -211,9 +211,28 @@ initialize pspecClassAttr : PSpecClassAttr ← do def PSpecAttr.find? (s : PSpecAttr) (name : Name) : MetaM (Option Name) := do return (s.ext.getState (← getEnv)).find? name +def PSpecAttr.getState (s : PSpecAttr) : MetaM (NameMap Name) := do + pure (s.ext.getState (← getEnv)) + def PSpecClassAttr.find? (s : PSpecClassAttr) (className argName : Name) : MetaM (Option Name) := do match (s.ext.getState (← getEnv)).find? className with | none => return none | some map => return map.find? argName +def PSpecClassAttr.getState (s : PSpecClassAttr) : MetaM (NameMap (NameMap Name)) := do + pure (s.ext.getState (← getEnv)) + +def showStoredPSpec : MetaM Unit := do + let st ← pspecAttr.getState + let s := st.toList.foldl (fun s (f, th) => f!"{s}\n{f} → {th}") f!"" + IO.println s + +def showStoredPSpecClass : MetaM Unit := do + let st ← pspecClassAttr.getState + let s := st.toList.foldl (fun s (f, m) => + let ms := m.toList.foldl (fun s (f, th) => + f!"{s}\n {f} → {th}") f!"" + f!"{s}\n{f} → [{ms}]") f!"" + IO.println s + end Progress diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 64d1c14a..9c75ee3c 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -208,7 +208,7 @@ def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Na catch _ => none | none => trace[Progress] "Could not find a pspec theorem for {fName}" - throwError "TODO" + pure none match res with | some .Ok => return () | some (.Error msg) => throwError msg @@ -228,7 +228,7 @@ def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Na pure (some res) catch _ => none | none => - trace[Progress] "Could not find a pspec theorem for {fName}" + trace[Progress] "Could not find a class pspec theorem for ({fName}, {argName})" pure none match res with | some .Ok => return () @@ -287,11 +287,22 @@ elab "progress" args:progressArgs : tactic => /- -- TODO: remove namespace Test - open Primitives + open Primitives Result set_option trace.Progress true - set_option pp.rawOnError true + -- #eval do pspecClassAttr.getState + -- #eval showStoredPSpec + -- #eval showStoredPSpecClass + +/- theorem Scalar.add_spec {ty} {x y : Scalar ty} + (hmin : Scalar.min ty ≤ x.val + y.val) + (hmax : x.val + y.val ≤ Scalar.max ty) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + progress + simp [*] -/ + +/- @[pspec] theorem vec_index_test2 (α : Type u) (v: Vec α) (i: Usize) (h: i.val < v.val.length) : ∃ (x: α), v.index α i = .ret x := by @@ -299,7 +310,7 @@ namespace Test simp set_option trace.Progress false - +-/ end Test -/ end Progress -- cgit v1.2.3 From 821b09b14794ebc2fe7b7047fc60fd56fb2cd107 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 19 Jul 2023 19:03:17 +0200 Subject: Fix a small issue with the persistent state of progress --- 
backends/lean/Base/Progress/Base.lean | 9 +++++++++ backends/lean/Base/Progress/Progress.lean | 9 ++++----- 2 files changed, 13 insertions(+), 5 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index 785b9362..72438d40 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -153,6 +153,15 @@ structure PSpecClassAttr where ext : MapDeclarationExtension (NameMap Name) deriving Inhabited +-- TODO: the original function doesn't define correctly the `addImportedFn`. Do a PR? +def mkMapDeclarationExtension [Inhabited α] (name : Name := by exact decl_name%) : IO (MapDeclarationExtension α) := + registerSimplePersistentEnvExtension { + name := name, + addImportedFn := fun a => a.foldl (fun s a => a.foldl (fun s (k, v) => s.insert k v) s) RBMap.empty, + addEntryFn := fun s n => s.insert n.1 n.2 , + toArrayFn := fun es => es.toArray.qsort (fun a b => Name.quickLt a.1 b.1) + } + /- The persistent map from function to pspec theorems. -/ initialize pspecAttr : PSpecAttr ← do let ext ← mkMapDeclarationExtension `pspecMap diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 9c75ee3c..84053150 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -291,16 +291,15 @@ namespace Test set_option trace.Progress true - -- #eval do pspecClassAttr.getState - -- #eval showStoredPSpec - -- #eval showStoredPSpecClass + #eval showStoredPSpec + #eval showStoredPSpecClass -/- theorem Scalar.add_spec {ty} {x y : Scalar ty} + theorem Scalar.add_spec {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val + y.val) (hmax : x.val + y.val ≤ Scalar.max ty) : ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by progress - simp [*] -/ + simp [*] /- @[pspec] -- cgit v1.2.3 From 975b7c555cbffef2648a6469b777d1f1760d926d Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 20 Jul 2023 11:22:18 +0200 Subject: Improve progress further --- backends/lean/Base/Progress/Base.lean | 102 +++++++++++++++++++++------ backends/lean/Base/Progress/Progress.lean | 112 +++++++++++++++++------------- 2 files changed, 142 insertions(+), 72 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index 72438d40..3599d866 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -62,24 +62,6 @@ section Methods -- Dive into the quantified variables and the assumptions forallTelescope th fun fvars th => do trace[Progress] "Universally quantified arguments and assumptions: {fvars}" - /- -- Filter the argumens which are not propositions - let rec getFirstPropIdx (i : Nat) : MetaM Nat := do - if i ≥ fargs.size then pure i - else do - let x := fargs.get! 
i - if ← Meta.isProp (← inferType x) then pure i - else getFirstPropIdx (i + 1) - let i ← getFirstPropIdx 0 - let fvars := fargs.extract 0 i - let hyps := fargs.extract i fargs.size - trace[Progress] "Quantified variables: {fvars}" - trace[Progress] "Assumptions: {hyps}" - -- Sanity check: all hypotheses are propositions (in particular, all the - -- quantified variables are at the beginning) - let hypsAreProp ← hyps.allM fun x => do Meta.isProp (← inferType x) - if ¬ hypsAreProp then - throwError "The theorem doesn't have the proper shape: all the quantified arguments should be at the beginning" - -/ -- Dive into the existentials existsTelescope th fun evars th => do trace[Progress] "Existentials: {evars}" @@ -98,6 +80,8 @@ section Methods let allArgsFVars ← args.foldlM (fun hs arg => getFVarIds arg hs) HashSet.empty -- Sanity check if sanityChecks then + -- All the variables which appear in the inputs given to the function are + -- universally quantified (in particular, they are not *existentially* quantified) let fvarsSet : HashSet FVarId := HashSet.ofArray (fvars.map (fun x => x.fvarId!)) let filtArgsFVars := allArgsFVars.toArray.filter (fun fvar => ¬ fvarsSet.contains fvar) if ¬ filtArgsFVars.isEmpty then @@ -132,6 +116,12 @@ def getPSpecClassFunNames (th : Expr) : MetaM (Name × Name) := pure (d.fName, f.constName) ) true +def getPSpecClassFunNameArg (th : Expr) : MetaM (Name × Expr) := + withPSpec th (fun d => do + let arg0 := d.args.get! 0 + pure (d.fName, arg0) + ) true + -- "Regular" pspec attribute structure PSpecAttr where attr : AttributeImpl @@ -145,14 +135,26 @@ structure PSpecAttr where Example: ======== We use type classes for addition. For instance, the addition between two - U32 is written (without syntactic sugar) as `HAdd.add (Scalar ) x y`. As a consequence, + U32 is written (without syntactic sugar) as `HAdd.add (Scalar ty) x y`. As a consequence, we store the theorem through the bindings: HAdd.add → Scalar → ... + + SH: TODO: this (and `PSpecClassExprAttr`) is a bit ad-hoc. For now it works for the + specs of the scalar operations, which is what I really need, but I'm not sure it + applies well to other situations. A better way would probably to use type classes, but + I couldn't get them to work on those cases. It is worth retrying. -/ structure PSpecClassAttr where attr : AttributeImpl ext : MapDeclarationExtension (NameMap Name) deriving Inhabited +/- Same as `PSpecClassAttr` but we use the full first argument (it works when it + is a constant). -/ +structure PSpecClassExprAttr where + attr : AttributeImpl + ext : MapDeclarationExtension (HashMap Expr Name) + deriving Inhabited + -- TODO: the original function doesn't define correctly the `addImportedFn`. Do a PR? 
def mkMapDeclarationExtension [Inhabited α] (name : Name := by exact decl_name%) : IO (MapDeclarationExtension α) := registerSimplePersistentEnvExtension { @@ -216,21 +218,69 @@ initialize pspecClassAttr : PSpecClassAttr ← do registerBuiltinAttribute attrImpl pure { attr := attrImpl, ext := ext } +/- The 2nd persistent map from type classes to pspec theorems -/ +initialize pspecClassExprAttr : PSpecClassExprAttr ← do + let ext : MapDeclarationExtension (HashMap Expr Name) ← mkMapDeclarationExtension `pspecClassExprMap + let attrImpl : AttributeImpl := { + name := `cepspec + descr := "Marks theorems to use for type classes with the `progress` tactic" + add := fun thName stx attrKind => do + Attribute.Builtin.ensureNoArgs stx + -- TODO: use the attribute kind + unless attrKind == AttributeKind.global do + throwError "invalid attribute 'cpspec', must be global" + -- Lookup the theorem + let env ← getEnv + let thDecl := env.constants.find! thName + let (fName, arg) ← MetaM.run' (getPSpecClassFunNameArg thDecl.type) + -- Sanity check: no variables appear in the argument + MetaM.run' do + let fvars ← getFVarIds arg + if ¬ fvars.isEmpty then throwError "The first argument ({arg}) contains variables" + -- We store two bindings: + -- - arg to theorem name + -- - reduced arg to theorem name + let rarg ← MetaM.run' (reduce arg) + trace[Progress] "Registering class spec theorem for ({fName}, {arg}) and ({fName}, {rarg})" + -- Update the entry if there is one, add an entry if there is none + let env := + match (ext.getState (← getEnv)).find? fName with + | none => + let m := HashMap.ofList [(arg, thName), (rarg, thName)] + ext.addEntry env (fName, m) + | some m => + let m := m.insert arg thName + let m := m.insert rarg thName + ext.addEntry env (fName, m) + setEnv env + pure () + } + registerBuiltinAttribute attrImpl + pure { attr := attrImpl, ext := ext } + def PSpecAttr.find? (s : PSpecAttr) (name : Name) : MetaM (Option Name) := do return (s.ext.getState (← getEnv)).find? name -def PSpecAttr.getState (s : PSpecAttr) : MetaM (NameMap Name) := do - pure (s.ext.getState (← getEnv)) - def PSpecClassAttr.find? (s : PSpecClassAttr) (className argName : Name) : MetaM (Option Name) := do match (s.ext.getState (← getEnv)).find? className with | none => return none | some map => return map.find? argName +def PSpecClassExprAttr.find? (s : PSpecClassExprAttr) (className : Name) (arg : Expr) : MetaM (Option Name) := do + match (s.ext.getState (← getEnv)).find? className with + | none => return none + | some map => return map.find? 
arg + +def PSpecAttr.getState (s : PSpecAttr) : MetaM (NameMap Name) := do + pure (s.ext.getState (← getEnv)) + def PSpecClassAttr.getState (s : PSpecClassAttr) : MetaM (NameMap (NameMap Name)) := do pure (s.ext.getState (← getEnv)) +def PSpecClassExprAttr.getState (s : PSpecClassExprAttr) : MetaM (NameMap (HashMap Expr Name)) := do + pure (s.ext.getState (← getEnv)) + def showStoredPSpec : MetaM Unit := do let st ← pspecAttr.getState let s := st.toList.foldl (fun s (f, th) => f!"{s}\n{f} → {th}") f!"" @@ -244,4 +294,12 @@ def showStoredPSpecClass : MetaM Unit := do f!"{s}\n{f} → [{ms}]") f!"" IO.println s +def showStoredPSpecExprClass : MetaM Unit := do + let st ← pspecClassExprAttr.getState + let s := st.toList.foldl (fun s (f, m) => + let ms := m.toList.foldl (fun s (f, th) => + f!"{s}\n {f} → {th}") f!"" + f!"{s}\n{f} → [{ms}]") f!"" + IO.println s + end Progress diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 84053150..974a6364 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -27,6 +27,9 @@ inductive TheoremOrLocal where | Theorem (thName : Name) | Local (asm : LocalDecl) +instance : ToMessageData TheoremOrLocal where + toMessageData := λ x => match x with | .Theorem thName => m!"{thName}" | .Local asm => m!"{asm.userName}" + /- Type to propagate the errors of `progressWith`. We need this because we use the exceptions to backtrack, when trying to use the assumptions for instance. When there is actually an error we want @@ -161,6 +164,32 @@ def getFirstArgAppName (args : Array Expr) : MetaM (Option Name) := do if f.isConst then pure (some f.constName) else pure none +def getFirstArg (args : Array Expr) : Option Expr := do + if args.size = 0 then none + else some (args.get! 0) + +/- Helper: try to lookup a theorem and apply it, or continue with another tactic + if it fails -/ +def tryLookupApply (ids : Array Name) (asmTac : TacticM Unit) (fnExpr : Expr) + (kind : String) (th : Option TheoremOrLocal) (x : TacticM Unit) : TacticM Unit := do + let res ← do + match th with + | none => + trace[Progress] "Could not find a {kind}" + pure none + | some th => do + trace[Progress] "Lookuped up {kind}: {th}" + -- Apply the theorem + let res ← do + try + let res ← progressWith fnExpr th ids asmTac + pure (some res) + catch _ => none + match res with + | some .Ok => return () + | some (.Error msg) => throwError msg + | none => x + -- The array of ids are identifiers to use when introducing fresh variables def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Name) (asmTac : TacticM Unit) : TacticM Unit := do withMainContext do @@ -196,57 +225,40 @@ def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Na -- It failed: try to lookup a theorem -- TODO: use a list of theorems, and try them one by one? trace[Progress] "No assumption succeeded: trying to lookup a theorem" - let res ← - match ← pspecAttr.find? 
fName with - | some thName => - trace[Progress] "Lookuped up theorem: {thName}" - -- Apply the theorem - let res ← do - try - let res ← progressWith fnExpr (.Theorem thName) ids asmTac - pure (some res) - catch _ => none - | none => - trace[Progress] "Could not find a pspec theorem for {fName}" - pure none - match res with - | some .Ok => return () - | some (.Error msg) => throwError msg - | none => - -- It failed: try to lookup a *class* spec theorem - let res ← do - match ← getFirstArgAppName args with - | none => none - | some argName => do - match ← pspecClassAttr.find? fName argName with - | some thName => - trace[Progress] "Lookuped up class theorem: {thName}" - -- Apply the theorem - let res ← do - try - let res ← progressWith fnExpr (.Theorem thName) ids asmTac - pure (some res) - catch _ => none - | none => - trace[Progress] "Could not find a class pspec theorem for ({fName}, {argName})" - pure none + let pspec ← do + let thName ← pspecAttr.find? fName + pure (thName.map fun th => .Theorem th) + tryLookupApply ids asmTac fnExpr "pspec theorem" pspec do + -- It failed: try to lookup a *class* expr spec theorem (those are more + -- specific than class spec theorems) + let pspecClassExpr ← do + match getFirstArg args with + | none => pure none + | some arg => do + let thName ← pspecClassExprAttr.find? fName arg + pure (thName.map fun th => .Theorem th) + tryLookupApply ids asmTac fnExpr "pspec class expr theorem" pspecClassExpr do + -- It failed: try to lookup a *class* spec theorem + let pspecClass ← do + match ← getFirstArgAppName args with + | none => pure none + | some argName => do + let thName ← pspecClassAttr.find? fName argName + pure (thName.map fun th => .Theorem th) + tryLookupApply ids asmTac fnExpr "pspec class theorem" pspecClass do + -- Try a recursive call - we try the assumptions of kind "auxDecl" + let ctx ← Lean.MonadLCtx.getLCtx + let decls ← ctx.getAllDecls + let decls := decls.filter (λ decl => match decl.kind with + | .default | .implDetail => false | .auxDecl => true) + for decl in decls.reverse do + trace[Progress] "Trying recursive assumption: {decl.userName} : {decl.type}" + let res ← do try progressWith fnExpr (.Local decl) ids asmTac catch _ => continue match res with - | some .Ok => return () - | some (.Error msg) => throwError msg - | none => - -- Try a recursive call - we try the assumptions of kind "auxDecl" - let ctx ← Lean.MonadLCtx.getLCtx - let decls ← ctx.getAllDecls - let decls := decls.filter (λ decl => match decl.kind with - | .default | .implDetail => false | .auxDecl => true) - for decl in decls.reverse do - trace[Progress] "Trying recursive assumption: {decl.userName} : {decl.type}" - let res ← do try progressWith fnExpr (.Local decl) ids asmTac catch _ => continue - match res with - | .Ok => return () - | .Error msg => throwError msg - -- Nothing worked: failed - throwError "Progress failed" + | .Ok => return () + | .Error msg => throwError msg + -- Nothing worked: failed + throwError "Progress failed" syntax progressArgs := ("with" ident)? ("as" " ⟨ " (ident)+ " ⟩")? 
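/- Illustrative summary, not part of this patch series, of how the three attributes are
   keyed, reusing the addition example from the comment in `Base.lean` above (`ty` is a
   type variable, `U32` a concrete scalar type; the lemma names are hypothetical):

     @[pspec]   theorem foo_spec ...        -- keyed on the name of the specified function
     @[cpspec]  theorem add_scalar_spec ... -- keyed on (HAdd.add, Scalar): the head
                                            -- constant of the first argument, so one
                                            -- lemma covers `Scalar ty` for arbitrary `ty`
     @[cepspec] theorem add_u32_spec ...    -- keyed on (HAdd.add, U32): the full,
                                            -- variable-free first argument (also stored
                                            -- in reduced form), hence more specific

   With the cascade above, `progress` tries a user-provided `with` theorem first, then
   local assumptions, then `pspec`, then `cepspec`, then `cpspec`, then the `auxDecl`
   assumptions (recursive calls), before failing. The per-type arithmetic lemmas added
   in the next commit are registered with `cepspec` and rely on this precedence. -/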
-- cgit v1.2.3 From d87e35e1a53b2252cc2c8c554216115773fd9678 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 20 Jul 2023 11:38:55 +0200 Subject: Add fine-grained lemmas for the arithmetic operations --- backends/lean/Base/Primitives/Scalar.lean | 137 ++++++++++++++++++++++++++++-- backends/lean/Base/Progress/Base.lean | 2 +- 2 files changed, 131 insertions(+), 8 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean index aaa4027f..1e9b51c2 100644 --- a/backends/lean/Base/Primitives/Scalar.lean +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -438,7 +438,7 @@ instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where hMod x y := Scalar.rem x y --- TODO: make progress work at a more fine grained level (see `Scalar.add_unsigned_spec`) +-- Generic theorem - shouldn't be used much @[cpspec] theorem Scalar.add_spec {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val + y.val) @@ -460,7 +460,32 @@ theorem Scalar.add_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} cases ty <;> simp [min] at * <;> linarith apply add_spec <;> assumption --- TODO: make it finer grained +/- Fine-grained theorems -/ +@[cepspec] theorem Usize.add_spec {x y : Usize} (hmax : x.val + y.val ≤ Usize.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U8.add_spec {x y : U8} (hmax : x.val + y.val ≤ U8.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U16.add_spec {x y : U16} (hmax : x.val + y.val ≤ U16.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U32.add_spec {x y : U32} (hmax : x.val + y.val ≤ U32.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U64.add_spec {x y : U64} (hmax : x.val + y.val ≤ U64.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U128.add_spec {x y : U128} (hmax : x.val + y.val ≤ U128.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +-- Generic theorem - shouldn't be used much @[cpspec] theorem Scalar.sub_spec {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val - y.val) @@ -484,8 +509,32 @@ theorem Scalar.sub_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} intros apply sub_spec <;> assumption --- TODO: make it finer grained -@[cpspec] +/- Fine-grained theorems -/ +@[cepspec] theorem Usize.sub_spec {x y : Usize} (hmin : Usize.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +@[cepspec] theorem U8.sub_spec {x y : U8} (hmin : U8.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +@[cepspec] theorem U16.sub_spec {x y : U16} (hmin : U16.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +@[cepspec] theorem U32.sub_spec {x y : U32} (hmin : U32.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + apply 
Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +@[cepspec] theorem U64.sub_spec {x y : U64} (hmin : U64.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +@[cepspec] theorem U128.sub_spec {x y : U128} (hmin : U128.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +-- Generic theorem - shouldn't be used much theorem Scalar.mul_spec {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val * y.val) (hmax : x.val * y.val ≤ Scalar.max ty) : @@ -506,7 +555,32 @@ theorem Scalar.mul_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} cases ty <;> simp at * <;> apply mul_nonneg hx hy apply mul_spec <;> assumption --- TODO: make it finer grained +/- Fine-grained theorems -/ +@[cepspec] theorem Usize.mul_spec {x y : Usize} (hmax : x.val * y.val ≤ Usize.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U8.mul_spec {x y : U8} (hmax : x.val * y.val ≤ U8.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U16.mul_spec {x y : U16} (hmax : x.val * y.val ≤ U16.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U32.mul_spec {x y : U32} (hmax : x.val * y.val ≤ U32.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U64.mul_spec {x y : U64} (hmax : x.val * y.val ≤ U64.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U128.mul_spec {x y : U128} (hmax : x.val * y.val ≤ U128.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +-- Generic theorem - shouldn't be used much @[cpspec] theorem Scalar.div_spec {ty} {x y : Scalar ty} (hnz : y.val ≠ 0) @@ -534,7 +608,32 @@ theorem Scalar.div_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : S simp [*] at hs apply hs --- TODO: make it finer grained +/- Fine-grained theorems -/ +@[cepspec] theorem Usize.div_spec (x : Usize) {y : Usize} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [*] + +@[cepspec] theorem U8.div_spec (x : U8) {y : U8} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U16.div_spec (x : U16) {y : U16} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U32.div_spec (x : U32) {y : U32} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U64.div_spec (x : U64) {y : U64} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U128.div_spec (x : U128) {y : U128} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [Scalar.max, *] + +-- Generic theorem - shouldn't be used much @[cpspec] theorem Scalar.rem_spec {ty} {x y : Scalar ty} (hnz 
: y.val ≠ 0) @@ -548,7 +647,7 @@ theorem Scalar.rem_spec {ty} {x y : Scalar ty} theorem Scalar.rem_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : Scalar ty} (hnz : y.val ≠ 0) : - ∃ z, x % y = ret z ∧ z.val = scalar_rem x.val y.val := by + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by have h : Scalar.min ty = 0 := by cases ty <;> simp at * have hx := x.hmin have hy := y.hmin @@ -565,6 +664,30 @@ theorem Scalar.rem_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : S simp [*] at hs simp [*] +@[cepspec] theorem Usize.rem_spec (x : Usize) {y : Usize} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [*] + +@[cepspec] theorem U8.rem_spec (x : U8) {y : U8} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U16.rem_spec (x : U16) {y : U16} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U32.rem_spec (x : U32) {y : U32} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U64.rem_spec (x : U64) {y : U64} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U128.rem_spec (x : U128) {y : U128} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [Scalar.max, *] + -- ofIntCore -- TODO: typeclass? def Isize.ofIntCore := @Scalar.ofIntCore .Isize diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index 3599d866..2fbd24dd 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -240,7 +240,7 @@ initialize pspecClassExprAttr : PSpecClassExprAttr ← do -- We store two bindings: -- - arg to theorem name -- - reduced arg to theorem name - let rarg ← MetaM.run' (reduce arg) + let rarg ← MetaM.run' (reduceAll arg) trace[Progress] "Registering class spec theorem for ({fName}, {arg}) and ({fName}, {rarg})" -- Update the entry if there is one, add an entry if there is none let env := -- cgit v1.2.3 From 6ef1d360b89fd9f9383e63609888bf925a6a16ab Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 20 Jul 2023 12:08:09 +0200 Subject: Improve progress further and move some lemmas --- backends/lean/Base/IList/IList.lean | 67 +++++++++++++++++++++++++++++++ backends/lean/Base/Progress/Progress.lean | 55 +++++++++++++++---------- 2 files changed, 101 insertions(+), 21 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/IList/IList.lean b/backends/lean/Base/IList/IList.lean index ddb10236..1773e593 100644 --- a/backends/lean/Base/IList/IList.lean +++ b/backends/lean/Base/IList/IList.lean @@ -175,6 +175,73 @@ theorem idrop_eq_nil_of_le (hineq : ls.len ≤ i) : idrop i ls = [] := by apply hi linarith +@[simp] +theorem index_ne + {α : Type u} [Inhabited α] (l: List α) (i: ℤ) (j: ℤ) (x: α) : + 0 ≤ i → i < l.len → 0 ≤ j → j < l.len → j ≠ i → + (l.update i x).index j = l.index j + := + λ _ _ _ _ _ => match l with + | [] => by simp at * + | hd :: tl => + if h: i = 0 then + have : j ≠ 0 := by scalar_tac + by simp [*] + else if h : j = 0 then + have : i ≠ 0 := by scalar_tac + by simp [*] + else + by + simp [*] + simp at * + apply index_ne <;> scalar_tac + +@[simp] +theorem index_eq + {α : Type u} 
[Inhabited α] (l: List α) (i: ℤ) (x: α) : + 0 ≤ i → i < l.len → + (l.update i x).index i = x + := + fun _ _ => match l with + | [] => by simp at *; exfalso; scalar_tac -- TODO: exfalso needed. Son FIXME + | hd :: tl => + if h: i = 0 then + by + simp [*] + else + by + simp [*] + simp at * + apply index_eq <;> scalar_tac + +def allP {α : Type u} (l : List α) (p: α → Prop) : Prop := + foldr (fun a r => p a ∧ r) True l + +@[simp] +theorem allP_nil {α : Type u} (p: α → Prop) : allP [] p := + by simp [allP, foldr] + +@[simp] +theorem allP_cons {α : Type u} (hd: α) (tl : List α) (p: α → Prop) : + allP (hd :: tl) p ↔ p hd ∧ allP tl p + := by simp [allP, foldr] + +def pairwise_rel + {α : Type u} (rel : α → α → Prop) (l: List α) : Prop + := match l with + | [] => True + | hd :: tl => allP tl (rel hd) ∧ pairwise_rel rel tl + +@[simp] +theorem pairwise_rel_nil {α : Type u} (rel : α → α → Prop) : + pairwise_rel rel [] + := by simp [pairwise_rel] + +@[simp] +theorem pairwise_rel_cons {α : Type u} (rel : α → α → Prop) (hd: α) (tl: List α) : + pairwise_rel rel (hd :: tl) ↔ allP tl (rel hd) ∧ pairwise_rel rel tl + := by simp [pairwise_rel] + end Lemmas end List diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 974a6364..1f734415 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -39,7 +39,7 @@ inductive ProgressError | Error (msg : MessageData) deriving Inhabited -def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) +def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (keep : Option Name) (ids : Array Name) (asmTac : TacticM Unit) : TacticM ProgressError := do /- Apply the theorem We try to match the theorem with the goal @@ -88,7 +88,7 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) match th with | .Theorem thName => mkAppOptM thName (mvars.map some) | .Local decl => mkAppOptM' (mkFVar decl.fvarId) (mvars.map some) - let asmName ← mkFreshUserName `h + let asmName ← do match keep with | none => mkFreshUserName `h | some n => do pure n let thTy ← inferType th let thAsm ← Utils.addDeclTac asmName th thTy (asLet := false) withMainContext do -- The context changed - TODO: remove once addDeclTac is updated @@ -109,7 +109,9 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) if ← isConj (← inferType h) then do let hName := (← h.fvarId!.getDecl).userName let (optId, ids) := listTryPopHead ids - let optIds := match optId with | none => none | some id => some (hName, id) + let optIds ← match optId with + | none => do pure (some (hName, ← mkFreshUserName `h)) + | some id => do pure (some (hName, id)) splitConjTac h optIds (fun hEq hPost => k hEq (some hPost) ids) else k h none ids -- Simplify the target by using the equality and some monad simplifications, @@ -118,9 +120,12 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (ids : Array Name) trace[Progress] "eq and post:\n{hEq} : {← inferType hEq}\n{hPost}" simpAt [] [``Primitives.bind_tc_ret, ``Primitives.bind_tc_fail, ``Primitives.bind_tc_div] [hEq.fvarId!] (.targets #[] true) - -- Clear the equality - let mgoal ← getMainGoal - let mgoal ← mgoal.tryClearMany #[hEq.fvarId!] + -- Clear the equality, unless the user requests not to do so + let mgoal ← do + if keep.isSome then getMainGoal + else do + let mgoal ← getMainGoal + mgoal.tryClearMany #[hEq.fvarId!] 
setGoals (mgoal :: (← getUnsolvedGoals)) trace[Progress] "Goal after splitting eq and post and simplifying the target: {mgoal}" -- Continue splitting following the ids provided by the user @@ -170,7 +175,7 @@ def getFirstArg (args : Array Expr) : Option Expr := do /- Helper: try to lookup a theorem and apply it, or continue with another tactic if it fails -/ -def tryLookupApply (ids : Array Name) (asmTac : TacticM Unit) (fnExpr : Expr) +def tryLookupApply (keep : Option Name) (ids : Array Name) (asmTac : TacticM Unit) (fnExpr : Expr) (kind : String) (th : Option TheoremOrLocal) (x : TacticM Unit) : TacticM Unit := do let res ← do match th with @@ -182,7 +187,7 @@ def tryLookupApply (ids : Array Name) (asmTac : TacticM Unit) (fnExpr : Expr) -- Apply the theorem let res ← do try - let res ← progressWith fnExpr th ids asmTac + let res ← progressWith fnExpr th keep ids asmTac pure (some res) catch _ => none match res with @@ -191,7 +196,7 @@ def tryLookupApply (ids : Array Name) (asmTac : TacticM Unit) (fnExpr : Expr) | none => x -- The array of ids are identifiers to use when introducing fresh variables -def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Name) (asmTac : TacticM Unit) : TacticM Unit := do +def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrLocal) (ids : Array Name) (asmTac : TacticM Unit) : TacticM Unit := do withMainContext do -- Retrieve the goal let mgoal ← Tactic.getMainGoal @@ -209,7 +214,7 @@ def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Na -- Otherwise, lookup one. match withTh with | some th => do - match ← progressWith fnExpr th ids asmTac with + match ← progressWith fnExpr th keep ids asmTac with | .Ok => return () | .Error msg => throwError msg | none => @@ -218,7 +223,7 @@ def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Na let decls ← ctx.getDecls for decl in decls.reverse do trace[Progress] "Trying assumption: {decl.userName} : {decl.type}" - let res ← do try progressWith fnExpr (.Local decl) ids asmTac catch _ => continue + let res ← do try progressWith fnExpr (.Local decl) keep ids asmTac catch _ => continue match res with | .Ok => return () | .Error msg => throwError msg @@ -228,7 +233,7 @@ def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Na let pspec ← do let thName ← pspecAttr.find? fName pure (thName.map fun th => .Theorem th) - tryLookupApply ids asmTac fnExpr "pspec theorem" pspec do + tryLookupApply keep ids asmTac fnExpr "pspec theorem" pspec do -- It failed: try to lookup a *class* expr spec theorem (those are more -- specific than class spec theorems) let pspecClassExpr ← do @@ -237,7 +242,7 @@ def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Na | some arg => do let thName ← pspecClassExprAttr.find? fName arg pure (thName.map fun th => .Theorem th) - tryLookupApply ids asmTac fnExpr "pspec class expr theorem" pspecClassExpr do + tryLookupApply keep ids asmTac fnExpr "pspec class expr theorem" pspecClassExpr do -- It failed: try to lookup a *class* spec theorem let pspecClass ← do match ← getFirstArgAppName args with @@ -245,7 +250,7 @@ def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Na | some argName => do let thName ← pspecClassAttr.find? 
fName argName pure (thName.map fun th => .Theorem th) - tryLookupApply ids asmTac fnExpr "pspec class theorem" pspecClass do + tryLookupApply keep ids asmTac fnExpr "pspec class theorem" pspecClass do -- Try a recursive call - we try the assumptions of kind "auxDecl" let ctx ← Lean.MonadLCtx.getLCtx let decls ← ctx.getAllDecls @@ -253,21 +258,29 @@ def progressAsmsOrLookupTheorem (withTh : Option TheoremOrLocal) (ids : Array Na | .default | .implDetail => false | .auxDecl => true) for decl in decls.reverse do trace[Progress] "Trying recursive assumption: {decl.userName} : {decl.type}" - let res ← do try progressWith fnExpr (.Local decl) ids asmTac catch _ => continue + let res ← do try progressWith fnExpr (.Local decl) keep ids asmTac catch _ => continue match res with | .Ok => return () | .Error msg => throwError msg -- Nothing worked: failed throwError "Progress failed" -syntax progressArgs := ("with" ident)? ("as" " ⟨ " (ident)+ " ⟩")? +syntax progressArgs := ("keep" ("as" (ident))?)? ("with" ident)? ("as" " ⟨ " (ident)+ " ⟩")? def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do let args := args.raw -- Process the arguments to retrieve the identifiers to use trace[Progress] "Progress arguments: {args}" let args := args.getArgs - let withArg := (args.get! 0).getArgs + let keep : Option Name ← do + let args := (args.get! 0).getArgs + if args.size > 0 then do + let args := (args.get! 1).getArgs + if args.size > 0 then pure (some (args.get! 1).getId) + else do pure (some (← mkFreshUserName `h)) + else pure none + trace[Progress] "Keep: {keep}" + let withArg := (args.get! 1).getArgs let withArg ← do if withArg.size > 0 then let id := withArg.get! 1 @@ -287,11 +300,11 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do | id :: _ => pure (some (.Theorem id)) else pure none - let args := (args.get! 1).getArgs + let args := (args.get! 2).getArgs let args := (args.get! 
2).getArgs let ids := args.map Syntax.getId trace[Progress] "User-provided ids: {ids}" - progressAsmsOrLookupTheorem withArg ids (firstTac [assumptionTac, Arith.scalarTac]) + progressAsmsOrLookupTheorem keep withArg ids (firstTac [assumptionTac, Arith.scalarTac]) elab "progress" args:progressArgs : tactic => evalProgress args @@ -306,11 +319,11 @@ namespace Test #eval showStoredPSpec #eval showStoredPSpecClass - theorem Scalar.add_spec {ty} {x y : Scalar ty} + theorem Scalar.add_spec1 {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val + y.val) (hmax : x.val + y.val ≤ Scalar.max ty) : ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by - progress + progress keep as h with Scalar.add_spec as ⟨ z ⟩ simp [*] /- -- cgit v1.2.3 From 03492250b45855fe9db5e0a28a96166607cd84a1 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 20 Jul 2023 14:14:34 +0200 Subject: Make some proofs in Hashmap/Properties.lean and improve progress --- backends/lean/Base/Arith/Int.lean | 2 +- backends/lean/Base/Progress/Base.lean | 25 ++++++++---- backends/lean/Base/Progress/Progress.lean | 68 ++++++++++--------------------- backends/lean/Base/Utils.lean | 32 +++++++++++---- 4 files changed, 64 insertions(+), 63 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Int.lean b/backends/lean/Base/Arith/Int.lean index ac011998..fa957293 100644 --- a/backends/lean/Base/Arith/Int.lean +++ b/backends/lean/Base/Arith/Int.lean @@ -198,7 +198,7 @@ def intTac (extraPreprocess : Tactic.TacticM Unit) : Tactic.TacticM Unit := do -- Split the conjunctions in the goal Tactic.allGoals (Utils.repeatTac Utils.splitConjTarget) -- Call linarith - let linarith := + let linarith := do let cfg : Linarith.LinarithConfig := { -- We do this with our custom preprocessing splitNe := false diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index 2fbd24dd..b54bdf7a 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -19,6 +19,7 @@ structure PSpecDesc where -- The existentially quantified variables evars : Array Expr -- The function + fExpr : Expr fName : Name -- The function arguments fLevels : List Level @@ -60,21 +61,30 @@ section Methods m a := do trace[Progress] "Proposition: {th}" -- Dive into the quantified variables and the assumptions - forallTelescope th fun fvars th => do + forallTelescope th.consumeMData fun fvars th => do trace[Progress] "Universally quantified arguments and assumptions: {fvars}" -- Dive into the existentials - existsTelescope th fun evars th => do + existsTelescope th.consumeMData fun evars th => do trace[Progress] "Existentials: {evars}" trace[Progress] "Proposition after stripping the quantifiers: {th}" -- Take the first conjunct - let (th, post) ← optSplitConj th + let (th, post) ← optSplitConj th.consumeMData trace[Progress] "After splitting the conjunction:\n- eq: {th}\n- post: {post}" -- Destruct the equality - let (th, ret) ← destEq th + let (mExpr, ret) ← destEq th.consumeMData trace[Progress] "After splitting the equality:\n- lhs: {th}\n- rhs: {ret}" - -- Destruct the application to get the name - th.consumeMData.withApp fun f args => do - trace[Progress] "After stripping the arguments:\n- f: {f}\n- args: {args}" + -- Destruct the monadic application to dive into the bind, if necessary (this + -- is for when we use `withPSpec` inside of the `progress` tactic), and + -- destruct the application to get the function name + mExpr.consumeMData.withApp fun mf margs => do + trace[Progress] "After stripping the 
arguments of the monad expression:\n- mf: {mf}\n- margs: {margs}" + let (fExpr, f, args) ← do + if mf.isConst ∧ mf.constName = ``Bind.bind then do + -- Dive into the bind + let fExpr := margs.get! 4 + fExpr.consumeMData.withApp fun f args => pure (fExpr, f, args) + else pure (mExpr, mf, margs) + trace[Progress] "After stripping the arguments of the function call:\n- f: {f}\n- args: {args}" if ¬ f.isConst then throwError "Not a constant: {f}" -- Compute the set of universally quantified variables which appear in the function arguments let allArgsFVars ← args.foldlM (fun hs arg => getFVarIds arg hs) HashSet.empty @@ -94,6 +104,7 @@ section Methods let thDesc := { fvars := fvars evars := evars + fExpr fName := f.constName! fLevels := f.constLevels! args := args diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 1f734415..dabd25b8 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -7,22 +7,6 @@ namespace Progress open Lean Elab Term Meta Tactic open Utils -/- --- TODO: remove -namespace Test - open Primitives - - set_option trace.Progress true - - @[pspec] - theorem vec_index_test (α : Type u) (v: Vec α) (i: Usize) (h: i.val < v.val.length) : - ∃ x, v.index α i = .ret x := by - sorry - - #eval pspecAttr.find? ``Primitives.Vec.index -end Test --/ - inductive TheoremOrLocal where | Theorem (thName : Name) | Local (asm : LocalDecl) @@ -39,7 +23,7 @@ inductive ProgressError | Error (msg : MessageData) deriving Inhabited -def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (keep : Option Name) (ids : Array Name) +def progressWith (fExpr : Expr) (th : TheoremOrLocal) (keep : Option Name) (ids : Array Name) (asmTac : TacticM Unit) : TacticM ProgressError := do /- Apply the theorem We try to match the theorem with the goal @@ -66,7 +50,7 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (keep : Option Name) (ids -- Introduce the existentially quantified variables and the post-condition -- in the context let thBody ← - existsTelescope thExBody fun _evars thBody => do + existsTelescope thExBody.consumeMData fun _evars thBody => do trace[Progress] "After stripping existentials: {thBody}" let (thBody, _) ← optSplitConj thBody trace[Progress] "After splitting the conjunction: {thBody}" @@ -75,9 +59,9 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (keep : Option Name) (ids -- There shouldn't be any existential variables in thBody pure thBody -- Match the body with the target - trace[Progress] "Maching `{thBody}` with `{fnExpr}`" - let ok ← isDefEq thBody fnExpr - if ¬ ok then throwError "Could not unify the theorem with the target:\n- theorem: {thBody}\n- target: {fnExpr}" + trace[Progress] "Matching `{thBody}` with `{fExpr}`" + let ok ← isDefEq thBody fExpr + if ¬ ok then throwError "Could not unify the theorem with the target:\n- theorem: {thBody}\n- target: {fExpr}" let mgoal ← Tactic.getMainGoal postprocessAppMVars `progress mgoal mvars binders true true Term.synthesizeSyntheticMVarsNoPostponing @@ -139,8 +123,9 @@ def progressWith (fnExpr : Expr) (th : TheoremOrLocal) (keep : Option Name) (ids match ids with | [] => pure .Ok -- Stop | nid :: ids => do + trace[Progress] "Splitting post: {hPost}" -- Split - if ← isConj hPost then + if ← isConj (← inferType hPost) then splitConjTac hPost (some (nid, curPostId)) (λ _ nhPost => splitPost nhPost ids) else return (.Error m!"Too many ids provided ({nid :: ids}) not enough conjuncts to split in the postcondition") splitPost hPost 
ids @@ -175,7 +160,7 @@ def getFirstArg (args : Array Expr) : Option Expr := do /- Helper: try to lookup a theorem and apply it, or continue with another tactic if it fails -/ -def tryLookupApply (keep : Option Name) (ids : Array Name) (asmTac : TacticM Unit) (fnExpr : Expr) +def tryLookupApply (keep : Option Name) (ids : Array Name) (asmTac : TacticM Unit) (fExpr : Expr) (kind : String) (th : Option TheoremOrLocal) (x : TacticM Unit) : TacticM Unit := do let res ← do match th with @@ -187,7 +172,7 @@ def tryLookupApply (keep : Option Name) (ids : Array Name) (asmTac : TacticM Uni -- Apply the theorem let res ← do try - let res ← progressWith fnExpr th keep ids asmTac + let res ← progressWith fExpr th keep ids asmTac pure (some res) catch _ => none match res with @@ -203,18 +188,16 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL let goalTy ← mgoal.getType trace[Progress] "goal: {goalTy}" -- Dive into the goal to lookup the theorem - let (fName, fLevels, args) ← do + let (fExpr, fName, args) ← do withPSpec goalTy fun desc => - -- TODO: check that no universally quantified variables in the arguments - pure (desc.fName, desc.fLevels, desc.args) - -- TODO: this should be in the pspec desc - let fnExpr := mkAppN (.const fName fLevels) args + -- TODO: check that no quantified variables in the arguments + pure (desc.fExpr, desc.fName, desc.args) trace[Progress] "Function: {fName}" -- If the user provided a theorem/assumption: use it. -- Otherwise, lookup one. match withTh with | some th => do - match ← progressWith fnExpr th keep ids asmTac with + match ← progressWith fExpr th keep ids asmTac with | .Ok => return () | .Error msg => throwError msg | none => @@ -223,7 +206,7 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL let decls ← ctx.getDecls for decl in decls.reverse do trace[Progress] "Trying assumption: {decl.userName} : {decl.type}" - let res ← do try progressWith fnExpr (.Local decl) keep ids asmTac catch _ => continue + let res ← do try progressWith fExpr (.Local decl) keep ids asmTac catch _ => continue match res with | .Ok => return () | .Error msg => throwError msg @@ -233,7 +216,7 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL let pspec ← do let thName ← pspecAttr.find? fName pure (thName.map fun th => .Theorem th) - tryLookupApply keep ids asmTac fnExpr "pspec theorem" pspec do + tryLookupApply keep ids asmTac fExpr "pspec theorem" pspec do -- It failed: try to lookup a *class* expr spec theorem (those are more -- specific than class spec theorems) let pspecClassExpr ← do @@ -242,7 +225,7 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL | some arg => do let thName ← pspecClassExprAttr.find? fName arg pure (thName.map fun th => .Theorem th) - tryLookupApply keep ids asmTac fnExpr "pspec class expr theorem" pspecClassExpr do + tryLookupApply keep ids asmTac fExpr "pspec class expr theorem" pspecClassExpr do -- It failed: try to lookup a *class* spec theorem let pspecClass ← do match ← getFirstArgAppName args with @@ -250,7 +233,7 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL | some argName => do let thName ← pspecClassAttr.find? 
fName argName pure (thName.map fun th => .Theorem th) - tryLookupApply keep ids asmTac fnExpr "pspec class theorem" pspecClass do + tryLookupApply keep ids asmTac fExpr "pspec class theorem" pspecClass do -- Try a recursive call - we try the assumptions of kind "auxDecl" let ctx ← Lean.MonadLCtx.getLCtx let decls ← ctx.getAllDecls @@ -258,7 +241,7 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL | .default | .implDetail => false | .auxDecl => true) for decl in decls.reverse do trace[Progress] "Trying recursive assumption: {decl.userName} : {decl.type}" - let res ← do try progressWith fnExpr (.Local decl) keep ids asmTac catch _ => continue + let res ← do try progressWith fExpr (.Local decl) keep ids asmTac catch _ => continue match res with | .Ok => return () | .Error msg => throwError msg @@ -310,7 +293,6 @@ elab "progress" args:progressArgs : tactic => evalProgress args /- --- TODO: remove namespace Test open Primitives Result @@ -319,22 +301,14 @@ namespace Test #eval showStoredPSpec #eval showStoredPSpecClass - theorem Scalar.add_spec1 {ty} {x y : Scalar ty} + example {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val + y.val) (hmax : x.val + y.val ≤ Scalar.max ty) : ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by - progress keep as h with Scalar.add_spec as ⟨ z ⟩ +-- progress keep as h with Scalar.add_spec as ⟨ z ⟩ + progress keep as h simp [*] -/- - @[pspec] - theorem vec_index_test2 (α : Type u) (v: Vec α) (i: Usize) (h: i.val < v.val.length) : - ∃ (x: α), v.index α i = .ret x := by - progress with vec_index_test as ⟨ x ⟩ - simp - - set_option trace.Progress false --/ end Test -/ end Progress diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index 8aa76d8e..44590176 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -308,8 +308,23 @@ def firstTac (tacl : List (TacticM Unit)) : TacticM Unit := do match tacl with | [] => pure () | tac :: tacl => - try tac + -- Should use try ... catch or Lean.observing? + -- Generally speaking we should use Lean.observing? to restore the state, + -- but with tactics the try ... catch variant seems to work + try do + tac + -- Check that there are no remaining goals + let gl ← Tactic.getUnsolvedGoals + if ¬ gl.isEmpty then throwError "tactic failed" catch _ => firstTac tacl +/- let res ← Lean.observing? do + tac + -- Check that there are no remaining goals + let gl ← Tactic.getUnsolvedGoals + if ¬ gl.isEmpty then throwError "tactic failed" + match res with + | some _ => pure () + | none => firstTac tacl -/ -- Split the goal if it is a conjunction def splitConjTarget : TacticM Unit := do @@ -424,12 +439,13 @@ def splitExistsTac (h : Expr) (optId : Option Name) (k : Expr → Expr → Tacti let hTy ← inferType h if isExists hTy then do -- Try to use the user-provided names - let altVarNames ← - match optId with - | none => pure #[] - | some id => do - let hDecl ← h.fvarId!.getDecl - pure #[{ varNames := [id, hDecl.userName] }] + let altVarNames ← do + let hDecl ← h.fvarId!.getDecl + let id ← do + match optId with + | none => mkFreshUserName `x + | some id => pure id + pure #[{ varNames := [id, hDecl.userName] }] let newGoals ← goal.cases h.fvarId! altVarNames -- There should be exactly one goal match newGoals.toList with @@ -511,7 +527,7 @@ example (h : a ∧ b) : a := by example (h : ∃ x y z, x + y + z ≥ 0) : ∃ x, x ≥ 0 := by split_all_exists h - rename_i x y z h + rename_i x y z exists x + y + z /- Call the simp tactic. 
-- cgit v1.2.3 From e58872aa4dc31f0819fe17b13e6b7e4b0d9635c8 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 20 Jul 2023 15:46:11 +0200 Subject: Make progress on some of the hashmap proofs --- backends/lean/Base/Utils.lean | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index 44590176..f014e112 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -326,14 +326,6 @@ def firstTac (tacl : List (TacticM Unit)) : TacticM Unit := do | some _ => pure () | none => firstTac tacl -/ --- Split the goal if it is a conjunction -def splitConjTarget : TacticM Unit := do - withMainContext do - let and_intro := Expr.const ``And.intro [] - let mvarIds' ← _root_.Lean.MVarId.apply (← getMainGoal) and_intro - Term.synthesizeSyntheticMVarsNoPostponing - replaceMainGoal mvarIds' - -- Taken from Lean.Elab.evalAssumption def assumptionTac : TacticM Unit := liftMetaTactic fun mvarId => do mvarId.assumption; pure [] @@ -349,6 +341,24 @@ def optSplitConj (e : Expr) : MetaM (Expr × Option Expr) := do if f.isConstOf ``And ∧ args.size = 2 then pure (args.get! 0, some (args.get! 1)) else pure (e, none) +-- Split the goal if it is a conjunction +def splitConjTarget : TacticM Unit := do + withMainContext do + let g ← getMainTarget + -- The tactic was initially implemened with `_root_.Lean.MVarId.apply` + -- but it tended to mess the goal by unfolding terms, even when it failed + let (l, r) ← optSplitConj g + match r with + | none => do throwError "The goal is not a conjunction" + | some r => do + let lmvar ← mkFreshExprSyntheticOpaqueMVar l + let rmvar ← mkFreshExprSyntheticOpaqueMVar r + let and_intro ← mkAppM ``And.intro #[lmvar, rmvar] + let g ← getMainGoal + g.assign and_intro + let goals ← getUnsolvedGoals + setGoals (lmvar.mvarId! :: rmvar.mvarId! 
:: goals) + -- Destruct an equaliy and return the two sides def destEq (e : Expr) : MetaM (Expr × Expr) := do e.withApp fun f args => @@ -520,6 +530,10 @@ elab "split_all_exists " n:ident : tactic => do let fvar := mkFVar decl.fvarId splitAllExistsTac fvar [] (fun _ _ => pure ()) +elab "split_target_conjs" : tactic => + withMainContext do + repeatTac splitConjTarget + example (h : a ∧ b) : a := by split_all_exists h split_conj h -- cgit v1.2.3 From 9b1498aa7fe014ac430467919504d35b0a688934 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 20 Jul 2023 16:13:35 +0200 Subject: Fix a naming issue with progress --- backends/lean/Base/Progress/Progress.lean | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index dabd25b8..1c509775 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -118,17 +118,17 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) (keep : Option Name) (ids match hPost with | none => do return (.Error m!"Too many ids provided ({ids}): there is no postcondition to split") | some hPost => pure hPost - let curPostId := (← hPost.fvarId!.getDecl).userName - let rec splitPost (hPost : Expr) (ids : List Name) : TacticM ProgressError := do + let rec splitPost (prevId : Name) (hPost : Expr) (ids : List Name) : TacticM ProgressError := do match ids with | [] => pure .Ok -- Stop | nid :: ids => do trace[Progress] "Splitting post: {hPost}" -- Split if ← isConj (← inferType hPost) then - splitConjTac hPost (some (nid, curPostId)) (λ _ nhPost => splitPost nhPost ids) + splitConjTac hPost (some (prevId, nid)) (λ _ nhPost => splitPost nid nhPost ids) else return (.Error m!"Too many ids provided ({nid :: ids}) not enough conjuncts to split in the postcondition") - splitPost hPost ids + let curPostId := (← hPost.fvarId!.getDecl).userName + splitPost curPostId hPost ids else return .Ok match res with | .Error _ => return res -- Can we get there? We're using "return" -- cgit v1.2.3 From 2dd56a51df01421fe7766858c9d37998db4123b5 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 25 Jul 2023 11:53:49 +0200 Subject: Improve the syntax of progress: `as ⟨ x, y .. ⟩` --- backends/lean/Base/Progress/Progress.lean | 77 ++++++++++++++++++------------- backends/lean/Base/Utils.lean | 61 ++++++++++++++++++------ 2 files changed, 94 insertions(+), 44 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 1c509775..c8f94e9e 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -23,7 +23,8 @@ inductive ProgressError | Error (msg : MessageData) deriving Inhabited -def progressWith (fExpr : Expr) (th : TheoremOrLocal) (keep : Option Name) (ids : Array Name) +def progressWith (fExpr : Expr) (th : TheoremOrLocal) + (keep : Option Name) (ids : Array Name) (splitPost : Bool) (asmTac : TacticM Unit) : TacticM ProgressError := do /- Apply the theorem We try to match the theorem with the goal @@ -112,24 +113,31 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) (keep : Option Name) (ids mgoal.tryClearMany #[hEq.fvarId!] 
setGoals (mgoal :: (← getUnsolvedGoals)) trace[Progress] "Goal after splitting eq and post and simplifying the target: {mgoal}" - -- Continue splitting following the ids provided by the user - if ¬ ids.isEmpty then - let hPost ← - match hPost with - | none => do return (.Error m!"Too many ids provided ({ids}): there is no postcondition to split") - | some hPost => pure hPost - let rec splitPost (prevId : Name) (hPost : Expr) (ids : List Name) : TacticM ProgressError := do + -- Continue splitting following the post following the user's instructions + match hPost with + | none => + -- Sanity check + if ¬ ids.isEmpty then + return (.Error m!"Too many ids provided ({ids}): there is no postcondition to split") + else return .Ok + | some hPost => do + let rec splitPostWithIds (prevId : Name) (hPost : Expr) (ids : List Name) : TacticM ProgressError := do match ids with - | [] => pure .Ok -- Stop + | [] => + /- We used all the user provided ids. + Split the remaining conjunctions by using fresh ids if the user + instructed to fully split the post-condition, otherwise stop -/ + if splitPost then + splitFullConjTac hPost (λ _ => pure .Ok) + else pure .Ok | nid :: ids => do trace[Progress] "Splitting post: {hPost}" -- Split if ← isConj (← inferType hPost) then - splitConjTac hPost (some (prevId, nid)) (λ _ nhPost => splitPost nid nhPost ids) + splitConjTac hPost (some (prevId, nid)) (λ _ nhPost => splitPostWithIds nid nhPost ids) else return (.Error m!"Too many ids provided ({nid :: ids}) not enough conjuncts to split in the postcondition") let curPostId := (← hPost.fvarId!.getDecl).userName - splitPost curPostId hPost ids - else return .Ok + splitPostWithIds curPostId hPost ids match res with | .Error _ => return res -- Can we get there? We're using "return" | .Ok => @@ -160,7 +168,8 @@ def getFirstArg (args : Array Expr) : Option Expr := do /- Helper: try to lookup a theorem and apply it, or continue with another tactic if it fails -/ -def tryLookupApply (keep : Option Name) (ids : Array Name) (asmTac : TacticM Unit) (fExpr : Expr) +def tryLookupApply (keep : Option Name) (ids : Array Name) (splitPost : Bool) + (asmTac : TacticM Unit) (fExpr : Expr) (kind : String) (th : Option TheoremOrLocal) (x : TacticM Unit) : TacticM Unit := do let res ← do match th with @@ -172,7 +181,7 @@ def tryLookupApply (keep : Option Name) (ids : Array Name) (asmTac : TacticM Uni -- Apply the theorem let res ← do try - let res ← progressWith fExpr th keep ids asmTac + let res ← progressWith fExpr th keep ids splitPost asmTac pure (some res) catch _ => none match res with @@ -181,7 +190,8 @@ def tryLookupApply (keep : Option Name) (ids : Array Name) (asmTac : TacticM Uni | none => x -- The array of ids are identifiers to use when introducing fresh variables -def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrLocal) (ids : Array Name) (asmTac : TacticM Unit) : TacticM Unit := do +def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrLocal) + (ids : Array Name) (splitPost : Bool) (asmTac : TacticM Unit) : TacticM Unit := do withMainContext do -- Retrieve the goal let mgoal ← Tactic.getMainGoal @@ -197,7 +207,7 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL -- Otherwise, lookup one. 
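   -- Lookup order: the user-provided theorem (if any), then the local assumptions,
   -- then the registered `pspec` theorems, then the `pspec` class-expression and
   -- class theorems, and finally the recursive assumptions of kind `auxDecl`;
   -- if nothing applies, the tactic fails.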
match withTh with | some th => do - match ← progressWith fExpr th keep ids asmTac with + match ← progressWith fExpr th keep ids splitPost asmTac with | .Ok => return () | .Error msg => throwError msg | none => @@ -206,7 +216,7 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL let decls ← ctx.getDecls for decl in decls.reverse do trace[Progress] "Trying assumption: {decl.userName} : {decl.type}" - let res ← do try progressWith fExpr (.Local decl) keep ids asmTac catch _ => continue + let res ← do try progressWith fExpr (.Local decl) keep ids splitPost asmTac catch _ => continue match res with | .Ok => return () | .Error msg => throwError msg @@ -216,7 +226,7 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL let pspec ← do let thName ← pspecAttr.find? fName pure (thName.map fun th => .Theorem th) - tryLookupApply keep ids asmTac fExpr "pspec theorem" pspec do + tryLookupApply keep ids splitPost asmTac fExpr "pspec theorem" pspec do -- It failed: try to lookup a *class* expr spec theorem (those are more -- specific than class spec theorems) let pspecClassExpr ← do @@ -225,7 +235,7 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL | some arg => do let thName ← pspecClassExprAttr.find? fName arg pure (thName.map fun th => .Theorem th) - tryLookupApply keep ids asmTac fExpr "pspec class expr theorem" pspecClassExpr do + tryLookupApply keep ids splitPost asmTac fExpr "pspec class expr theorem" pspecClassExpr do -- It failed: try to lookup a *class* spec theorem let pspecClass ← do match ← getFirstArgAppName args with @@ -233,7 +243,7 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL | some argName => do let thName ← pspecClassAttr.find? fName argName pure (thName.map fun th => .Theorem th) - tryLookupApply keep ids asmTac fExpr "pspec class theorem" pspecClass do + tryLookupApply keep ids splitPost asmTac fExpr "pspec class theorem" pspecClass do -- Try a recursive call - we try the assumptions of kind "auxDecl" let ctx ← Lean.MonadLCtx.getLCtx let decls ← ctx.getAllDecls @@ -241,14 +251,14 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL | .default | .implDetail => false | .auxDecl => true) for decl in decls.reverse do trace[Progress] "Trying recursive assumption: {decl.userName} : {decl.type}" - let res ← do try progressWith fExpr (.Local decl) keep ids asmTac catch _ => continue + let res ← do try progressWith fExpr (.Local decl) keep ids splitPost asmTac catch _ => continue match res with | .Ok => return () | .Error msg => throwError msg -- Nothing worked: failed throwError "Progress failed" -syntax progressArgs := ("keep" ("as" (ident))?)? ("with" ident)? ("as" " ⟨ " (ident)+ " ⟩")? +syntax progressArgs := ("keep" ("as" (ident))?)? ("with" ident)? ("as" " ⟨ " ident,* " .."? " ⟩")? def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do let args := args.raw @@ -263,8 +273,8 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do else do pure (some (← mkFreshUserName `h)) else pure none trace[Progress] "Keep: {keep}" - let withArg := (args.get! 1).getArgs let withArg ← do + let withArg := (args.get! 1).getArgs if withArg.size > 0 then let id := withArg.get! 1 trace[Progress] "With arg: {id}" @@ -283,20 +293,25 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do | id :: _ => pure (some (.Theorem id)) else pure none - let args := (args.get! 
2).getArgs - let args := (args.get! 2).getArgs - let ids := args.map Syntax.getId + let ids := + let args := (args.get! 2).getArgs + let args := (args.get! 2).getSepArgs + args.map Syntax.getId trace[Progress] "User-provided ids: {ids}" - progressAsmsOrLookupTheorem keep withArg ids (firstTac [assumptionTac, Arith.scalarTac]) + let splitPost : Bool := + let args := (args.get! 2).getArgs + (args.get! 3).getArgs.size > 0 + trace[Progress] "Split post: {splitPost}" + progressAsmsOrLookupTheorem keep withArg ids splitPost (firstTac [assumptionTac, Arith.scalarTac]) elab "progress" args:progressArgs : tactic => evalProgress args -/- -namespace Test +/-namespace Test open Primitives Result set_option trace.Progress true + set_option pp.rawOnError true #eval showStoredPSpec #eval showStoredPSpecClass @@ -306,9 +321,9 @@ namespace Test (hmax : x.val + y.val ≤ Scalar.max ty) : ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by -- progress keep as h with Scalar.add_spec as ⟨ z ⟩ - progress keep as h + progress keep as h as ⟨ z, h1 .. ⟩ simp [*] -end Test -/ +end Test-/ end Progress diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index f014e112..3b3d4729 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -499,7 +499,10 @@ def splitConjTac (h : Expr) (optIds : Option (Name × Name)) (k : Expr → Expr -- Try to use the user-provided names let altVarNames ← match optIds with - | none => pure #[] + | none => do + let id0 ← mkFreshUserName `h + let id1 ← mkFreshUserName `h + pure #[{ varNames := [id0, id1] }] | some (id0, id1) => do pure #[{ varNames := [id0, id1] }] let newGoals ← goal.cases h.fvarId! altVarNames @@ -518,29 +521,61 @@ def splitConjTac (h : Expr) (optIds : Option (Name × Name)) (k : Expr → Expr else throwError "Not a conjunction" -elab "split_conj " n:ident : tactic => do +-- Tactic to fully split a conjunction +partial def splitFullConjTacAux [Inhabited α] [Nonempty α] (l : List Expr) (h : Expr) (k : List Expr → TacticM α) : TacticM α := do + try + splitConjTac h none (λ h1 h2 => + splitFullConjTacAux l h1 (λ l1 => + splitFullConjTacAux l1 h2 (λ l2 => + k l2))) + catch _ => + k (h :: l) + +-- Tactic to fully split a conjunction +def splitFullConjTac [Inhabited α] [Nonempty α] (h : Expr) (k : List Expr → TacticM α) : TacticM α := do + splitFullConjTacAux [] h (λ l => k l.reverse) + +syntax optAtArgs := ("at" ident)? +def elabOptAtArgs (args : TSyntax `Utils.optAtArgs) : TacticM (Option Expr) := do withMainContext do - let decl ← Lean.Meta.getLocalDeclFromUserName n.getId - let fvar := mkFVar decl.fvarId - splitConjTac fvar none (fun _ _ => pure ()) + let args := (args.raw.getArgs.get! 0).getArgs + if args.size > 0 then do + let n := (args.get! 
1).getId + let decl ← Lean.Meta.getLocalDeclFromUserName n + let fvar := mkFVar decl.fvarId + pure (some fvar) + else + pure none -elab "split_all_exists " n:ident : tactic => do +elab "split_conj" args:optAtArgs : tactic => do + withMainContext do + match ← elabOptAtArgs args with + | some fvar => + splitConjTac fvar none (fun _ _ => pure ()) + | none => + splitConjTarget + +elab "split_conjs" args:optAtArgs : tactic => do + withMainContext do + match ← elabOptAtArgs args with + | some fvar => + splitFullConjTac fvar (fun _ => pure ()) + | none => + repeatTac splitConjTarget + +elab "split_existsl" " at " n:ident : tactic => do withMainContext do let decl ← Lean.Meta.getLocalDeclFromUserName n.getId let fvar := mkFVar decl.fvarId splitAllExistsTac fvar [] (fun _ _ => pure ()) -elab "split_target_conjs" : tactic => - withMainContext do - repeatTac splitConjTarget - example (h : a ∧ b) : a := by - split_all_exists h - split_conj h + split_existsl at h + split_conj at h assumption example (h : ∃ x y z, x + y + z ≥ 0) : ∃ x, x ≥ 0 := by - split_all_exists h + split_existsl at h rename_i x y z exists x + y + z -- cgit v1.2.3 From c652e97f7ab13164150331b4aa3f2e7ef11d24b9 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 25 Jul 2023 12:13:20 +0200 Subject: Add the possibility of using "_" as ident for progress --- backends/lean/Base/Progress/Progress.lean | 37 ++++++++++++++++++------------- backends/lean/Base/Utils.lean | 7 ++++-- 2 files changed, 26 insertions(+), 18 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index c8f94e9e..c0ddc63d 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -24,7 +24,7 @@ inductive ProgressError deriving Inhabited def progressWith (fExpr : Expr) (th : TheoremOrLocal) - (keep : Option Name) (ids : Array Name) (splitPost : Bool) + (keep : Option Name) (ids : Array (Option Name)) (splitPost : Bool) (asmTac : TacticM Unit) : TacticM ProgressError := do /- Apply the theorem We try to match the theorem with the goal @@ -90,13 +90,14 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) -- For the conjunctions, we split according once to separate the equality `f ... = .ret ...` -- from the postcondition, if there is, then continue to split the postcondition if there -- are remaining ids. 
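    -- For instance (an illustrative shape only, with arbitrary names): from an
    -- instantiated spec `h : x + y = ret z ∧ z.val = x.val + y.val`, the first split
    -- produces `hEq : x + y = ret z`, which is used to rewrite the goal, and a
    -- postcondition hypothesis that the user-provided ids (or fresh names) decompose further.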
- let splitEqAndPost (k : Expr → Option Expr → List Name → TacticM ProgressError) : TacticM ProgressError := do + let splitEqAndPost (k : Expr → Option Expr → List (Option Name) → TacticM ProgressError) : TacticM ProgressError := do if ← isConj (← inferType h) then do let hName := (← h.fvarId!.getDecl).userName - let (optId, ids) := listTryPopHead ids - let optIds ← match optId with - | none => do pure (some (hName, ← mkFreshUserName `h)) - | some id => do pure (some (hName, id)) + let (optIds, ids) ← do + match ids with + | [] => do pure (some (hName, ← mkFreshUserName `h), []) + | none :: ids => do pure (some (hName, ← mkFreshUserName `h), ids) + | some id :: ids => do pure (some (hName, id), ids) splitConjTac h optIds (fun hEq hPost => k hEq (some hPost) ids) else k h none ids -- Simplify the target by using the equality and some monad simplifications, @@ -121,8 +122,8 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) return (.Error m!"Too many ids provided ({ids}): there is no postcondition to split") else return .Ok | some hPost => do - let rec splitPostWithIds (prevId : Name) (hPost : Expr) (ids : List Name) : TacticM ProgressError := do - match ids with + let rec splitPostWithIds (prevId : Name) (hPost : Expr) (ids0 : List (Option Name)) : TacticM ProgressError := do + match ids0 with | [] => /- We used all the user provided ids. Split the remaining conjunctions by using fresh ids if the user @@ -133,9 +134,13 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) | nid :: ids => do trace[Progress] "Splitting post: {hPost}" -- Split + let nid ← do + match nid with + | none => mkFreshUserName `h + | some nid => pure nid if ← isConj (← inferType hPost) then splitConjTac hPost (some (prevId, nid)) (λ _ nhPost => splitPostWithIds nid nhPost ids) - else return (.Error m!"Too many ids provided ({nid :: ids}) not enough conjuncts to split in the postcondition") + else return (.Error m!"Too many ids provided ({ids0}) not enough conjuncts to split in the postcondition") let curPostId := (← hPost.fvarId!.getDecl).userName splitPostWithIds curPostId hPost ids match res with @@ -168,7 +173,7 @@ def getFirstArg (args : Array Expr) : Option Expr := do /- Helper: try to lookup a theorem and apply it, or continue with another tactic if it fails -/ -def tryLookupApply (keep : Option Name) (ids : Array Name) (splitPost : Bool) +def tryLookupApply (keep : Option Name) (ids : Array (Option Name)) (splitPost : Bool) (asmTac : TacticM Unit) (fExpr : Expr) (kind : String) (th : Option TheoremOrLocal) (x : TacticM Unit) : TacticM Unit := do let res ← do @@ -191,7 +196,7 @@ def tryLookupApply (keep : Option Name) (ids : Array Name) (splitPost : Bool) -- The array of ids are identifiers to use when introducing fresh variables def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrLocal) - (ids : Array Name) (splitPost : Bool) (asmTac : TacticM Unit) : TacticM Unit := do + (ids : Array (Option Name)) (splitPost : Bool) (asmTac : TacticM Unit) : TacticM Unit := do withMainContext do -- Retrieve the goal let mgoal ← Tactic.getMainGoal @@ -258,7 +263,7 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL -- Nothing worked: failed throwError "Progress failed" -syntax progressArgs := ("keep" ("as" (ident))?)? ("with" ident)? ("as" " ⟨ " ident,* " .."? " ⟩")? +syntax progressArgs := ("keep" ("as" (ident))?)? ("with" ident)? ("as" " ⟨ " (ident <|> "_"),* " .."? " ⟩")? 
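-- A usage sketch for the grammar above (the lemma and hypothesis names are
-- illustrative; compare the `Test` namespace at the end of this file):
--   progress                                     -- let the tactic look up a registered pspec theorem
--   progress keep as heq                         -- additionally keep the instantiated spec as `heq`
--   progress with Scalar.add_spec as ⟨ z, hz ⟩   -- use a given lemma and name the outputs
--   progress as ⟨ z, h1 .. ⟩                     -- split the remaining post-conditions with fresh names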
def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do let args := args.raw @@ -296,7 +301,7 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do let ids := let args := (args.get! 2).getArgs let args := (args.get! 2).getSepArgs - args.map Syntax.getId + args.map (λ s => if s.isIdent then some s.getId else none) trace[Progress] "User-provided ids: {ids}" let splitPost : Bool := let args := (args.get! 2).getArgs @@ -307,7 +312,7 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do elab "progress" args:progressArgs : tactic => evalProgress args -/-namespace Test +/- namespace Test open Primitives Result set_option trace.Progress true @@ -321,9 +326,9 @@ elab "progress" args:progressArgs : tactic => (hmax : x.val + y.val ≤ Scalar.max ty) : ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by -- progress keep as h with Scalar.add_spec as ⟨ z ⟩ - progress keep as h as ⟨ z, h1 .. ⟩ + progress keep as h as ⟨ x, h1 .. ⟩ simp [*] -end Test-/ +end Test -/ end Progress diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index 3b3d4729..66497a49 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -484,9 +484,12 @@ def listTryPopHead (ls : List α) : Option α × List α := If `ids` is not empty, we use it to name the introduced variables. We transmit the stripped expression and the remaining ids to the continuation. -/ -partial def splitAllExistsTac [Inhabited α] (h : Expr) (ids : List Name) (k : Expr → List Name → TacticM α) : TacticM α := do +partial def splitAllExistsTac [Inhabited α] (h : Expr) (ids : List (Option Name)) (k : Expr → List (Option Name) → TacticM α) : TacticM α := do try - let (optId, ids) := listTryPopHead ids + let (optId, ids) := + match ids with + | [] => (none, []) + | x :: ids => (x, ids) splitExistsTac h optId (fun _ body => splitAllExistsTac body ids k) catch _ => k h ids -- cgit v1.2.3 From 876137dff361620d8ade1a4ee94fa9274df0bdc6 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 25 Jul 2023 14:08:44 +0200 Subject: Improve int_tac and scalar_tac --- backends/lean/Base/Arith/Int.lean | 63 +++++++++++++++++++++++++++---- backends/lean/Base/Arith/Scalar.lean | 6 +-- backends/lean/Base/IList/IList.lean | 12 ++---- backends/lean/Base/Primitives/Vec.lean | 25 ++++++------ backends/lean/Base/Progress/Progress.lean | 13 ++++++- 5 files changed, 87 insertions(+), 32 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Int.lean b/backends/lean/Base/Arith/Int.lean index fa957293..3415866e 100644 --- a/backends/lean/Base/Arith/Int.lean +++ b/backends/lean/Base/Arith/Int.lean @@ -24,12 +24,29 @@ class PropHasImp (x : Prop) where concl : Prop prop : x → concl +instance (p : Int → Prop) : HasIntProp (Subtype p) where + prop_ty := λ x => p x + prop := λ x => x.property + -- This also works for `x ≠ y` because this expression reduces to `¬ x = y` -- and `Ne` is marked as `reducible` instance (x y : Int) : PropHasImp (¬ x = y) where concl := x < y ∨ x > y prop := λ (h:x ≠ y) => ne_is_lt_or_gt h +-- Check if a proposition is a linear integer proposition. +-- We notably use this to check the goals. 
+class IsLinearIntProp (x : Prop) where + +instance (x y : Int) : IsLinearIntProp (x < y) where +instance (x y : Int) : IsLinearIntProp (x > y) where +instance (x y : Int) : IsLinearIntProp (x ≤ y) where +instance (x y : Int) : IsLinearIntProp (x ≥ y) where +instance (x y : Int) : IsLinearIntProp (x ≥ y) where +/- It seems we don't need to do any special preprocessing when the *goal* + has the following shape - I guess `linarith` automatically calls `intro` -/ +instance (x y : Int) : IsLinearIntProp (¬ x = y) where + open Lean Lean.Elab Lean.Meta -- Explore a term by decomposing the applications (we explore the applied @@ -189,14 +206,27 @@ def intTacPreprocess (extraPreprocess : Tactic.TacticM Unit) : Tactic.TacticM U elab "int_tac_preprocess" : tactic => intTacPreprocess (do pure ()) -def intTac (extraPreprocess : Tactic.TacticM Unit) : Tactic.TacticM Unit := do +-- Check if the goal is a linear arithmetic goal +def goalIsLinearInt : Tactic.TacticM Bool := do + Tactic.withMainContext do + let gty ← Tactic.getMainTarget + match ← trySynthInstance (← mkAppM ``IsLinearIntProp #[gty]) with + | .some _ => pure true + | _ => pure false + +def intTac (splitGoalConjs : Bool) (extraPreprocess : Tactic.TacticM Unit) : Tactic.TacticM Unit := do Tactic.withMainContext do Tactic.focus do + let g ← Tactic.getMainGoal + trace[Arith] "Original goal: {g}" + -- Introduce all the universally quantified variables (includes the assumptions) + let (_, g) ← g.intros + Tactic.setGoals [g] -- Preprocess - wondering if we should do this before or after splitting -- the goal. I think before leads to a smaller proof term? Tactic.allGoals (intTacPreprocess extraPreprocess) -- Split the conjunctions in the goal - Tactic.allGoals (Utils.repeatTac Utils.splitConjTarget) + if splitGoalConjs then Tactic.allGoals (Utils.repeatTac Utils.splitConjTarget) -- Call linarith let linarith := do let cfg : Linarith.LinarithConfig := { @@ -204,10 +234,25 @@ def intTac (extraPreprocess : Tactic.TacticM Unit) : Tactic.TacticM Unit := do splitNe := false } Tactic.liftMetaFinishingTactic <| Linarith.linarith false [] cfg - Tactic.allGoals linarith - -elab "int_tac" : tactic => - intTac (do pure ()) + Tactic.allGoals do + -- We check if the goal is a linear arithmetic goal: if yes, we directly + -- call linarith, otherwise we first apply exfalso (we do this because + -- linarith is too general and sometimes fails to do this correctly). 
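      -- (Illustration with an arbitrary goal: for `⊢ a` where `a` is not an arithmetic
      -- proposition but the context contains `0 < x` and `x < 0`, we switch to proving
      -- `False` and let linarith derive the contradiction; see the exfalso example below.)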
+ if ← goalIsLinearInt then do + trace[Arith] "linarith goal: {← Tactic.getMainGoal}" + linarith + else do + let g ← Tactic.getMainGoal + let gs ← g.apply (Expr.const ``False.elim [.zero]) + let goals ← Tactic.getGoals + Tactic.setGoals (gs ++ goals) + Tactic.allGoals do + trace[Arith] "linarith goal: {← Tactic.getMainGoal}" + linarith + +elab "int_tac" args:(" split_goal"?): tactic => + let split := args.raw.getArgs.size > 0 + intTac split (do pure ()) example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by int_tac_preprocess @@ -219,10 +264,14 @@ example (x : Int) (h0: 0 ≤ x) (h1: x ≠ 0) : 0 < x := by -- Checking that things append correctly when there are several disjunctions example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y := by - int_tac + int_tac split_goal -- Checking that things append correctly when there are several disjunctions example (x y : Int) (h0: 0 ≤ x) (h1: x ≠ 0) (h2 : 0 ≤ y) (h3 : y ≠ 0) : 0 < x ∧ 0 < y ∧ x + y ≥ 2 := by + int_tac split_goal + +-- Checking that we can prove exfalso +example (a : Prop) (x : Int) (h0: 0 < x) (h1: x < 0) : a := by int_tac end Arith diff --git a/backends/lean/Base/Arith/Scalar.lean b/backends/lean/Base/Arith/Scalar.lean index f8903ecf..a56ea08b 100644 --- a/backends/lean/Base/Arith/Scalar.lean +++ b/backends/lean/Base/Arith/Scalar.lean @@ -28,11 +28,11 @@ elab "scalar_tac_preprocess" : tactic => intTacPreprocess scalarTacExtraPreprocess -- A tactic to solve linear arithmetic goals in the presence of scalars -def scalarTac : Tactic.TacticM Unit := do - intTac scalarTacExtraPreprocess +def scalarTac (splitGoalConjs : Bool) : Tactic.TacticM Unit := do + intTac splitGoalConjs scalarTacExtraPreprocess elab "scalar_tac" : tactic => - scalarTac + scalarTac false instance (ty : ScalarTy) : HasIntProp (Scalar ty) where -- prop_ty is inferred diff --git a/backends/lean/Base/IList/IList.lean b/backends/lean/Base/IList/IList.lean index 1773e593..2443b1a6 100644 --- a/backends/lean/Base/IList/IList.lean +++ b/backends/lean/Base/IList/IList.lean @@ -46,21 +46,18 @@ theorem indexOpt_bounds (ls : List α) (i : Int) : ls.indexOpt i = none ↔ i < 0 ∨ ls.len ≤ i := match ls with | [] => - have : ¬ (i < 0) → 0 ≤ i := by intro; linarith -- TODO: simplify (we could boost int_tac) + have : ¬ (i < 0) → 0 ≤ i := by int_tac by simp; tauto | _ :: tl => have := indexOpt_bounds tl (i - 1) if h: i = 0 then by simp [*]; - -- TODO: int_tac/scalar_tac should also explore the goal! - have := tl.len_pos - linarith + int_tac else by simp [*] constructor <;> intros <;> - -- TODO: tactic to split all disjunctions - rename_i hor <;> cases hor <;> + casesm* _ ∨ _ <;> -- splits all the disjunctions first | left; int_tac | right; int_tac theorem indexOpt_eq_index [Inhabited α] (ls : List α) (i : Int) : @@ -126,7 +123,6 @@ theorem length_update (ls : List α) (i : Int) (x : α) : (ls.update i x).length theorem len_update (ls : List α) (i : Int) (x : α) : (ls.update i x).len = ls.len := by simp [len_eq_length] - theorem left_length_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l1.length = l1'.length) : l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by revert l1' @@ -203,7 +199,7 @@ theorem index_eq (l.update i x).index i = x := fun _ _ => match l with - | [] => by simp at *; exfalso; scalar_tac -- TODO: exfalso needed. 
Son FIXME + | [] => by simp at *; scalar_tac | hd :: tl => if h: i = 0 then by diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index be3a0e5b..35092c29 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -16,20 +16,19 @@ open Result Error -- VECTORS -- ------------- -def Vec (α : Type u) := { l : List α // List.length l ≤ Usize.max } +def Vec (α : Type u) := { l : List α // l.length ≤ Usize.max } -- TODO: do we really need it? It should be with Subtype by default -instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val +instance Vec.cast (a : Type u): Coe (Vec a) (List a) where coe := λ v => v.val -instance (a : Type) : Arith.HasIntProp (Vec a) where - prop_ty := λ v => v.val.length ≤ Scalar.max ScalarTy.Usize - prop := λ ⟨ _, l ⟩ => l +instance (a : Type u) : Arith.HasIntProp (Vec a) where + prop_ty := λ v => v.val.len ≤ Scalar.max ScalarTy.Usize + prop := λ ⟨ _, l ⟩ => by simp[Scalar.max, List.len_eq_length, *] -example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by - intro_has_int_prop_instances - simp_all [Scalar.max, Scalar.min] +@[simp] +abbrev Vec.length {α : Type u} (v : Vec α) : Int := v.val.len -example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by +example {a: Type u} (v : Vec a) : v.length ≤ Scalar.max ScalarTy.Usize := by scalar_tac def Vec.new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ @@ -38,9 +37,6 @@ def Vec.len (α : Type u) (v : Vec α) : Usize := let ⟨ v, l ⟩ := v Usize.ofIntCore (List.length v) (by simp [Scalar.min, Usize.min]) l -@[simp] -abbrev Vec.length {α : Type u} (v : Vec α) : Int := v.val.len - -- This shouldn't be used def Vec.push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () @@ -115,11 +111,14 @@ theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) simp only [*] +instance {α : Type u} (p : Vec α → Prop) : Arith.HasIntProp (Subtype p) where + prop_ty := λ x => p x + prop := λ x => x.property + def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Vec α) := match v.val.indexOpt i.val with | none => fail .arrayOutOfBounds | some _ => - -- TODO: int_tac: introduce the refinements in the context? .ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ @[pspec] diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index c0ddc63d..a281f1d2 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -307,7 +307,18 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do let args := (args.get! 2).getArgs (args.get! 
3).getArgs.size > 0 trace[Progress] "Split post: {splitPost}" - progressAsmsOrLookupTheorem keep withArg ids splitPost (firstTac [assumptionTac, Arith.scalarTac]) + /- For scalarTac we have a fast track: if the goal is not a linear + arithmetic goal, we skip (note that otherwise, scalarTac would try + to prove a contradiction) -/ + let scalarTac : TacticM Unit := do + if ← Arith.goalIsLinearInt then + -- Also: we don't try to split the goal if it is a conjunction + -- (it shouldn't be) + Arith.scalarTac false + else + throwError "Not a linear arithmetic goal" + progressAsmsOrLookupTheorem keep withArg ids splitPost ( + firstTac [assumptionTac, scalarTac]) elab "progress" args:progressArgs : tactic => evalProgress args -- cgit v1.2.3 From 1854c631a6a7a3f8d45ad18e05547f9d3782c3ee Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 25 Jul 2023 16:26:08 +0200 Subject: Make progress on the hashmap properties --- backends/lean/Base/Arith/Base.lean | 4 +++ backends/lean/Base/Arith/Int.lean | 2 ++ backends/lean/Base/Arith/Scalar.lean | 3 +- backends/lean/Base/Primitives/Scalar.lean | 48 +++++++++++++++++-------------- backends/lean/Base/Primitives/Vec.lean | 13 +++++++-- backends/lean/Base/Progress/Base.lean | 4 +-- backends/lean/Base/Progress/Progress.lean | 4 +-- 7 files changed, 49 insertions(+), 29 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Base.lean b/backends/lean/Base/Arith/Base.lean index e008f7b9..9c11ed45 100644 --- a/backends/lean/Base/Arith/Base.lean +++ b/backends/lean/Base/Arith/Base.lean @@ -53,4 +53,8 @@ theorem int_pos_ind (p : Int → Prop) : rename_i m cases m <;> simp_all +-- We sometimes need this to make sure no natural numbers appear in the goals +-- TODO: there is probably something more general to do +theorem nat_zero_eq_int_zero : (0 : Nat) = (0 : Int) := by simp + end Arith diff --git a/backends/lean/Base/Arith/Int.lean b/backends/lean/Base/Arith/Int.lean index 3415866e..bc0676d8 100644 --- a/backends/lean/Base/Arith/Int.lean +++ b/backends/lean/Base/Arith/Int.lean @@ -225,6 +225,8 @@ def intTac (splitGoalConjs : Bool) (extraPreprocess : Tactic.TacticM Unit) : Ta -- Preprocess - wondering if we should do this before or after splitting -- the goal. I think before leads to a smaller proof term? 
Tactic.allGoals (intTacPreprocess extraPreprocess) + -- More preprocessing + Tactic.allGoals (Utils.simpAt [] [``nat_zero_eq_int_zero] [] .wildcard) -- Split the conjunctions in the goal if splitGoalConjs then Tactic.allGoals (Utils.repeatTac Utils.splitConjTarget) -- Call linarith diff --git a/backends/lean/Base/Arith/Scalar.lean b/backends/lean/Base/Arith/Scalar.lean index a56ea08b..6f4a8eba 100644 --- a/backends/lean/Base/Arith/Scalar.lean +++ b/backends/lean/Base/Arith/Scalar.lean @@ -21,7 +21,8 @@ def scalarTacExtraPreprocess : Tactic.TacticM Unit := do ``I8.min, ``I16.min, ``I32.min, ``I64.min, ``I128.min, ``I8.max, ``I16.max, ``I32.max, ``I64.max, ``I128.max, ``U8.min, ``U16.min, ``U32.min, ``U64.min, ``U128.min, - ``U8.max, ``U16.max, ``U32.max, ``U64.max, ``U128.max + ``U8.max, ``U16.max, ``U32.max, ``U64.max, ``U128.max, + ``Usize.min ] [] [] .wildcard elab "scalar_tac_preprocess" : tactic => diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean index 1e9b51c2..3beb7527 100644 --- a/backends/lean/Base/Primitives/Scalar.lean +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -66,27 +66,33 @@ def U128.smin : Int := 0 def U128.smax : Int := HPow.hPow 2 128 - 1 -- The "normalized" bounds, that we use in practice -def I8.min := -128 -def I8.max := 127 -def I16.min := -32768 -def I16.max := 32767 -def I32.min := -2147483648 -def I32.max := 2147483647 -def I64.min := -9223372036854775808 -def I64.max := 9223372036854775807 -def I128.min := -170141183460469231731687303715884105728 -def I128.max := 170141183460469231731687303715884105727 -@[simp] def U8.min := 0 -def U8.max := 255 -@[simp] def U16.min := 0 -def U16.max := 65535 -@[simp] def U32.min := 0 -def U32.max := 4294967295 -@[simp] def U64.min := 0 -def U64.max := 18446744073709551615 -@[simp] def U128.min := 0 -def U128.max := 340282366920938463463374607431768211455 -@[simp] def Usize.min := 0 +def I8.min : Int := -128 +def I8.max : Int := 127 +def I16.min : Int := -32768 +def I16.max : Int := 32767 +def I32.min : Int := -2147483648 +def I32.max : Int := 2147483647 +def I64.min : Int := -9223372036854775808 +def I64.max : Int := 9223372036854775807 +def I128.min : Int := -170141183460469231731687303715884105728 +def I128.max : Int := 170141183460469231731687303715884105727 +@[simp] +def U8.min : Int := 0 +def U8.max : Int := 255 +@[simp] +def U16.min : Int := 0 +def U16.max : Int := 65535 +@[simp] +def U32.min : Int := 0 +def U32.max : Int := 4294967295 +@[simp] +def U64.min : Int := 0 +def U64.max : Int := 18446744073709551615 +@[simp] +def U128.min : Int := 0 +def U128.max : Int := 340282366920938463463374607431768211455 +@[simp] +def Usize.min : Int := 0 def Isize.refined_min : { n:Int // n = I32.min ∨ n = I64.min } := ⟨ Isize.smin, by diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index 35092c29..5a709566 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -22,20 +22,27 @@ def Vec (α : Type u) := { l : List α // l.length ≤ Usize.max } instance Vec.cast (a : Type u): Coe (Vec a) (List a) where coe := λ v => v.val instance (a : Type u) : Arith.HasIntProp (Vec a) where - prop_ty := λ v => v.val.len ≤ Scalar.max ScalarTy.Usize + prop_ty := λ v => 0 ≤ v.val.len ∧ v.val.len ≤ Scalar.max ScalarTy.Usize prop := λ ⟨ _, l ⟩ => by simp[Scalar.max, List.len_eq_length, *] @[simp] abbrev Vec.length {α : Type u} (v : Vec α) : Int := v.val.len +@[simp] +abbrev Vec.v {α : Type u} (v : Vec α) : List α := 
v.val + example {a: Type u} (v : Vec a) : v.length ≤ Scalar.max ScalarTy.Usize := by scalar_tac def Vec.new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ +-- TODO: very annoying that the α is an explicit parameter def Vec.len (α : Type u) (v : Vec α) : Usize := - let ⟨ v, l ⟩ := v - Usize.ofIntCore (List.length v) (by simp [Scalar.min, Usize.min]) l + Usize.ofIntCore v.val.len (by scalar_tac) (by scalar_tac) + +@[simp] +theorem Vec.len_val {α : Type u} (v : Vec α) : (Vec.len α v).val = v.length := + by rfl -- This shouldn't be used def Vec.push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () diff --git a/backends/lean/Base/Progress/Base.lean b/backends/lean/Base/Progress/Base.lean index b54bdf7a..6f820a84 100644 --- a/backends/lean/Base/Progress/Base.lean +++ b/backends/lean/Base/Progress/Base.lean @@ -81,8 +81,8 @@ section Methods let (fExpr, f, args) ← do if mf.isConst ∧ mf.constName = ``Bind.bind then do -- Dive into the bind - let fExpr := margs.get! 4 - fExpr.consumeMData.withApp fun f args => pure (fExpr, f, args) + let fExpr := (margs.get! 4).consumeMData + fExpr.withApp fun f args => pure (fExpr, f, args) else pure (mExpr, mf, margs) trace[Progress] "After stripping the arguments of the function call:\n- f: {f}\n- args: {args}" if ¬ f.isConst then throwError "Not a constant: {f}" diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index a281f1d2..a2c7764f 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -58,9 +58,9 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) let (thBody, _) ← destEq thBody trace[Progress] "After splitting equality: {thBody}" -- There shouldn't be any existential variables in thBody - pure thBody + pure thBody.consumeMData -- Match the body with the target - trace[Progress] "Matching `{thBody}` with `{fExpr}`" + trace[Progress] "Matching:\n- body:\n{thBody}\n- target:\n{fExpr}" let ok ← isDefEq thBody fExpr if ¬ ok then throwError "Could not unify the theorem with the target:\n- theorem: {thBody}\n- target: {fExpr}" let mgoal ← Tactic.getMainGoal -- cgit v1.2.3 From 0cc3c78137434d848188eee2a66b1e2cacfd102e Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 25 Jul 2023 19:06:05 +0200 Subject: Make progress on the proofs of the hashmap --- backends/lean/Base/Arith/Int.lean | 1 + backends/lean/Base/IList/IList.lean | 41 +++++++++++++++++++++++++++++++ backends/lean/Base/Primitives/Base.lean | 2 +- backends/lean/Base/Primitives/Vec.lean | 20 ++++++--------- backends/lean/Base/Progress/Progress.lean | 34 ++++++++++++++++++++----- backends/lean/Base/Utils.lean | 36 ++++++++++++++++++--------- 6 files changed, 104 insertions(+), 30 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Int.lean b/backends/lean/Base/Arith/Int.lean index bc0676d8..48a30a49 100644 --- a/backends/lean/Base/Arith/Int.lean +++ b/backends/lean/Base/Arith/Int.lean @@ -43,6 +43,7 @@ instance (x y : Int) : IsLinearIntProp (x > y) where instance (x y : Int) : IsLinearIntProp (x ≤ y) where instance (x y : Int) : IsLinearIntProp (x ≥ y) where instance (x y : Int) : IsLinearIntProp (x ≥ y) where +instance (x y : Int) : IsLinearIntProp (x = y) where /- It seems we don't need to do any special preprocessing when the *goal* has the following shape - I guess `linarith` automatically calls `intro` -/ instance (x y : Int) : IsLinearIntProp (¬ x = y) where diff --git a/backends/lean/Base/IList/IList.lean b/backends/lean/Base/IList/IList.lean index 
2443b1a6..93047a1b 100644 --- a/backends/lean/Base/IList/IList.lean +++ b/backends/lean/Base/IList/IList.lean @@ -123,6 +123,10 @@ theorem length_update (ls : List α) (i : Int) (x : α) : (ls.update i x).length theorem len_update (ls : List α) (i : Int) (x : α) : (ls.update i x).len = ls.len := by simp [len_eq_length] +@[simp] +theorem len_map (ls : List α) (f : α → β) : (ls.map f).len = ls.len := by + simp [len_eq_length] + theorem left_length_eq_append_eq (l1 l2 l1' l2' : List α) (heq : l1.length = l1'.length) : l1 ++ l2 = l1' ++ l2' ↔ l1 = l1' ∧ l2 = l2' := by revert l1' @@ -210,6 +214,43 @@ theorem index_eq simp at * apply index_eq <;> scalar_tac +theorem update_map_eq {α : Type u} {β : Type v} (ls : List α) (i : Int) (x : α) (f : α → β) : + (ls.update i x).map f = (ls.map f).update i (f x) := + match ls with + | [] => by simp + | hd :: tl => + if h : i = 0 then by simp [*] + else + have hi := update_map_eq tl (i - 1) x f + by simp [*] + +theorem len_flatten_update_eq {α : Type u} (ls : List (List α)) (i : Int) (x : List α) + (h0 : 0 ≤ i) (h1 : i < ls.len) : + (ls.update i x).flatten.len = ls.flatten.len + x.len - (ls.index i).len := + match ls with + | [] => by simp at h1; int_tac + | hd :: tl => by + simp at h1 + if h : i = 0 then simp [*]; int_tac + else + have hi := len_flatten_update_eq tl (i - 1) x (by int_tac) (by int_tac) + simp [*] + int_tac + +@[simp] +theorem index_map_eq {α : Type u} {β : Type v} [Inhabited α] [Inhabited β] (ls : List α) (i : Int) (f : α → β) + (h0 : 0 ≤ i) (h1 : i < ls.len) : + (ls.map f).index i = f (ls.index i) := + match ls with + | [] => by simp at h1; int_tac + | hd :: tl => + if h : i = 0 then by + simp [*] + else + have hi := index_map_eq tl (i - 1) f (by int_tac) (by simp at h1; int_tac) + by + simp [*] + def allP {α : Type u} (l : List α) (p: α → Prop) : Prop := foldr (fun a r => p a ∧ r) True l diff --git a/backends/lean/Base/Primitives/Base.lean b/backends/lean/Base/Primitives/Base.lean index db462c38..7c0fa3bb 100644 --- a/backends/lean/Base/Primitives/Base.lean +++ b/backends/lean/Base/Primitives/Base.lean @@ -76,7 +76,7 @@ def eval_global {α: Type u} (x: Result α) (_: ret? 
x): α := /- DO-DSL SUPPORT -/ -def bind {α : Type u} {β : Type v} (x: Result α) (f: α -> Result β) : Result β := +def bind {α : Type u} {β : Type v} (x: Result α) (f: α → Result β) : Result β := match x with | ret v => f v | fail v => fail v diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index 5a709566..523372bb 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -75,10 +75,9 @@ def Vec.insert (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Vec α) := .fail arrayOutOfBounds @[pspec] -theorem Vec.insert_spec {α : Type u} (v: Vec α) (i: Usize) (x: α) : - i.val < v.length → +theorem Vec.insert_spec {α : Type u} (v: Vec α) (i: Usize) (x: α) + (hbound : i.val < v.length) : ∃ nv, v.insert α i x = ret nv ∧ nv.val = v.val.update i.val x := by - intro h simp [insert, *] def Vec.index (α : Type u) (v: Vec α) (i: Usize) : Result α := @@ -87,10 +86,9 @@ def Vec.index (α : Type u) (v: Vec α) (i: Usize) : Result α := | some x => ret x @[pspec] -theorem Vec.index_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : - i.val < v.length → +theorem Vec.index_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) + (hbound : i.val < v.length) : v.index α i = ret (v.val.index i.val) := by - intro simp only [index] -- TODO: dependent rewrite have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) @@ -109,10 +107,9 @@ def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize) : Result α := | some x => ret x @[pspec] -theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : - i.val < v.length → +theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) + (hbound : i.val < v.length) : v.index_mut α i = ret (v.val.index i.val) := by - intro simp only [index_mut] -- TODO: dependent rewrite have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) @@ -129,12 +126,11 @@ def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Ve .ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ @[pspec] -theorem Vec.index_mut_back_spec {α : Type u} (v: Vec α) (i: Usize) (x : α) : - i.val < v.length → +theorem Vec.index_mut_back_spec {α : Type u} (v: Vec α) (i: Usize) (x : α) + (hbound : i.val < v.length) : ∃ nv, v.index_mut_back α i x = ret nv ∧ nv.val = v.val.update i.val x := by - intro simp only [index_mut_back] have h := List.indexOpt_bounds v.val i.val split diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index a2c7764f..4a406bdf 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -1,6 +1,7 @@ import Lean import Base.Arith import Base.Progress.Base +import Base.Primitives -- TODO: remove? namespace Progress @@ -41,7 +42,12 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) match th with | .Theorem thName => let thDecl := env.constants.find! thName - pure thDecl.type + -- We have to introduce fresh meta-variables for the universes already + let ul : List (Name × Level) ← + thDecl.levelParams.mapM (λ x => do pure (x, ← mkFreshLevelMVar)) + let ulMap : HashMap Name Level := HashMap.ofList ul + let thTy := thDecl.type.instantiateLevelParamsCore (λ x => ulMap.find! 
x) + pure thTy | .Local asmDecl => pure asmDecl.type trace[Progress] "Looked up theorem/assumption type: {thTy}" -- TODO: the tactic fails if we uncomment withNewMCtxDepth @@ -129,15 +135,16 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) Split the remaining conjunctions by using fresh ids if the user instructed to fully split the post-condition, otherwise stop -/ if splitPost then - splitFullConjTac hPost (λ _ => pure .Ok) + splitFullConjTac true hPost (λ _ => pure .Ok) else pure .Ok | nid :: ids => do - trace[Progress] "Splitting post: {hPost}" + trace[Progress] "Splitting post: {← inferType hPost}" -- Split let nid ← do match nid with | none => mkFreshUserName `h | some nid => pure nid + trace[Progress] "\n- prevId: {prevId}\n- nid: {nid}\n- remaining ids: {ids}" if ← isConj (← inferType hPost) then splitConjTac hPost (some (prevId, nid)) (λ _ nhPost => splitPostWithIds nid nhPost ids) else return (.Error m!"Too many ids provided ({ids0}) not enough conjuncts to split in the postcondition") @@ -323,7 +330,7 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do elab "progress" args:progressArgs : tactic => evalProgress args -/- namespace Test +namespace Test open Primitives Result set_option trace.Progress true @@ -336,10 +343,25 @@ elab "progress" args:progressArgs : tactic => (hmin : Scalar.min ty ≤ x.val + y.val) (hmax : x.val + y.val ≤ Scalar.max ty) : ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by --- progress keep as h with Scalar.add_spec as ⟨ z ⟩ progress keep as h as ⟨ x, h1 .. ⟩ simp [*] -end Test -/ + example {ty} {x y : Scalar ty} + (hmin : Scalar.min ty ≤ x.val + y.val) + (hmax : x.val + y.val ≤ Scalar.max ty) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + progress keep as h with Scalar.add_spec as ⟨ z ⟩ + simp [*] + + /- Checking that universe instantiation works: the original spec uses + `α : Type u` where u is quantified, while here we use `α : Type 0` -/ + example {α : Type} (v: Vec α) (i: Usize) (x : α) + (hbounds : i.val < v.length) : + ∃ nv, v.index_mut_back α i x = ret nv ∧ + nv.val = v.val.update i.val x := by + progress + simp [*] + +end Test end Progress diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index 66497a49..f6dc45c7 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -1,6 +1,7 @@ import Lean import Mathlib.Tactic.Core import Mathlib.Tactic.LeftRight +import Base.UtilsBase /- Mathlib tactics: @@ -331,13 +332,13 @@ def assumptionTac : TacticM Unit := liftMetaTactic fun mvarId => do mvarId.assumption; pure [] def isConj (e : Expr) : MetaM Bool := - e.withApp fun f args => pure (f.isConstOf ``And ∧ args.size = 2) + e.consumeMData.withApp fun f args => pure (f.isConstOf ``And ∧ args.size = 2) -- Return the first conjunct if the expression is a conjunction, or the -- expression itself otherwise. Also return the second conjunct if it is a -- conjunction. def optSplitConj (e : Expr) : MetaM (Expr × Option Expr) := do - e.withApp fun f args => + e.consumeMData.withApp fun f args => if f.isConstOf ``And ∧ args.size = 2 then pure (args.get! 0, some (args.get! 
1)) else pure (e, none) @@ -345,6 +346,7 @@ def optSplitConj (e : Expr) : MetaM (Expr × Option Expr) := do def splitConjTarget : TacticM Unit := do withMainContext do let g ← getMainTarget + trace[Utils] "splitConjTarget: goal: {g}" -- The tactic was initially implemened with `_root_.Lean.MVarId.apply` -- but it tended to mess the goal by unfolding terms, even when it failed let (l, r) ← optSplitConj g @@ -525,18 +527,26 @@ def splitConjTac (h : Expr) (optIds : Option (Name × Name)) (k : Expr → Expr throwError "Not a conjunction" -- Tactic to fully split a conjunction -partial def splitFullConjTacAux [Inhabited α] [Nonempty α] (l : List Expr) (h : Expr) (k : List Expr → TacticM α) : TacticM α := do +partial def splitFullConjTacAux [Inhabited α] [Nonempty α] (keepCurrentName : Bool) (l : List Expr) (h : Expr) (k : List Expr → TacticM α) : TacticM α := do try - splitConjTac h none (λ h1 h2 => - splitFullConjTacAux l h1 (λ l1 => - splitFullConjTacAux l1 h2 (λ l2 => + let ids ← do + if keepCurrentName then do + let cur := (← h.fvarId!.getDecl).userName + let nid ← mkFreshUserName `h + pure (some (cur, nid)) + else + pure none + splitConjTac h ids (λ h1 h2 => + splitFullConjTacAux keepCurrentName l h1 (λ l1 => + splitFullConjTacAux keepCurrentName l1 h2 (λ l2 => k l2))) catch _ => k (h :: l) -- Tactic to fully split a conjunction -def splitFullConjTac [Inhabited α] [Nonempty α] (h : Expr) (k : List Expr → TacticM α) : TacticM α := do - splitFullConjTacAux [] h (λ l => k l.reverse) +-- `keepCurrentName`: if `true`, then the first conjunct has the name of the original assumption +def splitFullConjTac [Inhabited α] [Nonempty α] (keepCurrentName : Bool) (h : Expr) (k : List Expr → TacticM α) : TacticM α := do + splitFullConjTacAux keepCurrentName [] h (λ l => k l.reverse) syntax optAtArgs := ("at" ident)? 
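-- Editor's sketch (not part of the committed patch): a usage illustration for
-- the conjunction-splitting machinery that the `split_conj`/`split_conjs`
-- elaborators below wire up to `splitConjTac`/`splitFullConjTac`. The
-- hypothesis name `h` and the arithmetic facts are made up purely for the
-- example, and it assumes these tactics are in scope as defined in this file.
example (x : Int) (h : 0 ≤ x ∧ x ≤ 10 ∧ x ≠ 3) : x ≤ 10 := by
  split_conjs at h  -- fully splits `h` into three separate hypotheses
  assumption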
def elabOptAtArgs (args : TSyntax `Utils.optAtArgs) : TacticM (Option Expr) := do @@ -553,17 +563,21 @@ def elabOptAtArgs (args : TSyntax `Utils.optAtArgs) : TacticM (Option Expr) := d elab "split_conj" args:optAtArgs : tactic => do withMainContext do match ← elabOptAtArgs args with - | some fvar => + | some fvar => do + trace[Utils] "split at {fvar}" splitConjTac fvar none (fun _ _ => pure ()) - | none => + | none => do + trace[Utils] "split goal" splitConjTarget elab "split_conjs" args:optAtArgs : tactic => do withMainContext do match ← elabOptAtArgs args with | some fvar => - splitFullConjTac fvar (fun _ => pure ()) + trace[Utils] "split at {fvar}" + splitFullConjTac false fvar (fun _ => pure ()) | none => + trace[Utils] "split goal" repeatTac splitConjTarget elab "split_existsl" " at " n:ident : tactic => do -- cgit v1.2.3 From 9e8fccbe4b667fc341b6544030f85af05fe89307 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 25 Jul 2023 20:12:48 +0200 Subject: Make progress on the proofs of the hashmap --- backends/lean/Base/Primitives/Scalar.lean | 47 ++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 4 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean index 3beb7527..2e5be8bf 100644 --- a/backends/lean/Base/Primitives/Scalar.lean +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -660,10 +660,8 @@ theorem Scalar.rem_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : S simp [h] at hx hy have hmin : 0 ≤ x.val % y.val := Int.emod_nonneg x.val hnz have hmax : x.val % y.val ≤ Scalar.max ty := by - have h := @Int.ediv_emod_unique x.val y.val (x.val % y.val) (x.val / y.val) - simp at h - have : 0 < y.val := by int_tac - simp [*] at h + have h : 0 < y.val := by int_tac + have h := Int.emod_lt_of_pos x.val h have := y.hmax linarith have hs := @rem_spec ty x y hnz @@ -724,6 +722,47 @@ def U32.ofInt := @Scalar.ofInt .U32 def U64.ofInt := @Scalar.ofInt .U64 def U128.ofInt := @Scalar.ofInt .U128 +-- TODO: factor those lemmas out +@[simp] theorem Scalar.ofInt_val_eq {ty} (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty) : (Scalar.ofInt x h).val = x := by + simp [Scalar.ofInt, Scalar.ofIntCore] + +@[simp] theorem Isize.ofInt_val_eq (h : Scalar.min ScalarTy.Isize ≤ x ∧ x ≤ Scalar.max ScalarTy.Isize) : (Isize.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem I8.ofInt_val_eq (h : Scalar.min ScalarTy.I8 ≤ x ∧ x ≤ Scalar.max ScalarTy.I8) : (I8.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem I16.ofInt_val_eq (h : Scalar.min ScalarTy.I16 ≤ x ∧ x ≤ Scalar.max ScalarTy.I16) : (I16.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem I32.ofInt_val_eq (h : Scalar.min ScalarTy.I32 ≤ x ∧ x ≤ Scalar.max ScalarTy.I32) : (I32.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem I64.ofInt_val_eq (h : Scalar.min ScalarTy.I64 ≤ x ∧ x ≤ Scalar.max ScalarTy.I64) : (I64.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem I128.ofInt_val_eq (h : Scalar.min ScalarTy.I128 ≤ x ∧ x ≤ Scalar.max ScalarTy.I128) : (I128.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem Usize.ofInt_val_eq (h : Scalar.min ScalarTy.Usize ≤ x ∧ x ≤ Scalar.max ScalarTy.Usize) : (Usize.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem U8.ofInt_val_eq (h : Scalar.min ScalarTy.U8 ≤ x ∧ x ≤ Scalar.max ScalarTy.U8) : (U8.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem 
U16.ofInt_val_eq (h : Scalar.min ScalarTy.U16 ≤ x ∧ x ≤ Scalar.max ScalarTy.U16) : (U16.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem U32.ofInt_val_eq (h : Scalar.min ScalarTy.U32 ≤ x ∧ x ≤ Scalar.max ScalarTy.U32) : (U32.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem U64.ofInt_val_eq (h : Scalar.min ScalarTy.U64 ≤ x ∧ x ≤ Scalar.max ScalarTy.U64) : (U64.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem U128.ofInt_val_eq (h : Scalar.min ScalarTy.U128 ≤ x ∧ x ≤ Scalar.max ScalarTy.U128) : (U128.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + + -- Comparisons instance {ty} : LT (Scalar ty) where lt a b := LT.lt a.val b.val -- cgit v1.2.3 From 81e991822879a942af34489b7a072f31739f28f6 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 26 Jul 2023 12:37:17 +0200 Subject: Update the syntax of the progress tactic --- backends/lean/Base/Arith/Int.lean | 2 +- backends/lean/Base/Arith/Scalar.lean | 2 +- backends/lean/Base/Progress/Progress.lean | 41 +++++++++++++++++-------------- backends/lean/Base/Utils.lean | 12 ++++++--- 4 files changed, 32 insertions(+), 25 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Arith/Int.lean b/backends/lean/Base/Arith/Int.lean index 48a30a49..7a5bbe98 100644 --- a/backends/lean/Base/Arith/Int.lean +++ b/backends/lean/Base/Arith/Int.lean @@ -147,7 +147,7 @@ def introInstances (declToUnfold : Name) (lookup : Expr → MetaM (Option Expr)) let hs ← collectInstancesFromMainCtx lookup hs.toArray.mapM fun e => do let type ← inferType e - let name ← mkFreshUserName `h + let name ← mkFreshAnonPropUserName -- Add a declaration let nval ← Utils.addDeclTac name e type (asLet := false) -- Simplify to unfold the declaration to unfold (i.e., the projector) diff --git a/backends/lean/Base/Arith/Scalar.lean b/backends/lean/Base/Arith/Scalar.lean index 6f4a8eba..b792ff21 100644 --- a/backends/lean/Base/Arith/Scalar.lean +++ b/backends/lean/Base/Arith/Scalar.lean @@ -12,7 +12,7 @@ def scalarTacExtraPreprocess : Tactic.TacticM Unit := do -- Inroduce the bounds for the isize/usize types let add (e : Expr) : Tactic.TacticM Unit := do let ty ← inferType e - let _ ← Utils.addDeclTac (← mkFreshUserName `h) e ty (asLet := false) + let _ ← Utils.addDeclTac (← Utils.mkFreshAnonPropUserName) e ty (asLet := false) add (← mkAppM ``Scalar.cMin_bound #[.const ``ScalarTy.Isize []]) add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Usize []]) add (← mkAppM ``Scalar.cMax_bound #[.const ``ScalarTy.Isize []]) diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 4a406bdf..9300edff 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -79,7 +79,7 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) match th with | .Theorem thName => mkAppOptM thName (mvars.map some) | .Local decl => mkAppOptM' (mkFVar decl.fvarId) (mvars.map some) - let asmName ← do match keep with | none => mkFreshUserName `h | some n => do pure n + let asmName ← do match keep with | none => mkFreshAnonPropUserName | some n => do pure n let thTy ← inferType th let thAsm ← Utils.addDeclTac asmName th thTy (asLet := false) withMainContext do -- The context changed - TODO: remove once addDeclTac is updated @@ -101,8 +101,8 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) let hName := (← h.fvarId!.getDecl).userName let (optIds, ids) ← do match ids with - | [] => do pure (some (hName, ← mkFreshUserName `h), []) - | none :: 
ids => do pure (some (hName, ← mkFreshUserName `h), ids) + | [] => do pure (some (hName, ← mkFreshAnonPropUserName), []) + | none :: ids => do pure (some (hName, ← mkFreshAnonPropUserName), ids) | some id :: ids => do pure (some (hName, id), ids) splitConjTac h optIds (fun hEq hPost => k hEq (some hPost) ids) else k h none ids @@ -142,7 +142,7 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) -- Split let nid ← do match nid with - | none => mkFreshUserName `h + | none => mkFreshAnonPropUserName | some nid => pure nid trace[Progress] "\n- prevId: {prevId}\n- nid: {nid}\n- remaining ids: {ids}" if ← isConj (← inferType hPost) then @@ -270,23 +270,26 @@ def progressAsmsOrLookupTheorem (keep : Option Name) (withTh : Option TheoremOrL -- Nothing worked: failed throwError "Progress failed" -syntax progressArgs := ("keep" ("as" (ident))?)? ("with" ident)? ("as" " ⟨ " (ident <|> "_"),* " .."? " ⟩")? +syntax progressArgs := ("keep" (ident <|> "_"))? ("with" ident)? ("as" " ⟨ " (ident <|> "_"),* " .."? " ⟩")? def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do let args := args.raw -- Process the arguments to retrieve the identifiers to use trace[Progress] "Progress arguments: {args}" - let args := args.getArgs + let (keepArg, withArg, asArgs) ← + match args.getArgs.toList with + | [keepArg, withArg, asArgs] => do pure (keepArg, withArg, asArgs) + | _ => throwError "Unexpected: invalid arguments" let keep : Option Name ← do - let args := (args.get! 0).getArgs - if args.size > 0 then do - let args := (args.get! 1).getArgs - if args.size > 0 then pure (some (args.get! 1).getId) - else do pure (some (← mkFreshUserName `h)) - else pure none + let args := keepArg.getArgs + trace[Progress] "Keep args: {args}" + let arg := args.get! 1 + trace[Progress] "Keep arg: {arg}" + if arg.isIdent then pure (some arg.getId) + else do pure (some (← mkFreshAnonPropUserName)) trace[Progress] "Keep: {keep}" let withArg ← do - let withArg := (args.get! 1).getArgs + let withArg := withArg.getArgs if withArg.size > 0 then let id := withArg.get! 1 trace[Progress] "With arg: {id}" @@ -306,12 +309,12 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do pure (some (.Theorem id)) else pure none let ids := - let args := (args.get! 2).getArgs + let args := asArgs.getArgs let args := (args.get! 2).getSepArgs args.map (λ s => if s.isIdent then some s.getId else none) trace[Progress] "User-provided ids: {ids}" let splitPost : Bool := - let args := (args.get! 2).getArgs + let args := asArgs.getArgs (args.get! 3).getArgs.size > 0 trace[Progress] "Split post: {splitPost}" /- For scalarTac we have a fast track: if the goal is not a linear @@ -343,15 +346,15 @@ namespace Test (hmin : Scalar.min ty ≤ x.val + y.val) (hmax : x.val + y.val ≤ Scalar.max ty) : ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by - progress keep as h as ⟨ x, h1 .. ⟩ - simp [*] + progress keep _ as ⟨ z, h1 .. 
⟩ + simp [*, h1] example {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val + y.val) (hmax : x.val + y.val ≤ Scalar.max ty) : ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by - progress keep as h with Scalar.add_spec as ⟨ z ⟩ - simp [*] + progress keep h with Scalar.add_spec as ⟨ z ⟩ + simp [*, h] /- Checking that universe instantiation works: the original spec uses `α : Type u` where u is quantified, while here we use `α : Type 0` -/ diff --git a/backends/lean/Base/Utils.lean b/backends/lean/Base/Utils.lean index f6dc45c7..1f8f1455 100644 --- a/backends/lean/Base/Utils.lean +++ b/backends/lean/Base/Utils.lean @@ -201,6 +201,10 @@ partial def mapVisit (k : Nat → Expr → MetaM Expr) (e : Expr) : MetaM Expr : | .proj _ _ b => return e.updateProj! (← visit (i + 1) b) visit 0 e +-- Generate a fresh user name for an anonymous proposition to introduce in the +-- assumptions +def mkFreshAnonPropUserName := mkFreshUserName `_ + section Methods variable [MonadLiftT MetaM m] [MonadControlT MetaM m] [Monad m] [MonadError m] variable {a : Type} @@ -411,7 +415,7 @@ def splitDisjTac (h : Expr) (kleft kright : TacticM Unit) : TacticM Unit := do trace[Arith] "left: {inl}: {mleft}" trace[Arith] "right: {inr}: {mright}" -- Create the match expression - withLocalDeclD (← mkFreshUserName `h) hTy fun hVar => do + withLocalDeclD (← mkFreshAnonPropUserName) hTy fun hVar => do let motive ← mkLambdaFVars #[hVar] goalType let casesExpr ← mkAppOptM ``Or.casesOn #[a, b, motive, h, inl, inr] let mgoal ← getMainGoal @@ -505,8 +509,8 @@ def splitConjTac (h : Expr) (optIds : Option (Name × Name)) (k : Expr → Expr let altVarNames ← match optIds with | none => do - let id0 ← mkFreshUserName `h - let id1 ← mkFreshUserName `h + let id0 ← mkFreshAnonPropUserName + let id1 ← mkFreshAnonPropUserName pure #[{ varNames := [id0, id1] }] | some (id0, id1) => do pure #[{ varNames := [id0, id1] }] @@ -532,7 +536,7 @@ partial def splitFullConjTacAux [Inhabited α] [Nonempty α] (keepCurrentName : let ids ← do if keepCurrentName then do let cur := (← h.fvarId!.getDecl).userName - let nid ← mkFreshUserName `h + let nid ← mkFreshAnonPropUserName pure (some (cur, nid)) else pure none -- cgit v1.2.3 From 3337c4ac3326c3132dcc322f55f23a7d2054ceb0 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 26 Jul 2023 15:00:11 +0200 Subject: Update some of the Vec function specs --- backends/lean/Base/Primitives/Vec.lean | 13 +++++++++---- backends/lean/Base/Progress/Progress.lean | 17 ++++++++++++----- 2 files changed, 21 insertions(+), 9 deletions(-) (limited to 'backends') diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index 523372bb..a09d6ac2 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -85,14 +85,19 @@ def Vec.index (α : Type u) (v: Vec α) (i: Usize) : Result α := | none => fail .arrayOutOfBounds | some x => ret x +/- In the theorems below: we don't always need the `∃ ..`, but we use one + so that `progress` introduces an opaque variable and an equality. This + helps control the context. 
+ -/ + @[pspec] theorem Vec.index_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) (hbound : i.val < v.length) : - v.index α i = ret (v.val.index i.val) := by + ∃ x, v.index α i = ret x ∧ x = v.val.index i.val := by simp only [index] -- TODO: dependent rewrite have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) - simp only [*] + simp [*] -- This shouldn't be used def Vec.index_back (α : Type u) (v: Vec α) (i: Usize) (_: α) : Result Unit := @@ -109,11 +114,11 @@ def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize) : Result α := @[pspec] theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) (hbound : i.val < v.length) : - v.index_mut α i = ret (v.val.index i.val) := by + ∃ x, v.index_mut α i = ret x ∧ x = v.val.index i.val := by simp only [index_mut] -- TODO: dependent rewrite have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) - simp only [*] + simp [*] instance {α : Type u} (p : Vec α → Prop) : Arith.HasIntProp (Subtype p) where prop_ty := λ x => p x diff --git a/backends/lean/Base/Progress/Progress.lean b/backends/lean/Base/Progress/Progress.lean index 9300edff..6a4729dc 100644 --- a/backends/lean/Base/Progress/Progress.lean +++ b/backends/lean/Base/Progress/Progress.lean @@ -162,6 +162,7 @@ def progressWith (fExpr : Expr) (th : TheoremOrLocal) allGoals asmTac let newGoals ← getUnsolvedGoals setGoals (newGoals ++ curGoals) + trace[Progress] "progress: replaced the goals" -- pure .Ok @@ -281,12 +282,15 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do | [keepArg, withArg, asArgs] => do pure (keepArg, withArg, asArgs) | _ => throwError "Unexpected: invalid arguments" let keep : Option Name ← do + trace[Progress] "Keep arg: {keepArg}" let args := keepArg.getArgs - trace[Progress] "Keep args: {args}" - let arg := args.get! 1 - trace[Progress] "Keep arg: {arg}" - if arg.isIdent then pure (some arg.getId) - else do pure (some (← mkFreshAnonPropUserName)) + if args.size > 0 then do + trace[Progress] "Keep args: {args}" + let arg := args.get! 1 + trace[Progress] "Keep arg: {arg}" + if arg.isIdent then pure (some arg.getId) + else do pure (some (← mkFreshAnonPropUserName)) + else do pure none trace[Progress] "Keep: {keep}" let withArg ← do let withArg := withArg.getArgs @@ -328,7 +332,10 @@ def evalProgress (args : TSyntax `Progress.progressArgs) : TacticM Unit := do else throwError "Not a linear arithmetic goal" progressAsmsOrLookupTheorem keep withArg ids splitPost ( + withMainContext do + trace[Progress] "trying to solve assumption: {← getMainGoal}" firstTac [assumptionTac, scalarTac]) + trace[Diverge] "Progress done" elab "progress" args:progressArgs : tactic => evalProgress args -- cgit v1.2.3
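As a closing illustration (an editor's sketch, not code from any of the commits
above): with `Vec.index_spec` now phrased as
`∃ x, v.index α i = ret x ∧ x = v.val.index i.val`, the `progress` tactic
introduces an opaque result together with an equation, in the style of the
`Test` examples above. The statement below and the names `x1`/`hx` are chosen
only for illustration, and it assumes the same context as the `Test` namespace
(`open Primitives Result`).

example {α : Type} [Inhabited α] (v : Vec α) (i : Usize)
  (hbound : i.val < v.length) :
  ∃ x, v.index α i = ret x ∧ x = v.val.index i.val := by
  -- `progress` looks up the `@[pspec]` lemma `Vec.index_spec`, discharges the
  -- bound-check side condition with `assumption`/`scalar_tac`, and introduces
  -- the result `x1` together with the equation `hx`
  progress as ⟨ x1, hx ⟩
  simp [*]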