author    | Josh Chen | 2020-07-21 02:09:44 +0200
committer | Josh Chen | 2020-07-21 02:09:44 +0200
commit    | 12eed8685674b7d5ff7bc45a44a061e01f99ce5f (patch)
tree      | 44b8c1a3f1de9c22e41f583595005bf85681cd8c
parent    | 3bcaf5d1c40b513f8e4590f7d38d3eef8393092e (diff)
1. Type-checking/inference is now more principled, and its implementation has been improved. 2. Converted most tactics to context tactics.
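For orientation: the tactic-to-context-tactic conversion mentioned in point 2 follows a single pattern throughout the commit. The Isabelle/ML sketch below is lifted from the spartan/core/Spartan.thy hunk further down and is illustrative only; the `known` method stands in for the other converted methods.

```isabelle
(* Old setup (removed): a plain tactic on the goal state, wrapped in SIMPLE_METHOD *)
method_setup known =
  \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD (
    CHANGED (TRYALL (known_tac ctxt))))\<close>

(* New setup (added): a context tactic on the pair (ctxt, st), wrapped in CONTEXT_METHOD *)
method_setup known =
  \<open>Scan.succeed (K (CONTEXT_METHOD (
    CHEADGOAL o Types.known_ctac)))\<close>
```

The same CONTEXT_METHOD/CHEADGOAL wrapping, usually composed with SIDE_CONDS to discharge typing side conditions, is applied to typechk, rule, dest, intro, intros, elim and cases in the hunks below.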
Diffstat
-rw-r--r-- | hott/Equivalence.thy             |  52
-rw-r--r-- | hott/Identity.thy                | 147
-rw-r--r-- | hott/More_List.thy               |   4
-rw-r--r-- | hott/More_Nat.thy                |   6
-rw-r--r-- | hott/Nat.thy                     |  17
-rw-r--r-- | spartan/core/Spartan.thy         | 147
-rw-r--r-- | spartan/core/context_tactical.ML |  33
-rw-r--r-- | spartan/core/elimination.ML      |   6
-rw-r--r-- | spartan/core/eqsubst.ML          |  36
-rw-r--r-- | spartan/core/rewrite.ML          |  26
-rw-r--r-- | spartan/core/tactics.ML          | 280
-rw-r--r-- | spartan/core/tactics2.ML         | 191
-rw-r--r-- | spartan/core/typechecking.ML     |  21
-rw-r--r-- | spartan/lib/List.thy             |  13
-rw-r--r-- | spartan/lib/Maybe.thy            |   7
-rw-r--r-- | spartan/lib/More_Types.thy       |  30
16 files changed, 394 insertions, 622 deletions
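One recurring change before the per-file hunks: the rule-declaration attributes are reorganized, splitting the old [intros]/[elims]/[comps] buckets into [form], [intro]/[intros], [elim], and [comp]. The before/after below is copied from the hott/Nat.thy hunk as a representative example; the List, Maybe and More_Types declarations change analogously.

```isabelle
(* Before: formation and introduction rules share one bucket; plural attribute names *)
lemmas
  [intros] = NatF Nat_zero Nat_suc and
  [elims "?n"] = NatE and
  [comps] = Nat_comp_zero Nat_comp_suc

(* After: formation rules get their own [form] attribute, and the
   introduction rules are declared for both the intro and intros methods *)
lemmas
  [form] = NatF and
  [intro, intros] = Nat_zero Nat_suc and
  [elim "?n"] = NatE and
  [comp] = Nat_comp_zero Nat_comp_suc
```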
diff --git a/hott/Equivalence.thy b/hott/Equivalence.thy index 66248a9..88adc8b 100644 --- a/hott/Equivalence.thy +++ b/hott/Equivalence.thy @@ -35,7 +35,7 @@ Lemma (derive) hsym: "\<And>x. x: A \<Longrightarrow> B x: U i" shows "H: f ~ g \<Longrightarrow> g ~ f" unfolding homotopy_def - apply intros + apply intro apply (rule pathinv) \<guillemotright> by (elim H) \<guillemotright> by typechk @@ -106,7 +106,8 @@ Lemma homotopy_id_left [typechk]: Lemma homotopy_id_right [typechk]: assumes "A: U i" "B: U i" "f: A \<rightarrow> B" shows "homotopy_refl A f: f \<circ> (id A) ~ f" - unfolding homotopy_refl_def homotopy_def by reduce + unfolding homotopy_refl_def homotopy_def + by reduce Lemma homotopy_funcomp_left: assumes @@ -165,11 +166,9 @@ Lemma (derive) id_qinv: assumes "A: U i" shows "qinv (id A)" unfolding qinv_def - apply intro defer - apply intro defer - apply htpy_id - apply id_htpy - done + proof intro + show "id A: A \<rightarrow> A" by typechk + qed (reduce, intro; refl) Lemma (derive) quasiinv_qinv: assumes "A: U i" "B: U i" "f: A \<rightarrow> B" @@ -272,7 +271,7 @@ Lemma (derive) biinv_imp_qinv: also have ".. ~ (id A) \<circ> h" by (subst funcomp_assoc[symmetric]) (htpy_o, fact) also have ".. ~ h" by reduce refl finally have "g ~ h" by this - then have "f \<circ> g ~ f \<circ> h" by o_htpy + then have "f \<circ> g ~ f \<circ> h" by (o_htpy, this) also note \<open>H2:_\<close> finally show "f \<circ> g ~ id B" by this qed @@ -313,10 +312,9 @@ Lemma (derive) equivalence_refl: assumes "A: U i" shows "A \<simeq> A" unfolding equivalence_def - apply intro defer - apply (rule qinv_imp_biinv) defer - apply (rule id_qinv) - done + proof intro + show "biinv (id A)" by (rule qinv_imp_biinv) (rule id_qinv) + qed typechk text \<open> The following could perhaps be easier with transport (but then I think we need @@ -340,11 +338,14 @@ Lemma (derive) equivalence_symmetric: Lemma (derive) equivalence_transitive: assumes "A: U i" "B: U i" "C: U i" shows "A \<simeq> B \<rightarrow> B \<simeq> C \<rightarrow> A \<simeq> C" + (* proof intros + fix AB BC assume "AB: A \<simeq> B" "BC: B \<simeq> C" + let "?f: {}" = "(fst AB) :: o" *) apply intros unfolding equivalence_def focus vars p q apply (elim p, elim q) focus vars f biinv\<^sub>f g biinv\<^sub>g apply intro - \<guillemotright> apply (rule funcompI) defer by assumption known + \<guillemotright> apply (rule funcompI) defer by assumption+ known \<guillemotright> by (rule funcomp_biinv) done done @@ -372,32 +373,31 @@ Lemma (derive) id_imp_equiv: shows "<trans (id (U i)) p, ?isequiv>: A \<simeq> B" unfolding equivalence_def apply intros defer - - \<comment> \<open>Switch off auto-typechecking, which messes with universe levels\<close> - supply [[auto_typechk=false]] - - \<guillemotright> apply (eq p, typechk) + \<guillemotright> apply (eq p) \<^item> prems vars A B apply (rewrite at A in "A \<rightarrow> B" id_comp[symmetric]) + using [[solve_side_conds=1]] apply (rewrite at B in "_ \<rightarrow> B" id_comp[symmetric]) - apply (rule transport, rule U_in_U) - apply (rule lift_universe_codomain, rule U_in_U) - apply (typechk, rule U_in_U) + apply (rule transport, rule Ui_in_USi) + apply (rule lift_universe_codomain, rule Ui_in_USi) + apply (typechk, rule Ui_in_USi) done \<^item> prems vars A + using [[solve_side_conds=1]] apply (subst transport_comp) - \<^enum> by (rule U_in_U) - \<^enum> by reduce (rule lift_U) + \<^enum> by (rule Ui_in_USi) + \<^enum> by reduce (rule in_USi_if_in_Ui) \<^enum> by reduce (rule id_biinv) done done \<guillemotright> 
\<comment> \<open>Similar proof as in the first subgoal above\<close> apply (rewrite at A in "A \<rightarrow> B" id_comp[symmetric]) + using [[solve_side_conds=1]] apply (rewrite at B in "_ \<rightarrow> B" id_comp[symmetric]) - apply (rule transport, rule U_in_U) - apply (rule lift_universe_codomain, rule U_in_U) - apply (typechk, rule U_in_U) + apply (rule transport, rule Ui_in_USi) + apply (rule lift_universe_codomain, rule Ui_in_USi) + apply (typechk, rule Ui_in_USi) done done diff --git a/hott/Identity.thy b/hott/Identity.thy index dd2d6a0..1cb3946 100644 --- a/hott/Identity.thy +++ b/hott/Identity.thy @@ -20,7 +20,8 @@ syntax "_Id" :: \<open>o \<Rightarrow> o \<Rightarrow> o \<Rightarrow> o\<close> translations "a =\<^bsub>A\<^esub> b" \<rightleftharpoons> "CONST Id A a b" axiomatization where - IdF: "\<lbrakk>A: U i; a: A; b: A\<rbrakk> \<Longrightarrow> a =\<^bsub>A\<^esub> b: U i" and + \<comment> \<open>Here `A: U i` comes last because A is often implicit\<close> + IdF: "\<lbrakk>a: A; b: A; A: U i\<rbrakk> \<Longrightarrow> a =\<^bsub>A\<^esub> b: U i" and IdI: "a: A \<Longrightarrow> refl a: a =\<^bsub>A\<^esub> a" and @@ -39,35 +40,36 @@ axiomatization where \<rbrakk> \<Longrightarrow> IdInd A (fn x y p. C x y p) (fn x. f x) a a (refl a) \<equiv> f a" lemmas - [intros] = IdF IdI and - [elims "?p" "?a" "?b"] = IdE and - [comps] = Id_comp and + [form] = IdF and + [intro, intros] = IdI and + [elim "?p" "?a" "?b"] = IdE and + [comp] = Id_comp and [refl] = IdI section \<open>Path induction\<close> -method_setup eq = \<open> -Args.term >> (fn tm => fn ctxt => CONTEXT_METHOD (K ( - CONTEXT_SUBGOAL (fn (goal, i) => - let - val facts = Proof_Context.facts_of ctxt - val prems = Logic.strip_assums_hyp goal - val template = \<^const>\<open>has_type\<close> - $ tm - $ (\<^const>\<open>Id\<close> $ Var (("*?A", 0), \<^typ>\<open>o\<close>) - $ Var (("*?x", 0), \<^typ>\<open>o\<close>) - $ Var (("*?y", 0), \<^typ>\<open>o\<close>)) - val types = - map (Thm.prop_of o #1) (Facts.could_unify facts template) - @ filter (fn prem => Term.could_unify (template, prem)) prems - |> map Lib.type_of_typing - in case types of - (\<^const>\<open>Id\<close> $ _ $ x $ y)::_ => - elim_context_tac [tm, x, y] ctxt i - | _ => Context_Tactic.CONTEXT_TACTIC no_tac - end) 1))) -\<close> +method_setup eq = + \<open>Args.term >> (fn tm => K (CONTEXT_METHOD ( + CHEADGOAL o SIDE_CONDS ( + CONTEXT_SUBGOAL (fn (goal, i) => fn cst as (ctxt, st) => + let + val facts = Proof_Context.facts_of ctxt + val prems = Logic.strip_assums_hyp goal + val template = \<^const>\<open>has_type\<close> + $ tm + $ (\<^const>\<open>Id\<close> $ Var (("*?A", 0), \<^typ>\<open>o\<close>) + $ Var (("*?x", 0), \<^typ>\<open>o\<close>) + $ Var (("*?y", 0), \<^typ>\<open>o\<close>)) + val types = + map (Thm.prop_of o #1) (Facts.could_unify facts template) + @ filter (fn prem => Term.could_unify (template, prem)) prems + |> map Lib.type_of_typing + in case types of + (Const (\<^const_name>\<open>Id\<close>, _) $ _ $ x $ y) :: _ => + elim_ctac [tm, x, y] i cst + | _ => no_ctac cst + end)))))\<close> section \<open>Symmetry and transitivity\<close> @@ -77,7 +79,7 @@ Lemma (derive) pathinv: shows "y =\<^bsub>A\<^esub> x" by (eq p) intro -lemma pathinv_comp [comps]: +lemma pathinv_comp [comp]: assumes "x: A" "A: U i" shows "pathinv A x x (refl x) \<equiv> refl x" unfolding pathinv_def by reduce @@ -95,7 +97,7 @@ Lemma (derive) pathcomp: done done -lemma pathcomp_comp [comps]: +lemma pathcomp_comp [comp]: assumes "a: A" "A: U i" shows "pathcomp A a a a 
(refl a) (refl a) \<equiv> refl a" unfolding pathcomp_def by reduce @@ -133,14 +135,14 @@ Lemma (derive) refl_pathcomp: assumes "A: U i" "x: A" "y: A" "p: x =\<^bsub>A\<^esub> y" shows "(refl x) \<bullet> p = p" apply (eq p) - apply (reduce; intros) + apply (reduce; intro) done Lemma (derive) pathcomp_refl: assumes "A: U i" "x: A" "y: A" "p: x =\<^bsub>A\<^esub> y" shows "p \<bullet> (refl y) = p" apply (eq p) - apply (reduce; intros) + apply (reduce; intro) done definition [implicit]: "ru p \<equiv> pathcomp_refl ? ? ? p" @@ -150,36 +152,30 @@ translations "CONST ru p" \<leftharpoondown> "CONST pathcomp_refl A x y p" "CONST lu p" \<leftharpoondown> "CONST refl_pathcomp A x y p" -Lemma lu_refl [comps]: +Lemma lu_refl [comp]: assumes "A: U i" "x: A" shows "lu (refl x) \<equiv> refl (refl x)" - unfolding refl_pathcomp_def by reduce + unfolding refl_pathcomp_def by reduce+ -Lemma ru_refl [comps]: +Lemma ru_refl [comp]: assumes "A: U i" "x: A" shows "ru (refl x) \<equiv> refl (refl x)" - unfolding pathcomp_refl_def by reduce + unfolding pathcomp_refl_def by reduce+ Lemma (derive) inv_pathcomp: assumes "A: U i" "x: A" "y: A" "p: x =\<^bsub>A\<^esub> y" shows "p\<inverse> \<bullet> p = refl y" - apply (eq p) - apply (reduce; intros) - done + by (eq p) (reduce; intro) Lemma (derive) pathcomp_inv: assumes "A: U i" "x: A" "y: A" "p: x =\<^bsub>A\<^esub> y" shows "p \<bullet> p\<inverse> = refl x" - apply (eq p) - apply (reduce; intros) - done + by (eq p) (reduce; intro) Lemma (derive) pathinv_pathinv: assumes "A: U i" "x: A" "y: A" "p: x =\<^bsub>A\<^esub> y" shows "p\<inverse>\<inverse> = p" - apply (eq p) - apply (reduce; intros) - done + by (eq p) (reduce; intro) Lemma (derive) pathcomp_assoc: assumes @@ -191,7 +187,7 @@ Lemma (derive) pathcomp_assoc: apply (eq p) focus prems vars x p apply (eq p) - apply (reduce; intros) + apply (reduce; intro) done done done @@ -206,16 +202,14 @@ Lemma (derive) ap: "f: A \<rightarrow> B" "p: x =\<^bsub>A\<^esub> y" shows "f x = f y" - apply (eq p) - apply intro - done + by (eq p) intro definition ap_i ("_[_]" [1000, 0]) where [implicit]: "ap_i f p \<equiv> ap ? ? ? ? f p" translations "f[p]" \<leftharpoondown> "CONST ap A B x y f p" -Lemma ap_refl [comps]: +Lemma ap_refl [comp]: assumes "f: A \<rightarrow> B" "x: A" "A: U i" "B: U i" shows "f[refl x] \<equiv> refl (f x)" unfolding ap_def by reduce @@ -254,17 +248,16 @@ Lemma (derive) ap_funcomp: "p: x =\<^bsub>A\<^esub> y" shows "(g \<circ> f)[p] = g[f[p]]" apply (eq p) - apply (simp only: funcomp_apply_comp[symmetric]) - apply (reduce; intro) + \<guillemotright> by reduce + \<guillemotright> by reduce intro done Lemma (derive) ap_id: assumes "A: U i" "x: A" "y: A" "p: x =\<^bsub>A\<^esub> y" shows "(id A)[p] = p" apply (eq p) - apply (rewrite at "\<hole> = _" id_comp[symmetric]) - apply (rewrite at "_ = \<hole>" id_comp[symmetric]) - apply (reduce; intros) + \<guillemotright> by reduce + \<guillemotright> by reduce intro done @@ -284,7 +277,7 @@ definition transport_i ("trans") translations "trans P p" \<leftharpoondown> "CONST transport A P x y p" -Lemma transport_comp [comps]: +Lemma transport_comp [comp]: assumes "a: A" "A: U i" @@ -302,6 +295,7 @@ Lemma use_transport: "A: U i" "\<And>x. 
x: A \<Longrightarrow> P x: U i" shows "trans P p\<inverse> u: P y" + unfolding transport_def pathinv_def by typechk method transport uses eq = (rule use_transport[OF eq]) @@ -322,7 +316,7 @@ Lemma (derive) transport_right_inv: "x: A" "y: A" "p: x =\<^bsub>A\<^esub> y" shows "(trans P p) \<circ> (trans P p\<inverse>) = id (P y)" - by (eq p) (reduce; intros) + by (eq p) (reduce; intro) Lemma (derive) transport_pathcomp: assumes @@ -335,7 +329,7 @@ Lemma (derive) transport_pathcomp: apply (eq p) focus prems vars x p apply (eq p) - apply (reduce; intros) + apply (reduce; intro) done done @@ -348,7 +342,7 @@ Lemma (derive) transport_compose_typefam: "p: x =\<^bsub>A\<^esub> y" "u: P (f x)" shows "trans (fn x. P (f x)) p u = trans P f[p] u" - by (eq p) (reduce; intros) + by (eq p) (reduce; intro) Lemma (derive) transport_function_family: assumes @@ -360,7 +354,7 @@ Lemma (derive) transport_function_family: "u: P x" "p: x =\<^bsub>A\<^esub> y" shows "trans Q p ((f x) u) = (f y) (trans P p u)" - by (eq p) (reduce; intros) + by (eq p) (reduce; intro) Lemma (derive) transport_const: assumes @@ -375,12 +369,12 @@ definition transport_const_i ("trans'_const") translations "trans_const B p" \<leftharpoondown> "CONST transport_const A B x y p" -Lemma transport_const_comp [comps]: +Lemma transport_const_comp [comp]: assumes "x: A" "b: B" "A: U i" "B: U i" shows "trans_const B (refl x) b\<equiv> refl b" - unfolding transport_const_def by reduce + unfolding transport_const_def by reduce+ Lemma (derive) pathlift: assumes @@ -390,21 +384,21 @@ Lemma (derive) pathlift: "p: x =\<^bsub>A\<^esub> y" "u: P x" shows "<x, u> = <y, trans P p u>" - by (eq p) (reduce; intros) + by (eq p) (reduce; intro) definition pathlift_i ("lift") where [implicit]: "lift P p u \<equiv> pathlift ? P ? ? p u" translations "lift P p u" \<leftharpoondown> "CONST pathlift A P x y p u" -Lemma pathlift_comp [comps]: +Lemma pathlift_comp [comp]: assumes "u: P x" "x: A" "\<And>x. x: A \<Longrightarrow> P x: U i" "A: U i" shows "lift P (refl x) u \<equiv> refl <x, u>" - unfolding pathlift_def by reduce + unfolding pathlift_def by reduce+ Lemma (derive) pathlift_fst: assumes @@ -415,13 +409,7 @@ Lemma (derive) pathlift_fst: "p: x =\<^bsub>A\<^esub> y" shows "fst[lift P p u] = p" apply (eq p) - text \<open>Some rewriting needed here:\<close> - \<guillemotright> vars x y - (*Here an automatic reordering tactic would be helpful*) - apply (rewrite at x in "x = y" fst_comp[symmetric]) - prefer 4 - apply (rewrite at y in "_ = y" fst_comp[symmetric]) - done + \<guillemotright> by reduce \<guillemotright> by reduce intro done @@ -436,21 +424,21 @@ Lemma (derive) apd: "x: A" "y: A" "p: x =\<^bsub>A\<^esub> y" shows "trans P p (f x) = f y" - by (eq p) (reduce; intros; typechk) + by (eq p) (reduce; intro) definition apd_i ("apd") where [implicit]: "apd f p \<equiv> Identity.apd ? ? f ? ? p" translations "apd f p" \<leftharpoondown> "CONST Identity.apd A P f x y p" -Lemma dependent_map_comp [comps]: +Lemma dependent_map_comp [comp]: assumes "f: \<Prod>x: A. P x" "x: A" "A: U i" "\<And>x. 
x: A \<Longrightarrow> P x: U i" shows "apd f (refl x) \<equiv> refl (f x)" - unfolding apd_def by reduce + unfolding apd_def by reduce+ Lemma (derive) apd_ap: assumes @@ -467,6 +455,9 @@ section \<open>Whiskering\<close> Lemma (derive) right_whisker: assumes "A: U i" "a: A" "b: A" "c: A" shows "\<lbrakk>p: a = b; q: a = b; r: b = c; \<alpha>: p =\<^bsub>a = b\<^esub> q\<rbrakk> \<Longrightarrow> p \<bullet> r = q \<bullet> r" + \<comment> \<open>TODO: In the above we need to annotate the type of \<alpha> with the type `a = b` + in order for the `eq` method to work correctly. This should be fixed with a + pre-proof elaborator.\<close> apply (eq r) focus prems vars x s t proof - @@ -500,17 +491,17 @@ translations "\<alpha> \<bullet>\<^sub>r\<^bsub>a\<^esub> r" \<leftharpoondown> "CONST right_whisker A a b c p q r \<alpha>" "r \<bullet>\<^sub>l\<^bsub>c\<^esub> \<alpha>" \<leftharpoondown> "CONST left_whisker A a b c p q r \<alpha>" -Lemma whisker_refl [comps]: +Lemma whisker_refl [comp]: assumes "A: U i" "a: A" "b: A" shows "\<lbrakk>p: a = b; q: a = b; \<alpha>: p =\<^bsub>a = b\<^esub> q\<rbrakk> \<Longrightarrow> \<alpha> \<bullet>\<^sub>r\<^bsub>a\<^esub> (refl b) \<equiv> ru p \<bullet> \<alpha> \<bullet> (ru q)\<inverse>" - unfolding right_whisker_def by reduce + unfolding right_whisker_def by reduce+ -Lemma refl_whisker [comps]: +Lemma refl_whisker [comp]: assumes "A: U i" "a: A" "b: A" shows "\<lbrakk>p: a = b; q: a = b; \<alpha>: p = q\<rbrakk> \<Longrightarrow> (refl a) \<bullet>\<^sub>l\<^bsub>b\<^esub> \<alpha> \<equiv> (lu p) \<bullet> \<alpha> \<bullet> (lu q)\<inverse>" - unfolding left_whisker_def by reduce + unfolding left_whisker_def by reduce+ method left_whisker = (rule left_whisker) method right_whisker = (rule right_whisker) @@ -632,12 +623,12 @@ Lemma (derive) eckmann_hilton: proof - have "\<alpha> \<bullet> \<beta> = \<alpha> \<star> \<beta>" by (rule pathcomp_eq_horiz_pathcomp) - also have [simplified comps]: ".. = \<alpha> \<star>' \<beta>" + also have [simplified comp]: ".. = \<alpha> \<star>' \<beta>" by (transport eq: \<Omega>2.horiz_pathcomp_eq_horiz_pathcomp') refl also have ".. = \<beta> \<bullet> \<alpha>" by (rule pathcomp_eq_horiz_pathcomp') finally show "\<alpha> \<bullet> \<beta> = \<beta> \<bullet> \<alpha>" - by this (reduce add: \<Omega>2.horiz_pathcomp_def \<Omega>2.horiz_pathcomp'_def) + by this (reduce add: \<Omega>2.horiz_pathcomp_def \<Omega>2.horiz_pathcomp'_def)+ qed end diff --git a/hott/More_List.thy b/hott/More_List.thy index 460dc7b..1b8034f 100644 --- a/hott/More_List.thy +++ b/hott/More_List.thy @@ -10,8 +10,8 @@ section \<open>Length\<close> definition [implicit]: "len \<equiv> ListRec ? Nat 0 (fn _ _ rec. 
suc rec)" experiment begin - Lemma "len [] \<equiv> ?n" by (subst comps)+ - Lemma "len [0, suc 0, suc (suc 0)] \<equiv> ?n" by (subst comps)+ + Lemma "len [] \<equiv> ?n" by (subst comp)+ + Lemma "len [0, suc 0, suc (suc 0)] \<equiv> ?n" by (subst comp)+ end diff --git a/hott/More_Nat.thy b/hott/More_Nat.thy index 8177170..59c8d54 100644 --- a/hott/More_Nat.thy +++ b/hott/More_Nat.thy @@ -15,15 +15,15 @@ Lemma (derive) eq: shows "Nat \<rightarrow> Nat \<rightarrow> U O" apply intro focus vars n apply elim \<guillemotright> by (rule TopF) \<comment> \<open>n \<equiv> 0\<close> \<guillemotright> by (rule BotF) \<comment> \<open>n \<equiv> suc _\<close> - \<guillemotright> by (rule U_in_U) + \<guillemotright> by (rule Ui_in_USi) done \<comment> \<open>m \<equiv> suc k\<close> apply intro focus vars k k_eq n apply (elim n) \<guillemotright> by (rule BotF) \<comment> \<open>n \<equiv> 0\<close> \<guillemotright> prems vars l proof - show "k_eq l: U O" by typechk qed - \<guillemotright> by (rule U_in_U) + \<guillemotright> by (rule Ui_in_USi) done - by (rule U_in_U) + by (rule Ui_in_USi) done text \<open> diff --git a/hott/Nat.thy b/hott/Nat.thy index 4abd315..177ec47 100644 --- a/hott/Nat.thy +++ b/hott/Nat.thy @@ -38,9 +38,10 @@ where f n (NatInd (fn n. C n) c\<^sub>0 (fn k rec. f k rec) n)" lemmas - [intros] = NatF Nat_zero Nat_suc and - [elims "?n"] = NatE and - [comps] = Nat_comp_zero Nat_comp_suc + [form] = NatF and + [intro, intros] = Nat_zero Nat_suc and + [elim "?n"] = NatE and + [comp] = Nat_comp_zero Nat_comp_suc abbreviation "NatRec C \<equiv> NatInd (fn _. C)" @@ -66,12 +67,12 @@ lemma add_type [typechk]: shows "m + n: Nat" unfolding add_def by typechk -lemma add_zero [comps]: +lemma add_zero [comp]: assumes "m: Nat" shows "m + 0 \<equiv> m" unfolding add_def by reduce -lemma add_suc [comps]: +lemma add_suc [comp]: assumes "m: Nat" "n: Nat" shows "m + suc n \<equiv> suc (m + n)" unfolding add_def by reduce @@ -127,17 +128,17 @@ lemma mul_type [typechk]: shows "m * n: Nat" unfolding mul_def by typechk -lemma mul_zero [comps]: +lemma mul_zero [comp]: assumes "n: Nat" shows "n * 0 \<equiv> 0" unfolding mul_def by reduce -lemma mul_one [comps]: +lemma mul_one [comp]: assumes "n: Nat" shows "n * 1 \<equiv> n" unfolding mul_def by reduce -lemma mul_suc [comps]: +lemma mul_suc [comp]: assumes "m: Nat" "n: Nat" shows "m * suc n \<equiv> m + m * n" unfolding mul_def by reduce diff --git a/spartan/core/Spartan.thy b/spartan/core/Spartan.thy index 1b9093b..a4ad300 100644 --- a/spartan/core/Spartan.thy +++ b/spartan/core/Spartan.thy @@ -79,16 +79,16 @@ axiomatization lt_trans: "i < j \<Longrightarrow> j < k \<Longrightarrow> i < k" axiomatization U :: \<open>lvl \<Rightarrow> o\<close> where - U_hierarchy: "i < j \<Longrightarrow> U i: U j" and - U_cumulative: "A: U i \<Longrightarrow> i < j \<Longrightarrow> A: U j" + Ui_in_Uj: "i < j \<Longrightarrow> U i: U j" and + in_Uj_if_in_Ui: "A: U i \<Longrightarrow> i < j \<Longrightarrow> A: U j" -lemma U_in_U: +lemma Ui_in_USi: "U i: U (S i)" - by (rule U_hierarchy, rule lt_S) + by (rule Ui_in_Uj, rule lt_S) -lemma lift_U: +lemma in_USi_if_in_Ui: "A: U i \<Longrightarrow> A: U (S i)" - by (erule U_cumulative, rule lt_S) + by (erule in_Uj_if_in_Ui, rule lt_S) subsection \<open>\<Prod>-type\<close> @@ -199,70 +199,61 @@ consts "rhs" :: \<open>'a\<close> ("..") ML_file \<open>congruence.ML\<close> -subsection \<open>Theorem attributes, type-checking and proof methods\<close> +subsection \<open>Proof methods and type-checking/inference\<close> 
-named_theorems intros and comps +named_theorems form and intro and intros and comp +\<comment> \<open>`intros` stores the introduction rule variants used by the + `intro` and `intros` methods.\<close> ML_file \<open>elimination.ML\<close> \<comment> \<open>elimination rules\<close> ML_file \<open>cases.ML\<close> \<comment> \<open>case reasoning rules\<close> lemmas - [intros] = anno PiF PiI SigF SigI and - [elims "?f"] = PiE and - [elims "?p"] = SigE and - [comps] = beta Sig_comp and + [form] = PiF SigF and + [intro] = PiI SigI and + [intros] = PiI[rotated] SigI and + [elim "?f"] = PiE and + [elim "?p"] = SigE and + [comp] = beta Sig_comp and [cong] = Pi_cong lam_cong Sig_cong ML_file \<open>typechecking.ML\<close> -ML_file \<open>tactics2.ML\<close> ML_file \<open>tactics.ML\<close> -method_setup assumptions = - \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD ( - CHANGED (TRYALL (assumptions_tac ctxt))))\<close> +method_setup typechk = + \<open>Scan.succeed (K (CONTEXT_METHOD ( + CHEADGOAL o Types.check_infer)))\<close> method_setup known = - \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD ( - CHANGED (TRYALL (known_tac ctxt))))\<close> + \<open>Scan.succeed (K (CONTEXT_METHOD ( + CHEADGOAL o Types.known_ctac)))\<close> + +method_setup rule = + \<open>Attrib.thms >> (fn ths => K (CONTEXT_METHOD ( + CHEADGOAL o SIDE_CONDS (rule_ctac ths))))\<close> + +method_setup dest = + \<open>Scan.lift (Scan.option (Args.parens Parse.int)) + -- Attrib.thms >> (fn (n_opt, ths) => K (CONTEXT_METHOD ( + CHEADGOAL o SIDE_CONDS (dest_ctac n_opt ths))))\<close> method_setup intro = - \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD (HEADGOAL (intro_tac ctxt)))\<close> + \<open>Scan.succeed (K (CONTEXT_METHOD ( + CHEADGOAL o SIDE_CONDS (intro_ctac))))\<close> method_setup intros = - \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD (HEADGOAL (intros_tac ctxt)))\<close> + \<open>Scan.succeed (K (CONTEXT_METHOD ( + CHEADGOAL o SIDE_CONDS (CREPEAT o intro_ctac))))\<close> method_setup elim = - \<open>Scan.repeat Args.term >> (fn tms => fn ctxt => - CONTEXT_METHOD (K (elim_context_tac tms ctxt 1)))\<close> + \<open>Scan.repeat Args.term >> (fn tms => K (CONTEXT_METHOD ( + CHEADGOAL o SIDE_CONDS (elim_ctac tms))))\<close> method elims = elim+ method_setup cases = - \<open>Args.term >> (fn tm => fn ctxt => SIMPLE_METHOD' (cases_tac tm ctxt))\<close> - -(*This could possibly use additional simplification hints via a simp: modifier*) -method_setup typechk' = - \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' ( - SIDE_CONDS (typechk_tac ctxt) ctxt)) - (* CHANGED (ALLGOALS (TRY o typechk_tac ctxt)))) *)\<close> - -method_setup rule' = - \<open>Attrib.thms >> (fn ths => fn ctxt => - SIMPLE_METHOD (HEADGOAL (SIDE_CONDS (rule_tac ths ctxt) ctxt)))\<close> - -method_setup dest = - \<open>Scan.lift (Scan.option (Args.parens Parse.int)) -- Attrib.thms - >> (fn (opt_n, ths) => fn ctxt => - SIMPLE_METHOD (HEADGOAL (SIDE_CONDS (dest_tac opt_n ths ctxt) ctxt)))\<close> - -(*NEW*) -method_setup typechk = - \<open>Scan.succeed (K (CONTEXT_METHOD ( - CHEADGOAL o Types.check_infer)))\<close> - -method_setup rule = - \<open>Attrib.thms >> (fn ths => K (CONTEXT_METHOD ( - CHEADGOAL o Tactics2.SIDE_CONDS (Tactics2.rule_tac ths))))\<close> + \<open>Args.term >> (fn tm => K (CONTEXT_METHOD ( + CHEADGOAL o SIDE_CONDS (cases_ctac tm))))\<close> subsection \<open>Reflexivity\<close> @@ -270,14 +261,13 @@ subsection \<open>Reflexivity\<close> named_theorems refl method refl = (rule refl) -subsection \<open>Trivial proofs modulo typechecking\<close> 
+subsection \<open>Trivial proofs (modulo automatic discharge of side conditions)\<close> method_setup this = - \<open>Scan.succeed (fn ctxt => METHOD ( - EVERY o map (HEADGOAL o - (fn ths => SIDE_CONDS (resolve_tac ctxt ths) ctxt) o - single) - ))\<close> + \<open>Scan.succeed (K (CONTEXT_METHOD (fn facts => + CHEADGOAL (SIDE_CONDS + (CONTEXT_TACTIC' (fn ctxt => resolve_tac ctxt facts)) + facts))))\<close> subsection \<open>Rewriting\<close> @@ -298,7 +288,7 @@ lemma eta_expand: lemma rewr_imp: assumes "PROP A \<equiv> PROP B" shows "(PROP A \<Longrightarrow> PROP C) \<equiv> (PROP B \<Longrightarrow> PROP C)" - apply (rule Pure.equal_intr_rule) + apply (Pure.rule Pure.equal_intr_rule) apply (drule equal_elim_rule2[OF assms]; assumption) apply (drule equal_elim_rule1[OF assms]; assumption) done @@ -316,9 +306,11 @@ ML_file \<open>~~/src/HOL/Library/cconv.ML\<close> ML_file \<open>rewrite.ML\<close> \<comment> \<open>\<open>reduce\<close> computes terms via judgmental equalities\<close> -setup \<open>map_theory_simpset (fn ctxt => ctxt addSolver (mk_solver "" typechk_tac))\<close> +setup \<open>map_theory_simpset (fn ctxt => + ctxt addSolver (mk_solver "" (fn ctxt' => + NO_CONTEXT_TACTIC ctxt' o Types.check_infer (Simplifier.prems_of ctxt'))))\<close> -method reduce uses add = (simp add: comps add | subst comps)+ +method reduce uses add = changed \<open>((simp add: comp add | sub comp); typechk?)+\<close> subsection \<open>Implicits\<close> @@ -387,11 +379,8 @@ lemma refine_codomain: lemma lift_universe_codomain: assumes "A: U i" "f: A \<rightarrow> U j" shows "f: A \<rightarrow> U (S j)" - (*FIXME: Temporary; should be fixed once all methods are rewritten to use - the new typechk*) - apply (Pure.rule refine_codomain, typechk, typechk) - apply (Pure.rule lift_U, typechk) - done + using in_USi_if_in_Ui[of "f {}"] + by (rule refine_codomain) subsection \<open>Function composition\<close> @@ -413,7 +402,7 @@ lemma funcompI [typechk]: "g \<circ>\<^bsub>A\<^esub> f: \<Prod>x: A. C (f x)" unfolding funcomp_def by typechk -lemma funcomp_assoc [comps]: +lemma funcomp_assoc [comp]: assumes "f: A \<rightarrow> B" "g: B \<rightarrow> C" @@ -423,7 +412,7 @@ lemma funcomp_assoc [comps]: "(h \<circ>\<^bsub>B\<^esub> g) \<circ>\<^bsub>A\<^esub> f \<equiv> h \<circ>\<^bsub>A\<^esub> g \<circ>\<^bsub>A\<^esub> f" unfolding funcomp_def by reduce -lemma funcomp_lambda_comp [comps]: +lemma funcomp_lambda_comp [comp]: assumes "A: U i" "\<And>x. x: A \<Longrightarrow> b x: B" @@ -432,7 +421,7 @@ lemma funcomp_lambda_comp [comps]: "(\<lambda>x: B. c x) \<circ>\<^bsub>A\<^esub> (\<lambda>x: A. b x) \<equiv> \<lambda>x: A. c (b x)" unfolding funcomp_def by reduce -lemma funcomp_apply_comp [comps]: +lemma funcomp_apply_comp [comp]: assumes "f: A \<rightarrow> B" "g: \<Prod>x: B. C x" "x: A" @@ -454,22 +443,22 @@ abbreviation id where "id A \<equiv> \<lambda>x: A. 
x" lemma id_type[typechk]: "A: U i \<Longrightarrow> id A: A \<rightarrow> A" and - id_comp [comps]: "x: A \<Longrightarrow> (id A) x \<equiv> x" \<comment> \<open>for the occasional manual rewrite\<close> - by reduce + id_comp [comp]: "x: A \<Longrightarrow> (id A) x \<equiv> x" \<comment> \<open>for the occasional manual rewrite\<close> + by reduce+ -lemma id_left [comps]: +lemma id_left [comp]: assumes "f: A \<rightarrow> B" "A: U i" "B: U i" shows "(id B) \<circ>\<^bsub>A\<^esub> f \<equiv> f" by (subst eta_exp[of f]) (reduce, rule eta) -lemma id_right [comps]: +lemma id_right [comp]: assumes "f: A \<rightarrow> B" "A: U i" "B: U i" shows "f \<circ>\<^bsub>A\<^esub> (id A) \<equiv> f" by (subst eta_exp[of f]) (reduce, rule eta) lemma id_U [typechk]: "id (U i): U i \<rightarrow> U i" - by typechk (fact U_in_U) + by typechk (rule Ui_in_USi) (*FIXME: Add annotation rule to typechecker*) section \<open>Pairs\<close> @@ -482,7 +471,7 @@ lemma fst_type [typechk]: shows "fst A B: (\<Sum>x: A. B x) \<rightarrow> A" unfolding fst_def by typechk -lemma fst_comp [comps]: +lemma fst_comp [comp]: assumes "a: A" "b: B a" @@ -496,10 +485,10 @@ lemma snd_type [typechk]: shows "snd A B: \<Prod>p: \<Sum>x: A. B x. B (fst A B p)" unfolding snd_def by typechk reduce -lemma snd_comp [comps]: +lemma snd_comp [comp]: assumes "a: A" "b: B a" "A: U i" "\<And>x. x: A \<Longrightarrow> B x: U i" shows "snd A B <a, b> \<equiv> b" - unfolding snd_def by reduce + unfolding snd_def by reduce+ subsection \<open>Notation\<close> @@ -509,7 +498,7 @@ definition fst_i ("fst") definition snd_i ("snd") where [implicit]: "snd \<equiv> Spartan.snd ? ?" -no_translations +translations "fst" \<leftharpoondown> "CONST Spartan.fst A B" "snd" \<leftharpoondown> "CONST Spartan.snd A B" @@ -520,22 +509,14 @@ Lemma fst [typechk]: "p: \<Sum>x: A. B x" "A: U i" "\<And>x. x: A \<Longrightarrow> B x: U i" shows "fst p: A" - \<comment> \<open>This can't be solved by a single application of `typechk`; it needs - multiple (two) passes. Something to do with constraint/subgoal reordering.\<close> - apply (Pure.rule elims) - apply (Pure.rule typechk) - apply known [1] - defer \<comment> \<open>The deferred subgoal is an inhabitation problem.\<close> - apply known [1] - by known - + by typechk Lemma snd [typechk]: assumes "p: \<Sum>x: A. B x" "A: U i" "\<And>x. 
x: A \<Longrightarrow> B x: U i" shows "snd p: B (fst p)" - by typechk+ + by typechk method fst for p::o = rule fst[of p] method snd for p::o = rule snd[of p] diff --git a/spartan/core/context_tactical.ML b/spartan/core/context_tactical.ML index 78991a9..a5159f6 100644 --- a/spartan/core/context_tactical.ML +++ b/spartan/core/context_tactical.ML @@ -11,6 +11,7 @@ structure Context_Tactical: sig type context_tactic' = int -> context_tactic +val CONTEXT_TACTIC': (Proof.context -> int -> tactic) -> context_tactic' val all_ctac: context_tactic val no_ctac: context_tactic val print_ctac: (Proof.context -> string) -> context_tactic @@ -25,16 +26,21 @@ val CREPEAT: context_tactic -> context_tactic val CFILTER: (context_state -> bool) -> context_tactic -> context_tactic val CCHANGED: context_tactic -> context_tactic val CTHEN_ALL_NEW: context_tactic' * context_tactic' -> context_tactic' +val CREPEAT_IN_RANGE: int -> int -> context_tactic' -> context_tactic val CREPEAT_ALL_NEW: context_tactic' -> context_tactic' val CTHEN_ALL_NEW_FWD: context_tactic' * context_tactic' -> context_tactic' val CREPEAT_ALL_NEW_FWD: context_tactic' -> context_tactic' val CHEADGOAL: context_tactic' -> context_tactic val CALLGOALS: context_tactic' -> context_tactic +val CSOMEGOAL: context_tactic' -> context_tactic +val CRANGE: context_tactic' list -> context_tactic' end = struct type context_tactic' = int -> context_tactic +fun CONTEXT_TACTIC' tac i (ctxt, st) = TACTIC_CONTEXT ctxt ((tac ctxt i) st) + val all_ctac = Seq.make_results o Seq.single val no_ctac = K Seq.empty fun print_ctac f (ctxt, st) = CONTEXT_TACTIC (print_tac ctxt (f ctxt)) (ctxt, st) @@ -87,26 +93,34 @@ local (*By Peter Lammich: apply tactic to subgoals in interval in a forward manner, skipping over emerging subgoals*) - fun INTERVAL_FWD ctac l u (cst as (_, st)) = - if l > u then all_ctac cst + fun INTERVAL_FWD ctac l u (cst as (_, st)) = cst |> + (if l > u then all_ctac else (ctac l CTHEN (fn cst' as (_, st') => let val ofs = Thm.nprems_of st' - Thm.nprems_of st in if ofs < ~1 then raise THM ( "INTERVAL_FWD: tactic solved more than one goal", ~1, [st, st']) else INTERVAL_FWD ctac (l + 1 + ofs) (u + ofs) cst' - end)) cst + end))) in fun (ctac1 CTHEN_ALL_NEW ctac2) i (cst as (_, st)) = cst |> (ctac1 i CTHEN (fn cst' as (_, st') => - cst' |> INTERVAL ctac2 i (i + Thm.nprems_of st' - Thm.nprems_of st))) + INTERVAL ctac2 i (i + Thm.nprems_of st' - Thm.nprems_of st) cst')) (*By Peter Lammich: apply ctac2 to all subgoals emerging from ctac1, in forward manner*) fun (ctac1 CTHEN_ALL_NEW_FWD ctac2) i (cst as (_, st)) = cst |> (ctac1 i CTHEN (fn cst' as (_, st') => - cst' |> INTERVAL_FWD ctac2 i (i + Thm.nprems_of st' - Thm.nprems_of st))) + INTERVAL_FWD ctac2 i (i + Thm.nprems_of st' - Thm.nprems_of st) cst')) + +(*Repeatedly apply ctac to the i-th until the k-th-from-last subgoals + (i.e. 
leave the last k subgoals alone), until no more changes appear in the + goal state.*) +fun CREPEAT_IN_RANGE i k ctac = + let fun interval_ctac (cst as (_, st)) = + INTERVAL_FWD ctac i (Thm.nprems_of st - k) cst + in CREPEAT (CCHANGED interval_ctac) end end @@ -124,6 +138,15 @@ fun CALLGOALS ctac (cst as (_, st)) = | doall n = ctac n CTHEN doall (n - 1); in doall (Thm.nprems_of st) cst end +fun CSOMEGOAL ctac (cst as (_, st)) = + let + fun find 0 = no_ctac + | find n = ctac n CORELSE find (n - 1); + in find (Thm.nprems_of st) cst end + +fun CRANGE [] _ = all_ctac + | CRANGE (ctac :: ctacs) i = CRANGE ctacs (i + 1) CTHEN ctac i + end open Context_Tactical diff --git a/spartan/core/elimination.ML b/spartan/core/elimination.ML index 11b3af9..cd4e414 100644 --- a/spartan/core/elimination.ML +++ b/spartan/core/elimination.ML @@ -34,13 +34,13 @@ fun register_rule tms rl = in Rules.map (Termtab.update (hd, (rl, map (#1 o dest_Var) tms))) end -(* [elims] attribute *) +(* [elim] attribute *) val _ = Theory.setup ( - Attrib.setup \<^binding>\<open>elims\<close> + Attrib.setup \<^binding>\<open>elim\<close> (Scan.repeat Args.term_pattern >> (Thm.declaration_attribute o register_rule)) "" - #> Global_Theory.add_thms_dynamic (\<^binding>\<open>elims\<close>, + #> Global_Theory.add_thms_dynamic (\<^binding>\<open>elim\<close>, fn context => (map #1 (rules (Context.proof_of context)))) ) diff --git a/spartan/core/eqsubst.ML b/spartan/core/eqsubst.ML index ea6f098..e7ecf63 100644 --- a/spartan/core/eqsubst.ML +++ b/spartan/core/eqsubst.ML @@ -416,19 +416,27 @@ fun eqsubst_asm_tac ctxt occs thms i st = should be done to an assumption, false = apply to the conclusion of the goal) as well as the theorems to use *) val _ = - Theory.setup - (Method.setup \<^binding>\<open>sub\<close> - (Scan.lift (Args.mode "asm" -- Scan.optional (Args.parens (Scan.repeat Parse.nat)) [0]) -- - Attrib.thms >> (fn ((asm, occs), inthms) => fn ctxt => - SIMPLE_METHOD' ((if asm then eqsubst_asm_tac else eqsubst_tac) ctxt occs inthms))) - "single-step substitution" - #> - (Method.setup \<^binding>\<open>subst\<close> - (Scan.lift (Args.mode "asm" -- Scan.optional (Args.parens (Scan.repeat Parse.nat)) [0]) -- - Attrib.thms >> (fn ((asm, occs), inthms) => fn ctxt => - SIMPLE_METHOD' (SIDE_CONDS - ((if asm then eqsubst_asm_tac else eqsubst_tac) ctxt occs inthms) - ctxt))) - "single-step substitution with auto-typechecking")) + let + val parser = + Scan.lift (Args.mode "asm" + -- Scan.optional (Args.parens (Scan.repeat Parse.nat)) [0]) + -- Attrib.thms + fun eqsubst_asm_ctac occs inthms = + CONTEXT_TACTIC' (fn ctxt => eqsubst_asm_tac ctxt occs inthms) + fun eqsubst_ctac occs inthms = + CONTEXT_TACTIC' (fn ctxt => eqsubst_tac ctxt occs inthms) + in + Theory.setup ( + Method.setup \<^binding>\<open>sub\<close> + (parser >> (fn ((asm, occs), inthms) => fn ctxt => SIMPLE_METHOD' ( + (if asm then eqsubst_asm_tac else eqsubst_tac) ctxt occs inthms))) + "single-step substitution" #> + Method.setup \<^binding>\<open>subst\<close> + (parser >> (fn ((asm, occs), inthms) => K (CONTEXT_METHOD ( + CHEADGOAL o SIDE_CONDS + ((if asm then eqsubst_asm_ctac else eqsubst_ctac) occs inthms))))) + "single-step substitution with automatic discharge of side conditions" + ) + end end; diff --git a/spartan/core/rewrite.ML b/spartan/core/rewrite.ML index f9c5d8e..eba0e81 100644 --- a/spartan/core/rewrite.ML +++ b/spartan/core/rewrite.ML @@ -15,8 +15,8 @@ patterns but has diverged significantly during its development. 
We also allow introduction of identifiers for bound variables, which can then be used to match arbitrary subterms inside abstractions. -This code is slightly modified from the original at HOL/Library/rewrite.ML, -to incorporate auto-typechecking for type theory. +This code has been slightly modified from the original at HOL/Library/rewrite.ML +to incorporate automatic discharge of type-theoretic side conditions. *) infix 1 then_pconv; @@ -448,18 +448,18 @@ val _ = val subst_parser = let val scan = raw_pattern -- to_parser -- Parse.thms1 in context_lift scan prep_args end + + fun rewrite_export_ctac inputs inthms = + CONTEXT_TACTIC' (fn ctxt => rewrite_export_tac ctxt inputs inthms) in Method.setup \<^binding>\<open>rewr\<close> (subst_parser >> - (fn (pattern, inthms, (to, pat_ctxt)) => fn orig_ctxt => - SIMPLE_METHOD' - (rewrite_export_tac orig_ctxt ((pattern, to), SOME pat_ctxt) inthms))) - "single-step rewriting, allowing subterm selection via patterns" - #> - (Method.setup \<^binding>\<open>rewrite\<close> (subst_parser >> - (fn (pattern, inthms, (to, pat_ctxt)) => fn orig_ctxt => - SIMPLE_METHOD' (SIDE_CONDS - (rewrite_export_tac orig_ctxt ((pattern, to), SOME pat_ctxt) inthms) - orig_ctxt))) - "single-step rewriting with auto-typechecking") + (fn (pattern, inthms, (to, pat_ctxt)) => fn orig_ctxt => SIMPLE_METHOD' + (rewrite_export_tac orig_ctxt ((pattern, to), SOME pat_ctxt) inthms))) + "single-step rewriting, allowing subterm selection via patterns" #> + Method.setup \<^binding>\<open>rewrite\<close> (subst_parser >> + (fn (pattern, inthms, (to, pat_ctxt)) => K (CONTEXT_METHOD ( + CHEADGOAL o SIDE_CONDS + (rewrite_export_ctac ((pattern, to), SOME pat_ctxt) inthms))))) + "single-step rewriting with auto-typechecking" end end diff --git a/spartan/core/tactics.ML b/spartan/core/tactics.ML index 172ae90..3922ae0 100644 --- a/spartan/core/tactics.ML +++ b/spartan/core/tactics.ML @@ -7,102 +7,66 @@ General tactics for dependent type theory. 
structure Tactics: sig -val assumptions_tac: Proof.context -> int -> tactic -val known_tac: Proof.context -> int -> tactic -val typechk_tac: Proof.context -> int -> tactic -val auto_typechk: bool Config.T -val SIDE_CONDS: (int -> tactic) -> Proof.context -> int -> tactic -val rule_tac: thm list -> Proof.context -> int -> tactic -val dest_tac: int option -> thm list -> Proof.context -> int -> tactic -val intro_tac: Proof.context -> int -> tactic -val intros_tac: Proof.context -> int -> tactic -val elim_context_tac: term list -> Proof.context -> int -> context_tactic -val cases_tac: term -> Proof.context -> int -> tactic +val solve_side_conds: int Config.T +val SIDE_CONDS: context_tactic' -> thm list -> context_tactic' +val rule_ctac: thm list -> context_tactic' +val dest_ctac: int option -> thm list -> context_tactic' +val intro_ctac: context_tactic' +val elim_ctac: term list -> context_tactic' +val cases_ctac: term -> context_tactic' end = struct -(*An assumption tactic that only solves typing goals with rigid terms and - judgmental equalities without schematic variables*) -fun assumptions_tac ctxt = SUBGOAL (fn (goal, i) => - let - val concl = Logic.strip_assums_concl goal - in - if - Lib.is_typing concl andalso Lib.is_rigid (Lib.term_of_typing concl) - orelse not ((exists_subterm is_Var) concl) - then assume_tac ctxt i - else no_tac - end) - -(*Solves typing goals with rigid term by resolving with context facts and - simplifier premises, or arbitrary goals by *non-unifying* assumption*) -fun known_tac ctxt = SUBGOAL (fn (goal, i) => - let - val concl = Logic.strip_assums_concl goal - in - ((if Lib.is_typing concl andalso Lib.is_rigid (Lib.term_of_typing concl) - then - let val ths = map fst (Facts.props (Proof_Context.facts_of ctxt)) - in resolve_tac ctxt (ths @ Simplifier.prems_of ctxt) end - else K no_tac) - ORELSE' assumptions_tac ctxt) i - end) - -(*Typechecking: try to solve goals of the form "a: A" where a is rigid*) -fun typechk_tac ctxt = - let - val tac = SUBGOAL (fn (goal, i) => - if Lib.rigid_typing_concl goal - then - let val net = Tactic.build_net - ((Named_Theorems.get ctxt \<^named_theorems>\<open>typechk\<close>) - @(Named_Theorems.get ctxt \<^named_theorems>\<open>intros\<close>) - @(map #1 (Elim.rules ctxt))) - in (resolve_from_net_tac ctxt net) i end - else no_tac) - in - REPEAT_ALL_NEW (known_tac ctxt ORELSE' tac) - end -(*Many methods try to automatically discharge side conditions by typechecking. 
- Switch this flag off to discharge by non-unifying assumption instead.*) -val auto_typechk = Attrib.setup_config_bool \<^binding>\<open>auto_typechk\<close> (K true) +(* Side conditions *) +val solve_side_conds = + Attrib.setup_config_int \<^binding>\<open>solve_side_conds\<close> (K 2) + +fun SIDE_CONDS ctac facts i (cst as (ctxt, st)) = cst |> (ctac i CTHEN + (case Config.get ctxt solve_side_conds of + 1 => CALLGOALS (CTRY o Types.known_ctac facts) + | 2 => CREPEAT_IN_RANGE i (Thm.nprems_of st - i) + (CTRY o CREPEAT_ALL_NEW_FWD (Types.check_infer facts)) + | _ => all_ctac)) -fun side_cond_tac ctxt = CHANGED o REPEAT o - (if Config.get ctxt auto_typechk then typechk_tac ctxt else known_tac ctxt) -(*Combinator runs tactic and tries to discharge all new typing side conditions*) -fun SIDE_CONDS tac ctxt = tac THEN_ALL_NEW (TRY o side_cond_tac ctxt) +(* rule, dest, intro *) local -fun mk_rules _ ths [] = ths - | mk_rules n ths ths' = - let val ths'' = foldr1 (op @) - (map (fn th => [rotate_prems n (th RS @{thm PiE})] handle THM _ => []) ths') - in - mk_rules n (ths @ ths') ths'' - end + fun mk_rules _ ths [] = ths + | mk_rules n ths ths' = + let val ths'' = foldr1 (op @) + (map + (fn th => [rotate_prems n (th RS @{thm PiE})] handle THM _ => []) + ths') + in + mk_rules n (ths @ ths') ths'' + end in -(*Resolves with given rules, discharging as many side conditions as possible*) -fun rule_tac ths ctxt = resolve_tac ctxt (mk_rules 0 [] ths) +(*Resolves with given rules*) +fun rule_ctac ths i (ctxt, st) = + TACTIC_CONTEXT ctxt (resolve_tac ctxt (mk_rules 0 [] ths) i st) (*Attempts destruct-resolution with the n-th premise of the given rules*) -fun dest_tac opt_n ths ctxt = dresolve_tac ctxt - (mk_rules (case opt_n of NONE => 0 | SOME 0 => 0 | SOME n => n-1) [] ths) +fun dest_ctac opt_n ths i (ctxt, st) = + TACTIC_CONTEXT ctxt (dresolve_tac ctxt + (mk_rules (case opt_n of NONE => 0 | SOME 0 => 0 | SOME n => n-1) [] ths) + i st) end (*Applies some introduction rule*) -fun intro_tac ctxt = SUBGOAL (fn (_, i) => SIDE_CONDS - (resolve_tac ctxt (Named_Theorems.get ctxt \<^named_theorems>\<open>intros\<close>)) ctxt i) +fun intro_ctac i (ctxt, st) = TACTIC_CONTEXT ctxt (resolve_tac ctxt + (Named_Theorems.get ctxt \<^named_theorems>\<open>intros\<close>) i st) -fun intros_tac ctxt = SUBGOAL (fn (_, i) => - (CHANGED o REPEAT o CHANGED o intro_tac ctxt) i) (* Induction/elimination *) -(*Pushes a context/goal premise typing t:T into a \<Prod>-type*) +(*Pushes a known typing t:T into a \<Prod>-type. 
+ This tactic is well-behaved only when t is sufficiently well specified + (otherwise there might be multiple possible judgments t:T that unify, and + which is chosen is undefined).*) fun internalize_fact_tac t = Subgoal.FOCUS_PARAMS (fn {context = ctxt, concl = raw_concl, ...} => let @@ -116,18 +80,10 @@ fun internalize_fact_tac t = in HEADGOAL (resolve_tac ctxt [resolvent]) (*known_tac infers the correct type T inferred by unification*) - THEN SOMEGOAL (known_tac ctxt) + THEN SOMEGOAL (NO_CONTEXT_TACTIC ctxt o Types.known_ctac []) end) -(*Premises that have already been pushed into the \<Prod>-type*) -structure Inserts = Proof_Data ( - type T = term Item_Net.T - val init = K (Item_Net.init Term.aconv_untyped single) -) - -local - -fun elim_core_tac tms types ctxt = SUBGOAL (K ( +fun elim_core_tac tms types ctxt = let val rule_insts = map ((Elim.lookup_rule ctxt) o Term.head_of) types val rules = flat (map @@ -137,84 +93,86 @@ fun elim_core_tac tms types ctxt = SUBGOAL (K ( (idxnames ~~ map (Thm.cterm_of ctxt) tms) rl]) rule_insts) in - HEADGOAL (resolve_tac ctxt rules) - THEN RANGE (replicate (length tms) (typechk_tac ctxt)) 1 - end handle Option => no_tac)) - -in + resolve_tac ctxt rules + THEN' RANGE (replicate (length tms) (NO_CONTEXT_TACTIC ctxt o Types.check_infer [])) + end handle Option => K no_tac -fun elim_context_tac tms ctxt = case tms of - [] => CONTEXT_SUBGOAL (K (Context_Tactic.CONTEXT_TACTIC (HEADGOAL ( - SIDE_CONDS (eresolve_tac ctxt (map #1 (Elim.rules ctxt))) ctxt)))) - | major::_ => CONTEXT_SUBGOAL (fn (goal, _) => - let - val facts = Proof_Context.facts_of ctxt - val prems = Logic.strip_assums_hyp goal - val template = Lib.typing_of_term major - val types = - map (Thm.prop_of o #1) (Facts.could_unify facts template) - @ filter (fn prem => Term.could_unify (template, prem)) prems - |> map Lib.type_of_typing - in case types of - [] => Context_Tactic.CONTEXT_TACTIC no_tac - | _ => - let - val inserts = map (Thm.prop_of o fst) (Facts.props facts) @ prems - |> filter Lib.is_typing - |> map Lib.dest_typing - |> filter_out (fn (t, _) => - Term.aconv (t, major) orelse Item_Net.member (Inserts.get ctxt) t) - |> map (fn (t, T) => ((t, T), Lib.subterm_count_distinct tms T)) - |> filter (fn (_, i) => i > 0) - (*`t1: T1` comes before `t2: T2` if T1 contains t2 as subterm. 
- If they are incomparable, then order by decreasing - `subterm_count [p, x, y] T`*) - |> sort (fn (((t1, _), i), ((_, T2), j)) => - Lib.cond_order (Lib.subterm_order T2 t1) (int_ord (j, i))) - |> map (#1 o #1) - val record_inserts = Inserts.map (fold Item_Net.update inserts) - val tac = - (*Push premises having a subterm in `tms` into a \<Prod>*) - fold (fn t => fn tac => - tac THEN HEADGOAL (internalize_fact_tac t ctxt)) - inserts all_tac - (*Apply elimination rule*) - THEN (HEADGOAL ( - elim_core_tac tms types ctxt - (*Pull pushed premises back out*) - THEN_ALL_NEW (SUBGOAL (fn (_, i) => - REPEAT_DETERM_N (length inserts) - (resolve_tac ctxt @{thms PiI} i))) - )) - (*Side conditions*) - THEN ALLGOALS (TRY o side_cond_tac ctxt) - in - fn (ctxt, st) => Context_Tactic.TACTIC_CONTEXT - (record_inserts ctxt) (tac st) - end - end) +(*Premises that have already been pushed into the \<Prod>-type*) +structure Inserts = Proof_Data ( + type T = term Item_Net.T + val init = K (Item_Net.init Term.aconv_untyped single) +) -fun cases_tac tm ctxt = SUBGOAL (fn (goal, i) => - let - val facts = Proof_Context.facts_of ctxt - val prems = Logic.strip_assums_hyp goal - val template = Lib.typing_of_term tm - val types = - map (Thm.prop_of o #1) (Facts.could_unify facts template) - @ filter (fn prem => Term.could_unify (template, prem)) prems - |> map Lib.type_of_typing - val res = (case types of - [typ] => Drule.infer_instantiate' ctxt [SOME (Thm.cterm_of ctxt tm)] - (the (Case.lookup_rule ctxt (Term.head_of typ))) - | [] => raise Option - | _ => raise error (Syntax.string_of_term ctxt tm ^ "not uniquely typed")) - handle Option => error ("no case rule known for " - ^ (Syntax.string_of_term ctxt tm)) - in - SIDE_CONDS (resolve_tac ctxt [res]) ctxt i - end) +fun elim_ctac tms = + case tms of + [] => CONTEXT_TACTIC' (fn ctxt => eresolve_tac ctxt (map #1 (Elim.rules ctxt))) + | major :: _ => CONTEXT_SUBGOAL (fn (goal, _) => fn cst as (ctxt, st) => + let + val facts = Proof_Context.facts_of ctxt + val prems = Logic.strip_assums_hyp goal + val template = Lib.typing_of_term major + val types = + map (Thm.prop_of o #1) (Facts.could_unify facts template) + @ filter (fn prem => Term.could_unify (template, prem)) prems + |> map Lib.type_of_typing + in case types of + [] => no_ctac cst + | _ => + let + val inserts = map (Thm.prop_of o fst) (Facts.props facts) @ prems + |> filter Lib.is_typing + |> map Lib.dest_typing + |> filter_out (fn (t, _) => + Term.aconv (t, major) orelse Item_Net.member (Inserts.get ctxt) t) + |> map (fn (t, T) => ((t, T), Lib.subterm_count_distinct tms T)) + |> filter (fn (_, i) => i > 0) + (*`t1: T1` comes before `t2: T2` if T1 contains t2 as subterm. 
+ If they are incomparable, then order by decreasing + `subterm_count [p, x, y] T`*) + |> sort (fn (((t1, _), i), ((_, T2), j)) => + Lib.cond_order (Lib.subterm_order T2 t1) (int_ord (j, i))) + |> map (#1 o #1) + val record_inserts = Inserts.map (fold Item_Net.update inserts) + val tac = + (*Push premises having a subterm in `tms` into a \<Prod>*) + fold (fn t => fn tac => + tac THEN HEADGOAL (internalize_fact_tac t ctxt)) + inserts all_tac + (*Apply elimination rule*) + THEN HEADGOAL ( + elim_core_tac tms types ctxt + (*Pull pushed premises back out*) + THEN_ALL_NEW (SUBGOAL (fn (_, i) => + REPEAT_DETERM_N (length inserts) + (resolve_tac ctxt @{thms PiI[rotated]} i)))) + in + TACTIC_CONTEXT (record_inserts ctxt) (tac st) + end + end) + +fun cases_ctac tm = + let fun tac ctxt = + SUBGOAL (fn (goal, i) => + let + val facts = Proof_Context.facts_of ctxt + val prems = Logic.strip_assums_hyp goal + val template = Lib.typing_of_term tm + val types = + map (Thm.prop_of o #1) (Facts.could_unify facts template) + @ filter (fn prem => Term.could_unify (template, prem)) prems + |> map Lib.type_of_typing + val res = (case types of + [typ] => Drule.infer_instantiate' ctxt [SOME (Thm.cterm_of ctxt tm)] + (the (Case.lookup_rule ctxt (Term.head_of typ))) + | [] => raise Option + | _ => raise error (Syntax.string_of_term ctxt tm ^ "not uniquely typed")) + handle Option => + error ("no case rule known for " ^ (Syntax.string_of_term ctxt tm)) + in + resolve_tac ctxt [res] i + end) + in CONTEXT_TACTIC' tac end -end end diff --git a/spartan/core/tactics2.ML b/spartan/core/tactics2.ML deleted file mode 100644 index 801424f..0000000 --- a/spartan/core/tactics2.ML +++ /dev/null @@ -1,191 +0,0 @@ -(* Title: tactics.ML - Author: Joshua Chen - -General tactics for dependent type theory. 
-*) - -structure Tactics2: -sig - -val solve_side_conds: bool Config.T -val SIDE_CONDS: context_tactic' -> thm list -> context_tactic' -val rule_tac: thm list -> context_tactic' -val dest_tac: int option -> thm list -> context_tactic' -val intro_tac: context_tactic' -val intros_tac: context_tactic' -(* -val elim_context_tac: term list -> Proof.context -> int -> context_tactic -val cases_tac: term -> Proof.context -> int -> tactic -*) - -end = struct - -(*Automatically try to solve side conditions?*) -val solve_side_conds = - Attrib.setup_config_bool \<^binding>\<open>solve_side_conds\<close> (K true) - -local - fun side_cond_ctac facts i (cst as (ctxt, _)) = - if Config.get ctxt solve_side_conds - then (Types.check_infer facts i) cst - else all_ctac cst -in - -(*Combinator runs tactic and tries to discharge newly arising side conditions*) -fun SIDE_CONDS ctac facts = ctac CTHEN_ALL_NEW (CTRY o side_cond_ctac facts) - -end - -local - fun mk_rules _ ths [] = ths - | mk_rules n ths ths' = - let val ths'' = foldr1 (op @) - (map - (fn th => [rotate_prems n (th RS @{thm PiE})] handle THM _ => []) - ths') - in - mk_rules n (ths @ ths') ths'' - end -in - -(*Resolves with given rules*) -fun rule_tac ths i (ctxt, st) = - TACTIC_CONTEXT ctxt (resolve_tac ctxt (mk_rules 0 [] ths) i st) - -(*Attempts destruct-resolution with the n-th premise of the given rules*) -fun dest_tac opt_n ths i (ctxt, st) = - TACTIC_CONTEXT ctxt (dresolve_tac ctxt - (mk_rules (case opt_n of NONE => 0 | SOME 0 => 0 | SOME n => n-1) [] ths) - i st) - -end - -(*Applies some introduction rule*) -fun intro_tac i (ctxt, st) = - TACTIC_CONTEXT ctxt (resolve_tac ctxt - (Named_Theorems.get ctxt \<^named_theorems>\<open>intros\<close>) i st) - -val intros_tac = CONTEXT_SUBGOAL (fn (_, i) => - (CCHANGED o CREPEAT o CCHANGED o intro_tac) i) - -(* -(* Induction/elimination *) - -(*Pushes a context/goal premise typing t:T into a \<Prod>-type*) -fun internalize_fact_tac t = - Subgoal.FOCUS_PARAMS (fn {context = ctxt, concl = raw_concl, ...} => - let - val concl = Logic.strip_assums_concl (Thm.term_of raw_concl) - val C = Lib.type_of_typing concl - val B = Thm.cterm_of ctxt (Lib.lambda_var t C) - val a = Thm.cterm_of ctxt t - (*The resolvent is PiE[where ?B=B and ?a=a]*) - val resolvent = - Drule.infer_instantiate' ctxt [NONE, NONE, SOME B, SOME a] @{thm PiE} - in - HEADGOAL (resolve_tac ctxt [resolvent]) - (*known_tac infers the correct type T inferred by unification*) - THEN SOMEGOAL (known_tac ctxt) - end) - -(*Premises that have already been pushed into the \<Prod>-type*) -structure Inserts = Proof_Data ( - type T = term Item_Net.T - val init = K (Item_Net.init Term.aconv_untyped single) -) - -local - -fun elim_core_tac tms types ctxt = SUBGOAL (K ( - let - val rule_insts = map ((Elim.lookup_rule ctxt) o Term.head_of) types - val rules = flat (map - (fn rule_inst => case rule_inst of - NONE => [] - | SOME (rl, idxnames) => [Drule.infer_instantiate ctxt - (idxnames ~~ map (Thm.cterm_of ctxt) tms) rl]) - rule_insts) - in - HEADGOAL (resolve_tac ctxt rules) - THEN RANGE (replicate (length tms) (typechk_tac ctxt)) 1 - end handle Option => no_tac)) - -in - -fun elim_context_tac tms ctxt = case tms of - [] => CONTEXT_SUBGOAL (K (Context_Tactic.CONTEXT_TACTIC (HEADGOAL ( - SIDE_CONDS' (eresolve_tac ctxt (map #1 (Elim.rules ctxt))) ctxt)))) - | major::_ => CONTEXT_SUBGOAL (fn (goal, _) => - let - val facts = Proof_Context.facts_of ctxt - val prems = Logic.strip_assums_hyp goal - val template = Lib.typing_of_term major - val types = - map 
(Thm.prop_of o #1) (Facts.could_unify facts template) - @ filter (fn prem => Term.could_unify (template, prem)) prems - |> map Lib.type_of_typing - in case types of - [] => Context_Tactic.CONTEXT_TACTIC no_tac - | _ => - let - val inserts = map (Thm.prop_of o fst) (Facts.props facts) @ prems - |> filter Lib.is_typing - |> map Lib.dest_typing - |> filter_out (fn (t, _) => - Term.aconv (t, major) orelse Item_Net.member (Inserts.get ctxt) t) - |> map (fn (t, T) => ((t, T), Lib.subterm_count_distinct tms T)) - |> filter (fn (_, i) => i > 0) - (*`t1: T1` comes before `t2: T2` if T1 contains t2 as subterm. - If they are incomparable, then order by decreasing - `subterm_count [p, x, y] T`*) - |> sort (fn (((t1, _), i), ((_, T2), j)) => - Lib.cond_order (Lib.subterm_order T2 t1) (int_ord (j, i))) - |> map (#1 o #1) - val record_inserts = Inserts.map (fold Item_Net.update inserts) - val tac = - (*Push premises having a subterm in `tms` into a \<Prod>*) - fold (fn t => fn tac => - tac THEN HEADGOAL (internalize_fact_tac t ctxt)) - inserts all_tac - (*Apply elimination rule*) - THEN (HEADGOAL ( - elim_core_tac tms types ctxt - (*Pull pushed premises back out*) - THEN_ALL_NEW (SUBGOAL (fn (_, i) => - REPEAT_DETERM_N (length inserts) - (resolve_tac ctxt @{thms PiI} i))) - )) - (*Side conditions*) - THEN ALLGOALS (TRY o side_cond_tac ctxt) - in - fn (ctxt, st) => Context_Tactic.TACTIC_CONTEXT - (record_inserts ctxt) (tac st) - end - end) - -fun cases_tac tm ctxt = SUBGOAL (fn (goal, i) => - let - val facts = Proof_Context.facts_of ctxt - val prems = Logic.strip_assums_hyp goal - val template = Lib.typing_of_term tm - val types = - map (Thm.prop_of o #1) (Facts.could_unify facts template) - @ filter (fn prem => Term.could_unify (template, prem)) prems - |> map Lib.type_of_typing - val res = (case types of - [typ] => Drule.infer_instantiate' ctxt [SOME (Thm.cterm_of ctxt tm)] - (the (Case.lookup_rule ctxt (Term.head_of typ))) - | [] => raise Option - | _ => raise error (Syntax.string_of_term ctxt tm ^ "not uniquely typed")) - handle Option => error ("no case rule known for " - ^ (Syntax.string_of_term ctxt tm)) - in - SIDE_CONDS' (resolve_tac ctxt [res]) ctxt i - end) - -end -*) - -end - -open Tactics2 diff --git a/spartan/core/typechecking.ML b/spartan/core/typechecking.ML index 13ca440..946ecd6 100644 --- a/spartan/core/typechecking.ML +++ b/spartan/core/typechecking.ML @@ -35,12 +35,6 @@ fun put_types typings = foldr1 (op o) (map put_type typings) (* Context tactics for type-checking *) -(*For debugging*) -structure Probe = Proof_Data ( - type T = int - val init = K 0 -) - (*Solves goals without metavariables and type inference problems by resolving with facts or assumption from inline premises.*) fun known_ctac facts = CONTEXT_SUBGOAL (fn (goal, i) => fn (ctxt, st) => @@ -69,22 +63,23 @@ local (*MAYBE FIXME: Remove `typechk` from this list, and instead perform definitional unfolding to (w?)hnf.*) @(Named_Theorems.get ctxt \<^named_theorems>\<open>typechk\<close>) - @(Named_Theorems.get ctxt \<^named_theorems>\<open>intros\<close>) + @(Named_Theorems.get ctxt \<^named_theorems>\<open>form\<close>) + @(Named_Theorems.get ctxt \<^named_theorems>\<open>intro\<close>) @(map #1 (Elim.rules ctxt))) in (resolve_from_net_tac ctxt net) i end else no_tac) - val ctxt' = Probe.map (fn i => i + 1) ctxt + val ctxt' = ctxt in TACTIC_CONTEXT ctxt' (tac i st) end in -fun check_infer facts = - let val probe = K (print_ctac (Int.toString o Probe.get)) +fun check_infer facts i (cst as (_, st)) = + let + val ctac = 
known_ctac facts CORELSE' check_infer_step facts in - CREPEAT_ALL_NEW_FWD ( - probe CTHEN' known_ctac facts - CORELSE' probe CTHEN' check_infer_step facts) + cst |> (ctac i CTHEN + CREPEAT_IN_RANGE i (Thm.nprems_of st - i) (CTRY o CREPEAT_ALL_NEW_FWD ctac)) end end diff --git a/spartan/lib/List.thy b/spartan/lib/List.thy index a755859..be86b63 100644 --- a/spartan/lib/List.thy +++ b/spartan/lib/List.thy @@ -44,9 +44,10 @@ where f x xs (ListInd A (fn xs. C xs) c\<^sub>0 (fn x xs rec. f x xs rec) xs)" lemmas - [intros] = ListF List_nil List_cons and - [elims "?xs"] = ListE and - [comps] = List_comp_nil List_comp_cons + [form] = ListF and + [intro, intros] = List_nil List_cons and + [elim "?xs"] = ListE and + [comp] = List_comp_nil List_comp_cons abbreviation "ListRec A C \<equiv> ListInd A (fn _. C)" @@ -110,7 +111,7 @@ Lemma head_type [typechk]: shows "head xs: Maybe A" unfolding head_def by typechk -Lemma head_of_cons [comps]: +Lemma head_of_cons [comp]: assumes "A: U i" "x: A" "xs: List A" shows "head (x # xs) \<equiv> some x" unfolding head_def by reduce @@ -120,7 +121,7 @@ Lemma tail_type [typechk]: shows "tail xs: List A" unfolding tail_def by typechk -Lemma tail_of_cons [comps]: +Lemma tail_of_cons [comp]: assumes "A: U i" "x: A" "xs: List A" shows "tail (x # xs) \<equiv> xs" unfolding tail_def by reduce @@ -181,7 +182,7 @@ Lemma rev_type [typechk]: shows "rev xs: List A" unfolding rev_def by typechk -Lemma rev_nil [comps]: +Lemma rev_nil [comp]: assumes "A: U i" shows "rev (nil A) \<equiv> nil A" unfolding rev_def by reduce diff --git a/spartan/lib/Maybe.thy b/spartan/lib/Maybe.thy index d821920..0ce534c 100644 --- a/spartan/lib/Maybe.thy +++ b/spartan/lib/Maybe.thy @@ -54,9 +54,10 @@ Lemma Maybe_comp_some: unfolding MaybeInd_def some_def by (reduce add: Maybe_def) lemmas - [intros] = MaybeF Maybe_none Maybe_some and - [comps] = Maybe_comp_none Maybe_comp_some and - MaybeE [elims "?m"] = MaybeInd[rotated 4] + [form] = MaybeF and + [intro, intros] = Maybe_none Maybe_some and + [comp] = Maybe_comp_none Maybe_comp_some and + MaybeE [elim "?m"] = MaybeInd[rotated 4] lemmas Maybe_cases [cases] = MaybeE diff --git a/spartan/lib/More_Types.thy b/spartan/lib/More_Types.thy index 0d7096f..55e6554 100644 --- a/spartan/lib/More_Types.thy +++ b/spartan/lib/More_Types.thy @@ -16,9 +16,9 @@ notation Sum (infixl "\<or>" 50) axiomatization where SumF: "\<lbrakk>A: U i; B: U i\<rbrakk> \<Longrightarrow> A \<or> B: U i" and - Sum_inl: "\<lbrakk>a: A; B: U i\<rbrakk> \<Longrightarrow> inl A B a: A \<or> B" and + Sum_inl: "\<lbrakk>B: U i; a: A\<rbrakk> \<Longrightarrow> inl A B a: A \<or> B" and - Sum_inr: "\<lbrakk>b: B; A: U i\<rbrakk> \<Longrightarrow> inr A B b: A \<or> B" and + Sum_inr: "\<lbrakk>A: U i; b: B\<rbrakk> \<Longrightarrow> inr A B b: A \<or> B" and SumE: "\<lbrakk> s: A \<or> B; @@ -42,9 +42,11 @@ axiomatization where \<rbrakk> \<Longrightarrow> SumInd A B (fn s. C s) (fn a. c a) (fn b. d b) (inr A B b) \<equiv> d b" lemmas - [intros] = SumF Sum_inl Sum_inr and - [elims ?s] = SumE and - [comps] = Sum_comp_inl Sum_comp_inr + [form] = SumF and + [intro] = Sum_inl Sum_inr and + [intros] = Sum_inl[rotated] Sum_inr[rotated] and + [elim ?s] = SumE and + [comp] = Sum_comp_inl Sum_comp_inr method left = rule Sum_inl method right = rule Sum_inr @@ -76,10 +78,11 @@ and BotE: "\<lbrakk>x: \<bottom>; \<And>x. x: \<bottom> \<Longrightarrow> C x: U i\<rbrakk> \<Longrightarrow> BotInd (fn x. 
C x) x: C x" lemmas - [intros] = TopF TopI BotF and - [elims ?a] = TopE and - [elims ?x] = BotE and - [comps] = Top_comp + [form] = TopF BotF and + [intro, intros] = TopI and + [elim ?a] = TopE and + [elim ?x] = BotE and + [comp] = Top_comp section \<open>Booleans\<close> @@ -125,9 +128,10 @@ Lemma if_false: by reduce lemmas - [intros] = BoolF Bool_true Bool_false and - [comps] = if_true if_false and - [elims ?x] = ifelse + [form] = BoolF and + [intro, intros] = Bool_true Bool_false and + [comp] = if_true if_false and + [elim ?x] = ifelse lemmas BoolE = ifelse @@ -136,7 +140,7 @@ subsection \<open>Notation\<close> definition ifelse_i ("if _ then _ else _") where [implicit]: "if x then a else b \<equiv> ifelse ? x a b" -no_translations "if x then a else b" \<leftharpoondown> "CONST ifelse C x a b" +translations "if x then a else b" \<leftharpoondown> "CONST ifelse C x a b" subsection \<open>Logical connectives\<close> |
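A closing note on side conditions: the boolean auto_typechk flag is gone, replaced by the integer option solve_side_conds declared in spartan/core/tactics.ML above — 1 tries to close side conditions with known facts only, 2 (the default) runs full type-checking/inference on them, and any other value leaves them untouched. A minimal usage sketch, taken from the hott/Equivalence.thy hunks, which lower the level around rewrites presumably for the same universe-level reasons the old proofs switched off auto_typechk:

```isabelle
(* Discharge the rewrite's side conditions from known facts only (level 1)
   instead of running the full type-checker (level 2, the default). *)
using [[solve_side_conds=1]]
apply (rewrite at B in "_ \<rightarrow> B" id_comp[symmetric])
```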