Zulip Chat Archive
Stream: lean4
Topic: How to prove properties about monad operations?
Nicolas Rouquette (Dec 31 2022 at 01:03):
Now that I'm more comfortable composing monad operations, I would like to prove some properties of such compositions. I'm aware that this is probably jumping ahead of @David Thrane Christiansen's section 8 so I made an MWE to precisely illustrate the problem:
import Lean.Data.HashMap
import Lean.Data.HashSet
instance [BEq α] [Hashable α] [Repr α]: Repr (Lean.HashSet α) where
reprPrec h n := h.toList.repr n
instance [BEq α] [Hashable α] [Repr (α × β)]: Repr (Lean.HashMap α β) where
reprPrec h n := h.toList.repr n
namespace MWE
inductive «Entity» where
| aspect
(name: String)
(«specializations»: List String := List.nil)
| concept
(name: String)
(«specializations»: List String := List.nil)
deriving Repr
def «Entity».name (e: «Entity»): String :=
match e with
| .aspect n _ => n
| .concept n _ => n
def «Entity».«specializations» (e: «Entity»): List String :=
match e with
| .aspect _ s => s
| .concept _ s => s
class «Vocabulary» where
«ownedStatements»: List «Entity»
deriving Repr
def v : «Vocabulary» := {
ownedStatements := [
«Entity».aspect "base:Container",
«Entity».concept "mission:Component" [ "base:Container" ]
]
}
inductive RDeclarationKind where
| rAspect
| rConcept
deriving BEq, Repr
def «Entity».toKind (e: «Entity»): RDeclarationKind :=
match e with
| .aspect _ _ => .rAspect
| .concept _ _ => .rConcept
inductive Exception where
| error (message: String)
deriving Repr
abbrev Names := Lean.HashSet String
abbrev Name2NamesMap := Lean.HashMap String Names
structure State where
declarations : Lean.HashMap String RDeclarationKind := .empty
aspectSpecializations : Name2NamesMap := .empty
conceptSpecializations : Name2NamesMap := .empty
deriving Repr
structure Context where
vocabularies: List «Vocabulary» := .nil
deriving Repr
abbrev MCore := EStateM Exception State
abbrev M := ReaderT Context MCore
def EStateM.Result.getState (r: EStateM.Result Exception State α): State :=
match r with
| EStateM.Result.ok _ s => s
| _ => {}
def State.appendSpecializations
(d: String) (dts: List RDeclarationKind)
(ds: List String)
(coll: State → Name2NamesMap)
(update: State → String → Names → State)
: M Unit := do
let s ← get
match s.declarations.find? d with
| some k =>
if dts.contains k then
let rds : Names := (coll s).findD d .empty
let merged : Names := ds.foldl .insert rds
let s' : State := update s d merged
set s'
pure ()
else
throw (Exception.error s!"Error: appendSpecializations: {repr d} is registered as a {repr k}, not one of {repr dts}.")
| none =>
throw (Exception.error s!"Error: appendSpecializations: there is no registered {repr dts}: {repr d} to append specializations to.")
def State.updateAspectSpecializations (s: State) (d: String) (ds: Names): State :=
{ s with aspectSpecializations := s.aspectSpecializations.insert d ds }
def State.appendAspectSpecializations (a: String) (as: List String): M Unit := do
appendSpecializations a [ .rAspect ] as State.aspectSpecializations State.updateAspectSpecializations
def State.updateConceptSpecializations (s: State) (d: String) (ds: Names): State :=
{ s with conceptSpecializations := s.conceptSpecializations.insert d ds }
def State.appendConceptSpecializations (c: String) (cs: List String): M Unit := do
appendSpecializations c [ .rAspect, .rConcept ] cs State.conceptSpecializations State.updateConceptSpecializations
def validateStatementDeclaration (e: «Entity»): M Unit := do
let s ← get
match s.declarations.find? e.name with
| some ek =>
throw (Exception.error s!"Error: declaration conflict: {repr e} is already registered as a {repr ek}.")
| none =>
let s := { s with declarations := s.declarations.insert e.name e.toKind }
set s
pure ()
def validateVocabularyStatementDeclarations: M Unit := do
for v in (← read).vocabularies do
for e in v.ownedStatements do
validateStatementDeclaration e
def validateVocabularySpecialization (e: «Entity»): M Unit := do
let s ← get
match s.declarations.find? e.name with
| some ek =>
if ek == e.toKind then
match ek with
| .rAspect =>
State.appendAspectSpecializations e.name e.specializations
| .rConcept =>
State.appendConceptSpecializations e.name e.specializations
else
throw (Exception.error s!"Error: declaration inconsistency: {repr e} is registered as a {repr ek}, not a {repr e.toKind}.")
| none =>
pure ()
def validateVocabularySpecializations: M Unit := do
for v in (← read).vocabularies do
for e in v.ownedStatements do
validateVocabularySpecialization e
def c0 : Context := { vocabularies := [v] }
def s0 : State := {}
def s1 : State := EStateM.Result.getState (validateVocabularyStatementDeclarations |>.run c0 |>.run s0)
#eval s1
-- { declarations := [("base:Container", MWE.RDeclarationKind.rAspect),
-- ("mission:Component", MWE.RDeclarationKind.rConcept)],
-- aspectSpecializations := [],
-- conceptSpecializations := [] }
def s2 : State := EStateM.Result.getState (validateVocabularySpecializations |>.run c0 |>.run s1)
#eval s2
-- { declarations := [("base:Container", MWE.RDeclarationKind.rAspect),
-- ("mission:Component", MWE.RDeclarationKind.rConcept)],
-- aspectSpecializations := [("base:Container", [])],
-- conceptSpecializations := [("mission:Component", ["base:Container"])] }
-- All keys of the aspectSpecialization map must have a corresponding declaration as an rAspect
theorem AllAspectsSpecializationsKeysAreDeclared (s: State):
∀ a: String, s.aspectSpecializations.contains a → s.declarations.find? a == some .rAspect
:= by
sorry
-- All values of the aspectSpecialization map must have a corresponding declaration as an rAspect
theorem AllAspectsSpecializationsValuesAreDeclared (s: State) :
∀ (a: String) (sup : String), (s.aspectSpecializations.findD a .empty).contains sup → s.declarations.find? sup == some .rAspect
:= by
sorry
-- All keys of the conceptSpecializations map must have a corresponding declaration as an rConcept
theorem AllConceptSpecializationsKeysAreDeclared (s: State):
∀ a: String, s.conceptSpecializations.contains a → s.declarations.find? a == some .rConcept
:= by
sorry
-- All values of the conceptSpecializations map must have a corresponding declaration as an rAspect or rConcept
theorem AllConceptSpecializationsValuesAreDeclared (s: State) :
∀ (a: String) (sup : String), (s.conceptSpecializations.findD a .empty).contains sup →
s.declarations.find? sup == some .rAspect || s.declarations.find? sup == some .rConcept
:= by
sorry
end MWE
In the above, s2 is a simple example of the State for which I would like to prove 4 theorems. However, I am not sure how to exploit properties of the monad operations and of their composition to do this.
For example, consider the first theorem, which I hope will be easy to prove:
-- All keys of the aspectSpecialization map must have a corresponding declaration as an rAspect
theorem AllAspectsSpecializationsKeysAreDeclared (s: State):
∀ a: String, s.aspectSpecializations.contains a → s.declarations.find? a == some .rAspect
:= by
sorry
Intuitively, it should follow from the composition of these two monad operations: validateVocabularyStatementDeclarations and validateVocabularySpecializations:
- both operations iterate over the same Context of vocabularies and their ownedStatements.
- the validation of statement declarations adds an entry in State.declarations.
- the validation of statement specializations adds entries in State.aspectSpecializations for the same keys as the previous step.
I hope that I've explained the problem sufficiently clearly, and I'm open to suggestions.
Nicolas Rouquette (Jan 02 2023 at 00:22):
The essence of the long MWE above can be reduced to this smaller MWE:
import Lean.Data.HashMap
import Lean.Data.HashSet
namespace MWE4
structure State where
declarations: Lean.HashMap String (Lean.HashSet String)
-- all declaration values must be declaration keys
def State.wff: State → Bool := fun s => s.declarations.toList.all fun (_, xs) => xs.toList.all s.declarations.contains
def State.withDeclaration (s: State) (d: String) : State :=
match s.declarations.contains d with
| true => s
| false => { s with declarations := s.declarations.insert d .empty }
def State.withSpecialization (s: State) (sub: String) (sup: String): State :=
let s := s.withDeclaration sup
{ s with declarations := s.declarations.insert sub ((s.declarations.findD sub .empty).insert sup) }
theorem State.withDeclarationWff (s: State) (d: String) : s.wff → (s.withDeclaration d).wff := sorry
theorem State.withSpecializationWff (s: State) (sub: String) (sup: String): s.wff → (s.withSpecialization sub sup).wff := sorry
end MWE4
Looking at examples in Lean and the Std4 library, I realized that I should try to use inductive data types instead, so I reformulated the MWE using Std4, preserving the intent of the example:
import Std.Data.AssocList
namespace MWE5
abbrev Sups := Std.AssocList String Unit
structure State where
declarations: Std.AssocList String Sups
-- all declaration values must be declaration keys
def State.wff: State → Prop :=
fun s => s.declarations.all
fun (_ sups) => sups.all
fun (sup _) => s.declarations.contains sup
def State.withDeclaration (s: State) (d: String) : State :=
match s.declarations.contains d with
| true => s
| false => { s with declarations := s.declarations.replace d .nil }
def State.withSpecialization (s: State) (sub: String) (sup: String): State :=
let s := s.withDeclaration sup
let sups : Sups := match s.declarations.find? sub with
| none => .nil
| some x => x.replace sup ()
{ s with declarations := s.declarations.replace sub sups }
theorem State.withDeclarationWff (s: State) (d: String) : s.wff → (s.withDeclaration d).wff := sorry
theorem State.withSpecializationWff (s: State) (sub: String) (sup: String): s.wff → (s.withSpecialization sub sup).wff := sorry
end MWE5
I have several questions about this:
1) I noticed that there are different ways to define well-formedness predicates, as Bool vs. Prop:
https://github.com/leanprover/lean4/blob/master/tests/playground/pge.lean
https://github.com/leanprover/lean4/blob/master/doc/examples/bintree.lean
https://github.com/leanprover/std4/blob/main/Std/Data/AssocList.lean
It seems that one can use either form to define subtypes.
Is this difference a matter of proof writing style, or is there something more subtle going on?
2) Even though State is defined above in terms of inductive datatypes, it is not itself inductively defined.
It seems that this approach complicates writing proofs since we cannot use the cases tactic.
So, I suspect I must reformulate my MWE using an inductive datatype definition.
James Gallicchio (Jan 02 2023 at 00:59):
It seems that one can use either form to define subtypes.
Bool can be coerced to Prop (with true => True and false => False), so anywhere that expects a Prop you can also pass a Bool (including, e.g., the predicate for subtypes).
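For instance, here is a minimal sketch of that (evenb and Evens are made-up names, not from the thread):
def evenb (n : Nat) : Bool := n % 2 == 0
-- the subtype binder expects a Prop; the Bool gets coerced to `evenb n = true`
def Evens := { n : Nat // evenb n }
-- the same coercion applies in theorem statements
example : evenb 4 := by decide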
Nicolas Rouquette (Jan 03 2023 at 00:38):
Thanks @James Gallicchio; I see that there's indeed a conversion:
instance boolToProp : Coe Bool Prop where
coe b := Eq b true
I managed to prove a simple theorem about the data structure above; however, I'm getting stuck on the others:
import Std.Data.AssocList
namespace MWE5
abbrev Strings := List String
abbrev Sups := Std.AssocList String Strings
instance : Repr Sups where reprPrec s n := s.toList.repr n
structure State where
declarations: Std.AssocList String Strings := .nil
deriving Repr
def State.empty: State := {}
-- all declaration values must be declaration keys
def State.wff: State → Bool :=
fun s => s.declarations.all
fun (_ sups) => sups.all
fun sup => s.declarations.contains sup
def State.withDeclaration (s: State) (d: String) : State :=
match s.declarations.contains d with
| true => s
| false => { s with declarations := s.declarations.cons d .nil }
def State.withSpecialization (s: State) (sub: String) (sup: String): State :=
let s := s.withDeclaration sub |>.withDeclaration sup
let sups : Strings := match s.declarations.find? sub with
| none => [sup]
| some xs => if xs.contains sup then xs else xs.cons sup
{ s with declarations := s.declarations.replace sub sups }
theorem State.withDeclaration.noChange (s: State) (d: String) (h: s.declarations.contains d)
: s = s.withDeclaration d
:= by rw [State.withDeclaration, h]
theorem State.withDeclaration.added (s: State) (d: String) (h: !s.declarations.contains d)
: (s.withDeclaration d).declarations.contains d
:= by
rw [State.withDeclaration]
sorry
theorem State.withDeclaration.wff (s: State) (d: String)
: s.wff → (s.withDeclaration d).wff
:= by
simp [State.wff, State.withDeclaration, List.all, List.any]
intro h
sorry
theorem State.withSpecialization.wff (s: State) (sub: String) (sup: String)
: s.wff → (s.withSpecialization sub sup).wff
:= by
simp [State.wff, State.withSpecialization, State.withDeclaration, List.all, List.any]
intro h
sorry
def s0 : State := State.empty |>.withSpecialization "a" "b"
#eval s0
end MWE5
For the 2nd one, I am wondering how to simplify a nested expression in the goal; i.e.:
theorem State.withDeclaration.added (s: State) (d: String) (h: !s.declarations.contains d)
: (s.withDeclaration d).declarations.contains d
:= by
rw [State.withDeclaration]
sorry
The tactic state before sorry shows:
s: State
d: String
h: (!Std.AssocList.contains d s.declarations) = true
⊢ Std.AssocList.contains d
(match Std.AssocList.contains d s.declarations with
| true => s
| false => { declarations := Std.AssocList.cons d [] s.declarations }).declarations =
true
Is there a trick to apply !h inside the goal and simplify the match expression?
For the last 2 theorems, I am wondering if I am on the right path.
Any suggestions?
Nicolas Rouquette (Jan 03 2023 at 02:28):
I managed to make some progress, proving 3 theorems out of 7; any suggestions for the last 4?
import Std.Data.AssocList
namespace MWE5
abbrev Strings := List String
abbrev Sups := Std.AssocList String Strings
instance : Repr Sups where reprPrec s n := s.toList.repr n
structure State where
declarations: Std.AssocList String Strings := .nil
deriving Repr
def State.empty: State := {}
-- all declaration values must be declaration keys
def State.wff: State → Bool :=
fun s => s.declarations.all
fun (_ sups) => sups.all
fun sup => s.declarations.contains sup
def State.withDeclaration (s: State) (d: String) : State :=
match s.declarations.contains d with
| true => s
| false => { s with declarations := s.declarations.cons d .nil }
def State.withSpecialization (s: State) (sub: String) (sup: String): State :=
let s := s.withDeclaration sub |>.withDeclaration sup
let sups : Strings := match s.declarations.find? sub with
| none =>
-- this case can never happen since sub has been declared.
[sup]
| some xs =>
if xs.contains sup then xs else xs.cons sup
{ s with declarations := s.declarations.replace sub sups }
theorem State.withDeclaration.noChange (s: State) (d: String) (h: s.declarations.contains d)
: s = s.withDeclaration d
:= by rw [State.withDeclaration, h]
theorem State.withDeclaration.added1 (s: State) (d: String) (h: !s.declarations.contains d)
: (s.withDeclaration d).declarations.contains d
:= by
rw [State.withDeclaration] at *
simp [Std.AssocList.contains, List.any] at *
simp [*] at *
theorem State.withDeclaration.added2 (s: State) (d: String) (h: !s.declarations.contains d)
: (s.withDeclaration d).declarations.find? d = some []
:= by
rw [State.withDeclaration] at *
simp [Std.AssocList.contains, List.any] at *
simp [*] at *
theorem State.withDeclaration.wff (s: State) (d: String)
: s.wff → (s.withDeclaration d).wff
:= by
simp [State.wff, State.withDeclaration]
intro h
split
. apply h
. simp [*]; done
theorem State.withSpecialization.noChange (s: State) (sub: String) (sup: String)
(h1: s.declarations.contains sub)
(h2: s.declarations.contains sup)
(h3: (s.declarations.find? sub).all (fun sups => sups.contains sup))
: s = s.withSpecialization sub sup
:= by
--unfold State.withSpecialization State.withDeclaration at *
simp [State.withSpecialization, State.withDeclaration] at *
simp [*] at *
done
theorem State.withSpecialization.added (s: State) (sub: String) (sup: String)
(h1: !s.declarations.contains sub)
(h2: !s.declarations.contains sup)
: (s.withSpecialization sub sup).declarations.contains sub &&
(s.withSpecialization sub sup).declarations.contains sup
:= by
rw [State.withSpecialization] at *
simp [State.withDeclaration, Std.AssocList.contains, List.any, List.replaceF] at *
simp [*] at *
apply And.intro
done
theorem State.withSpecialization.wff (s: State) (sub: String) (sup: String)
: s.wff → (s.withSpecialization sub sup).wff
:= by
simp [State.wff, State.withSpecialization, State.withDeclaration] at *
intro h
simp [*] at *
done
def s0 : State := State.empty |>.withSpecialization "a" "b"
#eval s0
end MWE5
James Gallicchio (Jan 03 2023 at 04:45):
Ah, sure -- check out the split tactic!
To finish State.withDeclaration.added1:
split
. assumption
. simp
Something similar should work for the others!
James Gallicchio (Jan 03 2023 at 05:17):
Oh, wait, I misunderstood your question...
Rewriting within a match is generally hard to do. I'm not sure if there's a way to rewrite through it. The only way I know to do what you want is to split and then do the simp/rewrite in each case. Usually this isn't too bad, since you can use the <;> combinator, like split <;> simp or similar.
James Gallicchio (Jan 03 2023 at 05:22):
Honestly, a good strategy with any goal involving match is to just split on it. Sometimes I find it much easier to clean up unnecessary splits after the fact, instead of thinking too deeply about whether I need to split or not...
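For instance, a minimal sketch of splitting a match in the goal (a made-up example, not from the thread):
example (b : Bool) (h : b = true)
    : (match b with | true => 1 | false => 2) = 1 := by
  split
  . rfl        -- `true` branch: the match reduces to 1
  . simp at h  -- `false` branch: `h : false = true` is absurd, which closes the goal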
Nicolas Rouquette (Jan 03 2023 at 05:30):
Thanks, I managed to simplify a few proofs:
theorem State.withDeclaration.added1 (s: State) (d: String) (h: !s.declarations.contains d)
: (s.withDeclaration d).declarations.contains d
:= by
rw [State.withDeclaration] at *
simp_all
theorem State.withDeclaration.added2 (s: State) (d: String) (h: !s.declarations.contains d)
: (s.withDeclaration d).declarations.find? d = some []
:= by
rw [State.withDeclaration] at *
simp_all
For the next one, I am not sure how to split the nested Or:
theorem State.withDeclaration.wff (s: State) (d: String)
: s.wff → (s.withDeclaration d).wff
:= by
simp_all [State.wff, State.withDeclaration]
intro h
split
. apply h
. simp_all
sorry
Before the sorry, the tactic state shows:
s: State
d: String
x✝: Bool
h: (List.all (Std.AssocList.toList s.declarations) fun x =>
List.all x.snd fun sup => List.any (Std.AssocList.toList s.declarations) fun x => x.fst == sup) =
true
heq✝: (List.any (Std.AssocList.toList s.declarations) fun x => x.fst == d) = false
⊢ (List.all (Std.AssocList.toList s.declarations) fun x =>
List.all x.snd fun sup => d == sup || List.any (Std.AssocList.toList s.declarations) fun x => x.fst == sup) =
true
What are the grayed symbols x✝ and heq✝?
In the above, the assumption h is very close to the goal.
The goal has an Or whose left side d == sup seems like it ought to match heq✝ if I could somehow bind it to an assumption (I tried but it is not available); the right side could match the assumption h.
Since the Or is nested, split is not readily applicable.
How can I tackle this situation?
For the next one, the problem is similar in that I cannot readily apply an assumption to a nested expression:
theorem State.withSpecialization.noChange (s: State) (sub: String) (sup: String)
(h1: s.declarations.contains sub)
(h2: s.declarations.contains sup)
(h3: (s.declarations.find? sub).all (fun sups => sups.contains sup))
: s = s.withSpecialization sub sup
:= by
--unfold State.withSpecialization State.withDeclaration at *
simp [State.withSpecialization, State.withDeclaration] at *
simp_all
apply h3
sorry
The application of h3 fails because:
tactic 'apply' failed, failed to unify
Option.all (fun sups => List.contains sups sup)
(Option.map (fun x => x.snd) (List.find? (fun x => x.fst == sub) (Std.AssocList.toList s.declarations))) =
true
with
s =
{
declarations :=
Std.AssocList.replace sub
(match
Option.map (fun x => x.snd) (List.find? (fun x => x.fst == sub) (Std.AssocList.toList s.declarations)) with
| none => [sup]
| some xs => if List.contains xs sup = true then xs else sup :: xs)
s.declarations }
s: State
sub sup : String
h1: (List.any (Std.AssocList.toList s.declarations) fun x => x.fst == sub) = true
h2: (List.any (Std.AssocList.toList s.declarations) fun x => x.fst == sup) = true
h3: Option.all (fun sups => List.contains sups sup)
(Option.map (fun x => x.snd) (List.find? (fun x => x.fst == sub) (Std.AssocList.toList s.declarations))) =
true
⊢ s =
{
declarations :=
Std.AssocList.replace sub
(match
Option.map (fun x => x.snd) (List.find? (fun x => x.fst == sub) (Std.AssocList.toList s.declarations)) with
| none => [sup]
| some xs => if List.contains xs sup = true then xs else sup :: xs)
s.declarations }
The last two proofs involve much more complicated expressions; I have not figured out how to tackle them.
James Gallicchio (Jan 03 2023 at 06:03):
Nicolas Rouquette said:
What are the grayed symbols x✝ and heq✝?
Those are auto-generated, inaccessible names. There's a bunch of different ways to introduce accessible names, but the easiest here is probably to use next instead of . for the second case, like next x heq =>.
Almost all (all?) autogenerated names are inaccessible; I believe that's to ensure autogenerated names don't have to be stable.
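For instance, a minimal sketch (an artificial goal, not from the thread) of naming what split would otherwise leave inaccessible:
example (xs : List Nat)
    : 0 ≤ (match xs.head? with | none => 1 | some k => k + 1) := by
  split
  . exact Nat.zero_le _
  next k heq =>  -- `k : Nat` and `heq : xs.head? = some k` are now accessible
    exact Nat.zero_le _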
Since the Or is nested, split is not readily applicable.
Yeah, you have to do quite a lot of unpacking before you can use split. My first attempt at doing so reached an unprovable goal, but maybe it's still helpful to look at:
theorem State.withDeclaration.wff (s: State) (d: String)
: s.wff → (s.withDeclaration d).wff
:= by
simp_all [State.wff, State.withDeclaration]
intro h
split
. apply h
next x heq =>
simp_all
rw [←h]
clear h
congr
funext (a,b)
congr
funext sup
cases h : d == sup
case true =>
have : d = sup := eq_of_beq h
rw [←this, heq]
simp
sorry
case false =>
simp
done
How can I tackle this situation?
For the next one, the problem is similar in that I cannot readily apply an assumption to a nested expression:
theorem State.withSpecialization.noChange (s: State) (sub: String) (sup: String)
(h1: s.declarations.contains sub)
(h2: s.declarations.contains sup)
(h3: (s.declarations.find? sub).all (fun sups => sups.contains sup))
: s = s.withSpecialization sub sup
:= by
--unfold State.withSpecialization State.withDeclaration at *
simp [State.withSpecialization, State.withDeclaration] at *
simp_all
apply h3
sorry
James Gallicchio (Jan 03 2023 at 06:07):
As you can probably tell, Lean currently has very little automation for proofs about even relatively small programs... I don't know if I have good advice aside from keeping your lemmas very small and focused, since that's the only way I know to parse large expressions in Lean
Mario Carneiro (Jan 03 2023 at 06:45):
here's my attempt at that theorem:
import Std.Data.List.Lemmas
...
theorem State.withDeclaration.wff (s: State) (d: String) (h : s.wff) :
(s.withDeclaration d).wff := by
simp [State.wff, State.withDeclaration] at h ⊢
intro (s1, ss)
split <;> simp
· apply h
· rintro (⟨rfl, rfl⟩ | h'); {intro.}
exact fun _ hs => .inr (h _ h' _ hs)
James Gallicchio (Jan 03 2023 at 06:48):
I should really learn the rintro black magic at some point....
Mario Carneiro (Jan 03 2023 at 06:48):
the thing that pulls the most weight in this proof is the simp on the first line, plus the Std.Data.List.Lemmas import to make it actually do something
Mario Carneiro (Jan 03 2023 at 06:50):
The original proof had a simp [List.all]; that's the wrong move. You want to turn all those boolean functions into logical functions (in this case a ∀) ASAP, since then all the other stuff like intro and fun does useful things.
James Gallicchio (Jan 03 2023 at 06:53):
This does seem a bit situational, since we kinda luck out that List.all and List.any are succinct to describe in proposition land
James Gallicchio (Jan 03 2023 at 06:54):
A common situation, nonetheless :rofl:
Mario Carneiro (Jan 03 2023 at 07:00):
not really. If you look at AssocList you will notice that every single function has a theorem which relates it to proposition land written immediately afterward
Mario Carneiro (Jan 03 2023 at 07:03):
this is very much by design
Nicolas Rouquette (Jan 03 2023 at 16:38):
Thanks for all of your suggestions; I have plenty to think about.
Nicolas Rouquette (Jan 07 2023 at 21:26):
I want to make sure that I understand correctly @Mario Carneiro's ingenious proof, which I've reformatted like this:
theorem State.withDeclaration.wff (s: State) (d: String) (h: s.wff)
: (s.withDeclaration d).wff
:= by
simp [State.wff, State.withDeclaration] at h ⊢
intro (s1, ss)
split <;> simp
. apply h
· rintro (⟨rfl, rfl⟩ | h')
. intro.
. exact fun x hs => .inr (h _ h' x hs)
Just before rintro, the goal is:
⊢ s1 = d ∧ ss = [] ∨ (s1, ss) ∈ Std.AssocList.toList s.declarations →
∀ (x : String), x ∈ ss → d = x ∨ ∃ a, a ∈ Std.AssocList.toList s.declarations ∧ a.fst = x
Based on VS Code feedback, I see that rintro (⟨rfl, rfl⟩ | h') matches the left-hand side of the implication: s1 = d ∧ ss = [] ∨ (s1, ss) ∈ Std.AssocList.toList s.declarations.
Why is it matching just the LHS of the goal instead of the whole goal expression?
On the last line, the goal is the RHS of the above. I am really amazed by the flexibility of the equivalence between universal quantifiers and functions; it seems to me far more powerful than what is explained in section 4 here: https://leanprover.github.io/theorem_proving_in_lean4/quantifiers_and_equality.html
If I understand correctly, the goal at the exact line above is a universal quantifier expression:
⊢ ∀ (x : String), x ∈ ss → d = x ∨ ∃ a, a ∈ Std.AssocList.toList s.declarations ∧ a.fst = x
So it is equivalent to a function like this:
fun (x: String) (hs: x ∈ ss) => d = x ∨ ∃ a, a ∈ Std.AssocList.toList s.declarations ∧ a.fst = x
That equivalence allows matching the exact pattern function: exact fun x hs =>
Then on the right, the .inr construction involves the reverse.
With:
h: ∀ (x : String × Strings),
x ∈ Std.AssocList.toList s.declarations →
∀ (x_1 : String), x_1 ∈ x.snd → ∃ x, x ∈ Std.AssocList.toList s.declarations ∧ x.fst = x_1
h': (s1, ss) ∈ Std.AssocList.toList s.declarations
x: String
hs: x ∈ ss
The expression (h _ h' x hs) effectively uses h as a function of 4 arguments:
- (x : String × Strings), given by the placeholder _
- x ∈ Std.AssocList.toList s.declarations, given by the hypothesis h'
- (x_1 : String), given by x
- x_1 ∈ x.snd, given by the hypothesis hs
Did I get this right?
Kevin Buzzard (Jan 08 2023 at 12:35):
Are you asking about what the rintro tactic does? rintro is just intro then cases, and rintro rfl is just intro h and then subst h.
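A minimal sketch of that equivalence (not from the thread; rintro comes from Std, which the examples above already import):
example (n : Nat) : n = 0 → n + 1 = 1 := by
  rintro rfl; rfl
-- the same proof, spelled out step by step:
example (n : Nat) : n = 0 → n + 1 = 1 := by
  intro h; subst h; rfl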
Nicolas Rouquette (Jan 08 2023 at 17:00):
I read this in the doc; however, its application in this particular proof is unclear to me because it seems to be matching the LHS of the goal instead of the whole goal.
Then it took me a while to understand how the last line works, which is close to magical.
Hanting Zhang (Jan 08 2023 at 17:12):
The goal looks like A -> B (here both A and B are complicated). intro doesn't "match on the LHS"; rather, it introduces a : A into the context and turns the goal into B.
In particular, A here looks like x /\ y \/ z. Normally, you could do intro h, which would give you h : x /\ y \/ z in the context. Then you can destruct the ands and ors with split, etc. rintro just makes things easier by allowing you to destruct while introducing. So you could also write rintro (⟨hx, hy⟩ | hz), where the | eliminates on the or, and the ⟨hx, hy⟩ destructs the and. However, notice x and y are also equalities, and most likely your next step would have been subst hx; subst hy -- so rintro also allows the sugar rfl to automatically do that for you.
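For instance, a minimal sketch of that pattern (made-up names, not from the thread):
example (P : Nat → Prop) (hP : ∀ n, P n) (x y : Nat)
    : (x = 0 ∧ y = 1) ∨ P x → P x := by
  rintro (⟨rfl, rfl⟩ | hx)
  . exact hP 0  -- left case: the rfl patterns substituted x and y away
  . exact hx    -- right case: hx : P x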
Nicolas Rouquette (Jan 08 2023 at 20:52):
Ah, thanks @Hanting Zhang for the explanation.
James Gallicchio (Jan 08 2023 at 23:01):
Also worth clarifying, a small but important distinction:
- ∀ (x : String), x ∈ ss → ... is a type (specifically a dependent function type)
- (x : String) → x ∈ ss → ... is equivalent to that type, written in a syntax that emphasizes the function aspect
These are distinct from:
- fun (x : String) => fun (hs : x ∈ ss) => ..., a value of the above types
- fun (x : String) (hs : x ∈ ss) => ..., which is equivalent to the above value, just with more syntax sugar
So the function value inhabits the (dependent) function type, and the forall quantifier is secretly just a dependent function type. exact blah is a tactic that lets you provide a term whose type is exactly the goal. So here we are providing a value of type ∀ (x : String), ..., but terms of that type look like fun (x : String) => ...
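For instance, a minimal sketch of the two spellings of the same type, inhabited by the same function value (not from the thread):
-- the ∀-notation and the arrow notation denote the same dependent function type
example : ∀ n : Nat, n ≤ n + 1 := fun n => Nat.le_succ n
example : (n : Nat) → n ≤ n + 1 := fun n => Nat.le_succ n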
Nicolas Rouquette (Jan 08 2023 at 23:22):
Oh, thanks @James Gallicchio, this is indeed a subtle aspect of the propositions-as-types correspondence that I misunderstood.
To be fair, what you wrote above is clearer to me than the cryptic doc here:
https://leanprover.github.io/theorem_proving_in_lean4/quantifiers_and_equality.html
The Calculus of Constructions therefore identifies dependent arrow types with forall-expressions in this way. If p is any expression, ∀ x : α, p is nothing more than alternative notation for (x : α) → p, with the idea that the former is more natural than the latter in cases where p is a proposition.
I've read the above several times and I had not quite grasped the subtle distinction you're making, particularly the last paragraph.
James Gallicchio (Jan 08 2023 at 23:26):
Ah, yeah, that page seems hard to parse. I'll look into cleaning it up...
Nicolas Rouquette (Jan 09 2023 at 05:05):
Following earlier advice to simplify the functions even more, I reduced my struggle to the following MWE.
Below, there is a single function, addSup, for which I want to show that the result contains the element being added.
example : ∀ (l: Strings) (x: String), (addSup l x).contains x := sorry
I tried to prove this by induction on l; however, I ended up getting stuck at showing that a proof terminates.
import Std.Data.AssocList
import Std.Data.List.Lemmas
namespace MWE5a
abbrev Strings := List String
def addSup : Strings → String → Strings
| [], sup => [sup]
| x :: xs, sup => if x = sup then x :: xs else x :: (addSup xs sup)
theorem addSup.nil (sup: String)
: List.elem sup (addSup [] sup)
:= by simp [addSup]
theorem addSup.more (x: String) (head: String) (xs: Strings) (sup: String)
(h: List.elem sup (addSup (x :: xs) sup))
: List.elem sup (addSup (x :: head :: xs) sup)
:= by
match xs with
| [] =>
simp [addSup, h]
split
. next ht =>
simp_all
. next hf =>
simp_all
split
. rfl
. split
. simp_all
. next ht =>
rw [List.elem]
split
. rfl
. simp
| a :: as =>
rw [← addSup.more] -- lean says: failed to prove termination
assumption
termination_by addSup.more _ _ xs _ _ => xs.length
theorem addSup.cons (x: String) (xs: Strings) (sup: String)
: List.elem sup (addSup (x :: xs) sup)
:= by
induction xs
case nil =>
simp_all [addSup]
split
. case inl =>
simp_all
. case inr =>
next hf =>
simp_all [hf]
split <;> rfl
case cons head tail tail_ih =>
apply addSup.more x head tail sup tail_ih
end MWE5a
Above, the line rw [← addSup.more] shows:
failed to prove termination, possible solutions:
- Use `have`-expressions to prove the remaining goals
- Use `termination_by` to specify a different well-founded relation
- Use `decreasing_by` to specify your own tactic for discharging this kind of goal
a: String
as: List String
x head : String
xs: Strings
sup: String
h: List.elem sup (addSup (x :: xs) sup) = true
⊢ List.length xs < Nat.succ (List.length as)
Since I matched on xs and obtained the case a :: as, the goal above should be true.
I get the feeling that this proof strategy is overly complicated but I am not sure what it is that I'm missing.
I get the feeling that this proof strategy is overly complicated but I am not sure what it is that I'm missing.
Any suggestions?
James Gallicchio (Jan 09 2023 at 05:15):
If you just want to expose the fact that you pattern matched on xs, you can instead match h_xs : xs with ... and then use subst_vars to substitute a::as for xs. Not sure RE: simplifying the overall proof structure though...
Mario Carneiro (Jan 09 2023 at 05:19):
Here's how I would approach your original theorem:
example (l : Strings) (x : String) : (addSup l x).contains x := by
simp [List.contains]
apply List.elem_eq_true_of_mem
induction l <;> simp [addSup]
split <;> simp [*]
List.elem_eq_true_of_mem should be a simp lemma, which would have eliminated that line
Mario Carneiro (Jan 09 2023 at 05:21):
also, addSup is called List.insert in std (although it pushes the element to the front instead of the end)
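For instance, a quick sketch of that behavior:
#eval ["b", "c"].insert "a"  -- ["a", "b", "c"]: prepends when the element is absent
#eval ["b", "c"].insert "b"  -- ["b", "c"]: unchanged when the element is present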
Nicolas Rouquette (Jan 14 2023 at 00:44):
OK, I followed Mario's suggestion to use List.insert instead of my own...
Now, I am getting stuck at proving an existential property:
import Std.Data.AssocList
import Std.Data.List.Lemmas
namespace MWE7
abbrev Strings := List String
def addBoth : (Std.AssocList String Strings) → String → String → (Std.AssocList String Strings)
| .nil, sub, sup => .cons sup [] (.cons sub [sup] .nil)
| .cons a as tail, sub, sup => bif a == sub then .cons sub (as.insert sup) tail else .cons a as (addBoth tail sub sup)
theorem addBoth.nil (sub: String) (sup: String)
: (addBoth .nil sub sup).contains sub && (addBoth .nil sub sup).contains sup
:= by simp [addBoth]
theorem addBoth.sub (ss: Std.AssocList String Strings) (sub: String) (sup: String)
: (addBoth ss sub sup).contains sub
:= by
simp [Std.AssocList.contains]
induction ss
case nil =>
simp [addBoth]
case cons key value tail tail_ih =>
simp [addBoth]
-- apply Exists.elim
sorry
end MWE7
Just before the sorry, the tactic state is:
sub sup key : String
value: Strings
tail: Std.AssocList String Strings
tail_ih: ∃ x, x ∈ Std.AssocList.toList (addBoth tail sub sup) ∧ x.fst = sub
⊢ ∃ x,
x ∈
Std.AssocList.toList
(bif key == sub then Std.AssocList.cons sub (List.insert sup value) tail
else Std.AssocList.cons key value (addBoth tail sub sup)) ∧
x.fst = sub
According to https://leanprover.github.io/theorem_proving_in_lean4/quantifiers_and_equality.html#the-existential-quantifier
I should be able to apply Exists.elim; however, there are implicit variables in that theorem:
theorem Exists.elim {α : Sort u} {p : α → Prop} {b : Prop}
(h₁ : Exists (fun x => p x)) (h₂ : ∀ (a : α), p a → b) : b :=
match h₁ with
| intro a h => h₂ a h
So if I apply it, I get subgoals that I have no idea how to discharge:
case h₁
sub sup key : String
value: Strings
tail: Std.AssocList String Strings
tail_ih: ∃ x, x ∈ Std.AssocList.toList (addBoth tail sub sup) ∧ x.fst = sub
⊢ ∃ x, ?p x
case h₂
sub sup key : String
value: Strings
tail: Std.AssocList String Strings
tail_ih: ∃ x, x ∈ Std.AssocList.toList (addBoth tail sub sup) ∧ x.fst = sub
⊢ ∀ (a : ?α),
?p a →
∃ x,
x ∈
Std.AssocList.toList
(bif key == sub then Std.AssocList.cons sub (List.insert sup value) tail
else Std.AssocList.cons key value (addBoth tail sub sup)) ∧
x.fst = sub
case α
sub sup key : String
value: Strings
tail: Std.AssocList String Strings
tail_ih: ∃ x, x ∈ Std.AssocList.toList (addBoth tail sub sup) ∧ x.fst = sub
⊢ Sort ?u.3967
case p
sub sup key : String
value: Strings
tail: Std.AssocList String Strings
tail_ih: ∃ x, x ∈ Std.AssocList.toList (addBoth tail sub sup) ∧ x.fst = sub
⊢ ?α → Prop
How do I handle this kind of proof?
James Gallicchio (Jan 14 2023 at 01:39):
Exists.elim lets you sort of "forward" the burden of existence to a different existential. Here, if you apply Exists.elim tail_ih, you'll be left with the goal of proving that the tail_ih property implies the goal before Exists.elim.
James Gallicchio (Jan 14 2023 at 01:40):
And since you’re supplying h1 in Exists.elim explicitly, it will unify against that to figure out the implicit variables in the theorem :)
Nicolas Rouquette (Jan 14 2023 at 22:07):
I think I need something that deconstructs the goal, not the hypothesis, and splits the cases for the nested bif ... expression.
The closest I managed to get is the following:
def addBoth : (Std.AssocList String Strings) → String → String → (Std.AssocList String Strings)
| .nil, sub, sup => .cons sup [] (.cons sub [sup] .nil)
| .cons a as tail, sub, sup => bif a == sub then .cons sub (as.insert sup) tail else .cons a as (addBoth tail sub sup)
theorem addBoth.sub (ss: Std.AssocList String Strings) (sub: String) (sup: String)
: (addBoth ss sub sup).contains sub
:= by
simp [Std.AssocList.contains]
induction ss
case nil =>
simp [addBoth]
case cons key value tail tail_ih =>
simp [addBoth]
by_cases key == sup
case pos h => {
}
case neg nh => {
}
Now, in the pos case, the tactic state becomes:
sub sup key : String
value: Strings
tail: Std.AssocList String Strings
tail_ih: ∃ x, x ∈ Std.AssocList.toList (addBoth tail sub sup) ∧ x.fst = sub
h: (key == sup) = true
⊢ ∃ x,
x ∈
Std.AssocList.toList
(bif key == sub then Std.AssocList.cons sub (List.insert sup value) tail
else Std.AssocList.cons key value (addBoth tail sub sup)) ∧
x.fst = sub
and in the neg case, I get:
sub sup key : String
value: Strings
tail: Std.AssocList String Strings
tail_ih: ∃ x, x ∈ Std.AssocList.toList (addBoth tail sub sup) ∧ x.fst = sub
nh: ¬(key == sup) = true
⊢ ∃ x,
x ∈
Std.AssocList.toList
(bif key == sub then Std.AssocList.cons sub (List.insert sup value) tail
else Std.AssocList.cons key value (addBoth tail sub sup)) ∧
x.fst = sub
Now, it seems that I need to deconstruct the existential quantifier of the goal, not the tail_ih hypothesis.
With apply Exists.elim, I get multiple cases for the implicit parameters of the theorem; that does not seem like the right way to go.
With apply Exists.elim tail_ih, the goal transforms from just an existential quantification into an implication of a universal quantification for the existential quantification. That seems to me to be going backwards, not towards deconstructing the goal.
It seems to me that there is something missing in the guidance for dealing w/ existential quantifiers.
James Gallicchio (Jan 14 2023 at 22:11):
The tail_ih hypothesis can be split using match or cases to put the x and the proof of its properties into context.
Then, if you can come up with some expression e for which the goal properties hold, you can refine ⟨e, ?_⟩ to claim e satisfies those properties (you then have to prove it does satisfy those properties).
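For instance, a minimal sketch of supplying a witness with refine (not from the thread):
example : ∃ n : Nat, n + 1 = 3 := by
  refine ⟨2, ?_⟩  -- supply the witness, leaving its property as the remaining goal
  rfl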
James Gallicchio (Jan 14 2023 at 22:13):
I had the impression that the x of tail_ih is the same x that works for the goal, in which case apply Exists.elim tail_ih; intro h is basically the same as doing the above steps one at a time
James Gallicchio (Jan 14 2023 at 22:16):
TPIL has a section about existentials that will probably explain better than I can
Nicolas Rouquette (Jan 14 2023 at 22:24):
I found something helpful: https://leanprover.zulipchat.com/#narrow/stream/270676-lean4/topic/.E2.9C.94.20Golfable.20proof.3F/near/311749494
Then I managed to make progress:
abbrev Strings := List String
def addBoth : (Std.AssocList String Strings) → String → String → (Std.AssocList String Strings)
| .nil, sub, sup => .cons sup [] (.cons sub [sup] .nil)
| .cons a as tail, sub, sup => bif a = sub then .cons sub (as.insert sup) tail else .cons a as (addBoth tail sub sup)
theorem cond_eq_ite (c : Bool) (a b : α) : cond c a b = if c then a else b := by cases c <;> rfl
theorem addBoth.sub (ss: Std.AssocList String Strings) (sub: String) (sup: String)
: (addBoth ss sub sup).contains sub
:= by
simp [Std.AssocList.contains]
induction ss
. case nil =>
simp [addBoth]
. case cons key value tail tail_ih =>
simp [addBoth]
simp [cond_eq_ite]
by_cases key = sup
. case pos h =>
simp [h]
by_cases sub = sup
. case pos ss =>
simp [ss]
. case neg ss =>
sorry
. case neg h =>
sorry
When the condition is true, I solved the goal by simply applying the condition to simplify the if.
When the condition is false, it seems that I need some other theorem to simplify the if, e.g.:
sub sup key : String
value: Strings
tail: Std.AssocList String Strings
tail_ih: ∃ x, x ∈ Std.AssocList.toList (addBoth tail sub sup) ∧ x.fst = sub
h: key = sup
ss: ¬sub = sup
⊢ ∃ x,
x ∈
Std.AssocList.toList
(if sup = sub then Std.AssocList.cons sub (List.insert sup value) tail
else Std.AssocList.cons sup value (addBoth tail sub sup)) ∧
x.fst = sub
Nicolas Rouquette (Jan 14 2023 at 22:27):
Got it!
import Std.Data.AssocList
import Std.Data.List.Lemmas
namespace MWE7
abbrev Strings := List String
def addBoth : (Std.AssocList String Strings) → String → String → (Std.AssocList String Strings)
| .nil, sub, sup => .cons sup [] (.cons sub [sup] .nil)
| .cons a as tail, sub, sup => bif a = sub then .cons sub (as.insert sup) tail else .cons a as (addBoth tail sub sup)
theorem addBoth.nil (sub: String) (sup: String)
: (addBoth .nil sub sup).contains sub && (addBoth .nil sub sup).contains sup
:= by simp [addBoth]
theorem cond_eq_ite (c : Bool) (a b : α) : cond c a b = if c then a else b := by cases c <;> rfl
theorem addBoth.sub (ss: Std.AssocList String Strings) (sub: String) (sup: String)
: (addBoth ss sub sup).contains sub
:= by
simp [Std.AssocList.contains]
induction ss
. case nil =>
simp [addBoth]
. case cons key value tail tail_ih =>
simp [addBoth]
simp [cond_eq_ite]
by_cases key = sup
. case pos h =>
simp [h]
by_cases sub = sup
. case pos ss =>
simp [ss]
. case neg ss =>
split <;> simp_all
. case neg =>
split <;> simp_all
end MWE7
There is probably a way to simplify the proof; this is my first non-trivial proof about a non-trivial data structure operation that I managed to do in Lean! Yeah!
James Gallicchio (Jan 14 2023 at 22:43):
If you want cool lean tricks, you can write the induction as
induction ss <;> simp [addBoth]
next key value tail tail_ih =>
simp [cond_eq_ite]
....
It looks pretty clean overall :D
Nicolas Rouquette (Jan 14 2023 at 23:47):
Thanks for the tip. Encouraged by this progress, I tried to prove something a bit more difficult that requires dealing w/ an existential quantifier.
I think I can simplify the proof w/ an auxiliary lemma but I am not sure how to handle this pattern.
import Std.Data.AssocList
import Std.Data.List.Lemmas
namespace MWE8
abbrev Strings := List String
def addBoth : (Std.AssocList String Strings) → String → String → (Std.AssocList String Strings)
| .nil, sub, sup => .cons sup [] (.cons sub [sup] .nil)
| .cons a as tail, sub, sup => bif a = sub then .cons sub (as.insert sup) tail else .cons a as (addBoth tail sub sup)
theorem addBoth.nil (sub: String) (sup: String)
: (addBoth .nil sub sup).contains sub && (addBoth .nil sub sup).contains sup
:= by simp [addBoth]
theorem cond_eq_ite (c : Bool) (a b : α) : cond c a b = if c then a else b := by cases c <;> rfl
theorem addBoth.tail (a: String × Strings) (tail: Std.AssocList String Strings) (sub: String) (sup: String)
(h: a ∈ Std.AssocList.toList (addBoth tail sub sup) ∧ a.fst = sup)
: ∃ a, a ∈ Std.AssocList.toList tail ∧ a.fst = sup
:= by
induction tail <;> simp [addBoth]
. case nil =>
simp_all
sorry
. case cons key value tail tail_ih =>
by_cases key = sup
. case pos h =>
apply Or.inl h
. case neg h ht =>
simp_all
sorry
theorem addBoth.sup (ss: Std.AssocList String Strings) (sub: String) (sup: String)
: (addBoth ss sub sup).contains sup
:= by
simp [Std.AssocList.contains]
induction ss <;> simp [addBoth]
. next key value tail tail_ih =>
simp [addBoth]
simp [cond_eq_ite]
by_cases key = sub
. case pos h =>
simp [h]
by_cases sub = sup
. case pos ss =>
apply Or.inl ss
. case neg ss =>
simp [ss]
apply Exists.elim tail_ih
intro a h
apply addBoth.tail a tail sub sup h
. case neg =>
split <;> simp_all
end MWE8
For the nil case, the sorry tactic state is:
a: String × Strings
sub sup : String
h: (a = (sup, []) ∨ a = (sub, [sup])) ∧ a.fst = sup
⊢ False
Here, h is "obviously" false, so I should be able to prove this by contradiction, splitting over h.
Not sure how to do this...
For the cons.neg case, the sorry tactic state is:
a: String × Strings
sub sup key : String
value: Strings
tail: Std.AssocList String Strings
tail_ih: a ∈ Std.AssocList.toList (addBoth tail sub sup) → ∃ a, a ∈ Std.AssocList.toList tail ∧ a.fst = sup
h: a ∈ Std.AssocList.toList (addBoth (Std.AssocList.cons key value tail) sub sup) ∧ a.fst = sup
ht: ¬key = sup
⊢ ∃ a, a ∈ Std.AssocList.toList tail ∧ a.fst = sup
This seems like regressing back to the original goal.
I am wondering if this is the right proof strategy.
Casavaca (Jan 27 2023 at 20:37):
Could anyone show me how to prove this? Thanks.
example {α} {m : Type u → Type v} (l : List α) [inst : Monad m]:
inst.pure l = (fun (x : List α) => l ++ x) <$> inst.pure [] := by sorry
l : List α
inst : Monad m
⊢ pure l = (fun x => l ++ x) <$> pure []
Mario Carneiro (Jan 27 2023 at 20:38):
it's not true without a LawfulMonad instance
Mario Carneiro (Jan 27 2023 at 20:40):
example {α} {m : Type u → Type v} (l : List α) [Monad m] [LawfulMonad m] :
(pure l : m _) = (fun (x : List α) => l ++ x) <$> pure [] := by simp
Casavaca (Jan 28 2023 at 03:35):
Thanks. I learned something new.
Casavaca (Jan 28 2023 at 22:26):
Could anyone show me how to prove this (if it's true). Thanks again.
example {m : Type u → Type v} [Monad m] [LawfulMonad m]
(l : List β) (tail : (m (List β))) (head : (m β)) :
(do {
let __do_lift ← head
(List.cons __do_lift) <$> tail
}) =
Seq.seq (List.cons <$> head) fun x => tail := by
Parth Shastri (Jan 28 2023 at 22:35):
simp [seq_eq_bind, map_eq_pure_bind]
Casavaca (Jan 29 2023 at 02:06):
Thanks.
Can you tell me how to find such a proof? library_search (mathlib4) shows a bunch of unrelated results.
Mario Carneiro (Jan 29 2023 at 13:39):
there is a pretty short list of monad lemmas (roughly the contents of the LawfulMonad class itself) which suffice to put all monad expressions into a normal form. If you are rewriting seq or <$> or bind (usually pretty printed as do let x <- a; b), then you should look there first
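For instance, two lemmas from that short list, pure_bind and bind_assoc from core, here discharged by simp (a sketch, not an exhaustive list):
example {m : Type u → Type v} {α β : Type u} [Monad m] [LawfulMonad m]
    (a : α) (f : α → m β) : pure a >>= f = f a := by simp
example {m : Type u → Type v} {α β γ : Type u} [Monad m] [LawfulMonad m]
    (x : m α) (f : α → m β) (g : β → m γ) :
    x >>= f >>= g = x >>= fun a => f a >>= g := by simp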
James Gallicchio (Jan 31 2023 at 16:57):
is there a reason they're not marked simp lemmas? (have we just not settled on what the normal form should be?)
Last updated: Dec 20 2023 at 11:08 UTC