Zulip Chat Archive
Stream: Is there code for X?
Topic: Tensor product distributes over submodules
Eric Wieser (Nov 12 2023 at 18:12):
Do we have this?
import Mathlib.LinearAlgebra.TensorProduct
open scoped TensorProduct
suppress_compilation
variable {R M P : Type*} [CommSemiring R] [AddCommMonoid M] [Module R M] [AddCommMonoid P] [Module R P]
/-- A tensor product of submodules injects into the image of the submodules under the tensor product -/
def TensorProduct.subModuleDistrib (W₁ W₂ : Submodule R M) :
    W₁ ⊗[R] W₂ →ₗ[R] Submodule.map₂ (TensorProduct.mk _ _ _) W₁ W₂ :=
  (LinearMap.codRestrict _ (TensorProduct.map W₁.subtype W₂.subtype) <| fun c => by
    induction c using TensorProduct.induction_on with
    | zero => simpa only [map_zero] using zero_mem _
    | add x y hx hy => simpa only [map_add] using add_mem hx hy
    | tmul w₁ w₂ => exact Submodule.apply_mem_map₂ _ w₁.prop w₂.prop)
-- but it also surjects, right?
def Submodule.tensorProductDistrib (W₁ W₂ : Submodule R M) :
    Submodule.map₂ (TensorProduct.mk _ _ _) W₁ W₂ →ₗ[R] W₁ ⊗[R] W₂ :=
  -- can we do this constructively?
  sorry
noncomputable def TensorProduct.submoduleDistribEquiv {W₁ W₂ : Submodule R M} :
    W₁ ⊗[R] W₂ ≃ₗ[R] Submodule.map₂ (TensorProduct.mk _ _ _) W₁ W₂ :=
  LinearEquiv.ofLinear
    (TensorProduct.subModuleDistrib _ _)
    (Submodule.tensorProductDistrib _ _)
    sorry
    sorry
(inspired by one of @Richard Copley's threads)
Eric Wieser (Nov 12 2023 at 18:18):
I feel like for that sorry I need
/-- If a linear map is given on each submodule in a family, and the maps agree on intersections,
then they can be glued to a linear map on the supremum. -/
def suprLift {ι N : Type*} [AddCommMonoid N] [Module R N]
    (S : ι → Submodule R M) (f : ∀ i, S i →ₗ[R] N)
    (hf : ∀ i j x, (hi : x ∈ S i) → (hj : x ∈ S j) → f i ⟨x, hi⟩ = f j ⟨x, hj⟩) :
    ↥(⨆ i, S i) →ₗ[R] N := sorry
which I guess is docs#Set.iUnionLift but for submodules
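To make the intended use concrete, here is a sketch that relies only on the sorried suprLift above (not on any existing Mathlib declaration), appended to the snippet so that the ambient R and M are in scope: the inclusions of the S i trivially agree on intersections, so they should glue to the inclusion of the supremum.
-- Hypothetical usage of the `suprLift` stated above; the agreement condition holds by `rfl`
-- because each `(S i).subtype` just returns the underlying element of `M`.
example {ι : Type*} (S : ι → Submodule R M) : ↥(⨆ i, S i) →ₗ[R] M :=
  suprLift S (fun i => (S i).subtype) (fun _ _ _ _ _ => rfl)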
Richard Copley (Nov 12 2023 at 22:02):
As you saw, I had been looking at the same thing, with LinearMap.mul' composed.
import Mathlib.LinearAlgebra.TensorProduct
import Mathlib.LinearAlgebra.TensorProductBasis
import Mathlib.LinearAlgebra.TensorAlgebra.Basic
suppress_compilation
variable {R M P : Type*} [CommRing R] [AddCommGroup M] [Module R M] [AddCommGroup P] [Module R P]
open TensorProduct Submodule LinearMap in
def Basis.tensorMul {ι₁ ι₂ : Type*} {W₁ W₂ : Submodule R (TensorAlgebra R M)}
    (ℰ₁ : Basis ι₁ R W₁) (ℰ₂ : Basis ι₂ R W₂) : Basis (ι₁ × ι₂) R (W₁ * W₂) := by
  letI : AddCommGroup (W₁ ⊗[R] W₂) := inferInstance
  let incl' : W₁ ⊗[R] W₂ →ₗ[R] TensorAlgebra R M :=
    LinearMap.mul' R (TensorAlgebra R M) ∘ₗ mapIncl W₁ W₂
  let incl : W₁ ⊗[R] W₂ →ₗ[R] W₁ * W₂ := LinearMap.codRestrict _ incl' (by
    intro x
    induction x using TensorProduct.induction_on with
    | zero => rewrite [LinearMap.map_zero] ; exact Submodule.zero_mem (W₁ * W₂)
    | tmul x y => exact mul_mem_mul x.property y.property
    | add x y ihx ihy => rewrite [LinearMap.map_add] ; exact Submodule.add_mem (W₁ * W₂) ihx ihy)
  have inj : LinearMap.ker incl = ⊥ := by
    sorry
  have surj : LinearMap.range incl = ⊤ := by
    sorry
  have e : W₁ ⊗[R] W₂ ≃ₗ[R] W₁ * W₂ :=
    LinearEquiv.ofBijective incl ⟨ker_eq_bot.mp inj, range_eq_top.mp surj⟩
  exact (Basis.tensorProduct ℰ₁ ℰ₂).map e
I thought I was stuck on showing that there are no zero-divisors with respect to ⊗ₜ[R] (this isn't true, right?). But in fact even without the mul', that is, for your TensorProduct.subModuleDistrib, I can't see how to get the injectivity (though it feels terribly obvious).
Did I miss something subtle in your post? Were you just planning to go for bijectivity immediately?
import Mathlib.LinearAlgebra.TensorProduct
open scoped TensorProduct
suppress_compilation
variable {R M P : Type*} [CommSemiring R] [AddCommMonoid M] [Module R M] [AddCommMonoid P] [Module R P]
/-- A tensor product of submodules injects into the image of the submodules under the tensor product -/
def TensorProduct.subModuleDistrib (W₁ W₂ : Submodule R M) :
W₁ ⊗[R] W₂ →ₗ[R] Submodule.map₂ (TensorProduct.mk _ _ _) W₁ W₂ :=
(LinearMap.codRestrict _ (TensorProduct.map W₁.subtype W₂.subtype) <| fun c => by
induction c using TensorProduct.induction_on with
| zero => simpa only [map_zero] using zero_mem _
| add x y hx hy => simpa only [map_add] using add_mem hx hy
| tmul w₁ w₂ => exact Submodule.apply_mem_map₂ _ w₁.prop w₂.prop)
-- Right?
open Submodule LinearMap in
theorem TensorProduct.subModuleDistrib_inj (W₁ W₂ : Submodule R M) :
    LinearMap.ker (subModuleDistrib W₁ W₂) = ⊥ := by
  unfold subModuleDistrib
  rewrite [ker_codRestrict, ker_eq_bot']
  intro x
  induction x using TensorProduct.induction_on with
  | zero => intro ; rfl
  | tmul u v =>
    rewrite [map_tmul, subtype_apply, subtype_apply]
    show (u.val ⊗ₜ[R] v.val : M ⊗[R] M) = 0 → (u ⊗ₜ[R] v : W₁ ⊗[R] W₂) = 0
    sorry
  | add u v ihu ihv =>
    sorry
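As an aside on the Basis.tensorMul snippet above: if its two sorries were filled, one immediate consequence would be that W₁ * W₂ is a free module. A sketch (illustration only, assuming Mathlib.LinearAlgebra.FreeModule.Basic is additionally imported into that first snippet so that Module.Free.of_basis is available):
-- Sketch, to be appended to the `Basis.tensorMul` snippet: a basis for `W₁ * W₂`
-- in particular exhibits it as a free module.
example {ι₁ ι₂ : Type*} {W₁ W₂ : Submodule R (TensorAlgebra R M)}
    (ℰ₁ : Basis ι₁ R W₁) (ℰ₂ : Basis ι₂ R W₂) : Module.Free R ↥(W₁ * W₂) :=
  Module.Free.of_basis (ℰ₁.tensorMul ℰ₂)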
Junyan Xu (Nov 13 2023 at 00:32):
@Eric Wieser You didn't prove injectivity at all! Recall the definition of flatness:
[image: definition of flatness]
which says that if M is not flat, then for some I : Submodule R R, the map I ⊗ M → R ⊗ M is not injective. However, we can't take M = R since R is flat as an R-module. To construct a counterexample in the setting where W₁ and W₂ are submodules of the same R-module M, we may take W₁ = I ⊕ 0 and W₂ = 0 ⊕ M as submodules of R ⊕ M.
On the other hand, surjectivity should be easy enough using docs#Submodule.map₂_le.
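(A minimal, self-contained sketch of that counterexample setup; the spelling via Submodule.prod and the import are illustrative choices, not taken from the thread:)
import Mathlib.LinearAlgebra.Prod

variable {R M : Type*} [CommRing R] [AddCommGroup M] [Module R M]

-- `W₁ = I ⊕ 0`, viewed as a submodule of `R × M`
example (I : Submodule R R) : Submodule R (R × M) := I.prod ⊥
-- `W₂ = 0 ⊕ M`, viewed as a submodule of `R × M`
example : Submodule R (R × M) := (⊥ : Submodule R R).prod ⊤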
Eric Wieser (Nov 13 2023 at 00:41):
What do you mean, I must have proved it because I said so in the docstring :upside_down:
Eric Wieser (Nov 13 2023 at 00:41):
It's not clear to me how Submodule.map₂_le helps with the converse; it's not clear to me which submodule I should compare to
Junyan Xu (Nov 13 2023 at 00:44):
You want to show Submodule.map₂ ... is contained in the range of W₁ ⊗[R] W₂ →ₗ[R] M ⊗[R] M, right?
Richard Copley (Nov 13 2023 at 00:49):
open Submodule LinearMap in
theorem TensorProduct.subModuleDistrib_surj (W₁ W₂ : Submodule R M) :
    LinearMap.range (subModuleDistrib W₁ W₂) = ⊤ := by
  unfold subModuleDistrib
  rewrite [range_codRestrict, comap_subtype_eq_top, map₂_le]
  intro m hm n hn
  rewrite [mem_range]
  use ⟨m, hm⟩ ⊗ₜ[R] ⟨n, hn⟩
  rewrite [map_tmul]
  rfl
Eric Wieser (Nov 13 2023 at 00:53):
lemma TensorProduct.subModuleDistrib_surjective (W₁ W₂ : Submodule R M) :
    Function.Surjective (TensorProduct.subModuleDistrib W₁ W₂) := by
  rw [←LinearMap.range_eq_top, TensorProduct.subModuleDistrib, LinearMap.range_codRestrict,
    Submodule.comap_subtype_eq_top, Submodule.map₂_le]
  simp_rw [Subtype.forall']
  intros x₁ x₂
  exact ⟨x₁ ⊗ₜ x₂, rfl⟩
Eric Wieser (Nov 13 2023 at 00:53):
Darn, you beat me to it
Richard Copley (Nov 16 2023 at 19:56):
Nerdsniped by @Junyan Xu. Here's some work towards the first TODO in Mathlib.RingTheory.Flat. The next part (extending from ideals of R to submodules of a general module) seems extremely difficult if you look at Stacks. Less so if you look at Dummit and Foote or AtiMac.
import Mathlib.LinearAlgebra.TensorProduct.RightExactness
import Mathlib.RingTheory.Flat
-- Remove the `FG` condition from `Module.Flat.iff_rTensor_injective`
noncomputable section
suppress_compilation
open TensorProduct
variable {R : Type*} [CommRing R]
variable (M : Type*) [AddCommGroup M] [Module R M]
variable (N : Type*) [AddCommGroup N] [Module R N]
-- When `I` and `J` are submodules of `N` and `h` is a proof of `I ≤ J`,
-- let `φ M N h` be the tensor product of the inclusion `I → J` and the identity `M → M`
def φ {I J : Submodule R N} (h : I ≤ J) : I ⊗[R] M →ₗ[R] J ⊗[R] M :=
  let h : ∀ x : I, I.subtype x ∈ J := fun x => h.subset x.property
  LinearMap.rTensor M (I.subtype.codRestrict J h)
-- alternative spelling "map (I.subtype.codRestrict J h) LinearMap.id"
-- Applying φ to a simple tensor `i ⊗ₜ m`
theorem φ_tmul {I J : Submodule R N} (h : I ≤ J) (i : ↥I) (m : M) :
    φ M N h (i ⊗ₜ[R] m) = (⟨i.val, h.subset i.property⟩ : J) ⊗ₜ m := by
  rewrite [φ, LinearMap.rTensor_tmul] ; rfl
-- φ respects (preserves? reflects? commutes with?) the preorder on submodules
theorem φ_comp {I J K : Submodule R N} (hIJ : I ≤ J) (hJK : J ≤ K) :
    φ M N hJK ∘ₗ φ M N hIJ = φ M N (hIJ.trans hJK) := by
  -- Next line should be `ext` but importing `TensorProduct.RightExactness` breaks it
  refine TensorProduct.ext (LinearMap.ext fun i => (LinearMap.ext fun m => ?_))
  simp? [φ_tmul] says simp only [LinearMap.compr₂_apply, mk_apply, LinearMap.coe_comp,
    Function.comp_apply, φ_tmul]
-- Trivial. The two linear maps concerned are related by composition with the equivalence `eM`.
open LinearMap (rTensor ext) in
theorem φ_top_injective_iff {I : Submodule R R} :
    Function.Injective (φ M R (le_top : I ≤ ⊤)) ↔
      Function.Injective (rTensor M I.subtype) := by
  -- `e : ⊤ ≃ₗ R`
  let e : (⊤ : Submodule R R) ≃ₗ[R] R := {
    (⊤ : Submodule R R).subtype with
    invFun := fun r => ⟨r, trivial⟩
    left_inv := fun _ => rfl
    right_inv := fun _ => rfl
  }
  -- `eM : ⊤ ⊗ M ≃ₗ R ⊗ M`.
  let eM : (⊤ : Submodule R R) ⊗[R] M ≃ₗ[R] R ⊗[R] M := LinearEquiv.ofLinear
    (rTensor M e) (rTensor M e.symm)
    -- Note: specify `ext` lemmas explicitly because others that work poorly here may be visible
    (TensorProduct.ext (ext fun i => (ext fun m => rfl)))
    (TensorProduct.ext (ext fun i => (ext fun m => rfl)))
  have h₁ : rTensor M I.subtype = eM ∘ₗ φ M R (le_top : I ≤ ⊤) := by
    refine TensorProduct.ext (LinearMap.ext fun i => (LinearMap.ext fun m => ?_))
    rewrite [LinearMap.compr₂_apply, LinearMap.compr₂_apply, mk_apply, LinearMap.rTensor_tmul,
      LinearMap.comp_apply, φ_tmul]
    rfl
  have h₂ : eM.symm ∘ₗ rTensor M I.subtype = φ M R (le_top : I ≤ ⊤) := by
    refine LinearMap.ext fun x => ?_
    rewrite [LinearMap.comp_apply, LinearEquiv.coe_coe, eM.symm_apply_eq, h₁]
    rfl
  constructor
  . rewrite [h₁] ; exact fun h => eM.injective.comp h
  . rewrite [← h₂] ; exact fun h => eM.symm.injective.comp h
-- If `I ≤ R` and `x : I ⊗ M`, there is an f.g. ideal `J ≤ I` such that `x` is
-- the image of some `y : J ⊗ M`
-- (cf. Dummit & Foote (ed. 3), pp. 400, 401, proof of Corollary 42)
variable {M} in
theorem exists_fg_le_eq_rTensor_subtype_codRestrict_apply {I : Submodule R R} (x : I ⊗ M) :
    ∃ (J : Submodule R R) (_ : J.FG) (hJ : J ≤ I) (y : J ⊗ M), x = φ M R hJ y := by
  induction x using TensorProduct.induction_on with
  | zero => exact ⟨⊥, ⟨∅, Finset.coe_empty ▸ Submodule.span_empty⟩, zero_le I, 0, rfl⟩
  | tmul i m =>
    have hle : (R ∙ i.val) ≤ I := (Submodule.span_singleton_le_iff_mem i.val I).mpr i.property
    let i' : (R ∙ i.val) := ⟨i.val, Submodule.mem_span_singleton_self _⟩
    exact ⟨(R ∙ i.val), Submodule.fg_span_singleton _, hle, i' ⊗ₜ m, by rw [φ_tmul]⟩
  | add x y ihx ihy =>
    have ⟨J₁, hFG₁, hJ₁, y₁, h₁⟩ := ihx
    have ⟨J₂, hFG₂, hJ₂, y₂, h₂⟩ := ihy
    let z := φ M R (le_sup_left : J₁ ≤ J₁ ⊔ J₂) y₁ + φ M R (le_sup_right : J₂ ≤ J₁ ⊔ J₂) y₂
    refine ⟨J₁ ⊔ J₂, Submodule.FG.sup hFG₁ hFG₂, sup_le hJ₁ hJ₂, z, ?_⟩
    unfold_let z
    rw [map_add, ← LinearMap.comp_apply, ← LinearMap.comp_apply, φ_comp, φ_comp, h₁, h₂]
-- If `φ M R ‹I ≤ ⊤› : I ⊗[R] M →ₗ[R] R ⊗[R] M` is injective for every finitely generated
-- ideal `I` of `R` then it is injective for every ideal `I` of `R`.
-- (Dummit & Foote (ed. 3), p. 406, part of exercise 10.25(b))
theorem rTensor_subtype_injective_of_forall_fg' :
    (∀ I : Submodule R R, I.FG → Function.Injective (φ M R (le_top : I ≤ ⊤))) →
    ∀ I : Submodule R R, Function.Injective (φ M R (le_top : I ≤ ⊤)) := by
  intro h I
  letI : AddCommGroup (I ⊗[R] M) := inferInstance -- injective_iff_map_eq_zero cannot find this
  rewrite [injective_iff_map_eq_zero]
  intro x hx₀
  have ⟨J, hfg, hle, y, hy⟩ := exists_fg_le_eq_rTensor_subtype_codRestrict_apply x
  apply (fun hy₀ => by rw [hy, hy₀, map_zero] : y = 0 → x = 0)
  rewrite [hy, ← LinearMap.comp_apply, φ_comp] at hx₀
  letI : AddCommGroup (J ⊗[R] M) := inferInstance -- injective_iff_map_eq_zero cannot find this
  exact (injective_iff_map_eq_zero _).mp (h J hfg) y hx₀
-- Simplify the statement of the previous result.
theorem rTensor_subtype_injective_of_forall_fg :
    (∀ I : Submodule R R, I.FG → Function.Injective (LinearMap.rTensor M I.subtype)) →
    ∀ I : Submodule R R, Function.Injective (LinearMap.rTensor M I.subtype) := by
  simp_rw [← φ_top_injective_iff]
  exact rTensor_subtype_injective_of_forall_fg' M
-- Remove the `FG` condition from `Module.Flat.iff_rTensor_injective`
theorem iff_rTensor_injective' :
    Module.Flat R M ↔ (∀ (I : Ideal R), Function.Injective (LinearMap.rTensor M I.subtype)) := by
  rewrite [Module.Flat.iff_rTensor_injective]
  constructor
  . exact rTensor_subtype_injective_of_forall_fg M
  . refine fun h I _ => h I
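As a quick sanity check of how the strengthened statement would be consumed (a sketch appended to the file above, using nothing beyond what it already imports):
-- Sketch: with the `FG` hypothesis removed, flatness gives injectivity of
-- `LinearMap.rTensor M I.subtype` for an arbitrary ideal `I`, not just a finitely
-- generated one.
example [Module.Flat R M] (I : Ideal R) :
    Function.Injective (LinearMap.rTensor M I.subtype) :=
  (iff_rTensor_injective' M).mp ‹Module.Flat R M› I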
Junyan Xu (Nov 16 2023 at 20:02):
Nice! I think you're now in a position to review (or co-author?) #7225 :wink:
Junyan Xu (Nov 16 2023 at 20:20):
You might also like to watch Jujian's talk: https://www.youtube.com/watch?v=Xp8gViooBXU
The Zulip thread is here
Last updated: Dec 20 2023 at 11:08 UTC