linear_algebra.tensor_power → Mathlib.LinearAlgebra.TensorPower

This file has been ported!

Changes since the initial port

The following section lists changes to this file in mathlib3 and mathlib4 that occurred after the initial port. The most recent changes are shown first. Hovering over a commit shows all commits associated with the same mathlib3 commit.

Changes in mathlib3

(no changes)

(last sync)

Changes in mathlib3port

Diff
@@ -174,7 +174,7 @@ theorem gradedMonoid_eq_of_cast {a b : GradedMonoid fun n => ⨂[R] i : Fin n, M
     (h2 : cast R M h a.snd = b.snd) : a = b :=
   by
   refine' graded_monoid_eq_of_reindex_cast h _
-  rw [cast] at h2 
+  rw [cast] at h2
   rw [← Fin.castIso_to_equiv, ← h2]
 #align tensor_power.graded_monoid_eq_of_cast TensorPower.gradedMonoid_eq_of_cast
 -/
@@ -212,7 +212,7 @@ theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a
   by
   rw [ghas_mul_def, ghas_one_def]
   induction' a using PiTensorProduct.induction_on with r a x y hx hy
-  · dsimp only at a 
+  · dsimp only at a
     rw [TensorProduct.tmul_smul, LinearEquiv.map_smul, LinearEquiv.map_smul, ← ghas_mul_def,
       tprod_mul_tprod, cast_tprod]
     congr 2 with i
@@ -228,7 +228,7 @@ theorem mul_one {n} (a : (⨂[R]^n) M) : cast R M (add_zero _) (a ₜ* ₜ1) = a
   by
   rw [ghas_mul_def, ghas_one_def]
   induction' a using PiTensorProduct.induction_on with r a x y hx hy
-  · dsimp only at a 
+  · dsimp only at a
     rw [← TensorProduct.smul_tmul', LinearEquiv.map_smul, LinearEquiv.map_smul, ← ghas_mul_def,
       tprod_mul_tprod R a _, cast_tprod]
     congr 2 with i
@@ -356,7 +356,7 @@ instance galgebra : DirectSum.GAlgebra R fun i => (⨂[R]^i) M
       (by
         have := (algebra_map₀_mul r x.snd).trans (mul_algebra_map₀ r x.snd).symm
         rw [← LinearEquiv.eq_symm_apply, cast_symm]
-        rw [← LinearEquiv.eq_symm_apply, cast_symm, cast_cast] at this 
+        rw [← LinearEquiv.eq_symm_apply, cast_symm, cast_cast] at this
         exact this)
   smul_def r x :=
     gradedMonoid_eq_of_cast (zero_add x.fst).symm
Diff
@@ -75,14 +75,14 @@ open PiTensorProduct
 
 #print TensorPower.gOne /-
 /-- As a graded monoid, `⨂[R]^i M` has a `1 : ⨂[R]^0 M`. -/
-instance gOne : GradedMonoid.GOne fun i => (⨂[R]^i) M where one := tprod R <| @Fin.elim0' M
+instance gOne : GradedMonoid.GOne fun i => (⨂[R]^i) M where one := tprod R <| @Fin.elim0 M
 #align tensor_power.ghas_one TensorPower.gOne
 -/
 
 local notation "ₜ1" => @GradedMonoid.GOne.one ℕ (fun i => (⨂[R]^i) M) _ _
 
 #print TensorPower.gOne_def /-
-theorem gOne_def : ₜ1 = tprod R (@Fin.elim0' M) :=
+theorem gOne_def : ₜ1 = tprod R (@Fin.elim0 M) :=
   rfl
 #align tensor_power.ghas_one_def TensorPower.gOne_def
 -/
@@ -216,7 +216,7 @@ theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a
     rw [TensorProduct.tmul_smul, LinearEquiv.map_smul, LinearEquiv.map_smul, ← ghas_mul_def,
       tprod_mul_tprod, cast_tprod]
     congr 2 with i
-    rw [Fin.elim0'_append]
+    rw [Fin.elim0_append]
     refine' congr_arg a (Fin.ext _)
     simp
   · rw [TensorProduct.tmul_add, map_add, map_add, hx, hy]
@@ -232,7 +232,7 @@ theorem mul_one {n} (a : (⨂[R]^n) M) : cast R M (add_zero _) (a ₜ* ₜ1) = a
     rw [← TensorProduct.smul_tmul', LinearEquiv.map_smul, LinearEquiv.map_smul, ← ghas_mul_def,
       tprod_mul_tprod R a _, cast_tprod]
     congr 2 with i
-    rw [Fin.append_elim0']
+    rw [Fin.append_elim0]
     refine' congr_arg a (Fin.ext _)
     simp
   · rw [TensorProduct.add_tmul, map_add, map_add, hx, hy]
Diff
@@ -3,9 +3,9 @@ Copyright (c) 2021 Eric Wieser. All rights reserved.
 Released under Apache 2.0 license as described in the file LICENSE.
 Authors: Eric Wieser
 -/
-import Mathbin.LinearAlgebra.PiTensorProduct
-import Mathbin.Logic.Equiv.Fin
-import Mathbin.Algebra.DirectSum.Algebra
+import LinearAlgebra.PiTensorProduct
+import Logic.Equiv.Fin
+import Algebra.DirectSum.Algebra
 
 #align_import linear_algebra.tensor_power from "leanprover-community/mathlib"@"575b4ea3738b017e30fb205cb9b4a8742e5e82b6"
 
Diff
@@ -262,7 +262,7 @@ theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R
   congr with j
   rw [Fin.append_assoc]
   refine' congr_arg (Fin.append a (Fin.append b c)) (Fin.ext _)
-  rw [Fin.coe_castIso, Fin.coe_castIso]
+  rw [Fin.coe_cast, Fin.coe_cast]
 #align tensor_power.mul_assoc TensorPower.mul_assoc
 -/
 
Diff
@@ -345,7 +345,7 @@ instance galgebra : DirectSum.GAlgebra R fun i => (⨂[R]^i) M
     where
   toFun := (algebraMap₀ : R ≃ₗ[R] (⨂[R]^0) M).toLinearMap.toAddMonoidHom
   map_one := algebraMap₀_one
-  map_mul r s :=
+  map_hMul r s :=
     gradedMonoid_eq_of_cast rfl
       (by
         rw [← LinearEquiv.eq_symm_apply]
Diff
@@ -2,16 +2,13 @@
 Copyright (c) 2021 Eric Wieser. All rights reserved.
 Released under Apache 2.0 license as described in the file LICENSE.
 Authors: Eric Wieser
-
-! This file was ported from Lean 3 source module linear_algebra.tensor_power
-! leanprover-community/mathlib commit 575b4ea3738b017e30fb205cb9b4a8742e5e82b6
-! Please do not edit these lines, except to modify the commit id
-! if you have ported upstream changes.
 -/
 import Mathbin.LinearAlgebra.PiTensorProduct
 import Mathbin.Logic.Equiv.Fin
 import Mathbin.Algebra.DirectSum.Algebra
 
+#align_import linear_algebra.tensor_power from "leanprover-community/mathlib"@"575b4ea3738b017e30fb205cb9b4a8742e5e82b6"
+
 /-!
 # Tensor power of a semimodule over a commutative semirings
 
Diff
@@ -128,13 +128,13 @@ variable (R M)
 #print TensorPower.cast /-
 /-- Cast between "equal" tensor powers. -/
 def cast {i j} (h : i = j) : (⨂[R]^i) M ≃ₗ[R] (⨂[R]^j) M :=
-  reindex R M (Fin.cast h).toEquiv
+  reindex R M (Fin.castIso h).toEquiv
 #align tensor_power.cast TensorPower.cast
 -/
 
 #print TensorPower.cast_tprod /-
 theorem cast_tprod {i j} (h : i = j) (a : Fin i → M) :
-    cast R M h (tprod R a) = tprod R (a ∘ Fin.cast h.symm) :=
+    cast R M h (tprod R a) = tprod R (a ∘ Fin.castIso h.symm) :=
   reindex_tprod _ _
 #align tensor_power.cast_tprod TensorPower.cast_tprod
 -/
@@ -142,7 +142,7 @@ theorem cast_tprod {i j} (h : i = j) (a : Fin i → M) :
 #print TensorPower.cast_refl /-
 @[simp]
 theorem cast_refl {i} (h : i = i) : cast R M h = LinearEquiv.refl _ _ :=
-  ((congr_arg fun f => reindex R M (RelIso.toEquiv f)) <| Fin.cast_refl h).trans reindex_refl
+  ((congr_arg fun f => reindex R M (RelIso.toEquiv f)) <| Fin.castIso_refl h).trans reindex_refl
 #align tensor_power.cast_refl TensorPower.cast_refl
 -/
 
@@ -178,7 +178,7 @@ theorem gradedMonoid_eq_of_cast {a b : GradedMonoid fun n => ⨂[R] i : Fin n, M
   by
   refine' graded_monoid_eq_of_reindex_cast h _
   rw [cast] at h2 
-  rw [← Fin.cast_to_equiv, ← h2]
+  rw [← Fin.castIso_to_equiv, ← h2]
 #align tensor_power.graded_monoid_eq_of_cast TensorPower.gradedMonoid_eq_of_cast
 -/
 
@@ -265,7 +265,7 @@ theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R
   congr with j
   rw [Fin.append_assoc]
   refine' congr_arg (Fin.append a (Fin.append b c)) (Fin.ext _)
-  rw [Fin.coe_cast, Fin.coe_cast]
+  rw [Fin.coe_castIso, Fin.coe_castIso]
 #align tensor_power.mul_assoc TensorPower.mul_assoc
 -/
 
Diff
@@ -259,7 +259,7 @@ theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R
   have rhs_eq : ∀ a b c, rhs a b c = a ₜ* (b ₜ* c) := fun _ _ _ => rfl
   suffices : lhs = rhs
   exact LinearMap.congr_fun (LinearMap.congr_fun (LinearMap.congr_fun this a) b) c
-  ext (a b c)
+  ext a b c
   -- clean up
   simp only [LinearMap.compMultilinearMap_apply, lhs_eq, rhs_eq, tprod_mul_tprod, e, cast_tprod]
   congr with j
Diff
@@ -50,11 +50,11 @@ protected def TensorPower (R : Type _) (n : ℕ) (M : Type _) [CommSemiring R] [
 
 variable {R : Type _} {M : Type _} [CommSemiring R] [AddCommMonoid M] [Module R M]
 
--- mathport name: tensor_power
 scoped[TensorProduct] notation:100 "⨂[" R "]^" n:arg => TensorPower R n
 
 namespace PiTensorProduct
 
+#print PiTensorProduct.gradedMonoid_eq_of_reindex_cast /-
 /-- Two dependent pairs of tensor products are equal if their index is equal and the contents
 are equal after a canonical reindexing. -/
 @[ext]
@@ -66,6 +66,7 @@ theorem gradedMonoid_eq_of_reindex_cast {ιι : Type _} {ι : ιι → Type _} :
     subst hi
     simpa using h
 #align pi_tensor_product.graded_monoid_eq_of_reindex_cast PiTensorProduct.gradedMonoid_eq_of_reindex_cast
+-/
 
 end PiTensorProduct
 
@@ -81,17 +82,20 @@ instance gOne : GradedMonoid.GOne fun i => (⨂[R]^i) M where one := tprod R <|
 #align tensor_power.ghas_one TensorPower.gOne
 -/
 
--- mathport name: exprₜ1
 local notation "ₜ1" => @GradedMonoid.GOne.one ℕ (fun i => (⨂[R]^i) M) _ _
 
+#print TensorPower.gOne_def /-
 theorem gOne_def : ₜ1 = tprod R (@Fin.elim0' M) :=
   rfl
 #align tensor_power.ghas_one_def TensorPower.gOne_def
+-/
 
+#print TensorPower.mulEquiv /-
 /-- A variant of `pi_tensor_prod.tmul_equiv` with the result indexed by `fin (n + m)`. -/
 def mulEquiv {n m : ℕ} : (⨂[R]^n) M ⊗[R] (⨂[R]^m) M ≃ₗ[R] (⨂[R]^(n + m)) M :=
   (tmulEquiv R M).trans (reindex R M finSumFinEquiv)
 #align tensor_power.mul_equiv TensorPower.mulEquiv
+-/
 
 #print TensorPower.gMul /-
 /-- As a graded monoid, `⨂[R]^i M` has a `(*) : ⨂[R]^i M → ⨂[R]^j M → ⨂[R]^(i + j) M`. -/
@@ -101,13 +105,15 @@ instance gMul : GradedMonoid.GMul fun i => (⨂[R]^i) M
 #align tensor_power.ghas_mul TensorPower.gMul
 -/
 
--- mathport name: «expr ₜ* »
 local infixl:70 " ₜ* " => @GradedMonoid.GMul.mul ℕ (fun i => (⨂[R]^i) M) _ _ _ _
 
+#print TensorPower.gMul_def /-
 theorem gMul_def {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) : a ₜ* b = mulEquiv (a ⊗ₜ b) :=
   rfl
 #align tensor_power.ghas_mul_def TensorPower.gMul_def
+-/
 
+#print TensorPower.gMul_eq_coe_linearMap /-
 theorem gMul_eq_coe_linearMap {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) :
     a ₜ* b =
       ((TensorProduct.mk R _ _).compr₂ ↑(mulEquiv : _ ≃ₗ[R] (⨂[R]^(i + j)) M) :
@@ -115,43 +121,57 @@ theorem gMul_eq_coe_linearMap {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) :
         a b :=
   rfl
 #align tensor_power.ghas_mul_eq_coe_linear_map TensorPower.gMul_eq_coe_linearMap
+-/
 
 variable (R M)
 
+#print TensorPower.cast /-
 /-- Cast between "equal" tensor powers. -/
 def cast {i j} (h : i = j) : (⨂[R]^i) M ≃ₗ[R] (⨂[R]^j) M :=
   reindex R M (Fin.cast h).toEquiv
 #align tensor_power.cast TensorPower.cast
+-/
 
+#print TensorPower.cast_tprod /-
 theorem cast_tprod {i j} (h : i = j) (a : Fin i → M) :
     cast R M h (tprod R a) = tprod R (a ∘ Fin.cast h.symm) :=
   reindex_tprod _ _
 #align tensor_power.cast_tprod TensorPower.cast_tprod
+-/
 
+#print TensorPower.cast_refl /-
 @[simp]
 theorem cast_refl {i} (h : i = i) : cast R M h = LinearEquiv.refl _ _ :=
   ((congr_arg fun f => reindex R M (RelIso.toEquiv f)) <| Fin.cast_refl h).trans reindex_refl
 #align tensor_power.cast_refl TensorPower.cast_refl
+-/
 
+#print TensorPower.cast_symm /-
 @[simp]
 theorem cast_symm {i j} (h : i = j) : (cast R M h).symm = cast R M h.symm :=
   reindex_symm _
 #align tensor_power.cast_symm TensorPower.cast_symm
+-/
 
+#print TensorPower.cast_trans /-
 @[simp]
 theorem cast_trans {i j k} (h : i = j) (h' : j = k) :
     (cast R M h).trans (cast R M h') = cast R M (h.trans h') :=
   reindex_trans _ _
 #align tensor_power.cast_trans TensorPower.cast_trans
+-/
 
 variable {R M}
 
+#print TensorPower.cast_cast /-
 @[simp]
 theorem cast_cast {i j k} (h : i = j) (h' : j = k) (a : (⨂[R]^i) M) :
     cast R M h' (cast R M h a) = cast R M (h.trans h') a :=
   reindex_reindex _ _ _
 #align tensor_power.cast_cast TensorPower.cast_cast
+-/
 
+#print TensorPower.gradedMonoid_eq_of_cast /-
 @[ext]
 theorem gradedMonoid_eq_of_cast {a b : GradedMonoid fun n => ⨂[R] i : Fin n, M} (h : a.fst = b.fst)
     (h2 : cast R M h a.snd = b.snd) : a = b :=
@@ -160,7 +180,9 @@ theorem gradedMonoid_eq_of_cast {a b : GradedMonoid fun n => ⨂[R] i : Fin n, M
   rw [cast] at h2 
   rw [← Fin.cast_to_equiv, ← h2]
 #align tensor_power.graded_monoid_eq_of_cast TensorPower.gradedMonoid_eq_of_cast
+-/
 
+#print TensorPower.cast_eq_cast /-
 -- named to match `fin.cast_eq_cast`
 theorem cast_eq_cast {i j} (h : i = j) : ⇑(cast R M h) = cast (congr_arg _ h) :=
   by
@@ -168,11 +190,11 @@ theorem cast_eq_cast {i j} (h : i = j) : ⇑(cast R M h) = cast (congr_arg _ h)
   rw [cast_refl]
   rfl
 #align tensor_power.cast_eq_cast TensorPower.cast_eq_cast
+-/
 
 variable (R)
 
-include R
-
+#print TensorPower.tprod_mul_tprod /-
 theorem tprod_mul_tprod {na nb} (a : Fin na → M) (b : Fin nb → M) :
     tprod R a ₜ* tprod R b = tprod R (Fin.append a b) :=
   by
@@ -184,11 +206,11 @@ theorem tprod_mul_tprod {na nb} (a : Fin na → M) (b : Fin nb → M) :
   apply funext
   apply Fin.addCases <;> simp
 #align tensor_power.tprod_mul_tprod TensorPower.tprod_mul_tprod
-
-omit R
+-/
 
 variable {R}
 
+#print TensorPower.one_mul /-
 theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a :=
   by
   rw [ghas_mul_def, ghas_one_def]
@@ -202,7 +224,9 @@ theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a
     simp
   · rw [TensorProduct.tmul_add, map_add, map_add, hx, hy]
 #align tensor_power.one_mul TensorPower.one_mul
+-/
 
+#print TensorPower.mul_one /-
 theorem mul_one {n} (a : (⨂[R]^n) M) : cast R M (add_zero _) (a ₜ* ₜ1) = a :=
   by
   rw [ghas_mul_def, ghas_one_def]
@@ -216,7 +240,9 @@ theorem mul_one {n} (a : (⨂[R]^n) M) : cast R M (add_zero _) (a ₜ* ₜ1) = a
     simp
   · rw [TensorProduct.add_tmul, map_add, map_add, hx, hy]
 #align tensor_power.mul_one TensorPower.mul_one
+-/
 
+#print TensorPower.mul_assoc /-
 theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R]^nc) M) :
     cast R M (add_assoc _ _ _) (a ₜ* b ₜ* c) = a ₜ* (b ₜ* c) :=
   by
@@ -241,6 +267,7 @@ theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R
   refine' congr_arg (Fin.append a (Fin.append b c)) (Fin.ext _)
   rw [Fin.coe_cast, Fin.coe_cast]
 #align tensor_power.mul_assoc TensorPower.mul_assoc
+-/
 
 #print TensorPower.gmonoid /-
 -- for now we just use the default for the `gnpow` field as it's easier.
@@ -253,38 +280,50 @@ instance gmonoid : GradedMonoid.GMonoid fun i => (⨂[R]^i) M :=
 #align tensor_power.gmonoid TensorPower.gmonoid
 -/
 
+#print TensorPower.algebraMap₀ /-
 /-- The canonical map from `R` to `⨂[R]^0 M` corresponding to the algebra_map of the tensor
 algebra. -/
 def algebraMap₀ : R ≃ₗ[R] (⨂[R]^0) M :=
   LinearEquiv.symm <| isEmptyEquiv (Fin 0)
 #align tensor_power.algebra_map₀ TensorPower.algebraMap₀
+-/
 
+#print TensorPower.algebraMap₀_eq_smul_one /-
 theorem algebraMap₀_eq_smul_one (r : R) : (algebraMap₀ r : (⨂[R]^0) M) = r • ₜ1 := by
   simp [algebra_map₀]; congr
 #align tensor_power.algebra_map₀_eq_smul_one TensorPower.algebraMap₀_eq_smul_one
+-/
 
+#print TensorPower.algebraMap₀_one /-
 theorem algebraMap₀_one : (algebraMap₀ 1 : (⨂[R]^0) M) = ₜ1 :=
   (algebraMap₀_eq_smul_one 1).trans (one_smul _ _)
 #align tensor_power.algebra_map₀_one TensorPower.algebraMap₀_one
+-/
 
+#print TensorPower.algebraMap₀_mul /-
 theorem algebraMap₀_mul {n} (r : R) (a : (⨂[R]^n) M) :
     cast R M (zero_add _) (algebraMap₀ r ₜ* a) = r • a := by
   rw [ghas_mul_eq_coe_linear_map, algebra_map₀_eq_smul_one, LinearMap.map_smul₂,
     LinearEquiv.map_smul, ← ghas_mul_eq_coe_linear_map, one_mul]
 #align tensor_power.algebra_map₀_mul TensorPower.algebraMap₀_mul
+-/
 
+#print TensorPower.mul_algebraMap₀ /-
 theorem mul_algebraMap₀ {n} (r : R) (a : (⨂[R]^n) M) :
     cast R M (add_zero _) (a ₜ* algebraMap₀ r) = r • a := by
   rw [ghas_mul_eq_coe_linear_map, algebra_map₀_eq_smul_one, LinearMap.map_smul,
     LinearEquiv.map_smul, ← ghas_mul_eq_coe_linear_map, mul_one]
 #align tensor_power.mul_algebra_map₀ TensorPower.mul_algebraMap₀
+-/
 
+#print TensorPower.algebraMap₀_mul_algebraMap₀ /-
 theorem algebraMap₀_mul_algebraMap₀ (r s : R) :
     cast R M (add_zero _) (algebraMap₀ r ₜ* algebraMap₀ s) = algebraMap₀ (r * s) :=
   by
   rw [← smul_eq_mul, LinearEquiv.map_smul]
   exact algebra_map₀_mul r (@algebra_map₀ R M _ _ _ s)
 #align tensor_power.algebra_map₀_mul_algebra_map₀ TensorPower.algebraMap₀_mul_algebraMap₀
+-/
 
 #print TensorPower.gsemiring /-
 instance gsemiring : DirectSum.GSemiring fun i => (⨂[R]^i) M :=
@@ -301,6 +340,7 @@ instance gsemiring : DirectSum.GSemiring fun i => (⨂[R]^i) M :=
 
 example : Semiring (⨁ n : ℕ, (⨂[R]^n) M) := by infer_instance
 
+#print TensorPower.galgebra /-
 /-- The tensor powers form a graded algebra.
 
 Note that this instance implies `algebra R (⨁ n : ℕ, ⨂[R]^n M)` via `direct_sum.algebra`. -/
@@ -327,11 +367,14 @@ instance galgebra : DirectSum.GAlgebra R fun i => (⨂[R]^i) M
         rw [← LinearEquiv.eq_symm_apply, cast_symm]
         exact (algebra_map₀_mul r x.snd).symm)
 #align tensor_power.galgebra TensorPower.galgebra
+-/
 
+#print TensorPower.galgebra_toFun_def /-
 theorem galgebra_toFun_def (r : R) :
     @DirectSum.GAlgebra.toFun ℕ R (fun i => (⨂[R]^i) M) _ _ _ _ _ _ _ r = algebraMap₀ r :=
   rfl
 #align tensor_power.galgebra_to_fun_def TensorPower.galgebra_toFun_def
+-/
 
 example : Algebra R (⨁ n : ℕ, (⨂[R]^n) M) := by infer_instance
 
Diff
@@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE.
 Authors: Eric Wieser
 
 ! This file was ported from Lean 3 source module linear_algebra.tensor_power
-! leanprover-community/mathlib commit ce11c3c2a285bbe6937e26d9792fda4e51f3fe1a
+! leanprover-community/mathlib commit 575b4ea3738b017e30fb205cb9b4a8742e5e82b6
 ! Please do not edit these lines, except to modify the commit id
 ! if you have ported upstream changes.
 -/
@@ -15,6 +15,9 @@ import Mathbin.Algebra.DirectSum.Algebra
 /-!
 # Tensor power of a semimodule over a commutative semirings
 
+> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
+> Any changes to this file require a corresponding PR to mathlib4.
+
 We define the `n`th tensor power of `M` as the n-ary tensor product indexed by `fin n` of `M`,
 `⨂[R] (i : fin n), M`. This is a special case of `pi_tensor_product`.
 
Diff
@@ -35,6 +35,7 @@ tensor powers. Elsewhere, using `1` and `*` on `graded_monoid` should be preferr
 
 open scoped TensorProduct
 
+#print TensorPower /-
 /-- Homogenous tensor powers $M^{\otimes n}$. `⨂[R]^n M` is a shorthand for
 `⨂[R] (i : fin n), M`. -/
 @[reducible]
@@ -42,6 +43,7 @@ protected def TensorPower (R : Type _) (n : ℕ) (M : Type _) [CommSemiring R] [
     [Module R M] : Type _ :=
   ⨂[R] i : Fin n, M
 #align tensor_power TensorPower
+-/
 
 variable {R : Type _} {M : Type _} [CommSemiring R] [AddCommMonoid M] [Module R M]
 
@@ -70,42 +72,46 @@ open scoped TensorProduct DirectSum
 
 open PiTensorProduct
 
+#print TensorPower.gOne /-
 /-- As a graded monoid, `⨂[R]^i M` has a `1 : ⨂[R]^0 M`. -/
-instance ghasOne : GradedMonoid.GOne fun i => (⨂[R]^i) M where one := tprod R <| @Fin.elim0' M
-#align tensor_power.ghas_one TensorPower.ghasOne
+instance gOne : GradedMonoid.GOne fun i => (⨂[R]^i) M where one := tprod R <| @Fin.elim0' M
+#align tensor_power.ghas_one TensorPower.gOne
+-/
 
 -- mathport name: exprₜ1
 local notation "ₜ1" => @GradedMonoid.GOne.one ℕ (fun i => (⨂[R]^i) M) _ _
 
-theorem ghasOne_def : ₜ1 = tprod R (@Fin.elim0' M) :=
+theorem gOne_def : ₜ1 = tprod R (@Fin.elim0' M) :=
   rfl
-#align tensor_power.ghas_one_def TensorPower.ghasOne_def
+#align tensor_power.ghas_one_def TensorPower.gOne_def
 
 /-- A variant of `pi_tensor_prod.tmul_equiv` with the result indexed by `fin (n + m)`. -/
 def mulEquiv {n m : ℕ} : (⨂[R]^n) M ⊗[R] (⨂[R]^m) M ≃ₗ[R] (⨂[R]^(n + m)) M :=
   (tmulEquiv R M).trans (reindex R M finSumFinEquiv)
 #align tensor_power.mul_equiv TensorPower.mulEquiv
 
+#print TensorPower.gMul /-
 /-- As a graded monoid, `⨂[R]^i M` has a `(*) : ⨂[R]^i M → ⨂[R]^j M → ⨂[R]^(i + j) M`. -/
-instance ghasMul : GradedMonoid.GMul fun i => (⨂[R]^i) M
+instance gMul : GradedMonoid.GMul fun i => (⨂[R]^i) M
     where mul i j a b :=
     (TensorProduct.mk R _ _).compr₂ (↑(mulEquiv : _ ≃ₗ[R] (⨂[R]^(i + j)) M)) a b
-#align tensor_power.ghas_mul TensorPower.ghasMul
+#align tensor_power.ghas_mul TensorPower.gMul
+-/
 
 -- mathport name: «expr ₜ* »
 local infixl:70 " ₜ* " => @GradedMonoid.GMul.mul ℕ (fun i => (⨂[R]^i) M) _ _ _ _
 
-theorem ghasMul_def {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) : a ₜ* b = mulEquiv (a ⊗ₜ b) :=
+theorem gMul_def {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) : a ₜ* b = mulEquiv (a ⊗ₜ b) :=
   rfl
-#align tensor_power.ghas_mul_def TensorPower.ghasMul_def
+#align tensor_power.ghas_mul_def TensorPower.gMul_def
 
-theorem ghasMul_eq_coe_linearMap {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) :
+theorem gMul_eq_coe_linearMap {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) :
     a ₜ* b =
       ((TensorProduct.mk R _ _).compr₂ ↑(mulEquiv : _ ≃ₗ[R] (⨂[R]^(i + j)) M) :
           (⨂[R]^i) M →ₗ[R] (⨂[R]^j) M →ₗ[R] (⨂[R]^(i + j)) M)
         a b :=
   rfl
-#align tensor_power.ghas_mul_eq_coe_linear_map TensorPower.ghasMul_eq_coe_linearMap
+#align tensor_power.ghas_mul_eq_coe_linear_map TensorPower.gMul_eq_coe_linearMap
 
 variable (R M)
 
@@ -233,14 +239,16 @@ theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R
   rw [Fin.coe_cast, Fin.coe_cast]
 #align tensor_power.mul_assoc TensorPower.mul_assoc
 
+#print TensorPower.gmonoid /-
 -- for now we just use the default for the `gnpow` field as it's easier.
 instance gmonoid : GradedMonoid.GMonoid fun i => (⨂[R]^i) M :=
-  { TensorPower.ghasMul,
-    TensorPower.ghasOne with
+  { TensorPower.gMul,
+    TensorPower.gOne with
     one_mul := fun a => gradedMonoid_eq_of_cast (zero_add _) (one_mul _)
     mul_one := fun a => gradedMonoid_eq_of_cast (add_zero _) (mul_one _)
     mul_assoc := fun a b c => gradedMonoid_eq_of_cast (add_assoc _ _ _) (mul_assoc _ _ _) }
 #align tensor_power.gmonoid TensorPower.gmonoid
+-/
 
 /-- The canonical map from `R` to `⨂[R]^0 M` corresponding to the algebra_map of the tensor
 algebra. -/
@@ -275,6 +283,7 @@ theorem algebraMap₀_mul_algebraMap₀ (r s : R) :
   exact algebra_map₀_mul r (@algebra_map₀ R M _ _ _ s)
 #align tensor_power.algebra_map₀_mul_algebra_map₀ TensorPower.algebraMap₀_mul_algebraMap₀
 
+#print TensorPower.gsemiring /-
 instance gsemiring : DirectSum.GSemiring fun i => (⨂[R]^i) M :=
   { TensorPower.gmonoid with
     mul_zero := fun i j a => LinearMap.map_zero _
@@ -285,6 +294,7 @@ instance gsemiring : DirectSum.GSemiring fun i => (⨂[R]^i) M :=
     natCast_zero := by rw [Nat.cast_zero, map_zero]
     natCast_succ := fun n => by rw [Nat.cast_succ, map_add, algebra_map₀_one] }
 #align tensor_power.gsemiring TensorPower.gsemiring
+-/
 
 example : Semiring (⨁ n : ℕ, (⨂[R]^n) M) := by infer_instance
 
Diff
@@ -148,7 +148,7 @@ theorem gradedMonoid_eq_of_cast {a b : GradedMonoid fun n => ⨂[R] i : Fin n, M
     (h2 : cast R M h a.snd = b.snd) : a = b :=
   by
   refine' graded_monoid_eq_of_reindex_cast h _
-  rw [cast] at h2
+  rw [cast] at h2 
   rw [← Fin.cast_to_equiv, ← h2]
 #align tensor_power.graded_monoid_eq_of_cast TensorPower.gradedMonoid_eq_of_cast
 
@@ -184,7 +184,7 @@ theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a
   by
   rw [ghas_mul_def, ghas_one_def]
   induction' a using PiTensorProduct.induction_on with r a x y hx hy
-  · dsimp only at a
+  · dsimp only at a 
     rw [TensorProduct.tmul_smul, LinearEquiv.map_smul, LinearEquiv.map_smul, ← ghas_mul_def,
       tprod_mul_tprod, cast_tprod]
     congr 2 with i
@@ -198,7 +198,7 @@ theorem mul_one {n} (a : (⨂[R]^n) M) : cast R M (add_zero _) (a ₜ* ₜ1) = a
   by
   rw [ghas_mul_def, ghas_one_def]
   induction' a using PiTensorProduct.induction_on with r a x y hx hy
-  · dsimp only at a
+  · dsimp only at a 
     rw [← TensorProduct.smul_tmul', LinearEquiv.map_smul, LinearEquiv.map_smul, ← ghas_mul_def,
       tprod_mul_tprod R a _, cast_tprod]
     congr 2 with i
@@ -306,7 +306,7 @@ instance galgebra : DirectSum.GAlgebra R fun i => (⨂[R]^i) M
       (by
         have := (algebra_map₀_mul r x.snd).trans (mul_algebra_map₀ r x.snd).symm
         rw [← LinearEquiv.eq_symm_apply, cast_symm]
-        rw [← LinearEquiv.eq_symm_apply, cast_symm, cast_cast] at this
+        rw [← LinearEquiv.eq_symm_apply, cast_symm, cast_cast] at this 
         exact this)
   smul_def r x :=
     gradedMonoid_eq_of_cast (zero_add x.fst).symm
Diff
@@ -33,7 +33,7 @@ tensor powers. Elsewhere, using `1` and `*` on `graded_monoid` should be preferr
 -/
 
 
-open TensorProduct
+open scoped TensorProduct
 
 /-- Homogenous tensor powers $M^{\otimes n}$. `⨂[R]^n M` is a shorthand for
 `⨂[R] (i : fin n), M`. -/
@@ -66,7 +66,7 @@ end PiTensorProduct
 
 namespace TensorPower
 
-open TensorProduct DirectSum
+open scoped TensorProduct DirectSum
 
 open PiTensorProduct
 
Diff
@@ -248,10 +248,8 @@ def algebraMap₀ : R ≃ₗ[R] (⨂[R]^0) M :=
   LinearEquiv.symm <| isEmptyEquiv (Fin 0)
 #align tensor_power.algebra_map₀ TensorPower.algebraMap₀
 
-theorem algebraMap₀_eq_smul_one (r : R) : (algebraMap₀ r : (⨂[R]^0) M) = r • ₜ1 :=
-  by
-  simp [algebra_map₀]
-  congr
+theorem algebraMap₀_eq_smul_one (r : R) : (algebraMap₀ r : (⨂[R]^0) M) = r • ₜ1 := by
+  simp [algebra_map₀]; congr
 #align tensor_power.algebra_map₀_eq_smul_one TensorPower.algebraMap₀_eq_smul_one
 
 theorem algebraMap₀_one : (algebraMap₀ 1 : (⨂[R]^0) M) = ₜ1 :=
Diff
@@ -293,7 +293,7 @@ example : Semiring (⨁ n : ℕ, (⨂[R]^n) M) := by infer_instance
 /-- The tensor powers form a graded algebra.
 
 Note that this instance implies `algebra R (⨁ n : ℕ, ⨂[R]^n M)` via `direct_sum.algebra`. -/
-instance galgebra : DirectSum.Galgebra R fun i => (⨂[R]^i) M
+instance galgebra : DirectSum.GAlgebra R fun i => (⨂[R]^i) M
     where
   toFun := (algebraMap₀ : R ≃ₗ[R] (⨂[R]^0) M).toLinearMap.toAddMonoidHom
   map_one := algebraMap₀_one
@@ -318,7 +318,7 @@ instance galgebra : DirectSum.Galgebra R fun i => (⨂[R]^i) M
 #align tensor_power.galgebra TensorPower.galgebra
 
 theorem galgebra_toFun_def (r : R) :
-    @DirectSum.Galgebra.toFun ℕ R (fun i => (⨂[R]^i) M) _ _ _ _ _ _ _ r = algebraMap₀ r :=
+    @DirectSum.GAlgebra.toFun ℕ R (fun i => (⨂[R]^i) M) _ _ _ _ _ _ _ r = algebraMap₀ r :=
   rfl
 #align tensor_power.galgebra_to_fun_def TensorPower.galgebra_toFun_def
 
Diff
@@ -277,7 +277,7 @@ theorem algebraMap₀_mul_algebraMap₀ (r s : R) :
   exact algebra_map₀_mul r (@algebra_map₀ R M _ _ _ s)
 #align tensor_power.algebra_map₀_mul_algebra_map₀ TensorPower.algebraMap₀_mul_algebraMap₀
 
-instance gsemiring : DirectSum.Gsemiring fun i => (⨂[R]^i) M :=
+instance gsemiring : DirectSum.GSemiring fun i => (⨂[R]^i) M :=
   { TensorPower.gmonoid with
     mul_zero := fun i j a => LinearMap.map_zero _
     zero_mul := fun i j b => LinearMap.map_zero₂ _ _
Diff
@@ -4,13 +4,13 @@ Released under Apache 2.0 license as described in the file LICENSE.
 Authors: Eric Wieser
 
 ! This file was ported from Lean 3 source module linear_algebra.tensor_power
-! leanprover-community/mathlib commit 70fd9563a21e7b963887c9360bd29b2393e6225a
+! leanprover-community/mathlib commit ce11c3c2a285bbe6937e26d9792fda4e51f3fe1a
 ! Please do not edit these lines, except to modify the commit id
 ! if you have ported upstream changes.
 -/
 import Mathbin.LinearAlgebra.PiTensorProduct
 import Mathbin.Logic.Equiv.Fin
-import Mathbin.Algebra.GradedMonoid
+import Mathbin.Algebra.DirectSum.Algebra
 
 /-!
 # Tensor power of a semimodule over a commutative semirings
@@ -23,13 +23,8 @@ abbreviation for `⨂[R] i : fin n, M`.
 
 ## Main definitions:
 
-* `tensor_power.ghas_one`
-* `tensor_power.ghas_mul`
-
-## TODO
-
-Show `direct_sum.galgebra R (λ i, ⨂[R]^i M)` and `algebra R (⨁ n : ℕ, ⨂[R]^n M)`.
-
+* `tensor_power.gsemiring`: the tensor powers form a graded semiring.
+* `tensor_power.galgebra`: the tensor powers form a graded algebra.
 
 ## Implementation notes
 
@@ -53,20 +48,36 @@ variable {R : Type _} {M : Type _} [CommSemiring R] [AddCommMonoid M] [Module R
 -- mathport name: tensor_power
 scoped[TensorProduct] notation:100 "⨂[" R "]^" n:arg => TensorPower R n
 
+namespace PiTensorProduct
+
+/-- Two dependent pairs of tensor products are equal if their index is equal and the contents
+are equal after a canonical reindexing. -/
+@[ext]
+theorem gradedMonoid_eq_of_reindex_cast {ιι : Type _} {ι : ιι → Type _} :
+    ∀ {a b : GradedMonoid fun ii => ⨂[R] i : ι ii, M} (h : a.fst = b.fst),
+      reindex R M (Equiv.cast <| congr_arg ι h) a.snd = b.snd → a = b
+  | ⟨ai, a⟩, ⟨bi, b⟩ => fun (hi : ai = bi) (h : reindex R M _ a = b) =>
+    by
+    subst hi
+    simpa using h
+#align pi_tensor_product.graded_monoid_eq_of_reindex_cast PiTensorProduct.gradedMonoid_eq_of_reindex_cast
+
+end PiTensorProduct
+
 namespace TensorPower
 
-open TensorProduct
+open TensorProduct DirectSum
 
 open PiTensorProduct
 
 /-- As a graded monoid, `⨂[R]^i M` has a `1 : ⨂[R]^0 M`. -/
-instance ghasOne : GradedMonoid.GOne fun i => (⨂[R]^i) M where one := tprod R Fin.elim0
+instance ghasOne : GradedMonoid.GOne fun i => (⨂[R]^i) M where one := tprod R <| @Fin.elim0' M
 #align tensor_power.ghas_one TensorPower.ghasOne
 
 -- mathport name: exprₜ1
 local notation "ₜ1" => @GradedMonoid.GOne.one ℕ (fun i => (⨂[R]^i) M) _ _
 
-theorem ghasOne_def : ₜ1 = tprod R Fin.elim0 :=
+theorem ghasOne_def : ₜ1 = tprod R (@Fin.elim0' M) :=
   rfl
 #align tensor_power.ghas_one_def TensorPower.ghasOne_def
 
@@ -76,7 +87,9 @@ def mulEquiv {n m : ℕ} : (⨂[R]^n) M ⊗[R] (⨂[R]^m) M ≃ₗ[R] (⨂[R]^(n
 #align tensor_power.mul_equiv TensorPower.mulEquiv
 
 /-- As a graded monoid, `⨂[R]^i M` has a `(*) : ⨂[R]^i M → ⨂[R]^j M → ⨂[R]^(i + j) M`. -/
-instance ghasMul : GradedMonoid.GMul fun i => (⨂[R]^i) M where mul i j a b := mulEquiv (a ⊗ₜ b)
+instance ghasMul : GradedMonoid.GMul fun i => (⨂[R]^i) M
+    where mul i j a b :=
+    (TensorProduct.mk R _ _).compr₂ (↑(mulEquiv : _ ≃ₗ[R] (⨂[R]^(i + j)) M)) a b
 #align tensor_power.ghas_mul TensorPower.ghasMul
 
 -- mathport name: «expr ₜ* »
@@ -86,5 +99,230 @@ theorem ghasMul_def {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) : a ₜ* b = mul
   rfl
 #align tensor_power.ghas_mul_def TensorPower.ghasMul_def
 
+theorem ghasMul_eq_coe_linearMap {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) :
+    a ₜ* b =
+      ((TensorProduct.mk R _ _).compr₂ ↑(mulEquiv : _ ≃ₗ[R] (⨂[R]^(i + j)) M) :
+          (⨂[R]^i) M →ₗ[R] (⨂[R]^j) M →ₗ[R] (⨂[R]^(i + j)) M)
+        a b :=
+  rfl
+#align tensor_power.ghas_mul_eq_coe_linear_map TensorPower.ghasMul_eq_coe_linearMap
+
+variable (R M)
+
+/-- Cast between "equal" tensor powers. -/
+def cast {i j} (h : i = j) : (⨂[R]^i) M ≃ₗ[R] (⨂[R]^j) M :=
+  reindex R M (Fin.cast h).toEquiv
+#align tensor_power.cast TensorPower.cast
+
+theorem cast_tprod {i j} (h : i = j) (a : Fin i → M) :
+    cast R M h (tprod R a) = tprod R (a ∘ Fin.cast h.symm) :=
+  reindex_tprod _ _
+#align tensor_power.cast_tprod TensorPower.cast_tprod
+
+@[simp]
+theorem cast_refl {i} (h : i = i) : cast R M h = LinearEquiv.refl _ _ :=
+  ((congr_arg fun f => reindex R M (RelIso.toEquiv f)) <| Fin.cast_refl h).trans reindex_refl
+#align tensor_power.cast_refl TensorPower.cast_refl
+
+@[simp]
+theorem cast_symm {i j} (h : i = j) : (cast R M h).symm = cast R M h.symm :=
+  reindex_symm _
+#align tensor_power.cast_symm TensorPower.cast_symm
+
+@[simp]
+theorem cast_trans {i j k} (h : i = j) (h' : j = k) :
+    (cast R M h).trans (cast R M h') = cast R M (h.trans h') :=
+  reindex_trans _ _
+#align tensor_power.cast_trans TensorPower.cast_trans
+
+variable {R M}
+
+@[simp]
+theorem cast_cast {i j k} (h : i = j) (h' : j = k) (a : (⨂[R]^i) M) :
+    cast R M h' (cast R M h a) = cast R M (h.trans h') a :=
+  reindex_reindex _ _ _
+#align tensor_power.cast_cast TensorPower.cast_cast
+
+@[ext]
+theorem gradedMonoid_eq_of_cast {a b : GradedMonoid fun n => ⨂[R] i : Fin n, M} (h : a.fst = b.fst)
+    (h2 : cast R M h a.snd = b.snd) : a = b :=
+  by
+  refine' graded_monoid_eq_of_reindex_cast h _
+  rw [cast] at h2
+  rw [← Fin.cast_to_equiv, ← h2]
+#align tensor_power.graded_monoid_eq_of_cast TensorPower.gradedMonoid_eq_of_cast
+
+-- named to match `fin.cast_eq_cast`
+theorem cast_eq_cast {i j} (h : i = j) : ⇑(cast R M h) = cast (congr_arg _ h) :=
+  by
+  subst h
+  rw [cast_refl]
+  rfl
+#align tensor_power.cast_eq_cast TensorPower.cast_eq_cast
+
+variable (R)
+
+include R
+
+theorem tprod_mul_tprod {na nb} (a : Fin na → M) (b : Fin nb → M) :
+    tprod R a ₜ* tprod R b = tprod R (Fin.append a b) :=
+  by
+  dsimp [ghas_mul_def, MulEquiv]
+  rw [tmul_equiv_apply R M a b]
+  refine' (reindex_tprod _ _).trans _
+  congr 1
+  dsimp only [Fin.append, finSumFinEquiv, Equiv.coe_fn_symm_mk]
+  apply funext
+  apply Fin.addCases <;> simp
+#align tensor_power.tprod_mul_tprod TensorPower.tprod_mul_tprod
+
+omit R
+
+variable {R}
+
+theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a :=
+  by
+  rw [ghas_mul_def, ghas_one_def]
+  induction' a using PiTensorProduct.induction_on with r a x y hx hy
+  · dsimp only at a
+    rw [TensorProduct.tmul_smul, LinearEquiv.map_smul, LinearEquiv.map_smul, ← ghas_mul_def,
+      tprod_mul_tprod, cast_tprod]
+    congr 2 with i
+    rw [Fin.elim0'_append]
+    refine' congr_arg a (Fin.ext _)
+    simp
+  · rw [TensorProduct.tmul_add, map_add, map_add, hx, hy]
+#align tensor_power.one_mul TensorPower.one_mul
+
+theorem mul_one {n} (a : (⨂[R]^n) M) : cast R M (add_zero _) (a ₜ* ₜ1) = a :=
+  by
+  rw [ghas_mul_def, ghas_one_def]
+  induction' a using PiTensorProduct.induction_on with r a x y hx hy
+  · dsimp only at a
+    rw [← TensorProduct.smul_tmul', LinearEquiv.map_smul, LinearEquiv.map_smul, ← ghas_mul_def,
+      tprod_mul_tprod R a _, cast_tprod]
+    congr 2 with i
+    rw [Fin.append_elim0']
+    refine' congr_arg a (Fin.ext _)
+    simp
+  · rw [TensorProduct.add_tmul, map_add, map_add, hx, hy]
+#align tensor_power.mul_one TensorPower.mul_one
+
+theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R]^nc) M) :
+    cast R M (add_assoc _ _ _) (a ₜ* b ₜ* c) = a ₜ* (b ₜ* c) :=
+  by
+  let mul : ∀ n m : ℕ, (⨂[R]^n) M →ₗ[R] (⨂[R]^m) M →ₗ[R] (⨂[R]^(n + m)) M := fun n m =>
+    (TensorProduct.mk R _ _).compr₂ ↑(MulEquiv : _ ≃ₗ[R] (⨂[R]^(n + m)) M)
+  -- replace `a`, `b`, `c` with `tprod R a`, `tprod R b`, `tprod R c`
+  let e : (⨂[R]^(na + nb + nc)) M ≃ₗ[R] (⨂[R]^(na + (nb + nc))) M := cast R M (add_assoc _ _ _)
+  let lhs : (⨂[R]^na) M →ₗ[R] (⨂[R]^nb) M →ₗ[R] (⨂[R]^nc) M →ₗ[R] (⨂[R]^(na + (nb + nc))) M :=
+    (LinearMap.llcomp R _ _ _ ((mul _ nc).compr₂ e.to_linear_map)).comp (mul na nb)
+  have lhs_eq : ∀ a b c, lhs a b c = e (a ₜ* b ₜ* c) := fun _ _ _ => rfl
+  let rhs : (⨂[R]^na) M →ₗ[R] (⨂[R]^nb) M →ₗ[R] (⨂[R]^nc) M →ₗ[R] (⨂[R]^(na + (nb + nc))) M :=
+    (LinearMap.llcomp R _ _ _ (LinearMap.lflip R _ _ _) <|
+        (LinearMap.llcomp R _ _ _ (mul na _).flip).comp (mul nb nc)).flip
+  have rhs_eq : ∀ a b c, rhs a b c = a ₜ* (b ₜ* c) := fun _ _ _ => rfl
+  suffices : lhs = rhs
+  exact LinearMap.congr_fun (LinearMap.congr_fun (LinearMap.congr_fun this a) b) c
+  ext (a b c)
+  -- clean up
+  simp only [LinearMap.compMultilinearMap_apply, lhs_eq, rhs_eq, tprod_mul_tprod, e, cast_tprod]
+  congr with j
+  rw [Fin.append_assoc]
+  refine' congr_arg (Fin.append a (Fin.append b c)) (Fin.ext _)
+  rw [Fin.coe_cast, Fin.coe_cast]
+#align tensor_power.mul_assoc TensorPower.mul_assoc
+
+-- for now we just use the default for the `gnpow` field as it's easier.
+instance gmonoid : GradedMonoid.GMonoid fun i => (⨂[R]^i) M :=
+  { TensorPower.ghasMul,
+    TensorPower.ghasOne with
+    one_mul := fun a => gradedMonoid_eq_of_cast (zero_add _) (one_mul _)
+    mul_one := fun a => gradedMonoid_eq_of_cast (add_zero _) (mul_one _)
+    mul_assoc := fun a b c => gradedMonoid_eq_of_cast (add_assoc _ _ _) (mul_assoc _ _ _) }
+#align tensor_power.gmonoid TensorPower.gmonoid
+
+/-- The canonical map from `R` to `⨂[R]^0 M` corresponding to the algebra_map of the tensor
+algebra. -/
+def algebraMap₀ : R ≃ₗ[R] (⨂[R]^0) M :=
+  LinearEquiv.symm <| isEmptyEquiv (Fin 0)
+#align tensor_power.algebra_map₀ TensorPower.algebraMap₀
+
+theorem algebraMap₀_eq_smul_one (r : R) : (algebraMap₀ r : (⨂[R]^0) M) = r • ₜ1 :=
+  by
+  simp [algebra_map₀]
+  congr
+#align tensor_power.algebra_map₀_eq_smul_one TensorPower.algebraMap₀_eq_smul_one
+
+theorem algebraMap₀_one : (algebraMap₀ 1 : (⨂[R]^0) M) = ₜ1 :=
+  (algebraMap₀_eq_smul_one 1).trans (one_smul _ _)
+#align tensor_power.algebra_map₀_one TensorPower.algebraMap₀_one
+
+theorem algebraMap₀_mul {n} (r : R) (a : (⨂[R]^n) M) :
+    cast R M (zero_add _) (algebraMap₀ r ₜ* a) = r • a := by
+  rw [ghas_mul_eq_coe_linear_map, algebra_map₀_eq_smul_one, LinearMap.map_smul₂,
+    LinearEquiv.map_smul, ← ghas_mul_eq_coe_linear_map, one_mul]
+#align tensor_power.algebra_map₀_mul TensorPower.algebraMap₀_mul
+
+theorem mul_algebraMap₀ {n} (r : R) (a : (⨂[R]^n) M) :
+    cast R M (add_zero _) (a ₜ* algebraMap₀ r) = r • a := by
+  rw [ghas_mul_eq_coe_linear_map, algebra_map₀_eq_smul_one, LinearMap.map_smul,
+    LinearEquiv.map_smul, ← ghas_mul_eq_coe_linear_map, mul_one]
+#align tensor_power.mul_algebra_map₀ TensorPower.mul_algebraMap₀
+
+theorem algebraMap₀_mul_algebraMap₀ (r s : R) :
+    cast R M (add_zero _) (algebraMap₀ r ₜ* algebraMap₀ s) = algebraMap₀ (r * s) :=
+  by
+  rw [← smul_eq_mul, LinearEquiv.map_smul]
+  exact algebra_map₀_mul r (@algebra_map₀ R M _ _ _ s)
+#align tensor_power.algebra_map₀_mul_algebra_map₀ TensorPower.algebraMap₀_mul_algebraMap₀
+
+instance gsemiring : DirectSum.Gsemiring fun i => (⨂[R]^i) M :=
+  { TensorPower.gmonoid with
+    mul_zero := fun i j a => LinearMap.map_zero _
+    zero_mul := fun i j b => LinearMap.map_zero₂ _ _
+    mul_add := fun i j a b₁ b₂ => LinearMap.map_add _ _ _
+    add_mul := fun i j a₁ a₂ b => LinearMap.map_add₂ _ _ _ _
+    natCast := fun n => algebraMap₀ (n : R)
+    natCast_zero := by rw [Nat.cast_zero, map_zero]
+    natCast_succ := fun n => by rw [Nat.cast_succ, map_add, algebra_map₀_one] }
+#align tensor_power.gsemiring TensorPower.gsemiring
+
+example : Semiring (⨁ n : ℕ, (⨂[R]^n) M) := by infer_instance
+
+/-- The tensor powers form a graded algebra.
+
+Note that this instance implies `algebra R (⨁ n : ℕ, ⨂[R]^n M)` via `direct_sum.algebra`. -/
+instance galgebra : DirectSum.Galgebra R fun i => (⨂[R]^i) M
+    where
+  toFun := (algebraMap₀ : R ≃ₗ[R] (⨂[R]^0) M).toLinearMap.toAddMonoidHom
+  map_one := algebraMap₀_one
+  map_mul r s :=
+    gradedMonoid_eq_of_cast rfl
+      (by
+        rw [← LinearEquiv.eq_symm_apply]
+        have := algebra_map₀_mul_algebra_map₀ r s
+        exact this.symm)
+  commutes r x :=
+    gradedMonoid_eq_of_cast (add_comm _ _)
+      (by
+        have := (algebra_map₀_mul r x.snd).trans (mul_algebra_map₀ r x.snd).symm
+        rw [← LinearEquiv.eq_symm_apply, cast_symm]
+        rw [← LinearEquiv.eq_symm_apply, cast_symm, cast_cast] at this
+        exact this)
+  smul_def r x :=
+    gradedMonoid_eq_of_cast (zero_add x.fst).symm
+      (by
+        rw [← LinearEquiv.eq_symm_apply, cast_symm]
+        exact (algebra_map₀_mul r x.snd).symm)
+#align tensor_power.galgebra TensorPower.galgebra
+
+theorem galgebra_toFun_def (r : R) :
+    @DirectSum.Galgebra.toFun ℕ R (fun i => (⨂[R]^i) M) _ _ _ _ _ _ _ r = algebraMap₀ r :=
+  rfl
+#align tensor_power.galgebra_to_fun_def TensorPower.galgebra_toFun_def
+
+example : Algebra R (⨁ n : ℕ, (⨂[R]^n) M) := by infer_instance
+
 end TensorPower
 

Changes in mathlib4

fix(LinearAlgebra/TensorPower): correct notation precedence (#11062)

This removes some ugly parens that were introduced during porting.
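For illustration, a minimal sketch of the effect (assuming Mathlib.LinearAlgebra.TensorPower is imported; this snippet is not part of the PR): at notation:max the notation binds tightly enough to be applied directly, so the parentheses inserted during porting can be dropped.

import Mathlib.LinearAlgebra.TensorPower

open scoped TensorProduct

-- `⨂[R]^n M` now parses as `(TensorPower R n) M`; before this fix the
-- parenthesized spelling `(⨂[R]^n) M` was required.
example {R M : Type*} [CommSemiring R] [AddCommMonoid M] [Module R M] (n : ℕ) :
    ⨂[R]^n M = TensorPower R n M :=
  rfl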

Diff
@@ -43,7 +43,7 @@ def TensorPower (R : Type*) (n : ℕ) (M : Type*) [CommSemiring R] [AddCommMonoi
 
 variable {R : Type*} {M : Type*} [CommSemiring R] [AddCommMonoid M] [Module R M]
 
-@[inherit_doc] scoped[TensorProduct] notation:100 "⨂[" R "]^" n:arg => TensorPower R n
+@[inherit_doc] scoped[TensorProduct] notation:max "⨂[" R "]^" n:arg => TensorPower R n
 
 namespace PiTensorProduct
 
@@ -67,43 +67,43 @@ open scoped TensorProduct DirectSum
 open PiTensorProduct
 
 /-- As a graded monoid, `⨂[R]^i M` has a `1 : ⨂[R]^0 M`. -/
-instance gOne : GradedMonoid.GOne fun i => (⨂[R]^i) M where one := tprod R <| @Fin.elim0 M
+instance gOne : GradedMonoid.GOne fun i => ⨂[R]^i M where one := tprod R <| @Fin.elim0 M
 #align tensor_power.ghas_one TensorPower.gOne
 
-local notation "ₜ1" => @GradedMonoid.GOne.one ℕ (fun i => (⨂[R]^i) M) _ _
+local notation "ₜ1" => @GradedMonoid.GOne.one ℕ (fun i => ⨂[R]^i M) _ _
 
 theorem gOne_def : ₜ1 = tprod R (@Fin.elim0 M) :=
   rfl
 #align tensor_power.ghas_one_def TensorPower.gOne_def
 
 /-- A variant of `PiTensorProduct.tmulEquiv` with the result indexed by `Fin (n + m)`. -/
-def mulEquiv {n m : ℕ} : (⨂[R]^n) M ⊗[R] (⨂[R]^m) M ≃ₗ[R] (⨂[R]^(n + m)) M :=
+def mulEquiv {n m : ℕ} : ⨂[R]^n M ⊗[R] (⨂[R]^m) M ≃ₗ[R] (⨂[R]^(n + m)) M :=
   (tmulEquiv R M).trans (reindex R (fun _ ↦ M) finSumFinEquiv)
 #align tensor_power.mul_equiv TensorPower.mulEquiv
 
 /-- As a graded monoid, `⨂[R]^i M` has a `(*) : ⨂[R]^i M → ⨂[R]^j M → ⨂[R]^(i + j) M`. -/
-instance gMul : GradedMonoid.GMul fun i => (⨂[R]^i) M where
+instance gMul : GradedMonoid.GMul fun i => ⨂[R]^i M where
   mul {i j} a b :=
     (TensorProduct.mk R _ _).compr₂ (↑(mulEquiv : _ ≃ₗ[R] (⨂[R]^(i + j)) M)) a b
 #align tensor_power.ghas_mul TensorPower.gMul
 
-local infixl:70 " ₜ* " => @GradedMonoid.GMul.mul ℕ (fun i => (⨂[R]^i) M) _ _ _ _
+local infixl:70 " ₜ* " => @GradedMonoid.GMul.mul ℕ (fun i => ⨂[R]^i M) _ _ _ _
 
-theorem gMul_def {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) :
+theorem gMul_def {i j} (a : ⨂[R]^i M) (b : (⨂[R]^j) M) :
     a ₜ* b = @mulEquiv R M _ _ _ i j (a ⊗ₜ b) :=
   rfl
 #align tensor_power.ghas_mul_def TensorPower.gMul_def
 
-theorem gMul_eq_coe_linearMap {i j} (a : (⨂[R]^i) M) (b : (⨂[R]^j) M) :
+theorem gMul_eq_coe_linearMap {i j} (a : ⨂[R]^i M) (b : (⨂[R]^j) M) :
     a ₜ* b = ((TensorProduct.mk R _ _).compr₂ ↑(mulEquiv : _ ≃ₗ[R] (⨂[R]^(i + j)) M) :
-      (⨂[R]^i) M →ₗ[R] (⨂[R]^j) M →ₗ[R] (⨂[R]^(i + j)) M) a b :=
+      ⨂[R]^i M →ₗ[R] (⨂[R]^j) M →ₗ[R] (⨂[R]^(i + j)) M) a b :=
   rfl
 #align tensor_power.ghas_mul_eq_coe_linear_map TensorPower.gMul_eq_coe_linearMap
 
 variable (R M)
 
 /-- Cast between "equal" tensor powers. -/
-def cast {i j} (h : i = j) : (⨂[R]^i) M ≃ₗ[R] (⨂[R]^j) M :=
+def cast {i j} (h : i = j) : ⨂[R]^i M ≃ₗ[R] (⨂[R]^j) M :=
   reindex R (fun _ ↦ M) (Fin.castIso h).toEquiv
 #align tensor_power.cast TensorPower.cast
 
@@ -132,7 +132,7 @@ theorem cast_trans {i j k} (h : i = j) (h' : j = k) :
 variable {R M}
 
 @[simp]
-theorem cast_cast {i j k} (h : i = j) (h' : j = k) (a : (⨂[R]^i) M) :
+theorem cast_cast {i j k} (h : i = j) (h' : j = k) (a : ⨂[R]^i M) :
     cast R M h' (cast R M h a) = cast R M (h.trans h') a :=
   reindex_reindex _ _ _
 #align tensor_power.cast_cast TensorPower.cast_cast
@@ -146,7 +146,7 @@ theorem gradedMonoid_eq_of_cast {a b : GradedMonoid fun n => ⨂[R] _ : Fin n, M
 #align tensor_power.graded_monoid_eq_of_cast TensorPower.gradedMonoid_eq_of_cast
 
 theorem cast_eq_cast {i j} (h : i = j) :
-    ⇑(cast R M h) = _root_.cast (congrArg (fun i => (⨂[R]^i) M) h) := by
+    ⇑(cast R M h) = _root_.cast (congrArg (fun i => ⨂[R]^i M) h) := by
   subst h
   rw [cast_refl]
   rfl
@@ -167,7 +167,7 @@ theorem tprod_mul_tprod {na nb} (a : Fin na → M) (b : Fin nb → M) :
 
 variable {R}
 
-theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a := by
+theorem one_mul {n} (a : ⨂[R]^n M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a := by
   rw [gMul_def, gOne_def]
   induction a using PiTensorProduct.induction_on with
   | smul_tprod r a =>
@@ -181,7 +181,7 @@ theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a
     rw [TensorProduct.tmul_add, map_add, map_add, hx, hy]
 #align tensor_power.one_mul TensorPower.one_mul
 
-theorem mul_one {n} (a : (⨂[R]^n) M) : cast R M (add_zero _) (a ₜ* ₜ1) = a := by
+theorem mul_one {n} (a : ⨂[R]^n M) : cast R M (add_zero _) (a ₜ* ₜ1) = a := by
   rw [gMul_def, gOne_def]
   induction a using PiTensorProduct.induction_on with
   | smul_tprod r a =>
@@ -197,7 +197,7 @@ theorem mul_one {n} (a : (⨂[R]^n) M) : cast R M (add_zero _) (a ₜ* ₜ1) = a
 
 theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R]^nc) M) :
     cast R M (add_assoc _ _ _) (a ₜ* b ₜ* c) = a ₜ* (b ₜ* c) := by
-  let mul : ∀ n m : ℕ, (⨂[R]^n) M →ₗ[R] (⨂[R]^m) M →ₗ[R] (⨂[R]^(n + m)) M := fun n m =>
+  let mul : ∀ n m : ℕ, ⨂[R]^n M →ₗ[R] (⨂[R]^m) M →ₗ[R] (⨂[R]^(n + m)) M := fun n m =>
     (TensorProduct.mk R _ _).compr₂ ↑(mulEquiv : _ ≃ₗ[R] (⨂[R]^(n + m)) M)
   -- replace `a`, `b`, `c` with `tprod R a`, `tprod R b`, `tprod R c`
   let e : (⨂[R]^(na + nb + nc)) M ≃ₗ[R] (⨂[R]^(na + (nb + nc))) M := cast R M (add_assoc _ _ _)
@@ -220,7 +220,7 @@ theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R
 #align tensor_power.mul_assoc TensorPower.mul_assoc
 
 -- for now we just use the default for the `gnpow` field as it's easier.
-instance gmonoid : GradedMonoid.GMonoid fun i => (⨂[R]^i) M :=
+instance gmonoid : GradedMonoid.GMonoid fun i => ⨂[R]^i M :=
   { TensorPower.gMul, TensorPower.gOne with
     one_mul := fun a => gradedMonoid_eq_of_cast (zero_add _) (one_mul _)
     mul_one := fun a => gradedMonoid_eq_of_cast (add_zero _) (mul_one _)
@@ -241,13 +241,13 @@ theorem algebraMap₀_one : (algebraMap₀ 1 : (⨂[R]^0) M) = ₜ1 :=
   (algebraMap₀_eq_smul_one 1).trans (one_smul _ _)
 #align tensor_power.algebra_map₀_one TensorPower.algebraMap₀_one
 
-theorem algebraMap₀_mul {n} (r : R) (a : (⨂[R]^n) M) :
+theorem algebraMap₀_mul {n} (r : R) (a : ⨂[R]^n M) :
     cast R M (zero_add _) (algebraMap₀ r ₜ* a) = r • a := by
   rw [gMul_eq_coe_linearMap, algebraMap₀_eq_smul_one, LinearMap.map_smul₂,
     LinearEquiv.map_smul, ← gMul_eq_coe_linearMap, one_mul]
 #align tensor_power.algebra_map₀_mul TensorPower.algebraMap₀_mul
 
-theorem mul_algebraMap₀ {n} (r : R) (a : (⨂[R]^n) M) :
+theorem mul_algebraMap₀ {n} (r : R) (a : ⨂[R]^n M) :
     cast R M (add_zero _) (a ₜ* algebraMap₀ r) = r • a := by
   rw [gMul_eq_coe_linearMap, algebraMap₀_eq_smul_one, LinearMap.map_smul,
     LinearEquiv.map_smul, ← gMul_eq_coe_linearMap, mul_one]
@@ -259,7 +259,7 @@ theorem algebraMap₀_mul_algebraMap₀ (r s : R) :
   exact algebraMap₀_mul r (@algebraMap₀ R M _ _ _ s)
 #align tensor_power.algebra_map₀_mul_algebra_map₀ TensorPower.algebraMap₀_mul_algebraMap₀
 
-instance gsemiring : DirectSum.GSemiring fun i => (⨂[R]^i) M :=
+instance gsemiring : DirectSum.GSemiring fun i => ⨂[R]^i M :=
   { TensorPower.gmonoid with
     mul_zero := fun a => LinearMap.map_zero _
     zero_mul := fun b => LinearMap.map_zero₂ _ _
@@ -270,12 +270,12 @@ instance gsemiring : DirectSum.GSemiring fun i => (⨂[R]^i) M :=
     natCast_succ := fun n => by simp only [Nat.cast_succ, map_add, algebraMap₀_one] }
 #align tensor_power.gsemiring TensorPower.gsemiring
 
-example : Semiring (⨁ n : ℕ, (⨂[R]^n) M) := by infer_instance
+example : Semiring (⨁ n : ℕ, ⨂[R]^n M) := by infer_instance
 
 /-- The tensor powers form a graded algebra.
 
 Note that this instance implies `Algebra R (⨁ n : ℕ, ⨂[R]^n M)` via `DirectSum.Algebra`. -/
-instance galgebra : DirectSum.GAlgebra R fun i => (⨂[R]^i) M where
+instance galgebra : DirectSum.GAlgebra R fun i => ⨂[R]^i M where
   toFun := (algebraMap₀ : R ≃ₗ[R] (⨂[R]^0) M).toLinearMap.toAddMonoidHom
   map_one := algebraMap₀_one
   map_mul r s := gradedMonoid_eq_of_cast rfl (by
@@ -293,10 +293,10 @@ instance galgebra : DirectSum.GAlgebra R fun i => (⨂[R]^i) M where
 #align tensor_power.galgebra TensorPower.galgebra
 
 theorem galgebra_toFun_def (r : R) :
-    @DirectSum.GAlgebra.toFun ℕ R (fun i => (⨂[R]^i) M) _ _ _ _ _ _ _ r = algebraMap₀ r :=
+    @DirectSum.GAlgebra.toFun ℕ R (fun i => ⨂[R]^i M) _ _ _ _ _ _ _ r = algebraMap₀ r :=
   rfl
 #align tensor_power.galgebra_to_fun_def TensorPower.galgebra_toFun_def
 
-example : Algebra R (⨁ n : ℕ, (⨂[R]^n) M) := by infer_instance
+example : Algebra R (⨁ n : ℕ, ⨂[R]^n M) := by infer_instance
 
 end TensorPower
chore: prepare Lean version bump with explicit simp (#10999)

Co-authored-by: Scott Morrison <scott.morrison@gmail.com>
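A hedged sketch of the pattern being prepared for (hypothetical names, not from the PR): presumably the newer Lean no longer unfolds local let-definitions inside simp automatically, so they have to be listed explicitly, which is why e is added to the simp only set in the diff below.

example (x : ℕ) : True := by
  let e := x + 0
  -- the local definition `e` must be listed for `simp only` to unfold it
  have _he : e = x := by simp only [e, Nat.add_zero]
  trivial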

Diff
@@ -212,7 +212,7 @@ theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R
     LinearMap.congr_fun (LinearMap.congr_fun (LinearMap.congr_fun this a) b) c
   ext a b c
   -- clean up
-  simp only [LinearMap.compMultilinearMap_apply, lhs_eq, rhs_eq, tprod_mul_tprod, cast_tprod]
+  simp only [e, LinearMap.compMultilinearMap_apply, lhs_eq, rhs_eq, tprod_mul_tprod, cast_tprod]
   congr with j
   rw [Fin.append_assoc]
   refine' congr_arg (Fin.append a (Fin.append b c)) (Fin.ext _)
chore: rename arguments to PiTensorProduct induction principles (#10904)

This looks much nicer in the induction tactic than C1 and Cp, as in:

induction a using PiTensorProduct.induction_on with
| smul_tprod r a => sorry
| add x y hx hy => sorry
Diff
@@ -169,26 +169,30 @@ variable {R}
 
 theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a := by
   rw [gMul_def, gOne_def]
-  induction' a using PiTensorProduct.induction_on with r a x y hx hy
-  · rw [TensorProduct.tmul_smul, LinearEquiv.map_smul, LinearEquiv.map_smul, ← gMul_def,
+  induction a using PiTensorProduct.induction_on with
+  | smul_tprod r a =>
+    rw [TensorProduct.tmul_smul, LinearEquiv.map_smul, LinearEquiv.map_smul, ← gMul_def,
       tprod_mul_tprod, cast_tprod]
     congr 2 with i
     rw [Fin.elim0_append]
     refine' congr_arg a (Fin.ext _)
     simp
-  · rw [TensorProduct.tmul_add, map_add, map_add, hx, hy]
+  | add x y hx hy =>
+    rw [TensorProduct.tmul_add, map_add, map_add, hx, hy]
 #align tensor_power.one_mul TensorPower.one_mul
 
 theorem mul_one {n} (a : (⨂[R]^n) M) : cast R M (add_zero _) (a ₜ* ₜ1) = a := by
   rw [gMul_def, gOne_def]
-  induction' a using PiTensorProduct.induction_on with r a x y hx hy
-  · rw [← TensorProduct.smul_tmul', LinearEquiv.map_smul, LinearEquiv.map_smul, ← gMul_def,
+  induction a using PiTensorProduct.induction_on with
+  | smul_tprod r a =>
+    rw [← TensorProduct.smul_tmul', LinearEquiv.map_smul, LinearEquiv.map_smul, ← gMul_def,
       tprod_mul_tprod R a _, cast_tprod]
     congr 2 with i
     rw [Fin.append_elim0]
     refine' congr_arg a (Fin.ext _)
     simp
-  · rw [TensorProduct.add_tmul, map_add, map_add, hx, hy]
+  | add x y hx hy =>
+    rw [TensorProduct.add_tmul, map_add, map_add, hx, hy]
 #align tensor_power.mul_one TensorPower.mul_one
 
 theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R]^nc) M) :
feat: Dependent eliminator for Fin 0 (#10850)

Fin.elim0 and Fin.elim0' are exactly the same function (the non-dependent eliminator for Fin 0), and we were missing the dependent version (I suspect that Fin.elim0 originally was dependent and became non-dependent upon landing in Std).

From PFR
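
For illustration, here is a minimal Lean sketch of the distinction, using hypothetical names (elim0Nondep, elim0Dep) rather than the actual Std/Mathlib declarations:

universe u

-- Non-dependent eliminator: the result type `C` is fixed in advance.
def elim0Nondep {C : Sort u} : Fin 0 → C :=
  fun i => (Nat.not_lt_zero _ i.isLt).elim

-- Dependent eliminator: the motive `C` may mention the (impossible) index.
def elim0Dep {C : Fin 0 → Sort u} : (i : Fin 0) → C i :=
  fun i => (Nat.not_lt_zero _ i.isLt).elim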

Diff
@@ -67,12 +67,12 @@ open scoped TensorProduct DirectSum
 open PiTensorProduct
 
 /-- As a graded monoid, `⨂[R]^i M` has a `1 : ⨂[R]^0 M`. -/
-instance gOne : GradedMonoid.GOne fun i => (⨂[R]^i) M where one := tprod R <| @Fin.elim0' M
+instance gOne : GradedMonoid.GOne fun i => (⨂[R]^i) M where one := tprod R <| @Fin.elim0 M
 #align tensor_power.ghas_one TensorPower.gOne
 
 local notation "ₜ1" => @GradedMonoid.GOne.one ℕ (fun i => (⨂[R]^i) M) _ _
 
-theorem gOne_def : ₜ1 = tprod R (@Fin.elim0' M) :=
+theorem gOne_def : ₜ1 = tprod R (@Fin.elim0 M) :=
   rfl
 #align tensor_power.ghas_one_def TensorPower.gOne_def
 
@@ -173,7 +173,7 @@ theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a
   · rw [TensorProduct.tmul_smul, LinearEquiv.map_smul, LinearEquiv.map_smul, ← gMul_def,
       tprod_mul_tprod, cast_tprod]
     congr 2 with i
-    rw [Fin.elim0'_append]
+    rw [Fin.elim0_append]
     refine' congr_arg a (Fin.ext _)
     simp
   · rw [TensorProduct.tmul_add, map_add, map_add, hx, hy]
@@ -185,7 +185,7 @@ theorem mul_one {n} (a : (⨂[R]^n) M) : cast R M (add_zero _) (a ₜ* ₜ1) = a
   · rw [← TensorProduct.smul_tmul', LinearEquiv.map_smul, LinearEquiv.map_smul, ← gMul_def,
       tprod_mul_tprod R a _, cast_tprod]
     congr 2 with i
-    rw [Fin.append_elim0']
+    rw [Fin.append_elim0]
     refine' congr_arg a (Fin.ext _)
     simp
   · rw [TensorProduct.add_tmul, map_add, map_add, hx, hy]
chore: remove stream-of-consciousness uses of have, replace and suffices (#10640)

No changes to the tactic file; these are just boring fixes throughout the library.

This follows on from #6964.

Co-authored-by: sgouezel <sebastien.gouezel@univ-rennes1.fr> Co-authored-by: Eric Wieser <wieser.eric@gmail.com>
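
As a small illustration of the style change (a toy example under assumed hypotheses, not taken from this file):

example (p q : Prop) (hpq : p → q) (hp : p) : q := by
  -- Old "stream-of-consciousness" style:
  --   suffices : p
  --   exact hpq this
  -- New structured style, which states up front how the reduced goal is used:
  suffices h : p from hpq h
  exact hp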

Diff
@@ -204,8 +204,8 @@ theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R
     (LinearMap.llcomp R _ _ _ (LinearMap.lflip (R := R)) <|
         (LinearMap.llcomp R _ _ _ (mul na _).flip).comp (mul nb nc)).flip
   have rhs_eq : ∀ a b c, rhs a b c = a ₜ* (b ₜ* c) := fun _ _ _ => rfl
-  suffices : lhs = rhs
-  exact LinearMap.congr_fun (LinearMap.congr_fun (LinearMap.congr_fun this a) b) c
+  suffices lhs = rhs from
+    LinearMap.congr_fun (LinearMap.congr_fun (LinearMap.congr_fun this a) b) c
   ext a b c
   -- clean up
   simp only [LinearMap.compMultilinearMap_apply, lhs_eq, rhs_eq, tprod_mul_tprod, cast_tprod]
doc: @[inherit_doc] on notations (#9942)

Make all the notations that unambiguously should inherit the docstring of their definition actually inherit it.

Also write a few docstrings by hand. I only wrote the ones I was competent to write and which I was sure of. Some docstrings were recovered from mathlib3, as they had been lost during the early port.

This PR is only intended as a first pass. There are many more docstrings to add.
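
A minimal sketch of the attribute in action, with a toy definition and notation (not from this file); the notation picks up the docstring of the definition it unfolds to:

/-- Twice a natural number. -/
def double (n : Nat) : Nat := 2 * n

-- The notation inherits the docstring of `double`.
@[inherit_doc] notation:max "⟪" n "⟫" => double n

#check ⟪3⟫  -- : Nat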

Diff
@@ -43,7 +43,7 @@ def TensorPower (R : Type*) (n : ℕ) (M : Type*) [CommSemiring R] [AddCommMonoi
 
 variable {R : Type*} {M : Type*} [CommSemiring R] [AddCommMonoid M] [Module R M]
 
-scoped[TensorProduct] notation:100 "⨂[" R "]^" n:arg => TensorPower R n
+@[inherit_doc] scoped[TensorProduct] notation:100 "⨂[" R "]^" n:arg => TensorPower R n
 
 namespace PiTensorProduct
 
feat(LinearAlgebra/PiTensorProduct): make reindex dependently typed (#9445)

It used to be (⨂[R] _ : ι, M) ≃ₗ[R] ⨂[R] _ : ι₂, M; now M can vary according to the indexing set.

Co-authored-by: Eric Wieser <wieser.eric@gmail.com>
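
Schematically, and only as a paraphrase of the shape of the change (not the exact Mathlib signature):

-- before: `reindex R M e` was stated for a single module `M`:
--   (⨂[R] _ : ι, M) ≃ₗ[R] ⨂[R] _ : ι₂, M
-- after: the module argument is a family over the index type, so a constant
--   family is spelled `fun _ ↦ M`, as in `reindex R (fun _ ↦ M) e` in the diff below.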

Diff
@@ -52,8 +52,8 @@ are equal after a canonical reindexing. -/
 @[ext]
 theorem gradedMonoid_eq_of_reindex_cast {ιι : Type*} {ι : ιι → Type*} :
     ∀ {a b : GradedMonoid fun ii => ⨂[R] _ : ι ii, M} (h : a.fst = b.fst),
-      reindex R M (Equiv.cast <| congr_arg ι h) a.snd = b.snd → a = b
-  | ⟨ai, a⟩, ⟨bi, b⟩ => fun (hi : ai = bi) (h : reindex R M _ a = b) => by
+      reindex R (fun _ ↦ M) (Equiv.cast <| congr_arg ι h) a.snd = b.snd → a = b
+  | ⟨ai, a⟩, ⟨bi, b⟩ => fun (hi : ai = bi) (h : reindex R (fun _ ↦ M) _ a = b) => by
     subst hi
     simp_all
 #align pi_tensor_product.graded_monoid_eq_of_reindex_cast PiTensorProduct.gradedMonoid_eq_of_reindex_cast
@@ -78,7 +78,7 @@ theorem gOne_def : ₜ1 = tprod R (@Fin.elim0' M) :=
 
 /-- A variant of `PiTensorProduct.tmulEquiv` with the result indexed by `Fin (n + m)`. -/
 def mulEquiv {n m : ℕ} : (⨂[R]^n) M ⊗[R] (⨂[R]^m) M ≃ₗ[R] (⨂[R]^(n + m)) M :=
-  (tmulEquiv R M).trans (reindex R M finSumFinEquiv)
+  (tmulEquiv R M).trans (reindex R (fun _ ↦ M) finSumFinEquiv)
 #align tensor_power.mul_equiv TensorPower.mulEquiv
 
 /-- As a graded monoid, `⨂[R]^i M` has a `(*) : ⨂[R]^i M → ⨂[R]^j M → ⨂[R]^(i + j) M`. -/
@@ -104,7 +104,7 @@ variable (R M)
 
 /-- Cast between "equal" tensor powers. -/
 def cast {i j} (h : i = j) : (⨂[R]^i) M ≃ₗ[R] (⨂[R]^j) M :=
-  reindex R M (Fin.castIso h).toEquiv
+  reindex R (fun _ ↦ M) (Fin.castIso h).toEquiv
 #align tensor_power.cast TensorPower.cast
 
 theorem cast_tprod {i j} (h : i = j) (a : Fin i → M) :
@@ -114,7 +114,8 @@ theorem cast_tprod {i j} (h : i = j) (a : Fin i → M) :
 
 @[simp]
 theorem cast_refl {i} (h : i = i) : cast R M h = LinearEquiv.refl _ _ :=
-  ((congr_arg fun f => reindex R M (RelIso.toEquiv f)) <| Fin.castIso_refl h).trans reindex_refl
+  ((congr_arg fun f => reindex R (fun _ ↦ M) (RelIso.toEquiv f)) <| Fin.castIso_refl h).trans
+    reindex_refl
 #align tensor_power.cast_refl TensorPower.cast_refl
 
 @[simp]
feat: use suppress_compilation in tensor products (#7504)

More principled version of #7281.
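
As a rough sketch of how the command is used (my paraphrase of its effect, assuming the Mathlib file providing `suppress_compilation` is imported):

suppress_compilation

-- Definitions after the command are elaborated as if marked `noncomputable`,
-- so the compiler does not generate executable code for them; this avoids the
-- very slow compilation of large tensor-product constructions.
def heavyDef : Nat := 0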

Diff
@@ -29,6 +29,7 @@ In this file we use `ₜ1` and `ₜ*` as local notation for the graded multiplic
 tensor powers. Elsewhere, using `1` and `*` on `GradedMonoid` should be preferred.
 -/
 
+suppress_compilation
 
 open scoped TensorProduct
 
chore: replace Fin.castIso and Fin.revPerm with Fin.cast and Fin.rev for the bump of Std (#5847)

Some theorems in Data.Fin.Basic were copied to Std in a recent Std commit. These are written using Fin.cast and Fin.rev, so declarations using Fin.castIso and Fin.revPerm in Mathlib should be rewritten accordingly.

Co-authored-by: Pol'tta / Miyahara Kō <52843868+Komyyy@users.noreply.github.com> Co-authored-by: Johan Commelin <johan@commelin.net>
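
A minimal sketch of the two renamed functions in use (standalone toy examples):

-- `Fin.cast` transports an index along an equality of bounds.
example (h : 3 = 2 + 1) (i : Fin 3) : Fin (2 + 1) := Fin.cast h i

-- `Fin.rev` reverses an index, sending `i` to `n - 1 - i`.
example (i : Fin 5) : Fin 5 := i.rev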

Diff
@@ -107,7 +107,7 @@ def cast {i j} (h : i = j) : (⨂[R]^i) M ≃ₗ[R] (⨂[R]^j) M :=
 #align tensor_power.cast TensorPower.cast
 
 theorem cast_tprod {i j} (h : i = j) (a : Fin i → M) :
-    cast R M h (tprod R a) = tprod R (a ∘ Fin.castIso h.symm) :=
+    cast R M h (tprod R a) = tprod R (a ∘ Fin.cast h.symm) :=
   reindex_tprod _ _
 #align tensor_power.cast_tprod TensorPower.cast_tprod
 
@@ -143,7 +143,6 @@ theorem gradedMonoid_eq_of_cast {a b : GradedMonoid fun n => ⨂[R] _ : Fin n, M
   rw [← Fin.castIso_to_equiv, ← h2]
 #align tensor_power.graded_monoid_eq_of_cast TensorPower.gradedMonoid_eq_of_cast
 
--- named to match `Fin.cast_eq_cast`, which is now `Fin.castIso_eq_cast`
 theorem cast_eq_cast {i j} (h : i = j) :
     ⇑(cast R M h) = _root_.cast (congrArg (fun i => (⨂[R]^i) M) h) := by
   subst h
@@ -211,7 +210,7 @@ theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R
   congr with j
   rw [Fin.append_assoc]
   refine' congr_arg (Fin.append a (Fin.append b c)) (Fin.ext _)
-  rw [Fin.coe_castIso, Fin.coe_castIso]
+  rw [Fin.coe_cast, Fin.coe_cast]
 #align tensor_power.mul_assoc TensorPower.mul_assoc
 
 -- for now we just use the default for the `gnpow` field as it's easier.
chore: update/remove heart beat bumps (#6860)

We clean up heartbeat bumps after #6474.
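
For context, a local bump looks like the following (toy example); the commit removes one such bump that is no longer needed after #6474:

-- Raise the elaboration budget for the next declaration only.
set_option maxHeartbeats 400000 in
example : 2 + 2 = 4 := rfl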

Diff
@@ -254,7 +254,6 @@ theorem algebraMap₀_mul_algebraMap₀ (r s : R) :
   exact algebraMap₀_mul r (@algebraMap₀ R M _ _ _ s)
 #align tensor_power.algebra_map₀_mul_algebra_map₀ TensorPower.algebraMap₀_mul_algebraMap₀
 
-set_option maxHeartbeats 250000 in
 instance gsemiring : DirectSum.GSemiring fun i => (⨂[R]^i) M :=
   { TensorPower.gmonoid with
     mul_zero := fun a => LinearMap.map_zero _
chore: remove unused simps (#6632)

Co-authored-by: Eric Wieser <wieser.eric@gmail.com>

Diff
@@ -169,8 +169,7 @@ variable {R}
 theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a := by
   rw [gMul_def, gOne_def]
   induction' a using PiTensorProduct.induction_on with r a x y hx hy
-  · dsimp only at a
-    rw [TensorProduct.tmul_smul, LinearEquiv.map_smul, LinearEquiv.map_smul, ← gMul_def,
+  · rw [TensorProduct.tmul_smul, LinearEquiv.map_smul, LinearEquiv.map_smul, ← gMul_def,
       tprod_mul_tprod, cast_tprod]
     congr 2 with i
     rw [Fin.elim0'_append]
@@ -182,8 +181,7 @@ theorem one_mul {n} (a : (⨂[R]^n) M) : cast R M (zero_add n) (ₜ1 ₜ* a) = a
 theorem mul_one {n} (a : (⨂[R]^n) M) : cast R M (add_zero _) (a ₜ* ₜ1) = a := by
   rw [gMul_def, gOne_def]
   induction' a using PiTensorProduct.induction_on with r a x y hx hy
-  · dsimp only at a
-    rw [← TensorProduct.smul_tmul', LinearEquiv.map_smul, LinearEquiv.map_smul, ← gMul_def,
+  · rw [← TensorProduct.smul_tmul', LinearEquiv.map_smul, LinearEquiv.map_smul, ← gMul_def,
       tprod_mul_tprod R a _, cast_tprod]
     congr 2 with i
     rw [Fin.append_elim0']
chore: banish Type _ and Sort _ (#6499)

We remove all possible occurrences of Type _ and Sort _ in favor of Type* and Sort*.

This has nice performance benefits.
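
A small sketch of the two spellings (this assumes a Mathlib import, which provides the `Type*` notation):

-- `Type*` binds a fresh universe parameter for each occurrence.
variable {R : Type*} {M : Type*}

-- `Type _` instead leaves a universe metavariable for the elaborator to solve,
-- which can be slower and less predictable.
variable {R' : Type _} {M' : Type _}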

Diff
@@ -35,12 +35,12 @@ open scoped TensorProduct
 /-- Homogenous tensor powers $M^{\otimes n}$. `⨂[R]^n M` is a shorthand for
 `⨂[R] (i : Fin n), M`. -/
 @[reducible]
-def TensorPower (R : Type _) (n : ℕ) (M : Type _) [CommSemiring R] [AddCommMonoid M]
+def TensorPower (R : Type*) (n : ℕ) (M : Type*) [CommSemiring R] [AddCommMonoid M]
     [Module R M] : Type _ :=
   ⨂[R] _ : Fin n, M
 #align tensor_power TensorPower
 
-variable {R : Type _} {M : Type _} [CommSemiring R] [AddCommMonoid M] [Module R M]
+variable {R : Type*} {M : Type*} [CommSemiring R] [AddCommMonoid M] [Module R M]
 
 scoped[TensorProduct] notation:100 "⨂[" R "]^" n:arg => TensorPower R n
 
@@ -49,7 +49,7 @@ namespace PiTensorProduct
 /-- Two dependent pairs of tensor products are equal if their index is equal and the contents
 are equal after a canonical reindexing. -/
 @[ext]
-theorem gradedMonoid_eq_of_reindex_cast {ιι : Type _} {ι : ιι → Type _} :
+theorem gradedMonoid_eq_of_reindex_cast {ιι : Type*} {ι : ιι → Type*} :
     ∀ {a b : GradedMonoid fun ii => ⨂[R] _ : ι ii, M} (h : a.fst = b.fst),
       reindex R M (Equiv.cast <| congr_arg ι h) a.snd = b.snd → a = b
   | ⟨ai, a⟩, ⟨bi, b⟩ => fun (hi : ai = bi) (h : reindex R M _ a = b) => by
chore: script to replace headers with #align_import statements (#5979)


Co-authored-by: Eric Wieser <wieser.eric@gmail.com> Co-authored-by: Scott Morrison <scott.morrison@gmail.com>

Diff
@@ -2,16 +2,13 @@
 Copyright (c) 2021 Eric Wieser. All rights reserved.
 Released under Apache 2.0 license as described in the file LICENSE.
 Authors: Eric Wieser
-
-! This file was ported from Lean 3 source module linear_algebra.tensor_power
-! leanprover-community/mathlib commit ce11c3c2a285bbe6937e26d9792fda4e51f3fe1a
-! Please do not edit these lines, except to modify the commit id
-! if you have ported upstream changes.
 -/
 import Mathlib.LinearAlgebra.PiTensorProduct
 import Mathlib.Logic.Equiv.Fin
 import Mathlib.Algebra.DirectSum.Algebra
 
+#align_import linear_algebra.tensor_power from "leanprover-community/mathlib"@"ce11c3c2a285bbe6937e26d9792fda4e51f3fe1a"
+
 /-!
 # Tensor power of a semimodule over a commutative semiring
 
chore: rename Fin.cast to Fin.castIso (#5584)

Co-authored-by: Parcly Taxel <reddeloostw@gmail.com>

Diff
@@ -106,17 +106,17 @@ variable (R M)
 
 /-- Cast between "equal" tensor powers. -/
 def cast {i j} (h : i = j) : (⨂[R]^i) M ≃ₗ[R] (⨂[R]^j) M :=
-  reindex R M (Fin.cast h).toEquiv
+  reindex R M (Fin.castIso h).toEquiv
 #align tensor_power.cast TensorPower.cast
 
 theorem cast_tprod {i j} (h : i = j) (a : Fin i → M) :
-    cast R M h (tprod R a) = tprod R (a ∘ Fin.cast h.symm) :=
+    cast R M h (tprod R a) = tprod R (a ∘ Fin.castIso h.symm) :=
   reindex_tprod _ _
 #align tensor_power.cast_tprod TensorPower.cast_tprod
 
 @[simp]
 theorem cast_refl {i} (h : i = i) : cast R M h = LinearEquiv.refl _ _ :=
-  ((congr_arg fun f => reindex R M (RelIso.toEquiv f)) <| Fin.cast_refl h).trans reindex_refl
+  ((congr_arg fun f => reindex R M (RelIso.toEquiv f)) <| Fin.castIso_refl h).trans reindex_refl
 #align tensor_power.cast_refl TensorPower.cast_refl
 
 @[simp]
@@ -143,10 +143,10 @@ theorem gradedMonoid_eq_of_cast {a b : GradedMonoid fun n => ⨂[R] _ : Fin n, M
     (h2 : cast R M h a.snd = b.snd) : a = b := by
   refine' gradedMonoid_eq_of_reindex_cast h _
   rw [cast] at h2
-  rw [← Fin.cast_to_equiv, ← h2]
+  rw [← Fin.castIso_to_equiv, ← h2]
 #align tensor_power.graded_monoid_eq_of_cast TensorPower.gradedMonoid_eq_of_cast
 
--- named to match `Fin.cast_eq_cast`
+-- named to match `Fin.cast_eq_cast`, which is now `Fin.castIso_eq_cast`
 theorem cast_eq_cast {i j} (h : i = j) :
     ⇑(cast R M h) = _root_.cast (congrArg (fun i => (⨂[R]^i) M) h) := by
   subst h
@@ -216,7 +216,7 @@ theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R
   congr with j
   rw [Fin.append_assoc]
   refine' congr_arg (Fin.append a (Fin.append b c)) (Fin.ext _)
-  rw [Fin.coe_cast, Fin.coe_cast]
+  rw [Fin.coe_castIso, Fin.coe_castIso]
 #align tensor_power.mul_assoc TensorPower.mul_assoc
 
 -- for now we just use the default for the `gnpow` field as it's easier.
chore: remove superfluous parentheses in calls to ext (#5258)

Co-authored-by: Xavier Roblot <46200072+xroblot@users.noreply.github.com> Co-authored-by: Joël Riou <joel.riou@universite-paris-saclay.fr> Co-authored-by: Riccardo Brasca <riccardo.brasca@gmail.com> Co-authored-by: Yury G. Kudryashov <urkud@urkud.name> Co-authored-by: Scott Morrison <scott.morrison@anu.edu.au> Co-authored-by: Scott Morrison <scott.morrison@gmail.com> Co-authored-by: Jeremy Tan Jie Rui <reddeloostw@gmail.com> Co-authored-by: Pol'tta / Miyahara Kō <pol_tta@outlook.jp> Co-authored-by: Jason Yuen <jason_yuen2007@hotmail.com> Co-authored-by: Mario Carneiro <di.gama@gmail.com> Co-authored-by: Jireh Loreaux <loreaujy@gmail.com> Co-authored-by: Ruben Van de Velde <65514131+Ruben-VandeVelde@users.noreply.github.com> Co-authored-by: Kyle Miller <kmill31415@gmail.com> Co-authored-by: Heather Macbeth <25316162+hrmacbeth@users.noreply.github.com> Co-authored-by: Jujian Zhang <jujian.zhang1998@outlook.com> Co-authored-by: Yaël Dillies <yael.dillies@gmail.com>

Diff
@@ -210,7 +210,7 @@ theorem mul_assoc {na nb nc} (a : (⨂[R]^na) M) (b : (⨂[R]^nb) M) (c : (⨂[R
   have rhs_eq : ∀ a b c, rhs a b c = a ₜ* (b ₜ* c) := fun _ _ _ => rfl
   suffices : lhs = rhs
   exact LinearMap.congr_fun (LinearMap.congr_fun (LinearMap.congr_fun this a) b) c
-  ext (a b c)
+  ext a b c
   -- clean up
   simp only [LinearMap.compMultilinearMap_apply, lhs_eq, rhs_eq, tprod_mul_tprod, cast_tprod]
   congr with j
style: allow _ for an argument in notation3 & replace _foo with _ in notation3 (#4652)
Diff
@@ -40,7 +40,7 @@ open scoped TensorProduct
 @[reducible]
 def TensorPower (R : Type _) (n : ℕ) (M : Type _) [CommSemiring R] [AddCommMonoid M]
     [Module R M] : Type _ :=
-  ⨂[R] _i : Fin n, M
+  ⨂[R] _ : Fin n, M
 #align tensor_power TensorPower
 
 variable {R : Type _} {M : Type _} [CommSemiring R] [AddCommMonoid M] [Module R M]
@@ -53,7 +53,7 @@ namespace PiTensorProduct
 are equal after a canonical reindexing. -/
 @[ext]
 theorem gradedMonoid_eq_of_reindex_cast {ιι : Type _} {ι : ιι → Type _} :
-    ∀ {a b : GradedMonoid fun ii => ⨂[R] _i : ι ii, M} (h : a.fst = b.fst),
+    ∀ {a b : GradedMonoid fun ii => ⨂[R] _ : ι ii, M} (h : a.fst = b.fst),
       reindex R M (Equiv.cast <| congr_arg ι h) a.snd = b.snd → a = b
   | ⟨ai, a⟩, ⟨bi, b⟩ => fun (hi : ai = bi) (h : reindex R M _ a = b) => by
     subst hi
@@ -139,7 +139,7 @@ theorem cast_cast {i j k} (h : i = j) (h' : j = k) (a : (⨂[R]^i) M) :
 #align tensor_power.cast_cast TensorPower.cast_cast
 
 @[ext]
-theorem gradedMonoid_eq_of_cast {a b : GradedMonoid fun n => ⨂[R] _i : Fin n, M} (h : a.fst = b.fst)
+theorem gradedMonoid_eq_of_cast {a b : GradedMonoid fun n => ⨂[R] _ : Fin n, M} (h : a.fst = b.fst)
     (h2 : cast R M h a.snd = b.snd) : a = b := by
   refine' gradedMonoid_eq_of_reindex_cast h _
   rw [cast] at h2
feat: port LinearAlgebra.TensorPower (#4648)

Dependencies 8 + 434

435 files ported (98.2%)
181326 lines ported (98.3%)

The unported dependencies are