% chapter3.tex --- solutions to Chapter III (Rings and modules)
\section{Chapter III\hspace{0.2em} Rings and modules}
\subsection{\textsection1. Definition of ring}
\begin{problem}[1.1]
$\vartriangleright$ Prove that if $0 = 1$ in a ring $R$, then $R$ is a zero-ring. [\textsection1.2]
\end{problem}
\begin{solution}
For any $x$ in the ring $R$, we have
\[
1\cdot x=x,\qquad 0\cdot x=0.
\]
Since $0 = 1$ we see that $x=0$, which implies $R$ is a ring with only one element $0$.
\end{solution}
\hypertarget{Exercise III.1.2}{}
\begin{problem}[1.2]
$\neg$ Let $S$ be a set, and define operations on the power set $\mathscr{P}(S)$ of $S$ by setting $\forall A,B \in \mathscr{P}(S)$
\[
A+B :=(A \cup B) \backslash(A \cap B) \quad, \quad A \cdot B=A \cap B
\]
Prove that $(\mathscr{P}(S),+,\cdot)$ is a commutative ring. [2.3, 3.15]
\end{problem}
\begin{solution}
First, we need to check that $(\mathscr{P}(S),+)$ is an abelian group:
\begin{itemize}
\item associativity:
\begin{align*}
&\hspace{1em}(A+B)+C\\
&=((A \cup B) \backslash(A \cap B))+C\\
&=((A \cup B) \cap(A^C \cup B^C))+C\\
&=(A\cap(A^C \cup B^C) )\cup (B\cap(A^C \cup B^C)) +C\\
&=(A \cap B^C) \cup(A^C \cap B)+C\\
&=(((A \cap B^C) \cup(A^C \cap B)) \cap C^C) \cup(((A \cap B^C) \cup(A^C \cap B))^C \cap C)\\
&=((A \cap B^C\cap C^C )\cup(A^C \cap B\cap C^C) ) \cup((A^C \cup B) \cap(A\cup B^C)\cap C)\\
&=((A \cap B^C\cap C^C )\cup(A^C \cap B\cap C^C) ) \cup(((A^C\cap B^C) \cup( A\cap B)) \cap C)\\
&=(A \cap B^C\cap C^C )\cup(A^C \cap B\cap C^C) \cup(A^C\cap B^C\cap C) \cup( A\cap B\cap C) \\
&=(A \cap ((B \cap C) \cup(B^C \cap C^C))) \cup((A^C \cap B \cap C^C) \cup (A^C \cap B^C \cap C))\\
&=(A \cap (B^C \cup C) \cap(B \cup C^C)) \cup((A^C \cap B \cap C^C) \cup (A^C \cap B^C \cap C))\\
&=(A \cap ((B \cap C^C) \cup(B^C \cap C))^C) \cup(A^C \cap ((B \cap C^C) \cup(B^C \cap C)))\\
&=A+((B \cap C^C) \cup(B^C \cap C))\\
&=A+(B+C);
\end{align*}
\item commutativity:
\[
A+B =(A \cup B) \backslash(A \cap B)=(B \cup A) \backslash(B \cap A)= B+A;
\]
\item additive identity: the additive identity is $\varnothing$ since
\[
A+\varnothing=(A \cup \varnothing) \backslash(A \cap \varnothing)=A\backslash\varnothing=A;
\]
\item inverse: the inverse of some set $A$ is just itself since
\[
A+A=(A \cup A) \backslash(A \cap A)=A\backslash A=\varnothing.
\]
\end{itemize}
Then we have to show that $(\mathscr{P}(S),\cdot)$ is a commutative monoid, which clearly holds with the multiplicative identity $S$. What is left to show is the distributive properties and the check is straightforward.
\begin{align*}
&\hspace{1em}(A+B)\cdot C\\
&=((A \cap B^C) \cup(A^C \cap B))\cap C\\
&=(A \cap B^C\cap C) \cup(A^C \cap B\cap C)\\
&=(A\cap C \cap (B^C\cup C^C)) \cup((A^C\cup C^C) \cap (B\cap C))\\
&=(A\cap C \cap (B\cap C)^C) \cup((A\cap C)^C \cap (B\cap C))\\
&=A\cdot C+B\cdot C.
\end{align*}
\end{solution}
\begin{problem}[1.3]
$\neg$ Let $R$ be a ring, and let $S$ be any set. Explain how to endow the set $R^S$ of set-functions $S\to R$ with two operations $+$, $\cdot$ so as to make $R^S$ into a ring, such that $R^S$ is just a copy of $R$ if $S$ is a singleton. [2.3]
\end{problem}
\begin{solution}
To make $(R^S,+,\cdot )$ a ring, for all $f,g\in R^S$ we define addition and multiplication as
\begin{align*}
f+g&:S\longrightarrow R,\quad x\longmapsto f(x)+g(x)\\
f\cdot g&:S\longrightarrow R,\quad x\longmapsto f(x)\cdot g(x).
\end{align*}
The ring axioms for $R^S$ then follow pointwise from the corresponding axioms in $R$; the additive and multiplicative identities are the constant functions $s\mapsto 0$ and $s\mapsto 1$. If $S=\{s\}$ is a singleton, the evaluation map $f\longmapsto f(s)$ is a ring isomorphism $R^S\to R$, so $R^S$ is just a copy of $R$.
\end{solution}
\hypertarget{Exercise III.1.4}{}
\begin{problem}[1.4]
$\vartriangleright$ The set of $n\times n$ matrices with entries in a ring $R$ is denoted $\mathcal{M}_n(R)$. Prove that componentwise addition and matrix multiplication make $\mathcal{M}_n(R)$ into a ring, for any ring $R$. The notation $\mathfrak{gl}_n(R)$ is also commonly used, especially for $R=\mathbb{R}$ or $\mathbb{C}$ (although this indicates one is considering them as \emph{Lie algebras}) in parallel with the analogous notation for the corresponding groups of units, cf. \hyperlink{Exercise II.6.1}{Exercise II.6.1}. In
fact, the parallel continues with the definition of the following sets of matrices:
\begin{itemize}
\item $\mathfrak{sl}_n(\mathbb{R}) = \{M \in \mathfrak{gl}_n(\mathbb{R}) | \mathrm{tr}(M) = 0\}$;
\item $\mathfrak{sl}_n(\mathbb{C}) = \{M \in \mathfrak{gl}_n(\mathbb{C}) | \mathrm{tr}(M) = 0\}$;
\item $\mathfrak{so}_n(\mathbb{R}) = \{M \in \mathfrak{sl}_n(\mathbb{R}) |M +M^t = 0\}$;
\item $\mathfrak{su}(n) = \{M \in \mathfrak{sl}_n(\mathbb{C}) |M +M^\dag = 0\}$.
\end{itemize}
Here $\mathrm{tr}(M)$ is the trace of $M$, that is, the sum of its diagonal entries. The other notation matches the notation used in \hyperlink{Exercise II.6.1}{Exercise II.6.1}. Can we make rings of these sets, by endowing them with ordinary addition and multiplication of matrices? (These sets are all Lie algebras, cf. \hyperlink{Exercise VI.1.4}{Exercise VI.1.4}.) [\textsection1.2, 2.4, 5.9, VI.1.2, VI.1.4]
\end{problem}
\begin{solution}
It is plain to show $\mathcal{M}_n(R)$ is a ring according to the definition. For multiplicative associativity, it follows that for all $A,B,C\in\mathcal{M}_n(R)$,
\begin{align*}
&\hspace{1em}((A B) C)_{\alpha, \delta}\\
&=\sum_{i=1}^{n}(A B)_{\alpha, i} c_{i, \delta}\\
&=\sum_{i=1}^{n}\left(\sum_{j=1}^{n} a_{\alpha, j} b_{j, i}\right) c_{i, \delta}\\
&=\sum_{i=1}^{n} \sum_{j=1}^{n}\left(a_{\alpha, j} b_{j, i}\right) c_{i, \delta}\\
&=\sum_{j=1}^{n} \sum_{i=1}^{n} a_{\alpha, j}\left(b_{j, i} c_{i, \delta}\right)\\
&=\sum_{j=1}^{n} a_{\alpha, j}\left(\sum_{i=1}^{n} b_{j, i} c_{i, \delta}\right)\\
&=\sum_{j=1}^{n} a_{\alpha, j}(B C)_{j, \delta}\\
&=(A(B C))_{\alpha, \delta}.
\end{align*}
Under the ordinary addition and multiplication of matrices, $\mathfrak{sl}_n(\mathbb{R}),\mathfrak{sl}_n(\mathbb{C}),\mathfrak{so}_n(\mathbb{R}),\mathfrak{su}(n)$ are not rings. In fact, they are not closed under multiplication: for instance, the product of two traceless matrices need not be traceless.
\end{solution}
\begin{problem}[1.5]
Let $R$ be a ring. If $a, b$ are zero-divisors in $R$, is $a+b$ necessarily a zero-divisor?
\end{problem}
\begin{solution}
That is not true. Let's take $\mathbb{Z}/6\mathbb{Z}$ as a counterexample. Though both $[2]_6$ and $[3]_6$ are zero-divisors, their sum $[5]_6$ is not a zero-divisor.
\end{solution}
\hypertarget{Exercise III.1.6}{}
\begin{problem}[1.6]
$\neg$ An element $a$ of a ring $R$ is \emph{nilpotent} if $a^n = 0$ for some $n$.
\begin{enumerate}
\item Prove that if $a$ and $b$ are nilpotent in $R$ and $ab = ba$, then $a+b$ is also nilpotent.
\item Is the hypothesis $ab = ba$ in the previous statement necessary for its conclusion to hold?
\end{enumerate}
[3.12]
\end{problem}
\begin{solution}
\begin{enumerate}
\item Assume that $a^n=b^m=0$ and let $k=2\max\{n,m\}$. If $ab = ba$, we can get
\[
(a+b)^k=\sum_{p=0}^{\tfrac{k}{2}}\binom{k}{p}a^{p}b^{k-p}+\sum_{p=\tfrac{k}{2}+1}^{k}\binom{k}{p}a^{p}b^{k-p}=\sum_{p=0}^{\tfrac{k}{2}}\binom{k}{p}a^{p}\cdot 0+\sum_{p=\tfrac{k}{2}+1}^{k}\binom{k}{p}0\cdot b^{k-p}=0,
\]
which means $a+b$ is also nilpotent.
\item The hypothesis $ab = ba$ is necessary. A counterexample can be found in the ring $\mathfrak{gl}_2(\mathbb{R})$. Let
\[
a=\left(
\begin{matrix}
0 & 1\\
0 & 0
\end{matrix}
\right),\quad
b=\left(
\begin{matrix}
0 & 0\\
1 & 0
\end{matrix}
\right)
\]
and then we have $a^2=b^2=0$. In other words, $a$ and $b$ are nilpotent. However, by diagonalization we see that
\[
(a+b)^n=
\left(
\begin{matrix}
0 & 1\\
1 & 0
\end{matrix}
\right)^n
=\left(
\begin{matrix}
-1 & 1\\
1 & 1
\end{matrix}
\right)
\left(
\begin{matrix}
-1 & 0\\
0 & 1
\end{matrix}
\right)^n
\left(
\begin{matrix}
-1 & 1\\
1 & 1
\end{matrix}
\right)^{-1}\ne
\left(
\begin{matrix}
0 & 0\\
0 & 0
\end{matrix}
\right).
\]
Thus in such case, $a+b$ is no longer nilpotent.
\end{enumerate}
\end{solution}
\begin{problem}[1.8]
Prove that $x = \pm1$ are the only solutions to the equation $x^2 = 1$ in an integral domain. Find a ring in which the equation $x^2 = 1$ has more than 2 solutions.
\end{problem}
\begin{solution}
It clearly holds that $1\cdot1=1$ and $(-1)\cdot(-1)=1$. That is to say, $x = \pm1$ are solutions to the equation $x^2 = 1$. Note that if $x$ is an element of an integral domain such that $x^2=1$, then we have
\[
(x-1)\cdot(x+1)=x^2-1=0,
\]
which implies $x-1=0$ or $x+1=0$, since an integral domain has no zero-divisors. Therefore, $x = \pm1$ are the only solutions. In the ring $\mathbb{Z}/8\mathbb{Z}$, $[3]_8$ and $[5]_8$ are also solutions to the equation $x^2 = 1$.
\end{solution}
\begin{problem}[1.10]
Let $R$ be a ring. Prove that if $a \in R$ is a right unit, and has two or more left-inverses, then $a$ is not a left-zero-divisor, and is a right-zero-divisor.
\end{problem}
\begin{solution}
Since $a \in R$ is a right unit, it cannot be a left-zero-divisor. Assume there exist two distinct elements $x,y\in R$ such that $xa=ya=1$; then $(y-x)a=0$ with $y-x\ne0$, which shows that $a$ is a right-zero-divisor.
\end{solution}
\begin{problem}[1.11]
Construct a field with 4 elements: as mentioned in the text, the underlying
abelian group will have to be $\mathbb{Z}/2\mathbb{Z}\times\mathbb{Z}/2\mathbb{Z}$; $(0, 0)$ will be the zero element, and (1, 1) will be the multiplicative identity. The question is what $(0, 1)\cdot(0, 1)$, $(0, 1)\cdot(1, 0)$, $(1, 0)\cdot(1, 0)$ must be, in order to get a field. [\textsection1.2, \textsection V.5.1]
\end{problem}
\begin{solution}
Define
\[
(0, 1)\cdot(0, 1)=(1, 0),\quad (0, 1)\cdot(1, 0)=(1,1),\quad (1, 0)\cdot(1, 0)=(0, 1),
\]
and the rest of the multiplication table is then determined uniquely by the field axioms. (Note that no product of the nonzero elements $(0,1)$, $(1,0)$ may equal $(0,0)$, since a field has no zero-divisors.) For example, we have no alternative but to define
\[
(0, 1)\cdot(1,1)=(0, 1)\cdot((0,1)+(1,0))=(0, 1)\cdot(0,1)+(0, 1)\cdot(1,0)=(1, 0)+(1,1)=(0,1).
\]
Then we can check that $\mathbb{Z}/2\mathbb{Z}\times\mathbb{Z}/2\mathbb{Z}$ with these operations forms a field by definition.
\end{solution}
\hypertarget{Exercise III.1.12}{}
\begin{problem}[1.12]
Just as complex numbers may be viewed as combinations $a+bi$, where
$a,b\in \R$, and $i$ satisfies the relation $i^2=-1$ (and commutes with $\R$), we may construct a ring $\mathbb{H}$ by considering linear combinations $a + bi + cj + dk$ where $a, b, c, d \in \R$, and $i, j, k$ commute with $\R$ and satisfy the following relations:
\[
i^{2}=j^{2}=k^{2}=-1 \quad, \quad i j=-j i=k \quad, \quad j k=-k j=i \quad, \quad k i=-i k=j.
\]
Addition in $\mathbb{H}$ is defined componentwise, while multiplication is defined by imposing distributivity and applying the relations. For example,
\[
(1+i+j) \cdot(2+k)=1 \cdot 2+i \cdot 2+j \cdot 2+1 \cdot k+i \cdot k+j \cdot k=2+2 i+2 j+k-j+i=2+3 i+j+k.
\]
\begin{enumerate}[(i)]
\item Verify that this prescription does indeed define a ring.
\item Compute $(a+b i+c j+d k)(a-b i-c j-d k)$, where $a, b, c, d \in \R$.
\item Prove that $\mathbb{H}$ is a division ring. Elements of $\mathbb{H}$ are called quaternions. Note that $Q_8 := \{\pm1,\pm i,\pm j,\pm k\}$ forms a subgroup of the group of units of $\mathbb{H}$; it is a noncommutative group of order 8, called the quaternionic group.
\item List all subgroups of $Q_8$, and prove that they are all normal.
\item Prove that $Q_8$, $D_8$ are not isomorphic.
\item Prove that $Q_8$ admits the presentation $\left(x, y | x^{2} y^{-2}, y^{4}, x y x^{-1} y\right)$.
\end{enumerate}
[\textsection II.7.1, 2.4, IV.1.12, IV.5.16, IV.5.17, V.6.19]
\end{problem}
\begin{solution}
\begin{enumerate}[(i)]
\item Verifying that $(\mathbb{H},+)$ is an abelian group is immediate, and we omit it. It is easy to see that the multiplicative identity is $1$ and that the distributive properties are guaranteed by definition. The check of the associativity of multiplication is straightforward but tedious: both ways of associating the product expand to the same expression, as computed below.
\begin{align*}
&\hspace{1.2em}((a_1+b_1i+c_1j+d_1k)\cdot(a_2+b_2i+c_2j+d_2k))\cdot(a_3+b_3i+c_3j+d_3k)\\
&=[-c_3 \left(a_ 2 c_ 1+a_ 1 c_ 2+b_ 2 d_ 1-b_ 1 d_ 2\right)-b_ 3
\left(a_ 2 b_ 1+a_ 1 b_ 2-c_ 2 d_ 1+c_ 1 d_ 2\right)\\
&\hspace{1em}+a_ 3 \left(a_ 1a_ 2-b_ 1b_ 2-c_ 1 c_ 2-d_ 1 d_ 2\right)-d_3\left(-b_2 c_ 1+b_ 1 c_ 2+a_ 2 d_1+a_ 1 d_ 2\right) ]\\
&\hspace{1em}+[-c_3 \left(-b_2 c_ 1+b_ 1 c_ 2+a_ 2 d_ 1+a_ 1 d_ 2\right)+a_ 3 \left(a_ 2 b_ 1+a_ 1
b_ 2-c_ 2 d_ 1+c_ 1 d_ 2\right)\\
&\hspace{1em}+b_ 3 \left(a_ 1 a_ 2-b_ 1 b_ 2-c_ 1 c_ 2-d_ 1 d_ 2\right)+d_ 3\left(a_ 2 c_ 1+a_ 1 c_ 2+b_ 2 d_ 1-b_ 1 d_ 2\right) ]i\\
&\hspace{1em}+[b_ 3 \left(-b_2 c_ 1+b_ 1
c_ 2+a_ 2 d_ 1+a_ 1 d_ 2\right)+a_ 3 \left(a_ 2 c_ 1+a_ 1 c_ 2+b_ 2d_ 1-b_ 1 d_ 2\right)\\
&\hspace{1em}+c_ 3 \left(a_ 1 a_ 2-b_ 1 b_ 2-c_ 1 c_ 2-d_1d_2\right)-d_ 3\left(a_ 2 b_ 1+a_ 1 b_ 2-c_ 2d_ 1+c_ 1 d_ 2\right) ]j\\
&\hspace{1em}+[a_ 3 \left(-b_2 c_ 1+b_ 1 c_ 2+a_ 2 d_ 1+a_ 1 d_ 2\right)-b_ 3 \left(a_ 2 c_ 1+a_ 1 c_ 2+b_ 2 d_ 1-b_ 1 d_ 2\right)\\
&\hspace{1em}+c_ 3 \left(a_2b_ 1+a_ 1 b_ 2-c_ 2d_ 1+c_ 1 d_ 2\right)+d_ 3\left(a_ 1 a_ 2-b_ 1 b_ 2-c_ 1 c_ 2-d_ 1 d_ 2\right) ]k
\end{align*}
\begin{align*}
&\hspace{1.2em}(a_1+b_1i+c_1j+d_1k)\cdot((a_2+b_2i+c_2j+d_2k)\cdot(a_3+b_3i+c_3j+d_3k))\\
&=[-d_1 \left(a_3 d_2+a_2 d_3-b_3
c_2+b_2 c_3\right)-c_1 \left(a_3 c_2+a_2 c_3+b_3 d_2-b_2
d_3\right)\\
&\hspace{1em}-b_1 \left(a_3 b_2+a_2 b_3-c_3 d_2+c_2
d_3\right)+a_1 \left(a_2 a_3-b_2 b_3-c_2 c_3-d_2
d_3\right) ]\\
&\hspace{1em}+[c_1 \left(a_3 d_2+a_2 d_3-b_3 c_2+b_2
c_3\right)-d_1 \left(a_3 c_2+a_2 c_3+b_3 d_2-b_2
d_3\right)\\
&\hspace{1em}+a_1 \left(a_3 b_2+a_2 b_3-c_3 d_2+c_2
d_3\right)+b_1 \left(a_2 a_3-b_2 b_3-c_2 c_3-d_2
d_3\right) ]i\\
&\hspace{1em}+[-b_1 \left(a_3 d_2+a_2 d_3-b_3 c_2+b_2
c_3\right)+a_1 \left(a_3 c_2+a_2 c_3+b_3 d_2-b_2
d_3\right)\\
&\hspace{1em}+d_1 \left(a_3 b_2+a_2 b_3-c_3 d_2+c_2
d_3\right)+c_1 \left(a_2 a_3-b_2 b_3-c_2 c_3-d_2
d_3\right)]j\\
&\hspace{1em}+[a_1 \left(a_3 d_2+a_2 d_3-b_3 c_2+b_2
c_3\right)+b_1 \left(a_3 c_2+a_2 c_3+b_3 d_2-b_2
d_3\right)\\
&\hspace{1em}-c_1 \left(a_3 b_2+a_2 b_3-c_3 d_2+c_2
d_3\right)+d_1 \left(a_2 a_3-b_2 b_3-c_2 c_3-d_2
d_3\right) ]k
\end{align*}
\item Expand it by distributive properties and we get
\begin{align*}
&(a+b i+c j+d k)(a-b i-c j-d k)\\
&=a^2-abi-acj-adk+abi+b^2-bck+bdj+acj+bck+c^2-cdi+adk-bdj+cdi+d^2\\
&=a^2+b^2+c^2+d^2.
\end{align*}
\item Applying the results in (ii) we see that for any non-zero element $a+b i+c j+d k\in\mathbb{H}$,
\[
(a+b i+c j+d k)\cdot\frac{a-b i-c j-d k}{a^2+b^2+c^2+d^2}=\frac{a-b i-c j-d k}{a^2+b^2+c^2+d^2}\cdot(a+b i+c j+d k)=1,
\]
which implies $a+b i+c j+d k$ is a two-sided unit. Thus we show that $\mathbb{H}$ is a division ring.
\item $Q_8$ has 6 subgroups: $\{1\}$, $\{1,-1\}$, $\{1,-1,i,-i\}$, $\{1,-1,j,-j\}$, $\{1,-1,k,-k\}$, $Q_8$. We can just prove that they are all normal by the definition of normal subgroups.
\item Note that $D_8=\{e,r,r^2,r^3,s_1,s_2,s_3,s_4\}$ has five elements of order $2$ (the four reflections $s_1,s_2,s_3,s_4$ and the rotation $r^2$), while $Q_8$ has exactly one element of order $2$, namely $-1$. Since an isomorphism preserves the order of every element, $Q_8$ and $D_8$ are not isomorphic.
\item Let $P=\left(x, y | x^{2} y^{-2}, y^{4}, x y x^{-1} y\right)$. The relation $x^{2} y^{-2}=e$ implies $x^2=y^2$ and the relation $xyx^{-1}y=e$ implies $yx=yx^{-1}x^2=x^{-1}y^{-1}x^2=x^3y^3x^2=x^3y^5=x^3y$. First, we can always replace $yx$ by $x^3y$ until we obtain a word of the form $x^iy^j$. Then applying $x^4=y^4=e$ and replace $y^2$ by $x^2$, we can transform it into the form $x^iy^j$ with $0\le i\le 3$ and $0\le j \le 1$. Thus we see $P$ has at most 8 elements.
Next we will complete our proof by means of the \hyperlink{Lemma II.1}{Lemma II.1} in the appendix. Define a mapping
\begin{align*}
f:\{x,y\}\longrightarrow Q_8,\quad& x\longmapsto i,\\
& y\longmapsto j.
\end{align*}
Let $\varphi:F(\{x,y\})\to Q_8$ be the unique homomorphism induced by the universal property of free group. Since
\begin{align*}
&\varphi(x^{2} y^{-2})=i^2j^{-2}=1,\\
&\varphi(y^{4})=j^{4}=1,\\
&\varphi(x y x^{-1} y)=i j i^{-1} j=1,
\end{align*}
we see $\mathscr{R}=\{x^{2} y^{-2}, y^{4}, x y x^{-1} y\}\subset\ker\varphi$. And it is immediate to show that $Q_8$ can be generated by $\{i,j\}$. Thus according to the lemma, there exists a unique homomorphism $\psi:P\to Q_8$ such that $f=\psi\circ\pi\circ i$ and actually $\psi$ is surjective.
\[\xymatrix{
P\ar@{-->}[rd]^{\exists!\psi}\\
F(\{x,y\})\ar@{-->}[r]^{\varphi}\ar[u]^{\pi} &Q_8\\
\{x,y\}\ar[ru]_{f}\ar[u]^{i}&
}\]
Hence we get the inequality of cardinality $|P|\ge|Q_8|$. Since we have shown $|P|\le 8=|Q_8|$, there must be $|P|=|Q_8|=8$, which implies $\psi$ is indeed an isomorphism. Finally we conclude that $Q_8\cong\left(x, y | x^{2} y^{-2}, y^{4}, x y x^{-1} y\right)$ and complete our proof.
\end{enumerate}
\end{solution}
\hypertarget{Exercise III.1.14}{}
\begin{problem}[1.14]
$\vartriangleright$ Let $R$ be a ring, and let $f(x),g(x)\in R[x]$ be nonzero polynomials. Prove that
\[
\deg(f(x) + g(x))\le\max(\deg(f(x)), \deg(g(x))).
\]
Assuming that $R$ is an integral domain, prove that
\[
\deg(f(x)\cdot g(x)) = \deg(f(x)) + \deg(g(x)).
\]
[\textsection1.3]
\end{problem}
\begin{solution}
Assume
\[
f(x)=\sum_{i \ge 0} a_{i} x^{i},\quad g(x)=\sum_{i \ge 0} b_{i} x^{i}, \quad a_i,b_i\in R
\]
and $n,m$ are respectively the
largest integers $p,q$ for which $a_p$, $b_q$ are non-zero. In other words, we have $a_n\ne 0$, $a_i=0$ for $i>n$ and $b_m\ne 0$, $b_i=0$ for $i>m$. Since
\[
f(x)+g(x)=\sum_{i \ge 0} (a_{i}+b_i) x^{i}=\sum_{i =0}^{\max\{n,m\}} (a_{i}+b_i) x^{i},
\]
we see that
\[
\deg(f(x) + g(x))\le\max\{n,m\}=\max(\deg(f(x)), \deg(g(x))).
\]
Now suppose that $R$ is an integral domain. Noticing that $a_n\ne 0$ and $b_m\ne 0$ imply $a_nb_m\ne 0$, we can see
\[
f(x) \cdot g(x) =\sum_{k \geq 0} \sum_{i+j=k} a_{i} b_{j} x^{i+j}=\sum_{k = 0}^{n+m} \sum_{i+j=k} a_{i} b_{j} x^{i+j}
\]
has a degree of $n+m$. That is,
\[
\deg(f(x)\cdot g(x)) = \deg(f(x)) + \deg(g(x)).
\]
\end{solution}
\begin{problem}[1.15]
$\vartriangleright$ Prove that $R[x]$ is an integral domain if and only if $R$ is an integral domain. [\textsection1.3]
\end{problem}
\begin{solution}
Assume $R$ is an integral domain. \hyperlink{Exercise III.1.14}{Exercise III.1.14} tells us if $f(x)$, $g(x)\in R[x]$ are nonzero polynomials, we have
\[
\deg(f(x)\cdot g(x)) = \deg(f(x)) + \deg(g(x)),
\]
which implies $f(x)\cdot g(x)$ is also a nonzero polynomial. Thus we show $R[x]$ is an integral domain.
Conversely, assume $R[x]$ is an integral domain. Note that given any $a,b\in R$, they also belong to $R[x]$. Hence we obtain
\[
a\ne0,b\ne0\implies ab\ne0,
\]
which means $R$ is an integral domain.
\end{solution}
\begin{problem}[1.16]
Let $R$ be a ring, and consider the ring of power series $R[[x]]$ (cf. \textsection1.3).
\begin{enumerate}
\item Prove that a power series $a_0+a_1x+a_2x^2+\cdots$ is a unit in $R[[x]]$ if and only if $a_0$ is a unit in $R$. What is the inverse of $1-x$ in $R[[x]]$?
\item Prove that $R[[x]]$ is an integral domain if and only if $R$ is.
\end{enumerate}
\end{problem}
\begin{solution}
\begin{enumerate}
\item If $a_0$ is a unit in $R$ then we can assume there exists $b_0\in R$ such that $a_0b_0=1$. Let
\[
f(x)=\sum_{n \ge 0} a_{n} x^{n},\quad g(x)=\sum_{n \ge 0} b_{n} x^{n},
\]
where
\[
b_n = -b_0 \sum_{i=1}^n a_i b_{n-i},\quad n\ge1.
\]
Noticing that
\[
a_0b_n= -a_0b_0 \sum_{i=1}^n a_i b_{n-i}=- \sum_{i=1}^n a_i b_{n-i},\quad n\ge1,
\]
we have
\begin{align*}
f(x)g(x)&=\sum_{n \ge 0}\sum_{i=0}^na_{n-i}b_{i}x^n\\
&=1+\sum_{n \ge 1}\sum_{i=0}^na_{i}b_{n-i}x^n\\
&=1+\sum_{n \ge 1}\left(a_0b_n+\sum_{i=1}^na_{i}b_{n-i}\right)x^n\\
&=1+\sum_{n \ge 1}\left(a_0b_n-a_0b_n\right)x^n\\
&=1.
\end{align*}
Hence we show $f(x)=a_0+a_1x+a_2x^2+\cdots$ is a unit.
For the other direction, supposing $f(x)=a_0+a_1x+a_2x^2+\cdots$ is a unit, then there exists $g(x)=b_0+b_1x+b_2x^2+\cdots$ such that
\[
f(x)g(x)=a_0b_0+\sum_{n \ge 1}\sum_{i=0}^na_{i}b_{n-i}x^n=1.
\]
By comparing the both sides of the equality we can find $a_0b_0=1$, which implies $a_0$ is a unit in $R$.
We can check that the inverse of $1-x$ in $R[[x]]$ is $1+x+x^2+\cdots$ since
\[
(1-x)\sum_{i \ge 0}x^i=\sum_{i \ge 0}x^i-\sum_{i \ge 0}x^{i+1}=1.
\]
\item Suppose $R$ is an integral domain. If $f(x)$, $g(x)\in R[[x]]$ are nonzero power series, we can assume that
\[
f(x)=\sum_{i \ge 0} a_{i} x^{i},\quad g(x)=\sum_{i \ge 0} b_{i} x^{i}, \quad a_i,b_i\in R
\]
and that $n,m$ are respectively the smallest integers $p,q$ for which $a_p$, $b_q$ are non-zero. In other words, we have $a_n\ne 0$, $a_i=0$ for $i<n$ and $b_m\ne 0$, $b_i=0$ for $i<m$. Noticing that $a_n\ne 0$ and $b_m\ne 0$ imply $a_nb_m\ne 0$, we can see
\[
f(x) \cdot g(x) =\sum_{k \geq 0} \sum_{i+j=k} a_{i} b_{j} x^{i+j}= a_{n} b_{m} x^{n+m}+\sum_{k\ge n+m+1}\sum_{i+j=k} a_{i} b_{j} x^{i+j}\ne 0.
\]
Thus we show $R[[x]]$ is an integral domain.
Conversely, assume that $R[[x]]$ is an integral domain. Note that given any $a,b\in R$, they also belong to $R[[x]]$. Hence we obtain
\[
a\ne0,b\ne0\implies ab\ne0,
\]
which means that $R$ is also an integral domain.
\end{enumerate}
\end{solution}
\subsection{\textsection2. The category $\mathsf{Ring}$}
\begin{problem}[2.1]
Prove that if there is a homomorphism from a zero-ring to a ring $R$, then $R$ is a zero-ring. [\textsection2.1]
\end{problem}
\begin{solution}
Suppose that $\varphi$ is a homomorphism from a zero-ring $O$ to a ring $R$. Since $\varphi(0_O)=0_R$, $\varphi(1_O)=1_R$, $0_O=1_O$, we have $0_R=1_R$, which implies that $R$ is a zero-ring.
\end{solution}
\hypertarget{Exercise III.2.4}{}
\begin{problem}[2.4]
Define functions $\mathbb{H} \rightarrow \mathfrak{g l}_{4}(\mathbb{R})$ and $\mathbb{H} \rightarrow \mathfrak{g l}_{2}(\mathbb{C})$ (cf. \hyperlink{Exercise III.1.4}{Exercise III.1.4} and \hyperlink{Exercise III.1.12}{Exercise III.1.12}) by
\begin{align*}
a+b i+c j+d k& \longmapsto\left(\begin{array}{cccc}{a} & {b} & {c} & {d} \\ {-b} & {a} & {-d} & {c} \\ {-c} & {d} & {a} & {-b} \\ {-d} & {-c} & {b} & {a}\end{array}\right)\\
a+b i+c j+d k &\longmapsto\left(\begin{array}{cc}{a+b i} & {c+d i} \\ {-c+d i} & {a-b i}\end{array}\right)
\end{align*}
for all $a, b, c, d \in\R$. Prove that both functions are injective ring homomorphisms.
Thus, quaternions may be viewed as real or complex matrices.
\end{problem}
\begin{solution}
Let $f$ be the function $\mathbb{H} \rightarrow \mathfrak{g l}_{4}(\mathbb{R})$ described above. For simplicity, we omit the trivial checks and only verify that $f$ preserves multiplication:
\begin{align*}
&f((a_1+b_1i+c_1j+d_1k)\cdot(a_2+b_2i+c_2j+d_2k))\\
&=f((a_1 a_2-b_1 b_2-c_1 c_2-d_1 d_2)+(a_2
b_1+a_1 b_2-c_2 d_1+c_1 d_2)i\\
&\hspace{1em}+(a_2 c_1+a_1 c_2+b_2 d_1-b_1
d_2)j+(a_2 d_1+a_1 d_2-b_2 c_1+b_1 c_2)k)\\
&=\left(\begin{array}{cccc}{a_1} & {b_1} & {c_1} & {d_1} \\ {-b_1} & {a_1} & {-d_1} & {c_1} \\ {-c_1} & {d_1} & {a_1} & {-b_1} \\ {-d_1} & {-c_1} & {b_1} & {a_1}\end{array}\right)
\left(\begin{array}{cccc}{a_2} & {b_2} & {c_2} & {d_2} \\ {-b_2} & {a_2} & {-d_2} & {c_2} \\ {-c_2} & {d_2} & {a_2} & {-b_2} \\ {-d_2} & {-c_2} & {b_2} & {a_2}\end{array}\right)\\
&=f(a_1+b_1i+c_1j+d_1k)f(a_2+b_2i+c_2j+d_2k)
\end{align*}
\end{solution}
\hypertarget{Exercise III.2.5}{}
\begin{problem}[2.5]
The norm of a quaternion $w=a+b i+c j+d k$, with $a, b, c, d \in \mathbb{R}$, is the real number $N(w)=a^{2}+b^{2}+c^{2}+d^{2}$.
Prove that the function from the multiplicative group $\mathbb{H}^{*}$ of nonzero quaternions to the multiplicative group $\mathbb{R}^{+}$ of positive real numbers, defined by assigning to each nonzero quaternion its norm, is a homomorphism. Prove that the kernel of this homomorphism is isomorphic to $\mathrm{SU}_{2}(\C)$ (cf. \hyperlink{Exercise II.6.3}{Exercise II.6.3}). [4.10, IV.5.17, V.6.19]
\end{problem}
\begin{solution}
According to \hyperlink{Exercise III.2.4}{Exercise III.2.4}, $w\in\mathbb{H}^{*}$ can be viewed as a matrix $i(w)\in\mathfrak{g l}_{2}(\mathbb{C})$ where $i:\mathbb{H}\to\mathfrak{g l}_{2}(\mathbb{C})$ is a monomorphism in $\mathsf{Ring}$. Then the function $N:\mathbb{H}^{*}\to \mathbb{R}^{+}$ can be just viewed as the determinant mapping $\det:i(\mathbb{H}^{*})\subset\mathfrak{g l}_{2}(\mathbb{C})\to\mathbb{R}^{+}$. More precisely, it means $N=\det\circ \;i$. We can check that
\[
N(w_1w_2)=\det(i(w_1w_2))=\det(i(w_1)i(w_2))=\det(i(w_1))\det(i(w_2))=N(w_1)N(w_2)
\]
and
\[
w\in\ker N\iff N(w)=\det(i(w))=1\iff i(w)\in\mathrm{SU}_{2}(\C).
\]
Therefore, $N$ is a homomorphism and $\ker N$ is isomorphic to $\mathrm{SU}_{2}(\C)$.
\end{solution}
\begin{problem}[2.6]
Verify the ‘extension property’ of polynomial rings, stated in Example 2.3.
[\textsection2.2]
\end{problem}
\begin{solution}
Define the following ring homomorphisms
\begin{align*}
\alpha:\ &R\longrightarrow S,\quad r\longmapsto \alpha(r)\\
\epsilon:\ &R\longrightarrow R[x],\quad r\longmapsto r,
\end{align*}
and functions
\begin{align*}
j:\ &\{s\}\longrightarrow R[x],\quad s\longmapsto x,\\
i:\ &\{s\}\longrightarrow S,\quad s\longmapsto s.
\end{align*}
Assume that $s \in S$ is an element commuting with $\alpha(r)$ for all $r \in R$, we are to show that there exists a unique ring homomorphism $\overline{\alpha}: R[x]\to S$ such that the following diagram commutes.
\[\xymatrix{
R\ar@{->}[d]_{\epsilon} \ar@{->}[rd]^{\alpha}\\
R[x]\ar@{-->}[r]^{\exists!\overline{\alpha}} & S\\
\{s\}\ar@{^{(}->}[ru]_{i}\ar@{|->}[u]^{j}
}\]
\textbf{Uniqueness}. If $\overline{\alpha}$ exists, then the postulated commutativity of the diagram means that for all $f(x)=\sum_{n \ge 0} a_{n} x^{n}\in R[x]$, there must be
\[
\overline{\alpha}\left(f(x)\right)=\overline{\alpha}\left(\sum_{n \ge 0} a_{n} x^{n}\right)=\sum_{n \ge 0} \overline{\alpha}\left( a_n\right)\overline{\alpha}\left(x\right)^{n}=\sum_{n \ge 0} \alpha\left( a_n\right)s^{n}.
\]
That is, $\overline{\alpha}$ is unique.
\noindent\textbf{Existence}. The only choice is to define
\[
\overline{\alpha}:\ R[x]\longrightarrow S,\quad \sum_{n \ge 0} a_{n} x^{n}\longmapsto \sum_{n \ge 0} \alpha\left( a_n\right)s^{n}
\]
and to check whether it is a ring homomorphism.
\begin{enumerate}
\item Preserving addition:
\begin{align*}
\overline{\alpha}\left(\sum_{n \ge 0} a_{n} x^{n}+\sum_{n \ge 0} b_{n} x^{n}\right)&=\overline{\alpha}\left(\sum_{n \ge 0}(a_{n}+b_{n})x^{n}\right)\\
&=\sum_{n \ge 0}\alpha\left(a_{n}+b_{n}\right)s^{n}\\
&=\sum_{n \ge 0}\alpha\left(a_{n}\right)s^{n}+\sum_{n \ge 0}\alpha\left(b_{n}\right)s^{n}\\
&=\overline{\alpha}\left(\sum_{n \ge 0} a_{n} x^{n}\right)+\overline{\alpha}\left(\sum_{n \ge 0} b_{n} x^{n}\right).
\end{align*}
\item Preserving multiplication:
\begin{align*}
\overline{\alpha}\left(\sum_{n \ge 0} a_{n} x^{n}\sum_{n \ge 0} b_{n} x^{n}\right)&=\overline{\alpha}\left(\sum_{n \ge 0} \sum_{i+j=n} a_{i} b_{j} x^{n}\right)\\
&=\sum_{n \ge 0}\alpha\left(\sum_{i+j=n} a_{i} b_{j}\right)s^{n}\\
&=\sum_{n \ge 0}\sum_{i+j=n}\alpha\left(a_{i} \right)s^{i}\alpha\left( b_{j}\right)s^{j}\\
&=\left(\sum_{n \ge 0}\alpha\left(a_{n}\right)s^{n}\right)\left(\sum_{n \ge 0}\alpha\left(b_{n}\right)s^{n}\right)\\
&=\overline{\alpha}\left(\sum_{n \ge 0} a_{n} x^{n}\right)\overline{\alpha}\left(\sum_{n \ge 0} b_{n} x^{n}\right).
\end{align*}
\item Preserving identity element:
\begin{align*}
\overline{\alpha}(1_R)=\alpha(1_R)=1_S.
\end{align*}
\end{enumerate}
Integrating the two parts we finally conclude there exists a unique ring homomorphism $\overline{\alpha}$ such that the diagram commutes.
\end{solution}
\begin{problem}[2.7]
Let $R=\Z/2\Z$, and let $f(x)=x^2-x$; note $f(x)\ne 0$. What is the polynomial function $R\to R$ determined by $f(x)$? [\textsection2.2, \textsection V.4.2, \textsection V.5.1]
\end{problem}
\begin{solution}
It determines the function $f:\Z/2\Z\to\Z/2\Z$ that sends every element to $[0]_2$; that is, $f([0]_2)=[0]_2$ and $f([1]_2)=[0]_2$. (So a nonzero polynomial may determine the zero function.)
\end{solution}
\begin{problem}[2.8]
Prove that every subring of a field is an integral domain.
\end{problem}
\begin{solution}
Suppose $\varphi:R\hookrightarrow K$ is the inclusion homomorphism of a subring $R$ into a field $K$. If $a\ne0$, we have
\[
ab=ac\implies \varphi(a)\varphi(b)=\varphi(a)\varphi(c)\implies \varphi(b)=\varphi(c)\implies b=c.
\]
Due to the commutativity of the field, cancellation on the other side ($ba=ca\implies b=c$) holds as well. Thus we show $R$ is an integral domain.
\end{solution}
\hypertarget{Exercise III.2.9}{}
\begin{problem}[2.9]
$\neg$ The \emph{center} of a ring $R$ consists of the elements a such that $ar = ra$ for all $r\in R$. Prove that the center is a subring of $R$. Prove that the center of a division ring is a field. [2.11, IV.2.17, VII.5.14,VII.5.16]
\end{problem}
\begin{solution}
Denote the center of $R$ by $Z(R)$. We can check that
\begin{enumerate}
\item for all $x,y\in Z(R)$, for all $r\in R$, $$(x-y)r=xr-yr=rx-ry=r(x-y)\implies x-y\in Z(R);$$
\item for all $r\in R$,
\[
1r=r1\implies 1\in Z(R);
\]
\item for all $x,y\in Z(R)$, for all $r\in R$,
\[
(xy)r=xry=r(xy)\implies xy\in Z(R).
\]
\end{enumerate}
Thus we show that $Z(R)$ is a subring of $R$. If $R$ is a division ring, then $Z(R)$ is also a division ring: for any nonzero $x\in Z(R)$ and any $r\in R$, $x^{-1}r=x^{-1}(rx)x^{-1}=x^{-1}(xr)x^{-1}=rx^{-1}$, so $x^{-1}\in Z(R)$. Note that for all $x,y\in Z(R)$, $xy=yx$; hence $Z(R)$ is a commutative division ring, namely a field.
\end{solution}
\hypertarget{Exercise III.2.10}{}
\begin{problem}[2.10]
$\neg$ The \emph{centralizer} of an element $a$ of a ring $R$ consists of the elements $r \in R$ such that $ar = ra$. Prove that the centralizer of $a$ is a subring of $R$, for every $a\in R$. Prove that the center of $R$ is the intersection of all its centralizers. Prove that every centralizer in a division ring is a division ring. [2.11, IV.2.17, VII.5.16]
\end{problem}
\begin{solution}
Denote the centralizer of an element $a$ of $R$ by $Z_a(R)$. That is,
\[
Z_a(R)=\{r\in R\mid ar=ra\}.
\]
We can check that
\begin{enumerate}
\item for all $x,y\in Z_a(R)$, $$(x-y)a=xa-ya=ax-ay=a(x-y)\implies x-y\in Z_a(R);$$
\item
\[
1a=a1\implies 1\in Z_a(R);
\]
\item for all $x,y\in Z_a(R)$,
\[
(xy)a=xay=a(xy)\implies xy\in Z_a(R).
\]
\end{enumerate}
Thus we show that $Z_a(R)$ is a subring of $R$.
By definition we have $Z(R)\subseteq Z_a(R)$ for all $a\in R$, which implies $Z(R)\subseteq \bigcap_{a\in R} Z_a(R)$. Assume $s\in\bigcap_{a\in R} Z_a(R)$, then we see $sa=as$ for all $a\in R$, which means $s\in Z(R)$ and accordingly $\bigcap_{a\in R} Z_a(R)\subseteq Z(R)$. Thus we deduce that $Z(R)=\bigcap_{a\in R} Z_a(R)$.
If $R$ is a division ring and $r\in Z_a(R)$ is nonzero, then $ar=ra$, which means that
\[
r^{-1}(ar)r^{-1}=r^{-1}(ra)r^{-1}\implies r^{-1}a=ar^{-1}.
\]
According to the definition of $Z_a(R)$, we see $r^{-1}\in Z_a(R)$. Thus we show that $Z_a(R)$ is a division ring.
\end{solution}
\hypertarget{Exercise III.2.11}{}
\begin{problem}[2.11]
$\neg$ Let $R$ be a division ring consisting of $p^{2}$ elements, where $p$ is a prime. Prove that $R$ is commutative, as follows:
\begin{itemize}
\item If $R$ is not commutative, then its center $C$ (\hyperlink{Exercise III.2.9}{Exercise III.2.9}) is a proper subring of $R .$ Prove that $C$ would then consist of $p$ elements.
\item Let $r \in R, r \notin C .$ Prove that the centralizer of $r$ (\hyperlink{Exercise III.2.10}{Exercise III.2.10}) contains both $r$ and $C$.
\item Deduce that the centralizer of $r$ is the whole of $R$.
\item Derive a contradiction, and conclude that $R$ had to be commutative (hence, a field).
\end{itemize}
This is a particular case of Wedderburn's theorem: every finite division ring is a field. [IV.2.17, VII.5.16]
\end{problem}
\begin{solution}
If $R$ is not commutative, then its center $Z(R)$ is a proper subring of $R$, which means $|Z(R)|<p^2$. By considering $Z(R)$ as a subgroup of the underlying abelian group $(R,+)$, we deduce from Lagrange's theorem that $|Z(R)|$ divides $p^2$. Since $0\ne1$ in a division ring, $|Z(R)|>1$, so $Z(R)$ consists of $p$ elements. Given any $r \in R-Z(R)$, in \hyperlink{Exercise III.2.10}{Exercise III.2.10} we have shown that $Z_r(R)$ is a subring of $R$ and $Z(R)\subseteq Z_r(R)$. By the definition of $Z_r(R)$, it is clear that $r\in Z_r(R)$. Hence we have $Z(R)\cup \{r\}\subseteq Z_r(R)$ and $|Z_r(R)|>p$. Again by Lagrange's theorem, $|Z_r(R)|$ divides $p^2$, which forces $|Z_r(R)|=p^2$. Thus we show that $Z_r(R)=R$. Note that $Z_a(R)=R$ for all $a\in Z(R)$ as well; hence $Z_a(R)=R$ for all $a\in R$. In \hyperlink{Exercise III.2.10}{Exercise III.2.10}, we have derived that $\bigcap_{a\in R} Z_a(R)\subseteq Z(R)$, which implies $R\subseteq Z(R)$. Thus we have $Z(R)=R$, which contradicts the previous deduction that $Z(R)$ is a proper subring of $R$. Therefore, we can conclude that $R$ is commutative.
\end{solution}
\begin{problem}[2.15]
For $m>1,$ the abelian groups $(\mathbb{Z},+)$ and $(m \mathbb{Z},+)$ are manifestly isomorphic: the function $\varphi: \mathbb{Z} \rightarrow m \mathbb{Z}, n \mapsto mn$ is a group isomorphism. Use this isomorphism to transfer the structure of `ring without identity' $(m \mathbb{Z},+, \cdot)$ back onto $\mathbb{Z}:$ give an explicit formula for the `multiplication' $\bullet$ this defines on $\mathbb{Z}$ (that is, such that $\varphi(a \bullet b)=\varphi(a) \cdot \varphi(b))$. Explain why structures induced by different positive integers $m$ are non-isomorphic as `rings without 1'.
(This shows that there are many different ways to give a structure of ring without identity to the \emph{group} $(\mathbb{Z},+)$. Compare this observation with Exercise 2.16.) [\textsection 2.1]
\end{problem}
\begin{solution}
Since $\varphi(a \bullet b)=\varphi(a)\cdot\varphi(b)$, we have $m(a\bullet b)=(ma)(mb)=m^2ab$, so the induced multiplication is $a \bullet b = mab$. Denote by $\bullet_m$ the multiplication induced by $m$, and suppose $\psi:(\Z,+,\bullet_m)\to(\Z,+,\bullet_n)$ is an isomorphism of rings without identity. As an automorphism of the abelian group $(\Z,+)$, $\psi$ has the form $\psi(x)=\varepsilon x$ with $\varepsilon=\pm1$. Then $\psi(a\bullet_m b)=\psi(a)\bullet_n\psi(b)$ gives $\varepsilon mab=n(\varepsilon a)(\varepsilon b)=\varepsilon^2nab=nab$ for all $a,b\in\Z$, so $\varepsilon m=n$. Since $m,n>0$, this forces $\varepsilon=1$ and $m=n$. Therefore, structures induced by different positive integers are non-isomorphic as rings without 1.
\end{solution}
\subsection{\textsection3. Ideals and quotient rings}
\begin{problem}[3.1]
Prove that the image of a ring homomorphism $\varphi: R \to S$ is a subring of $S$. What can you say about $\varphi$, if its image is an ideal of $S$? What can you say about $\varphi$, if its kernel is a subring of $R$?
\end{problem}
\begin{solution}
We can see that $\im \varphi$ is a subring of $S$ from the canonical decomposition
\[\xymatrix{
R\ar@/^2pc/@{->}[rrr]^{\varphi}\ar@{->>}[r]^{}&R/\ker \varphi \ar@{->}[r]^{\hspace{1em}\sim\hspace{4pt}}_{\hspace{1em}\tilde{\varphi}\hspace{4pt}}&\im \varphi\hspace{2pt}\ar@{^{(}->}[r]^{}&S
}\]
If $\im \varphi$ is an ideal, then $s\in S,1_S\in\im\varphi \implies s=s\cdot1_S\in \im\varphi$. So $\im \varphi=S$ and $\varphi$ is an epimorphism. Since $\ker\varphi$ is an ideal, if it is also a subring, then $1\in\ker\varphi$ and we have $\ker \varphi=R$; consequently $1_S=\varphi(1)=0_S$, so $S$ is a zero ring.
\end{solution}
\hypertarget{Exercise III.3.2}{}
\begin{problem}[3.2]
Let $\varphi: R\to S$ be a ring homomorphism, and let $J$ be an ideal of $S$. Prove
that $I = \varphi^{-1}(J)$ is an ideal of $R$. [\textsection3.1]
\end{problem}
\begin{solution}
In $\Ab$ we see $\varphi^{-1}(J)$ is a subgroup of $R$. For all $r\in R$, $a\in \varphi^{-1}(J)$, we have
\[
\varphi(ra)=\varphi(r)\varphi(a)\in J\implies ra\in\varphi^{-1}(J).
\]
Similarly we can obtain $ar\in\varphi^{-1}(J)$. Therefore, we conclude that $I = \varphi^{-1}(J)$ is an ideal of $R$.
\end{solution}
\hypertarget{Exercise III.3.3}{}
\begin{problem}[3.3]
$\neg$ Let $\varphi : R \to S$ be a ring homomorphism, and let $J$ be an ideal of $R$.
\begin{itemize}
\item Show that $\varphi(J)$ need not be an ideal of $S$.
\item Assume that $\varphi$ is surjective; then prove that $\varphi(J)$ is an ideal of $S$.
\item Assume that $\varphi$ is surjective, and let $I = \ker\varphi$; thus we may identify $S$ with $R/I$.
Let $\overline{J}= \varphi(J)$, an ideal of $R/I$ by the previous point. Prove that
\[
\frac{R / I}{\overline{J}} \cong \frac{R}{I+J}
\]
\end{itemize}
(Of course this is just a rehash of Proposition 3.11.) [4.11]
\end{problem}
\begin{solution}
\begin{itemize}
\item Let $\varphi:\Z\hookrightarrow \Q$ and $J=\Z$. It is clear that $\varphi(J)=\Z$ is not an ideal of $\Q$.
\item Assume that $\varphi$ is surjective. In $\Ab$ we see $\varphi(J)$ is a subgroup of $S$. For all $a'=\varphi(a)\in\varphi(J)$, $r'=\varphi(r)\in S$,
\[
ra\in J\implies r'a'=\varphi(r)\varphi(a)=\varphi(ra)\in\varphi(J).
\]
Similarly we can obtain $a'r'\in\varphi(J)$. Therefore, we conclude that $\varphi(J)$ is an ideal of $S$.
\item Assume that $\varphi$ is surjective. The universal property yields a unique homomorphism
\begin{align*}
\psi:R/I&\longrightarrow R/(I+J),\\
r+I &\longmapsto r+I+J.
\end{align*}
Since
\begin{align*}
\ker \psi&=\{r+I\in R/I\mid r\in I+J\}\\
&=\{a+b+I\in R/I\mid a\in I,b\in J\}\\
&=\{b+I\in R/I\mid b\in J\}\\
&=\{\varphi(b)\in S\mid b\in J\}\\
&= \varphi(J)=\overline{J}
\end{align*}
and $\psi$ is surjective,
\[
\frac{R / I}{\overline{J}}=\frac{R / I}{\ker \psi} \cong \frac{R}{I+J}.
\]
\end{itemize}
\end{solution}
\hypertarget{Exercise III.3.7}{}
\begin{problem}[3.7]
Let $R$ be a ring, and let $a\in R$. Prove that $Ra$ is a left-ideal of $R$, and $aR$ is a right-ideal of $R$. Prove that $a$ is a left-, resp. right-unit if and only if $R = aR$, resp. $R = Ra$.
\end{problem}
\begin{solution}
For all $r\in R$, $r(Ra)\subseteq Ra$, $(aR)r\subseteq aR$. Therefore, $Ra$ is a left-ideal of $R$, and $aR$ is a right-ideal of $R$. Since $aR\subseteq R$, $R\subseteq aR$ actually amounts to $R = aR$.
\[
a\text{ is a left-unit}\iff\exists b\in R, ab=1\implies \forall r\in R,r=abr\in aR\implies R\subseteq aR
\]
\[
R\subseteq aR\implies\forall r\in R,\exists r'\in R,r=ar'\implies \exists r'\in R, ar'=1\iff a\text{ is a left-unit}
\]
Therefore, $a$ is a left-unit if and only if $R = aR$. Similarly we can prove $a$ is a right-unit if and only if $R = Ra$.
\end{solution}
\begin{problem}[3.8]
Prove that a ring $R$ is a division ring if and only if its only left-ideals and right-ideals are $\{0\}$ and $R$.
In particular, a commutative ring $R$ is a field if and only if the only ideals of $R$ are $\{0\}$ and $R$. [3.9, \textsection4.3]
\end{problem}
\begin{solution}
Assume the only left-ideals and right-ideals of the ring $R$ are $\{0\}$ and $R$. If $a\ne0$, we have $Ra=aR=R$. As a result of \hyperlink{Exercise III.3.7}{Exercise III.3.7}, this implies that $a$ is a two-sided unit and accordingly that $R$ is a division ring.
Now assume that $R$ is a division ring. Suppose $I$ is a nonzero left-ideal of $R$ and that $a\in I$ is not 0. Note that the condition of division ring guarantees there exists $b\in R$ such that $ba=1$. Since for all $r\in R$, $r=(rb)a\in I$, there must be $I=R$. Supposing that $I'$ is a nonzero right-ideal of $R$ and that $a'\in I'$ is not 0, in a similar way we can deduce $I'=R$. Therefore, we conclude that the only left-ideals of $R$ and right-ideals of $R$ are $\{0\}$ and $R$.
\end{solution}
\begin{problem}[3.11]
Let $R$ be a ring containing $\C$ as a subring. Prove that there are no ring homomorphisms $R \to \R$.
\end{problem}
\begin{solution}
Suppose $f:R \to \R$ is a ring homomorphism. On the one hand, we have
\[
f(1)=f(1\cdot1)=f(1)^2\ge0.
\]
On the other hand, since $1=-i\cdot i$ in $\C\subseteq R$, we can calculate $f(1)$ by
\[
f(1)=f(-i\cdot i)=f(-i)f(i)=-f(i)^2\le0,
\]
which forces $f(1)$ to be 0. This contradicts $f(1)=1$, which is required of a ring homomorphism.
\end{solution}
\hypertarget{Exercise III.3.12}{}
\begin{problem}[3.12]
Let $R$ be a commutative ring. Prove that the set of nilpotent elements of $R$ is an ideal of $R$. (Cf. \hyperlink{Exercise III.1.6}{Exercise III.1.6}. This ideal is called the nilradical of $R$.)
Find a non-commutative ring in which the set of nilpotent elements is not an ideal. [3.13, 4.18, V.3.13, \textsection VII.2.3]
\end{problem}
\begin{solution}
Suppose $N$ is the set of nilpotent elements of $R$. In \hyperlink{Exercise III.1.6}{Exercise III.1.6} we have shown that if $R$ is commutative, then $a+b\in N$ for all $a,b\in N$. Since for all $r\in R$, $a\in N$, commutativity gives
\[
a^n=0\implies (ra)^n=(ar)^n=r^na^n=0\implies ra,ar\in N,
\]
we prove that $N$ is an ideal of $R$. A counterexample for a non-commutative ring can be found in the ring $\mathfrak{gl}_2(\mathbb{R})$, as is shown in \hyperlink{Exercise III.1.6}{Exercise III.1.6}.
\end{solution}
\hypertarget{Exercise III.3.13}{}
\begin{problem}[3.13]
$\neg$ Let $R$ be a commutative ring, and let $N$ be its nilradical (cf. \hyperlink{Exercise III.3.12}{Exercise III.3.12}). Prove that $R/N$ contains no nonzero nilpotent elements. (Such a ring is said to be reduced.) [4.6, VII.2.8]
\end{problem}
\begin{solution}
Suppose there exists a nilpotent element $r+N\in R/N$ and $n>0$ such that
\[
r^n+N=N\iff r^n\in N.
\]
Then we have $r^{nm}=0$ for some $m>0$, which implies $r\in N$. Therefore, the only nilpotent element in $R/N$ is $N$.
\end{solution}
\begin{problem}[3.14]
$\neg$ Prove that the characteristic of an integral domain is either 0 or a prime
integer. Do you know any ring of characteristic 1?
\end{problem}
\begin{solution}
Suppose the characteristic of the integral domain $R$ is composite, say $pq$, where $p$ is a prime and $q>1$. Then we have $p1_R\ne 0$ and $q1_R\ne0$, since the additive order of $1_R$ is $pq$ and $p,q<pq$. However, we can deduce
\[
(p1_R)(q1_R)=pq1_R=0_R,
\]
which contradicts the assumption that $R$ is an integral domain.
If the characteristic of the integral domain $R$ is 1, then the canonical homomorphism $i:\Z\to R$ sends all integers to $0_R$, which means $0_R=1_R$ and $R$ is actually a zero ring instead of an integral domain. Thus the characteristic of an integral domain is either 0 or a prime integer.
\end{solution}
\hypertarget{Exercise III.3.15}{}
\begin{problem}[3.15]
$\neg$ A ring $R$ is boolean if $a^{2}=a$ for all $a \in R$. Prove that $\mathscr{P}(S)$ is boolean, for every set $S$ (cf. \hyperlink{Exercise III.1.2}{Exercise III.1.2}). Prove that every boolean ring is commutative, and has characteristic 2. Prove that if an integral domain $R$ is boolean, then $R \cong \mathbb{Z} / 2 \mathbb{Z}$. $[4.23, \mathrm{~V}.6.3]$
\end{problem}
\begin{solution}
Since $A\cap A=A$ for all $A\in \mathscr{P}(S)$, $\mathscr{P}(S)$ is boolean.
Assume that $R$ is a boolean ring. For any $x\in R$,
\[
x+x=(x+x)^2=x^2+x^2+x^2+x^2=x+x+x+x\implies x+x=0_R\implies x=-x.
\]
For any $a,b\in R$,
\[
a+b=(a+b)^2=a^2+ab+ba+b^2=a+ab+ba+b\implies ab=-ba=ba.
\]
Thus we show every boolean ring is commutative. Since $1_R+1_R=0_R$, boolean ring has characteristic 2.
If an integral domain $R$ is boolean, we can define
\begin{align*}
\psi:\mathbb{Z} / 2 \mathbb{Z}&\longrightarrow R ,\\
[n]_2 &\longmapsto n\cdot1_R.
\end{align*}
$\psi$ is well-defined since for all $n,k\in\mathbb{Z}$, $n\cdot1_R=(n+2k)\cdot1_R$. Since $\psi([0]_2)\ne\psi([1]_2)$, $\psi$ is injective. For any $a\in R$, we have $a(a-1_R)=0_R$. Since $R$ is an integral domain, there must be $a=0_R$ or $a-1_R=0$, which implies $\psi$ is surjective. Therefore, we show $\psi$ is an isomorphism and $R \cong \mathbb{Z} / 2 \mathbb{Z}$.
\end{solution}
\begin{problem}[3.17]
Let $I, J$ be ideals of a ring $R$. State and prove a precise result relating the ideals $(I + J)/I$ of $R/I$ and $J/(I \cap J)$ of $R/(I \cap J)$. [\textsection3.3]
\end{problem}
\begin{solution}
As abelian groups, the second isomorphism theorem ensures $(I + J)/I\cong J/(I \cap J)$.
\end{solution}
\subsection{\textsection4. Ideals and quotients: remarks and examples. Prime and maximal ideals}
\hypertarget{Exercise III.4.2}{}
\begin{problem}[4.2]
Prove that the homomorphic image of a Noetherian ring is Noetherian. That is, prove that if $\varphi: R \to S$ is a surjective ring homomorphism, and $R$ is Noetherian, then $S$ is Noetherian. [\textsection6.4]
\end{problem}
\begin{solution}
According to \hyperlink{Exercise III.3.2}{Exercise III.3.2}, given any ideal $J$ of $S$, we see $\varphi^{-1}(J)$ is an ideal of $R$. Since $R$ is a Noetherian ring, we have $\varphi^{-1}(J)=(a_1,a_2,\cdots,a_n)$. Since $\varphi$ is surjective, there must be
$$
J=\varphi(\varphi^{-1}(J))=(\varphi(a_1),\varphi(a_2),\cdots,\varphi(a_n)),
$$
which means $J$ is finitely generated. Thus we conclude $S$ is Noetherian.
\end{solution}
\begin{problem}[4.3]
Prove that the ideal $(2, x)$ of $\Z[x]$ is not principal.
\end{problem}
\begin{solution}
Suppose $(f)=(2,x)$. Since $2\in(f)$, we have
\[
2=gf
\]
for some $g\in \Z[x]$; comparing degrees shows that $f$ is a nonzero constant dividing 2. As $(f)\ne\Z[x]$, $f\ne\pm1$, and hence $f=\pm2$. However, it is impossible to find some $h\in \Z[x]$ such that
\[
x=hf=\pm2h,
\]
since the coefficient of $x$ on the left-hand side is 1, which is odd. This contradiction shows that the ideal $(2, x)$ of $\Z[x]$ is not principal.
\end{solution}
\hypertarget{Exercise III.4.5}{}
\begin{problem}[4.5]
Let $I, J$ be ideals in a commutative ring $R$, such that $I+J = (1)$. Prove that $IJ = I \cap J$.[\textsection4.1]
\end{problem}
\begin{solution}
For any $k\in IJ$, we can write $k=\sum_i a_ib_i$ with $a_i\in I$, $b_i\in J$. Each product $a_ib_i$ lies in $J$ (as $J$ is an ideal) and in $I$ (as $I$ is an ideal), so $k\in I\cap J$ because $I\cap J$ is closed under addition. Thus we show $IJ\subseteq I\cap J$.
\noindent Suppose $l\in I\cap J$. If $1=a+b$ ($a\in I$, $b\in J$), then we have $l=1\cdot l=(a+b)l=al+lb\in IJ$, which implies that $I\cap J\subseteq IJ$. Therefore, we show $IJ = I \cap J$.
\end{solution}
\begin{problem}[4.6]
Let $I, J$ be ideals in a commutative ring $R$. Assume that $R/(IJ)$ is reduced (that is, it has no nonzero nilpotent elements; cf. \hyperlink{Exercise III.3.13}{Exercise III.3.13}). Prove that $IJ = I \cap J$.
\end{problem}
\begin{solution}
The notation $(IJ)$ suggests $R$ is commutative. As is shown in \hyperlink{Exercise III.4.5}{Exercise III.4.5}, it holds that $IJ\subseteq I\cap J$. Thus we are left to show $I\cap J\subseteq IJ$. Suppose $l\in I\cap J$. The condition that $R/(IJ)$ is reduced tells that $\forall r\in R$,
\[
r^n\in IJ\implies r\in IJ.
\]
Noticing $l\in I$ and $l\in J$, it is clear that $l^2\in IJ$ which implies $l\in IJ$. There we show $I\cap J\subseteq IJ$ and complete the proof.
\end{solution}
\begin{problem}[4.7]
$\vartriangleright$ Let $R = k$ be a field. Prove that every nonzero (principal) ideal in $k[x]$ is generated by a unique \emph{monic} polynomial. [\textsection4.2, \textsection VI.7.2]
\end{problem}
\begin{solution}
Suppose $I$ is a nonzero ideal in $k[x]$ and the least degree of nonzero polynomials in $I$ is $d$. Since $k$ is a field, dividing a least-degree element of $I$ by its leading coefficient yields a monic polynomial $f(x)=x^d+c_{d-1}x^{d-1}+\cdots+c_0$ in $I$. Given any $g(x)\in I$, there exist unique polynomials $q(x), r(x)\in k[x]$ such that $g(x) = f(x)q(x) + r(x)$ and $\deg r(x) < \deg f(x)=d$. Since $r(x)=g(x)-f(x)q(x)\in I$ and the least degree of nonzero polynomials in $I$ is $d$, there must be $r(x)=0$. Thus we show that $I$ is generated by the monic polynomial $f(x)$. Suppose $I=(f(x))$ can also be generated by a monic polynomial $\bar{f}(x)$. Since $f(x)$ and $\bar{f}(x)$ divide each other, they have the same degree and $\bar{f}(x)=cf(x)$ for some $c\ne0$; as both are monic, $c=1$ and they are equal. Therefore, we conclude that every nonzero ideal in $k[x]$ is generated by a unique monic polynomial.
\end{solution}
\begin{problem}[4.8]
$\vartriangleright$ Let $R$ be a ring, and $f(x)\in R[x]$ a monic polynomial. Prove that $f(x)$ is not a (left-, or right-) zero-divisor. [\textsection4.2, 4.9]
\end{problem}
\begin{solution}
Suppose $f(x)=x^{d}+a_{d-1} x^{d-1}+\cdots+a_{1} x+a_{0}$ is a monic polynomial in $R[x]$ and $f(x)g(x)=0$ for some $g(x)=b_sx^{s}+b_{s-1} x^{s-1}+\cdots+b_{1} x+b_{0}\in R[x]$. Since the term of degree $d+s$ of $f(x)g(x)$ is $b_sx^{d+s}$, there must be $b_s=0$. Then the term of degree $d+s-1$ of $f(x)g(x)$ is $b_{s-1}x^{d+s-1}$, which implies $b_{s-1}=0$. Repeating this process we can show that $b_s=b_{s-1}=\cdots=b_0=0$, that is, $g(x)=0$. Thus we see $f(x)$ is not a left-zero-divisor. In a similar way we can show that $f(x)$ is not a right-zero-divisor.
\end{solution}
\hypertarget{Exercise III.4.10}{}
\begin{problem}[4.10]
$\neg$ Let $d$ be an integer that is not the square of an integer, and consider the subset of $\mathbb{C}$ defined by
\[
\mathbb{Q}(\sqrt{d}):=\{a+b \sqrt{d} | a, b \in \mathbb{Q}\}
\]
\begin{itemize}
\item Prove that $\mathbb{Q}(\sqrt{d})$ is a subring of $\mathbb{C}$.
\item Define a function $N: \mathbb{Q}(\sqrt{d}) \rightarrow \mathbb{Q}$ by $N(a+b \sqrt{d}):=a^{2}-b^{2} d .$ Prove that
\[
N(z w)=N(z) N(w), \text { and that } N(z) \neq 0 \text { if } z \in \mathbb{Q}(\sqrt{d}), z \neq 0
\]
The function $N$ is a `norm'; it is very useful in the study of $\mathbb{Q}(\sqrt{d})$ and of its subrings. (Cf. also \hyperlink{Exercise III.2.5}{Exercise III.2.5}.)
\item Prove that $\mathbb{Q}(\sqrt{d})$ is a field, and in fact the smallest subfield of $\mathbb{C}$ containing both $\mathbb{Q} \text { and } \sqrt{d} . \text { (Use } N .)$
\item Prove that $\mathbb{Q}(\sqrt{d})\cong \mathbb{Q}[t] /\left(t^{2}-d\right)$ . (Cf. Example 4.8.)\\
$[\mathrm{V} .1 .17, \mathrm{V} .2 .18, \mathrm{V} .6 .13, \mathrm{VII} .1 .12]$
\end{itemize}
\end{problem}
\begin{solution}
\begin{itemize}
\item We only show the check on multiplication
\[
(a_1+b_1 \sqrt{d})(a_2+b_2 \sqrt{d})=(a_1a_2+b_1b_2d)+(a_1b_2+a_2b_1)\sqrt{d}\in\Q(\sqrt{d}).
\]
\item It is immediate to check $N(z w)=N(z) N(w)$. Let $z=a+b\sqrt{d} \in \mathbb{Q}(\sqrt{d})$ with $z \neq 0$. Suppose $N(z)=a^2-b^2d=0$. If $b=0$, we have $a=0$, which contradicts $a+b\sqrt{d} \neq 0$. Otherwise $b\ne0$ and $d=(a/b)^2$; but then the integer $d$ would have a rational square root and hence be the square of an integer, a contradiction.
\item We have known $\mathbb{Q}(\sqrt{d})$ is a commutative ring. For any $z=a+b\sqrt{d}\in\mathbb{Q}(\sqrt{d})$ such that $z\ne0$,
\[
N(z)=(a+b \sqrt{d})(a-b \sqrt{d})=a^{2}-b^{2}d\ne0.
\]
Therefore
\[
\left(a+b \sqrt{d}\right)\left(\frac{a}{N(z)}-\frac{b}{N(z)} \sqrt{d}\right)=1
\]
and $\mathbb{Q}(\sqrt{d})$ is a field.
\item The mapping
\begin{align*}
\overline{\varphi}:\mathbb{Q}[t] /\left(t^{2}-d\right)& \longrightarrow \Q(\sqrt{d}),\\
a+bt+(t^{2}-d)& \longmapsto a+b\sqrt{d}.
\end{align*}
is well-defined since if $(a_1+b_1t)-(a_2+b_2t)=g(t)(t^2-d)$, then
\begin{align*}
\overline{\varphi}(a_1+b_1t+(t^{2}-d))-\overline{\varphi}(a_2+b_2t+(t^{2}-d))&=\left(a_1+b_1\sqrt{d}\right)-\left(a_2+b_2\sqrt{d}\right)\\
&=g\left(\sqrt{d}\right)\left(\left(\sqrt{d}\right)^2-d\right)\\
&=0.
\end{align*}
It is clear that $\overline{\varphi}$ preserves addition. Then we can check $\overline{\varphi}$ preserve multiplication:
\begin{align*}
&\hspace{1em}\overline{\varphi}\left((a_1+b_1t+(t^{2}-d))(a_2+b_2t+(t^{2}-d))\right)\\
&=\overline{\varphi}\left(a_1a_2+(a_1b_2+a_2b_1)t+b_1b_2t^2+(t^{2}-d)\right)\\
&=\overline{\varphi}\left((a_1a_2+b_1b_2d)+(a_1b_2+a_2b_1)t+b_1b_2(t^2-d)+(t^{2}-d)\right)\\
&=(a_1a_2+b_1b_2d)+(a_1b_2+a_2b_1)\sqrt{d}\\
&=(a_1+b_1\sqrt{d})(a_2+b_2\sqrt{d})\\
&=\overline{\varphi}\left(a_1+b_1t+(t^{2}-d)\right)\overline{\varphi}\left(a_2+b_2t+(t^{2}-d)\right).
\end{align*}
Thus we see $\overline{\varphi}$ is a ring homomorphism. Note
\[
a+bt+(t^{2}-d)\in\ker \overline{\varphi}\iff a+b\sqrt{d}=0\iff a=b=0.
\]
It implies that $\ker \overline{\varphi}=\{0+(t^2-d)\}$ and $\overline{\varphi}$ is injective. It is clear that $\overline{\varphi}$ is surjective. Therefore, $\overline{\varphi}$ is an isomorphism and $\mathbb{Q}(\sqrt{d})\cong \mathbb{Q}[t] /\left(t^{2}-d\right)$.
\end{itemize}
\end{solution}
\begin{problem}[4.11]
Let $R$ be a commutative ring, $a \in R,$ and $f_{1}(x), \ldots, f_{r}(x) \in R[x]$.
\begin{itemize}
\item Prove the equality of ideals
\[
\left(f_{1}(x), \ldots, f_{r}(x), x-a\right)=\left(f_{1}(a), \ldots, f_{r}(a), x-a\right)
\]
\item Prove the useful substitution trick
\[
\frac{R[x]}{\left(f_{1}(x), \ldots, f_{r}(x), x-a\right)} \cong \frac{R}{\left(f_{1}(a), \ldots, f_{r}(a)\right)}
\]
(Hint: \hyperlink{Exercise III.3.3}{Exercise III.3.3}.)
\end{itemize}
\end{problem}
\begin{solution}
\begin{itemize}
\item According to the polynomial remainder theorem, we have
\[
f_i(x)=(x-a)q_i(x)+f_i(a),
\]
which suffices to show that $\left(f_{1}(x), \ldots, f_{r}(x), x-a\right)=\left(f_{1}(a), \ldots, f_{r}(a), x-a\right)$.
\item Define
\begin{align*}
\varphi:R[x]& \longrightarrow R,\\
f(x)& \longmapsto f(a).
\end{align*}
We can check that $\varphi$ is a surjective ring homomorphism and $\ker\varphi=(x-a)$. According to \hyperlink{Exercise III.3.3}{Exercise III.3.3}, we have
\[
\frac{R[x]}{\left(f_{1}(x), \cdots, f_{r}(x), x-a\right)}\cong\frac{R[x]}{\left(f_{1}(a), \cdots, f_{r}(a), x-a\right)}\cong \frac{R[x] / (x-a)}{\overline{\left(f_{1}(a), \cdots, f_{r}(a)\right)}},
\]
where
\[
\overline{\left(f_{1}(a), \cdots, f_{r}(a)\right)}=\left(f_{1}(a)+(x-a), \cdots, f_{r}(a)+(x-a)\right).
\]
The ring isomorphism
\begin{align*}
\psi:R[x]/(x-a)& \longrightarrow R,\\
f(x)+(x-a)& \longmapsto f(a)
\end{align*}
gives the following isomorphism
\[
\frac{R[x] / (x-a)}{\overline{\left(f_{1}(a), \cdots, f_{r}(a)\right)}}\cong \frac{R}{\left(f_{1}(a), \ldots, f_{r}(a)\right)},
\]
which completes the proof.
\end{itemize}
\end{solution}
\hypertarget{Exercise III.4.12}{}
\begin{problem}[4.12]
$\vartriangleright$ Let $R$ be a commutative ring, and $a_{1}, \cdots, a_{n}$ elements of $R .$ Prove that
\[
\frac{R\left[x_{1}, \ldots, x_{n}\right]}{\left(x_{1}-a_{1}, \ldots, x_{n}-a_{n}\right)} \cong R
\]