{ MathgeomGLS — fork of Neslib.FastMath (0 forks)
  File: Neslib.FastMath.Sse2_64.inc (5975 lines, 174.3 KB in the full file) }
{ Note about x64 calling convention
  ---------------------------------

  Delphi uses the Microsoft x64 calling convention:
  * Arguments that fit into 1, 2, 4 or 8 bytes are passed in registers RCX, RDX,
    R8 and R9. This includes TVector2 records (which are 8 bytes). So those need
    to be moved using MOVQ instead of MOVLPS.
    NOTE: Delphi 10.3 Rio changed this behavior for 8-byte records (TVector2):
    the register contains the address of the parameter instead (as is the case
    in 32-bit, so we should use MOVLPS).
  * The same goes for function results. Those go into RAX. Floating-point values
    are returned in XMM0.
  * The first 4 floating-point arguments go into XMM0, XMM1, XMM2 and XMM3.
  * These registers must be preserved:
    R12, R13, R14, R15, RDI, RSI, RBX, RBP, RSP, XMM6-XMM15.
  * At the start of a function, RBP is always aligned to a 16-byte boundary (its
    address always ends in 0).
  * RSP is also always a multiple of 16, but its address always ends in 8 (since
    the return address is pushed to the stack). }

const
  { SSE rounding modes (bits in MXCSR register).
    AND with SSE_ROUND_MASK to clear bits 13-14, then OR in one of the modes. }
  SSE_ROUND_MASK    = $FFFF9FFF;
  SSE_ROUND_NEAREST = $00000000;
  SSE_ROUND_DOWN    = $00002000;
  SSE_ROUND_UP      = $00004000;
  SSE_ROUND_TRUNC   = $00006000;

  { These constants fit in a single XMM register. These values represent
    sign-bits as used by 32-bit floating-point values.
    XOR'ing a floating-point value with $80000000 swaps the sign.
    XOR'ing a floating-point value with $00000000 leaves the value unchanged. }
  SSE_MASK_SIGN: array [0..3] of UInt32 = ($80000000, $80000000, $80000000, $80000000);
  SSE_MASK_NPNP: array [0..3] of UInt32 = ($80000000, $00000000, $80000000, $00000000);
  SSE_MASK_PNPN: array [0..3] of UInt32 = ($00000000, $80000000, $00000000, $80000000);
  SSE_MASK_0FFF: array [0..3] of UInt32 = ($FFFFFFFF, $FFFFFFFF, $FFFFFFFF, $00000000);

  { These constants mask off an element of the binary representation of a
    32-bit floating-point value (sign / exponent / 23-bit fraction). }
  SSE_MASK_FRACTION: array [0..3] of UInt32 = ($007FFFFF, $007FFFFF, $007FFFFF, $007FFFFF);
  SSE_MASK_EXPONENT: array [0..3] of UInt32 = ($7F800000, $7F800000, $7F800000, $7F800000);
  SSE_MASK_ABS_VAL : array [0..3] of UInt32 = ($7FFFFFFF, $7FFFFFFF, $7FFFFFFF, $7FFFFFFF);

  { Commonly used floating-point values, replicated across all 4 lanes }
  SSE_ONE_HALF    : array [0..3] of Single = (0.5, 0.5, 0.5, 0.5);
  SSE_ONE         : array [0..3] of Single = (1, 1, 1, 1);
  SSE_TWO         : array [0..3] of Single = (2, 2, 2, 2);
  SSE_THREE       : array [0..3] of Single = (3, 3, 3, 3);
  SSE_PI_OVER_180 : array [0..3] of Single = (Pi / 180, Pi / 180, Pi / 180, Pi / 180);
  SSE_180_OVER_PI : array [0..3] of Single = (180 / Pi, 180 / Pi, 180 / Pi, 180 / Pi);
  SSE_NEG_INFINITY: array [0..3] of Single = (NegInfinity, NegInfinity, NegInfinity, NegInfinity);
  SSE_PI_OVER_4   : array [0..3] of Single = (Pi / 4, Pi / 4, Pi / 4, Pi / 4);

  { Commonly used integer values }
  SSE_INT_ONE     : array [0..3] of Integer = (1, 1, 1, 1);
  SSE_INT_NOT_ONE : array [0..3] of Cardinal = ($FFFFFFFE, $FFFFFFFE, $FFFFFFFE, $FFFFFFFE);
  SSE_INT_TWO     : array [0..3] of Integer = (2, 2, 2, 2);
  SSE_INT_FOUR    : array [0..3] of Integer = (4, 4, 4, 4);

  { Constants for approximating trigonometric functions.
    SSE_FOPI = 4/Pi. The SINCOF/COSCOF values are minimax polynomial
    coefficients — presumably the classic Cephes sinf/cosf coefficients
    (as used in sse_mathfun); TODO confirm against the upstream source. }
  SSE_FOPI: array [0..3] of Single = (1.27323954473516, 1.27323954473516, 1.27323954473516, 1.27323954473516);
  SSE_SINCOF_P0: array [0..3] of Single = (-1.9515295891E-4, -1.9515295891E-4, -1.9515295891E-4, -1.9515295891E-4);
  SSE_SINCOF_P1: array [0..3] of Single = (8.3321608736E-3, 8.3321608736E-3, 8.3321608736E-3, 8.3321608736E-3);
  SSE_SINCOF_P2: array [0..3] of Single = (-1.6666654611E-1, -1.6666654611E-1, -1.6666654611E-1, -1.6666654611E-1);
  SSE_COSCOF_P0: array [0..3] of Single = (2.443315711809948E-005, 2.443315711809948E-005, 2.443315711809948E-005, 2.443315711809948E-005);
  SSE_COSCOF_P1: array [0..3] of Single = (-1.388731625493765E-003, -1.388731625493765E-003, -1.388731625493765E-003, -1.388731625493765E-003);
  SSE_COSCOF_P2: array [0..3] of Single = (4.166664568298827E-002, 4.166664568298827E-002, 4.166664568298827E-002, 4.166664568298827E-002);

  { Constants for a fast exp() approximation }
  SSE_EXP_A1 : array [0..3] of Single = (12102203.1615614, 12102203.1615614, 12102203.1615614, 12102203.1615614);
  SSE_EXP_A2 : array [0..3] of Single = (1065353216, 1065353216, 1065353216, 1065353216);
  SSE_EXP_CST: array [0..3] of Single = (2139095040, 2139095040, 2139095040, 2139095040);
  SSE_EXP_F1 : array [0..3] of Single = (0.509964287281036376953125, 0.509964287281036376953125, 0.509964287281036376953125, 0.509964287281036376953125);
  SSE_EXP_F2 : array [0..3] of Single = (0.3120158612728118896484375, 0.3120158612728118896484375, 0.3120158612728118896484375, 0.3120158612728118896484375);
  SSE_EXP_F3 : array [0..3] of Single = (0.1666135489940643310546875, 0.1666135489940643310546875, 0.1666135489940643310546875, 0.1666135489940643310546875);
  SSE_EXP_F4 : array [0..3] of Single = (-2.12528370320796966552734375e-3, -2.12528370320796966552734375e-3, -2.12528370320796966552734375e-3, -2.12528370320796966552734375e-3);
  SSE_EXP_F5 : array [0..3] of Single = (1.3534179888665676116943359375e-2, 1.3534179888665676116943359375e-2, 1.3534179888665676116943359375e-2, 1.3534179888665676116943359375e-2);
  SSE_EXP_I1 : array [0..3] of UInt32 = ($3F800000, $3F800000, $3F800000, $3F800000);

  { Constants for a fast ln() approximation }
  SSE_LN_CST: array [0..3] of Single = (-89.93423858, -89.93423858, -89.93423858, -89.93423858);
  SSE_LN_F1 : array [0..3] of Single = (3.3977745, 3.3977745, 3.3977745, 3.3977745);
  SSE_LN_F2 : array [0..3] of Single = (2.2744832, 2.2744832, 2.2744832, 2.2744832);
  SSE_LN_F3 : array [0..3] of Single = (0.024982445, 0.024982445, 0.024982445, 0.024982445);
  SSE_LN_F4 : array [0..3] of Single = (0.24371102, 0.24371102, 0.24371102, 0.24371102);
  SSE_LN_F5 : array [0..3] of Single = (0.69314718055995, 0.69314718055995, 0.69314718055995, 0.69314718055995);

  { Constants for a fast log2() approximation }
  SSE_LOG2_I1: array [0..3] of UInt32 = ($3F000000, $3F000000, $3F000000, $3F000000);
  SSE_LOG2_F1: array [0..3] of Single = (1.1920928955078125e-7, 1.1920928955078125e-7, 1.1920928955078125e-7, 1.1920928955078125e-7);
  SSE_LOG2_F2: array [0..3] of Single = (124.22551499, 124.22551499, 124.22551499, 124.22551499);
  SSE_LOG2_F3: array [0..3] of Single = (1.498030302, 1.498030302, 1.498030302, 1.498030302);
  SSE_LOG2_F4: array [0..3] of Single = (1.72587999, 1.72587999, 1.72587999, 1.72587999);
  SSE_LOG2_F5: array [0..3] of Single = (0.3520887068, 0.3520887068, 0.3520887068, 0.3520887068);

  { Constants for a fast exp2() approximation.
    NOTE: SSE_EXP2_F5 is declared as Single, so $00800000 here is the VALUE
    8388608 (2^23) converted to Single — not a bit pattern. }
  SSE_EXP2_F1: array [0..3] of Single = (121.2740575, 121.2740575, 121.2740575, 121.2740575);
  SSE_EXP2_F2: array [0..3] of Single = (27.7280233, 27.7280233, 27.7280233, 27.7280233);
  SSE_EXP2_F3: array [0..3] of Single = (4.84252568, 4.84252568, 4.84252568, 4.84252568);
  SSE_EXP2_F4: array [0..3] of Single = (1.49012907, 1.49012907, 1.49012907, 1.49012907);
  SSE_EXP2_F5: array [0..3] of Single = ($00800000, $00800000, $00800000, $00800000);

{ Angle and Trigonometry Functions }

{ Converts an angle from degrees to radians (scalar version).
  Pure Pascal: the conversion factor folds to a compile-time constant. }
function Radians(const ADegrees: Single): Single;
const
  DegToRad = Pi / 180;
begin
  Result := ADegrees * DegToRad;
end;

{ Converts a 2-component vector of degrees to radians.
  Win64 ABI: before Delphi 10.3 (RTLVersion 33) the 8-byte TVector2 arrives by
  value in a register (MOVQ); from 10.3 on the register holds its ADDRESS
  (MOVLPS) — see the calling-convention note at the top of this file.
  The 8-byte result record is returned in RAX. }
function Radians(const ADegrees: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [ADegrees]           // Load X,Y via the parameter's address
  {$ELSE}
  movq   xmm0, ADegrees             // Parameter passed by value in a register
  {$ENDIF}
  movlps xmm1, QWORD [SSE_PI_OVER_180]
  mulps  xmm0, xmm1                 // (X, Y) * (Pi / 180)
  movq   rax, xmm0                  // 8-byte record result returned in RAX
end;

{ Converts a 3-component vector of degrees to radians.
  TVector3 (12 bytes) is passed and returned by reference: X,Y are handled as
  a pair in the low half of XMM0, Z separately in XMM1. }
function Radians(const ADegrees: TVector3): TVector3; assembler;
asm
  movq     xmm0, [ADegrees]         // xmm0 = X, Y
  movss    xmm1, DWORD [ADegrees+8] // xmm1 = Z
  movups   xmm2, [SSE_PI_OVER_180]
  mulps    xmm0, xmm2               // (X, Y) * (Pi / 180)
  mulss    xmm1, xmm2               // Z * (Pi / 180) (low lane only)
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Converts a 4-component vector of degrees to radians.
  TVector4 (16 bytes) is passed and returned by reference; MOVUPS is used
  because no 16-byte alignment is guaranteed for the parameter/result. }
function Radians(const ADegrees: TVector4): TVector4; assembler;
asm
  movups xmm0, [ADegrees]
  movups xmm1, [SSE_PI_OVER_180]
  mulps  xmm0, xmm1                 // All 4 components * (Pi / 180)
  movups [Result], xmm0
end;

{ Converts an angle from radians to degrees (scalar version).
  Pure Pascal: the conversion factor folds to a compile-time constant. }
function Degrees(const ARadians: Single): Single;
const
  RadToDeg = 180 / Pi;
begin
  Result := ARadians * RadToDeg;
end;

{ Converts a 2-component vector of radians to degrees.
  Same ABI handling as Radians(TVector2): by-value register before Delphi 10.3,
  by-address from 10.3 on; 8-byte result returned in RAX. }
function Degrees(const ARadians: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [ARadians]           // Load X,Y via the parameter's address
  {$ELSE}
  movq   xmm0, ARadians             // Parameter passed by value in a register
  {$ENDIF}
  movlps xmm1, QWORD [SSE_180_OVER_PI]
  mulps  xmm0, xmm1                 // (X, Y) * (180 / Pi)
  movq   rax, xmm0                  // 8-byte record result returned in RAX
end;

{ Converts a 3-component vector of radians to degrees.
  X,Y handled as a pair in XMM0, Z separately in XMM1 (TVector3 is passed and
  returned by reference). }
function Degrees(const ARadians: TVector3): TVector3; assembler;
asm
  movq     xmm0, [ARadians]         // xmm0 = X, Y
  movss    xmm1, DWORD [ARadians+8] // xmm1 = Z
  movups   xmm2, [SSE_180_OVER_PI]
  mulps    xmm0, xmm2               // (X, Y) * (180 / Pi)
  mulss    xmm1, xmm2               // Z * (180 / Pi) (low lane only)
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Converts a 4-component vector of radians to degrees.
  Unaligned 16-byte load/store; all 4 lanes scaled at once. }
function Degrees(const ARadians: TVector4): TVector4; assembler;
asm
  movups xmm0, [ARadians]
  movups xmm1, [SSE_180_OVER_PI]
  mulps  xmm0, xmm1                 // All 4 components * (180 / Pi)
  movups [Result], xmm0
end;

{ Exponential Functions }

{ Scalar square root.
  Win64 ABI: the Single argument arrives in XMM0 and the Single result is
  returned in XMM0, so a single in-place SQRTSS suffices. }
function Sqrt(const A: Single): Single; assembler;
asm
  sqrtss xmm0, xmm0
end;

{ Component-wise square root of a 2-component vector.
  ABI note: by-value register before Delphi 10.3, by-address from 10.3 on;
  8-byte result returned in RAX. }
function Sqrt(const A: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [A]                  // Load X,Y via the parameter's address
  {$ELSE}
  movq   xmm0, A                    // Parameter passed by value in a register
  {$ENDIF}
  sqrtps xmm0, xmm0                 // Only the low two lanes are meaningful
  movq   rax, xmm0                  // 8-byte record result returned in RAX
end;

{ Component-wise square root of a 3-component vector.
  Packs X,Y (low half) and Z (high half) into one register so a single SQRTPS
  covers all three components, then unpacks for the by-reference result. }
function Sqrt(const A: TVector3): TVector3; assembler;
asm
  movq     xmm0, [A]                // xmm0 = X, Y
  movss    xmm1, DWORD [A+8]        // xmm1 = Z
  movlhps  xmm0, xmm1               // xmm0 = X, Y, Z, 0
  sqrtps   xmm0, xmm0
  movhlps  xmm1, xmm0               // xmm1 low lane = Sqrt(Z)
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Component-wise square root of a 4-component vector (by reference,
  unaligned load/store). }
function Sqrt(const A: TVector4): TVector4; assembler;
asm
  movups xmm0, [A]
  sqrtps xmm0, xmm0
  movups [Result], xmm0
end;

{ Scalar approximate reciprocal square root (1/Sqrt(A)).
  Uses RSQRTSS, which is a hardware APPROXIMATION (relative error at most
  1.5 * 2^-12 per Intel's instruction reference) — fast but not exact.
  Argument and result both in XMM0 per the Win64 ABI. }
function InverseSqrt(const A: Single): Single; assembler;
asm
  rsqrtss xmm0, xmm0
end;

{ Component-wise approximate reciprocal square root of a 2-component vector.
  Uses RSQRTPS — a hardware approximation (relative error at most 1.5 * 2^-12),
  consistent with the scalar overload above.
  ABI note: by-value register before Delphi 10.3, by-address from 10.3 on;
  8-byte result returned in RAX.
  Fix: added the `assembler` directive for consistency with every other asm
  overload in this file (no code change). }
function InverseSqrt(const A: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [A]                  // Load X,Y via the parameter's address
  {$ELSE}
  movq   xmm0, A                    // Parameter passed by value in a register
  {$ENDIF}
  rsqrtps xmm0, xmm0                // Only the low two lanes are meaningful
  movq    rax, xmm0                 // 8-byte record result returned in RAX
end;

{ Component-wise approximate reciprocal square root of a 3-component vector.
  Packs X,Y,Z into one register for a single RSQRTPS (hardware approximation,
  relative error at most 1.5 * 2^-12), then unpacks the by-reference result.
  Fix: added the `assembler` directive for consistency with every other asm
  overload in this file (no code change). }
function InverseSqrt(const A: TVector3): TVector3; assembler;
asm
  movq     xmm0, [A]                // xmm0 = X, Y
  movss    xmm1, DWORD [A+8]        // xmm1 = Z
  movlhps  xmm0, xmm1               // xmm0 = X, Y, Z, 0
  rsqrtps  xmm0, xmm0
  movhlps  xmm1, xmm0               // xmm1 low lane = InverseSqrt(Z)
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Component-wise approximate reciprocal square root of a 4-component vector
  (RSQRTPS hardware approximation; by reference, unaligned load/store). }
function InverseSqrt(const A: TVector4): TVector4; assembler;
asm
  movups  xmm0, [A]
  rsqrtps xmm0, xmm0
  movups  [Result], xmm0
end;

{ Fast approximate Functions }

{ Fast scalar sine approximation using range reduction to [-Pi/4, Pi/4] and
  separate sin/cos minimax polynomials (Cephes-style, cf. sse_mathfun).
  ABI: argument and result in XMM0. XMM6/XMM7 are callee-saved in the
  Microsoft x64 ABI, so they are spilled to the 16-byte-aligned scratch area
  just below RSP. NOTE(review): Win64 has no red zone; this is presumably safe
  only because the function is a leaf — confirm nothing can asynchronously
  clobber memory below RSP on the target platforms.
  NOTE(review): CVTPS2DQ rounds per the current MXCSR mode (round-to-nearest
  by default), so the "Trunc" in the comments below is approximate.
  NOTE(review): a few ADDPS appear in this scalar path where ADDSS would be
  expected; only the low lane is consumed, so the result is unaffected. }
function FastSin(const ARadians: Single): Single; assembler;
asm
  movdqa   [rsp-24], xmm6           // Save callee-saved XMM6/XMM7 below RSP
  movdqa   [rsp-40], xmm7

  movss    xmm2, DWORD [SSE_MASK_ABS_VAL]
  movaps   xmm1, xmm0
  movss    xmm3, DWORD [SSE_MASK_SIGN]
  andps    xmm0, xmm2               // (xmm0) X := Abs(ARadians)
  andps    xmm1, xmm3               // (xmm1) SignBit
  movaps   xmm2, xmm0
  movss    xmm4, DWORD [SSE_FOPI]
  movss    xmm5, DWORD [SSE_INT_ONE]
  mulss    xmm2, xmm4
  movss    xmm6, DWORD [SSE_INT_NOT_ONE]
  cvtps2dq xmm2, xmm2               // J := Trunc(X * FOPI)
  movss    xmm7, DWORD [SSE_INT_FOUR]
  paddd    xmm2, xmm5
  pand     xmm2, xmm6               // (xmm2) J := (J + 1) and (not 1)
  movss    xmm6, DWORD [SSE_INT_TWO]
  cvtdq2ps xmm4, xmm2               // (xmm4) Y := J
  movaps   xmm5, xmm2
  pand     xmm2, xmm6               // J and 2
  pand     xmm5, xmm7               // J and 4
  pxor     xmm7, xmm7
  pslld    xmm5, 29                 // (xmm5) SwapSignBit := (J and 4) shl 29
  pcmpeqd  xmm2, xmm7               // (xmm2) PolyMask := ((J and 2) = 0)? Yes: $FFFFFFFF, No: $00000000
  movss    xmm6, DWORD [SSE_PI_OVER_4]
  pxor     xmm1, xmm5               // (xmm1) SignBit := SignBit xor SwapSignBit
  mulss    xmm4, xmm6               // Y * Pi / 4
  movss    xmm3, DWORD [SSE_COSCOF_P0]
  subss    xmm0, xmm4               // (xmm0) X := X - (Y * Pi / 4)
  movss    xmm4, DWORD [SSE_COSCOF_P1]
  movaps   xmm7, xmm0
  movss    xmm6, DWORD [SSE_COSCOF_P2]
  mulss    xmm7, xmm7               // (xmm7) Z := X * X
  movss    xmm5, DWORD [SSE_SINCOF_P1]
  mulss    xmm3, xmm7               // COSCOF_P0 * Z
  addss    xmm3, xmm4               // Y := COSCOF_P0 * Z + COSCOF_P1
  movss    xmm4, DWORD [SSE_ONE_HALF]
  mulss    xmm3, xmm7               // Y * Z
  mulss    xmm4, xmm7               // Z * 0.5
  addps    xmm3, xmm6               // Y := (Y * Z) + COSCOF_P2
  movss    xmm6, DWORD [SSE_ONE]
  mulss    xmm3, xmm7               // Y * Z
  mulss    xmm3, xmm7               // Y := Y * (Z * Z)
  subss    xmm3, xmm4               // Y - Z * 0.5
  movss    xmm4, DWORD [SSE_SINCOF_P0]
  addps    xmm3, xmm6               // (xmm3) Y := Y - Z * 0.5 + 1
  movss    xmm6, DWORD [SSE_SINCOF_P2]
  mulss    xmm4, xmm7               // SINCOF_P0 * Z
  addss    xmm4, xmm5               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  movaps   xmm5, xmm2
  mulss    xmm4, xmm7               // Y2 * Z
  addss    xmm4, xmm6               // Y2 := (Y2 * Z) + SINCOF_P2
  mulss    xmm4, xmm7               // Y2 * Z
  mulss    xmm4, xmm0               // Y2 * (Z * X)
  addss    xmm4, xmm0               // (xmm4) Y2 := Y2 * (Z * X) + X
  andps    xmm4, xmm2               // Y2 := ((J and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm5, xmm3               // Y  := ((J and 2) = 0)? Yes: 0 , No: Y
  addss    xmm4, xmm5
  xorps    xmm4, xmm1               // (Y + Y2) xor SignBit
  movss    xmm0, xmm4               // Result in XMM0

  movdqa   xmm6, [rsp-24]           // Restore XMM6/XMM7
  movdqa   xmm7, [rsp-40]
end;

{ Fast sine approximation for a 2-component vector (same algorithm as the
  scalar FastSin, applied to both lanes; only the low two lanes of each
  register carry meaningful data).
  Saves/restores callee-saved XMM6/XMM7 below RSP (leaf function; Win64 has
  no red zone — see the review note on the scalar FastSin).
  ABI note: by-value register before Delphi 10.3, by-address from 10.3 on;
  8-byte result returned in RAX. }
function FastSin(const ARadians: TVector2): TVector2; assembler;
asm
  movdqa   [rsp-24], xmm6           // Save callee-saved XMM6/XMM7 below RSP
  movdqa   [rsp-40], xmm7

  {$IF RTLVersion >= 33}
  movlps   xmm0, [ARadians]
  {$ELSE}
  movq     xmm0, ARadians
  {$ENDIF}
  movlps   xmm2, QWORD [SSE_MASK_ABS_VAL]
  movaps   xmm1, xmm0
  movlps   xmm3, QWORD [SSE_MASK_SIGN]
  andps    xmm0, xmm2               // (xmm0) X := Abs(ARadians)
  andps    xmm1, xmm3               // (xmm1) SignBit
  movaps   xmm2, xmm0
  movlps   xmm4, QWORD [SSE_FOPI]
  movlps   xmm5, QWORD [SSE_INT_ONE]
  mulps    xmm2, xmm4
  movlps   xmm6, QWORD [SSE_INT_NOT_ONE]
  cvtps2dq xmm2, xmm2               // J := Trunc(X * FOPI)
  movlps   xmm7, QWORD [SSE_INT_FOUR]
  paddd    xmm2, xmm5
  pand     xmm2, xmm6               // (xmm2) J := (J + 1) and (not 1)
  movlps   xmm6, QWORD [SSE_INT_TWO]
  cvtdq2ps xmm4, xmm2               // (xmm4) Y := J
  movaps   xmm5, xmm2
  pand     xmm2, xmm6               // J and 2
  pand     xmm5, xmm7               // J and 4
  pxor     xmm7, xmm7
  pslld    xmm5, 29                 // (xmm5) SwapSignBit := (J and 4) shl 29
  pcmpeqd  xmm2, xmm7               // (xmm2) PolyMask := ((J and 2) = 0)? Yes: $FFFFFFFF, No: $00000000
  movlps   xmm6, QWORD [SSE_PI_OVER_4]
  pxor     xmm1, xmm5               // (xmm1) SignBit := SignBit xor SwapSignBit
  mulps    xmm4, xmm6               // Y * Pi / 4
  movlps   xmm3, QWORD [SSE_COSCOF_P0]
  subps    xmm0, xmm4               // (xmm0) X := X - (Y * Pi / 4)
  movlps   xmm4, QWORD [SSE_COSCOF_P1]
  movaps   xmm7, xmm0
  movlps   xmm6, QWORD [SSE_COSCOF_P2]
  mulps    xmm7, xmm7               // (xmm7) Z := X * X
  movlps   xmm5, QWORD [SSE_SINCOF_P1]
  mulps    xmm3, xmm7               // COSCOF_P0 * Z
  addps    xmm3, xmm4               // Y := COSCOF_P0 * Z + COSCOF_P1
  movlps   xmm4, QWORD [SSE_ONE_HALF]
  mulps    xmm3, xmm7               // Y * Z
  mulps    xmm4, xmm7               // Z * 0.5
  addps    xmm3, xmm6               // Y := (Y * Z) + COSCOF_P2
  movlps   xmm6, QWORD [SSE_ONE]
  mulps    xmm3, xmm7               // Y * Z
  mulps    xmm3, xmm7               // Y := Y * (Z * Z)
  subps    xmm3, xmm4               // Y - Z * 0.5
  movlps   xmm4, QWORD [SSE_SINCOF_P0]
  addps    xmm3, xmm6               // (xmm3) Y := Y - Z * 0.5 + 1
  movlps   xmm6, QWORD [SSE_SINCOF_P2]
  mulps    xmm4, xmm7               // SINCOF_P0 * Z
  addps    xmm4, xmm5               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  movaps   xmm5, xmm2
  mulps    xmm4, xmm7               // Y2 * Z
  addps    xmm4, xmm6               // Y2 := (Y2 * Z) + SINCOF_P2
  mulps    xmm4, xmm7               // Y2 * Z
  mulps    xmm4, xmm0               // Y2 * (Z * X)
  addps    xmm4, xmm0               // (xmm4) Y2 := Y2 * (Z * X) + X
  andps    xmm4, xmm2               // Y2 := ((J and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm5, xmm3               // Y  := ((J and 2) = 0)? Yes: 0 , No: Y
  addps    xmm4, xmm5
  xorps    xmm4, xmm1               // (Y + Y2) xor SignBit
  movq     rax, xmm4                // 8-byte record result returned in RAX

  movdqa   xmm6, [rsp-24]           // Restore XMM6/XMM7
  movdqa   xmm7, [rsp-40]
end;

{ Fast sine approximation for a 3-component vector. X,Y,Z are packed into one
  XMM register (MOVLHPS) so the whole polynomial runs once over all lanes.
  Saves/restores callee-saved XMM6/XMM7 below RSP (leaf function; Win64 has
  no red zone — see the review note on the scalar FastSin).
  TVector3 is passed and returned by reference. }
function FastSin(const ARadians: TVector3): TVector3; assembler;
asm
  movdqa   [rsp-24], xmm6           // Save callee-saved XMM6/XMM7 below RSP
  movdqa   [rsp-40], xmm7

  movq     xmm0, [ARadians]         // xmm0 = X, Y
  movss    xmm1, DWORD [ARadians+8] // Z
  movlhps  xmm0, xmm1               // xmm0 = X, Y, Z, 0
  movups   xmm2, [SSE_MASK_ABS_VAL]
  movaps   xmm1, xmm0
  movups   xmm3, [SSE_MASK_SIGN]
  andps    xmm0, xmm2               // (xmm0) X := Abs(ARadians)
  andps    xmm1, xmm3               // (xmm1) SignBit
  movaps   xmm2, xmm0
  movups   xmm4, [SSE_FOPI]
  movups   xmm5, [SSE_INT_ONE]
  mulps    xmm2, xmm4
  movups   xmm6, [SSE_INT_NOT_ONE]
  cvtps2dq xmm2, xmm2               // J := Trunc(X * FOPI)
  movups   xmm7, [SSE_INT_FOUR]
  paddd    xmm2, xmm5
  pand     xmm2, xmm6               // (xmm2) J := (J + 1) and (not 1)
  movups   xmm6, [SSE_INT_TWO]
  cvtdq2ps xmm4, xmm2               // (xmm4) Y := J
  movaps   xmm5, xmm2
  pand     xmm2, xmm6               // J and 2
  pand     xmm5, xmm7               // J and 4
  pxor     xmm7, xmm7
  pslld    xmm5, 29                 // (xmm5) SwapSignBit := (J and 4) shl 29
  pcmpeqd  xmm2, xmm7               // (xmm2) PolyMask := ((J and 2) = 0)? Yes: $FFFFFFFF, No: $00000000
  movups   xmm6, [SSE_PI_OVER_4]
  pxor     xmm1, xmm5               // (xmm1) SignBit := SignBit xor SwapSignBit
  mulps    xmm4, xmm6               // Y * Pi / 4
  movups   xmm3, [SSE_COSCOF_P0]
  subps    xmm0, xmm4               // (xmm0) X := X - (Y * Pi / 4)
  movups   xmm4, [SSE_COSCOF_P1]
  movaps   xmm7, xmm0
  movups   xmm6, [SSE_COSCOF_P2]
  mulps    xmm7, xmm7               // (xmm7) Z := X * X
  movups   xmm5, [SSE_SINCOF_P1]
  mulps    xmm3, xmm7               // COSCOF_P0 * Z
  addps    xmm3, xmm4               // Y := COSCOF_P0 * Z + COSCOF_P1
  movups   xmm4, [SSE_ONE_HALF]
  mulps    xmm3, xmm7               // Y * Z
  mulps    xmm4, xmm7               // Z * 0.5
  addps    xmm3, xmm6               // Y := (Y * Z) + COSCOF_P2
  movups   xmm6, [SSE_ONE]
  mulps    xmm3, xmm7               // Y * Z
  mulps    xmm3, xmm7               // Y := Y * (Z * Z)
  subps    xmm3, xmm4               // Y - Z * 0.5
  movups   xmm4, [SSE_SINCOF_P0]
  addps    xmm3, xmm6               // (xmm3) Y := Y - Z * 0.5 + 1
  movups   xmm6, [SSE_SINCOF_P2]
  mulps    xmm4, xmm7               // SINCOF_P0 * Z
  addps    xmm4, xmm5               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  movaps   xmm5, xmm2
  mulps    xmm4, xmm7               // Y2 * Z
  addps    xmm4, xmm6               // Y2 := (Y2 * Z) + SINCOF_P2
  mulps    xmm4, xmm7               // Y2 * Z
  mulps    xmm4, xmm0               // Y2 * (Z * X)
  addps    xmm4, xmm0               // (xmm4) Y2 := Y2 * (Z * X) + X
  andps    xmm4, xmm2               // Y2 := ((J and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm5, xmm3               // Y  := ((J and 2) = 0)? Yes: 0 , No: Y
  addps    xmm4, xmm5
  xorps    xmm4, xmm1               // (Y + Y2) xor SignBit
  movhlps  xmm5, xmm4               // xmm5 low lane = result Z
  movq     [Result], xmm4
  movss    DWORD [Result+8], xmm5

  movdqa   xmm6, [rsp-24]           // Restore XMM6/XMM7
  movdqa   xmm7, [rsp-40]
end;

{ Fast sine approximation for a 4-component vector (full 4-lane version of
  the scalar FastSin algorithm; unaligned 16-byte load/store).
  Saves/restores callee-saved XMM6/XMM7 below RSP (leaf function; Win64 has
  no red zone — see the review note on the scalar FastSin).
  TVector4 is passed and returned by reference. }
function FastSin(const ARadians: TVector4): TVector4; assembler;
asm
  movdqa   [rsp-24], xmm6           // Save callee-saved XMM6/XMM7 below RSP
  movdqa   [rsp-40], xmm7

  movups   xmm0, [ARadians]
  movups   xmm2, [SSE_MASK_ABS_VAL]
  movaps   xmm1, xmm0
  movups   xmm3, [SSE_MASK_SIGN]
  andps    xmm0, xmm2               // (xmm0) X := Abs(ARadians)
  andps    xmm1, xmm3               // (xmm1) SignBit
  movaps   xmm2, xmm0
  movups   xmm4, [SSE_FOPI]
  movups   xmm5, [SSE_INT_ONE]
  mulps    xmm2, xmm4
  movups   xmm6, [SSE_INT_NOT_ONE]
  cvtps2dq xmm2, xmm2               // J := Trunc(X * FOPI)
  movups   xmm7, [SSE_INT_FOUR]
  paddd    xmm2, xmm5
  pand     xmm2, xmm6               // (xmm2) J := (J + 1) and (not 1)
  movups   xmm6, [SSE_INT_TWO]
  cvtdq2ps xmm4, xmm2               // (xmm4) Y := J
  movaps   xmm5, xmm2
  pand     xmm2, xmm6               // J and 2
  pand     xmm5, xmm7               // J and 4
  pxor     xmm7, xmm7
  pslld    xmm5, 29                 // (xmm5) SwapSignBit := (J and 4) shl 29
  pcmpeqd  xmm2, xmm7               // (xmm2) PolyMask := ((J and 2) = 0)? Yes: $FFFFFFFF, No: $00000000
  movups   xmm6, [SSE_PI_OVER_4]
  pxor     xmm1, xmm5               // (xmm1) SignBit := SignBit xor SwapSignBit
  mulps    xmm4, xmm6               // Y * Pi / 4
  movups   xmm3, [SSE_COSCOF_P0]
  subps    xmm0, xmm4               // (xmm0) X := X - (Y * Pi / 4)
  movups   xmm4, [SSE_COSCOF_P1]
  movaps   xmm7, xmm0
  movups   xmm6, [SSE_COSCOF_P2]
  mulps    xmm7, xmm7               // (xmm7) Z := X * X
  movups   xmm5, [SSE_SINCOF_P1]
  mulps    xmm3, xmm7               // COSCOF_P0 * Z
  addps    xmm3, xmm4               // Y := COSCOF_P0 * Z + COSCOF_P1
  movups   xmm4, [SSE_ONE_HALF]
  mulps    xmm3, xmm7               // Y * Z
  mulps    xmm4, xmm7               // Z * 0.5
  addps    xmm3, xmm6               // Y := (Y * Z) + COSCOF_P2
  movups   xmm6, [SSE_ONE]
  mulps    xmm3, xmm7               // Y * Z
  mulps    xmm3, xmm7               // Y := Y * (Z * Z)
  subps    xmm3, xmm4               // Y - Z * 0.5
  movups   xmm4, [SSE_SINCOF_P0]
  addps    xmm3, xmm6               // (xmm3) Y := Y - Z * 0.5 + 1
  movups   xmm6, [SSE_SINCOF_P2]
  mulps    xmm4, xmm7               // SINCOF_P0 * Z
  addps    xmm4, xmm5               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  movaps   xmm5, xmm2
  mulps    xmm4, xmm7               // Y2 * Z
  addps    xmm4, xmm6               // Y2 := (Y2 * Z) + SINCOF_P2
  mulps    xmm4, xmm7               // Y2 * Z
  mulps    xmm4, xmm0               // Y2 * (Z * X)
  addps    xmm4, xmm0               // (xmm4) Y2 := Y2 * (Z * X) + X
  andps    xmm4, xmm2               // Y2 := ((J and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm5, xmm3               // Y  := ((J and 2) = 0)? Yes: 0 , No: Y
  addps    xmm4, xmm5
  xorps    xmm4, xmm1               // (Y + Y2) xor SignBit
  movups   [Result], xmm4

  movdqa   xmm6, [rsp-24]           // Restore XMM6/XMM7
  movdqa   xmm7, [rsp-40]
end;

{ Fast scalar cosine approximation: same range reduction and polynomials as
  FastSin, but with the quadrant index shifted (J - 2) so the sin/cos
  polynomial selection and sign computation yield cos instead of sin.
  ABI: argument and result in XMM0. XMM6/XMM7 (callee-saved, Microsoft x64)
  are spilled below RSP. NOTE(review): Win64 has no red zone; presumably safe
  only because this is a leaf function — confirm for the target platforms.
  NOTE(review): CVTPS2DQ rounds per MXCSR (nearest by default), so "Trunc"
  in the comments below is approximate. A stray ADDPS appears in this scalar
  path; only the low lane is consumed, so the result is unaffected. }
function FastCos(const ARadians: Single): Single; assembler;
asm
  movdqa   [rsp-24], xmm6           // Save callee-saved XMM6/XMM7 below RSP
  movdqa   [rsp-40], xmm7

  movss    xmm1, DWORD [SSE_MASK_ABS_VAL]
  movss    xmm2, DWORD [SSE_FOPI]
  andps    xmm0, xmm1               // (xmm0) X := Abs(ARadians)
  movss    xmm3, DWORD [SSE_INT_NOT_ONE]
  movaps   xmm1, xmm0
  movss    xmm4, DWORD [SSE_INT_FOUR]
  mulss    xmm1, xmm2
  movss    xmm2, DWORD [SSE_INT_ONE]
  cvtps2dq xmm1, xmm1               // J := Trunc(X * FOPI)
  pxor     xmm6, xmm6
  paddd    xmm1, xmm2
  pand     xmm1, xmm3               // (xmm1) J := (J + 1) and (not 1)
  movss    xmm3, DWORD [SSE_INT_TWO]
  cvtdq2ps xmm2, xmm1               // (xmm2) Y := J
  psubd    xmm1, xmm3               // J - 2
  movaps   xmm5, xmm1
  pandn    xmm1, xmm4               // (not (J - 2)) and 4
  pand     xmm5, xmm3               // (J - 2) and 2
  pslld    xmm1, 29                 // (xmm1) SignBit := ((not (J - 2)) and 4) shl 29
  movss    xmm3, DWORD [SSE_PI_OVER_4]
  pcmpeqd  xmm5, xmm6               // (xmm5) PolyMask := ((J-2) and 2)=0)? Yes: $FFFFFFFF, No: $00000000
  mulss    xmm2, xmm3               // Y * Pi / 4
  movss    xmm3, DWORD [SSE_COSCOF_P1]
  subss    xmm0, xmm2               // (xmm0) X := X - (Y * Pi / 4)
  movss    xmm2, DWORD [SSE_COSCOF_P0]
  movss    xmm4, DWORD [SSE_COSCOF_P2]
  movaps   xmm6, xmm0
  mulss    xmm6, xmm6               // (xmm6) Z := X * X
  mulss    xmm2, xmm6               // COSCOF_P0 * Z
  addps    xmm2, xmm3               // Y := COSCOF_P0 * Z + COSCOF_P1
  movss    xmm3, DWORD [SSE_ONE_HALF]
  mulss    xmm2, xmm6               // Y * Z
  mulss    xmm3, xmm6               // Z * 0.5
  addss    xmm2, xmm4               // Y := (Y * Z) + COSCOF_P2
  movss    xmm7, DWORD [SSE_ONE]
  mulss    xmm2, xmm6
  movss    xmm4, DWORD [SSE_SINCOF_P1]
  mulss    xmm2, xmm6               // Y := Y * (Z * Z)
  subss    xmm2, xmm3               // Y - Z * 0.5
  addss    xmm2, xmm7               // (xmm2) Y := Y - Z * 0.5 + 1
  movss    xmm3, DWORD [SSE_SINCOF_P0]
  movss    xmm7, DWORD [SSE_SINCOF_P2]
  mulss    xmm3, xmm6               // SINCOF_P0 * Z
  addss    xmm3, xmm4               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  mulss    xmm3, xmm6               // Y2 * Z
  addss    xmm3, xmm7               // Y2 := (Y2 * Z) + SINCOF_P2
  mulss    xmm3, xmm6               // Y2 * Z
  mulss    xmm3, xmm0               // Y2 * (Z * X)
  addss    xmm3, xmm0               // Y2 := Y2 * (Z * X) + X
  andps    xmm3, xmm5               // ((J-2) and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm5, xmm2               // ((J-2) and 2) = 0)? Yes: 0 , No: Y
  addss    xmm3, xmm5
  xorps    xmm3, xmm1               // (Y + Y2) xor SignBit
  movss    xmm0, xmm3               // Result in XMM0

  movdqa   xmm6, [rsp-24]           // Restore XMM6/XMM7
  movdqa   xmm7, [rsp-40]
end;

{ Fast cosine approximation for a 2-component vector (packed version of the
  scalar FastCos; only the low two lanes carry meaningful data).
  Saves/restores callee-saved XMM6/XMM7 below RSP (leaf function; Win64 has
  no red zone — see the review note on the scalar FastCos).
  ABI note: by-value register before Delphi 10.3, by-address from 10.3 on;
  8-byte result returned in RAX. }
function FastCos(const ARadians: TVector2): TVector2; assembler;
asm
  movdqa   [rsp-24], xmm6           // Save callee-saved XMM6/XMM7 below RSP
  movdqa   [rsp-40], xmm7

  {$IF RTLVersion >= 33}
  movlps   xmm0, [ARadians]
  {$ELSE}
  movq     xmm0, ARadians
  {$ENDIF}
  movlps   xmm1, QWORD [SSE_MASK_ABS_VAL]
  movlps   xmm2, QWORD [SSE_FOPI]
  andps    xmm0, xmm1               // (xmm0) X := Abs(ARadians)
  movlps   xmm3, QWORD [SSE_INT_NOT_ONE]
  movaps   xmm1, xmm0
  movlps   xmm4, QWORD [SSE_INT_FOUR]
  mulps    xmm1, xmm2
  movlps   xmm2, QWORD [SSE_INT_ONE]
  cvtps2dq xmm1, xmm1               // J := Trunc(X * FOPI)
  pxor     xmm6, xmm6
  paddd    xmm1, xmm2
  pand     xmm1, xmm3               // (xmm1) J := (J + 1) and (not 1)
  movlps   xmm3, QWORD [SSE_INT_TWO]
  cvtdq2ps xmm2, xmm1               // (xmm2) Y := J
  psubd    xmm1, xmm3               // J - 2
  movaps   xmm5, xmm1
  pandn    xmm1, xmm4               // (not (J - 2)) and 4
  pand     xmm5, xmm3               // (J - 2) and 2
  pslld    xmm1, 29                 // (xmm1) SignBit := ((not (J - 2)) and 4) shl 29
  movlps   xmm3, QWORD [SSE_PI_OVER_4]
  pcmpeqd  xmm5, xmm6               // (xmm5) PolyMask := ((J-2) and 2)=0)? Yes: $FFFFFFFF, No: $00000000
  mulps    xmm2, xmm3               // Y * Pi / 4
  movlps   xmm3, QWORD [SSE_COSCOF_P1]
  subps    xmm0, xmm2               // (xmm0) X := X - (Y * Pi / 4)
  movlps   xmm2, QWORD [SSE_COSCOF_P0]
  movlps   xmm4, QWORD [SSE_COSCOF_P2]
  movaps   xmm6, xmm0
  mulps    xmm6, xmm6               // (xmm6) Z := X * X
  mulps    xmm2, xmm6               // COSCOF_P0 * Z
  addps    xmm2, xmm3               // Y := COSCOF_P0 * Z + COSCOF_P1
  movlps   xmm3, QWORD [SSE_ONE_HALF]
  mulps    xmm2, xmm6               // Y * Z
  mulps    xmm3, xmm6               // Z * 0.5
  addps    xmm2, xmm4               // Y := (Y * Z) + COSCOF_P2
  movlps   xmm7, QWORD [SSE_ONE]
  mulps    xmm2, xmm6
  movlps   xmm4, QWORD [SSE_SINCOF_P1]
  mulps    xmm2, xmm6               // Y := Y * (Z * Z)
  subps    xmm2, xmm3               // Y - Z * 0.5
  addps    xmm2, xmm7               // (xmm2) Y := Y - Z * 0.5 + 1
  movlps   xmm3, QWORD [SSE_SINCOF_P0]
  movlps   xmm7, QWORD [SSE_SINCOF_P2]
  mulps    xmm3, xmm6               // SINCOF_P0 * Z
  addps    xmm3, xmm4               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  mulps    xmm3, xmm6               // Y2 * Z
  addps    xmm3, xmm7               // Y2 := (Y2 * Z) + SINCOF_P2
  mulps    xmm3, xmm6               // Y2 * Z
  mulps    xmm3, xmm0               // Y2 * (Z * X)
  addps    xmm3, xmm0               // Y2 := Y2 * (Z * X) + X
  andps    xmm3, xmm5               // ((J-2) and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm5, xmm2               // ((J-2) and 2) = 0)? Yes: 0 , No: Y
  addps    xmm3, xmm5
  xorps    xmm3, xmm1               // (Y + Y2) xor SignBit
  movq     rax, xmm3                // 8-byte record result returned in RAX

  movdqa   xmm6, [rsp-24]           // Restore XMM6/XMM7
  movdqa   xmm7, [rsp-40]
end;

{ Fast cosine approximation for a 3-component vector. X,Y,Z are packed into
  one XMM register (MOVLHPS) so the whole polynomial runs once over all lanes.
  Saves/restores callee-saved XMM6/XMM7 below RSP (leaf function; Win64 has
  no red zone — see the review note on the scalar FastCos).
  TVector3 is passed and returned by reference. }
function FastCos(const ARadians: TVector3): TVector3; assembler;
asm
  movdqa   [rsp-24], xmm6           // Save callee-saved XMM6/XMM7 below RSP
  movdqa   [rsp-40], xmm7

  movq     xmm0, [ARadians]         // xmm0 = X, Y
  movss    xmm1, DWORD [ARadians+8] // Z
  movlhps  xmm0, xmm1               // xmm0 = X, Y, Z, 0
  movups   xmm1, [SSE_MASK_ABS_VAL]
  movups   xmm2, [SSE_FOPI]
  andps    xmm0, xmm1               // (xmm0) X := Abs(ARadians)
  movups   xmm3, [SSE_INT_NOT_ONE]
  movaps   xmm1, xmm0
  movups   xmm4, [SSE_INT_FOUR]
  mulps    xmm1, xmm2
  movups   xmm2, [SSE_INT_ONE]
  cvtps2dq xmm1, xmm1               // J := Trunc(X * FOPI)
  pxor     xmm6, xmm6
  paddd    xmm1, xmm2
  pand     xmm1, xmm3               // (xmm1) J := (J + 1) and (not 1)
  movups   xmm3, [SSE_INT_TWO]
  cvtdq2ps xmm2, xmm1               // (xmm2) Y := J
  psubd    xmm1, xmm3               // J - 2
  movaps   xmm5, xmm1
  pandn    xmm1, xmm4               // (not (J - 2)) and 4
  pand     xmm5, xmm3               // (J - 2) and 2
  pslld    xmm1, 29                 // (xmm1) SignBit := ((not (J - 2)) and 4) shl 29
  movups   xmm3, [SSE_PI_OVER_4]
  pcmpeqd  xmm5, xmm6               // (xmm5) PolyMask := ((J-2) and 2)=0)? Yes: $FFFFFFFF, No: $00000000
  mulps    xmm2, xmm3               // Y * Pi / 4
  movups   xmm3, [SSE_COSCOF_P1]
  subps    xmm0, xmm2               // (xmm0) X := X - (Y * Pi / 4)
  movups   xmm2, [SSE_COSCOF_P0]
  movups   xmm4, [SSE_COSCOF_P2]
  movaps   xmm6, xmm0
  mulps    xmm6, xmm6               // (xmm6) Z := X * X
  mulps    xmm2, xmm6               // COSCOF_P0 * Z
  addps    xmm2, xmm3               // Y := COSCOF_P0 * Z + COSCOF_P1
  movups   xmm3, [SSE_ONE_HALF]
  mulps    xmm2, xmm6               // Y * Z
  mulps    xmm3, xmm6               // Z * 0.5
  addps    xmm2, xmm4               // Y := (Y * Z) + COSCOF_P2
  movups   xmm7, [SSE_ONE]
  mulps    xmm2, xmm6
  movups   xmm4, [SSE_SINCOF_P1]
  mulps    xmm2, xmm6               // Y := Y * (Z * Z)
  subps    xmm2, xmm3               // Y - Z * 0.5
  addps    xmm2, xmm7               // (xmm2) Y := Y - Z * 0.5 + 1
  movups   xmm3, [SSE_SINCOF_P0]
  movups   xmm7, [SSE_SINCOF_P2]
  mulps    xmm3, xmm6               // SINCOF_P0 * Z
  addps    xmm3, xmm4               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  mulps    xmm3, xmm6               // Y2 * Z
  addps    xmm3, xmm7               // Y2 := (Y2 * Z) + SINCOF_P2
  mulps    xmm3, xmm6               // Y2 * Z
  mulps    xmm3, xmm0               // Y2 * (Z * X)
  addps    xmm3, xmm0               // Y2 := Y2 * (Z * X) + X
  andps    xmm3, xmm5               // ((J-2) and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm5, xmm2               // ((J-2) and 2) = 0)? Yes: 0 , No: Y
  addps    xmm3, xmm5
  xorps    xmm3, xmm1               // (Y + Y2) xor SignBit
  movhlps  xmm4, xmm3               // xmm4 low lane = result Z
  movq     [Result], xmm3
  movss    DWORD [Result+8], xmm4

  movdqa   xmm6, [rsp-24]           // Restore XMM6/XMM7
  movdqa   xmm7, [rsp-40]
end;

{ Fast 4-wide cosine approximation (SSE2).
  Range-reduces |ARadians| to an octant J, then evaluates either the cosine
  polynomial (COSCOF_P0..P2) or the sine polynomial (SINCOF_P0..P2) per lane,
  selected by PolyMask, and applies the quadrant sign bit.
  In:   ARadians — 4 angles in radians (loaded unaligned).
  Out:  Result   — Cos of each component (stored unaligned).
  Uses: xmm0-xmm7; xmm6/xmm7 are non-volatile in the Win64 ABI and are
  saved/restored below RSP.
  NOTE(review): Win64 defines no red zone — storing at [rsp-24]/[rsp-40] is
  assumed safe only because this is a leaf routine; confirm against the
  compiler-generated frame. }
function FastCos(const ARadians: TVector4): TVector4; assembler;
asm
  movdqa   [rsp-24], xmm6            // save non-volatile xmm6/xmm7 (see header note)
  movdqa   [rsp-40], xmm7

  movups   xmm0, [ARadians]
  movups   xmm1, [SSE_MASK_ABS_VAL]
  movups   xmm2, [SSE_FOPI]
  andps    xmm0, xmm1               // (xmm0) X := Abs(ARadians)
  movups   xmm3, [SSE_INT_NOT_ONE]
  movaps   xmm1, xmm0
  movups   xmm4, [SSE_INT_FOUR]
  mulps    xmm1, xmm2
  movups   xmm2, [SSE_INT_ONE]
  cvtps2dq xmm1, xmm1               // J := Round(X * FOPI) — cvtps2dq uses the MXCSR rounding mode (nearest by default), not truncation
  pxor     xmm6, xmm6
  paddd    xmm1, xmm2
  pand     xmm1, xmm3               // (xmm1) J := (J + 1) and (not 1)
  movups   xmm3, [SSE_INT_TWO]
  cvtdq2ps xmm2, xmm1               // (xmm2) Y := J
  psubd    xmm1, xmm3               // J - 2
  movaps   xmm5, xmm1
  pandn    xmm1, xmm4               // (not (J - 2)) and 4
  pand     xmm5, xmm3               // (J - 2) and 2
  pslld    xmm1, 29                 // (xmm1) SignBit := ((not (J - 2)) and 4) shl 29
  movups   xmm3, [SSE_PI_OVER_4]
  pcmpeqd  xmm5, xmm6               // (xmm5) PolyMask := ((J-2) and 2)=0)? Yes: $FFFFFFFF, No: $00000000
  mulps    xmm2, xmm3               // Y * Pi / 4
  movups   xmm3, [SSE_COSCOF_P1]
  subps    xmm0, xmm2               // (xmm0) X := X - (Y * Pi / 4)
  movups   xmm2, [SSE_COSCOF_P0]
  movups   xmm4, [SSE_COSCOF_P2]
  movaps   xmm6, xmm0
  mulps    xmm6, xmm6               // (xmm6) Z := X * X
  mulps    xmm2, xmm6               // COSCOF_P0 * Z
  addps    xmm2, xmm3               // Y := COSCOF_P0 * Z + COSCOF_P1
  movups   xmm3, [SSE_ONE_HALF]
  mulps    xmm2, xmm6               // Y * Z
  mulps    xmm3, xmm6               // Z * 0.5
  addps    xmm2, xmm4               // Y := (Y * Z) + COSCOF_P2
  movups   xmm7, [SSE_ONE]
  mulps    xmm2, xmm6
  movups   xmm4, [SSE_SINCOF_P1]
  mulps    xmm2, xmm6               // Y := Y * (Z * Z)
  subps    xmm2, xmm3               // Y - Z * 0.5
  addps    xmm2, xmm7               // (xmm2) Y := Y - Z * 0.5 + 1
  movups   xmm3, [SSE_SINCOF_P0]
  movups   xmm7, [SSE_SINCOF_P2]
  mulps    xmm3, xmm6               // SINCOF_P0 * Z
  addps    xmm3, xmm4               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  mulps    xmm3, xmm6               // Y2 * Z
  addps    xmm3, xmm7               // Y2 := (Y2 * Z) + SINCOF_P2
  mulps    xmm3, xmm6               // Y2 * Z
  mulps    xmm3, xmm0               // Y2 * (Z * X)
  addps    xmm3, xmm0               // Y2 := Y2 * (Z * X) + X
  andps    xmm3, xmm5               // ((J-2) and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm5, xmm2               // ((J-2) and 2) = 0)? Yes: 0 , No: Y
  addps    xmm3, xmm5               // merge the two polynomial results per lane
  xorps    xmm3, xmm1               // (Y + Y2) xor SignBit
  movups   [Result], xmm3

  movdqa   xmm6, [rsp-24]            // restore xmm6/xmm7
  movdqa   xmm7, [rsp-40]
end;
{ Fast simultaneous sine and cosine of a scalar (SSE2).
  Shares one range reduction (octant J of |ARadians|) and one evaluation of
  both polynomials; PolyMask swaps which polynomial feeds Sin vs. Cos, and
  separate sign bits (SignBitSin / SignBitCos) restore the quadrant signs.
  In:   ARadians in xmm0 (Win64 FP argument register).
  Out:  ASin, ACos — written through the out-parameter pointers.
  Uses: xmm0-xmm7; non-volatile xmm6/xmm7 saved/restored below RSP.
  NOTE(review): Win64 defines no red zone — storage below RSP relies on this
  being a leaf routine; confirm against the compiler-generated frame. }
procedure FastSinCos(const ARadians: Single; out ASin, ACos: Single); assembler;
asm
  movdqa   [rsp-24], xmm6            // save non-volatile xmm6/xmm7 (see header note)
  movdqa   [rsp-40], xmm7

  movss    xmm2, DWORD [SSE_MASK_SIGN]
  movss    xmm3, DWORD [SSE_MASK_ABS_VAL]
  movaps   xmm1, xmm0
  pand     xmm0, xmm3               // (xmm0) X := Abs(ARadians)
  pand     xmm1, xmm2               // (xmm1) SignBitSin
  movaps   xmm4, xmm0
  movss    xmm5, DWORD [SSE_FOPI]
  movss    xmm6, DWORD [SSE_INT_ONE]
  mulss    xmm4, xmm5
  movss    xmm7, DWORD [SSE_INT_NOT_ONE]
  cvtps2dq xmm4, xmm4               // (xmm4) J := Round(X * FOPI) — cvtps2dq rounds per MXCSR (nearest by default), not truncation
  movss    xmm5, DWORD [SSE_INT_FOUR]
  paddd    xmm4, xmm6
  pand     xmm4, xmm7               // (xmm4) J := (J + 1) and (not 1)
  movss    xmm7, DWORD [SSE_INT_TWO]
  cvtdq2ps xmm2, xmm4               // (xmm2) Y := J
  movaps   xmm3, xmm4
  movaps   xmm6, xmm4               // (xmm6) J
  pand     xmm3, xmm5               // J and 4
  pand     xmm4, xmm7               // J and 2
  pxor     xmm5, xmm5
  pslld    xmm3, 29                 // (xmm3) SwapSignBitSin := (J and 4) shl 29
  movss    xmm7, DWORD [SSE_PI_OVER_4]
  pcmpeqd  xmm4, xmm5               // (xmm4) PolyMask := ((J and 2) = 0)? Yes: $FFFFFFFF, No: $00000000
  mulss    xmm2, xmm7               // Y * Pi / 4
  movss    xmm5, DWORD [SSE_INT_TWO]
  subss    xmm0, xmm2               // (xmm0) X := X - (Y * Pi / 4)
  psubd    xmm6, xmm5               // J - 2
  movss    xmm7, DWORD [SSE_INT_FOUR]
  pxor     xmm1, xmm3               // (xmm1) SignBitSin := SignBitSin xor SwapSignBitSin
  andnps   xmm6, xmm7               // (not (J - 2)) and 4
  movaps   xmm3, xmm0
  pslld    xmm6, 29                 // (xmm6) SignBitCos := ((not (J - 2)) and 4) shl 29
  mulss    xmm3, xmm3               // (xmm3) Z := X * X
  movss    xmm2, DWORD [SSE_COSCOF_P0]
  movss    xmm5, DWORD [SSE_COSCOF_P1]
  movss    xmm7, DWORD [SSE_COSCOF_P2]
  mulss    xmm2, xmm3               // COSCOF_P0 * Z
  addss    xmm2, xmm5               // Y := COSCOF_P0 * Z + COSCOF_P1
  movss    xmm5, DWORD [SSE_ONE_HALF]
  mulss    xmm2, xmm3               // Y * Z
  addss    xmm2, xmm7               // Y := (Y * Z) + COSCOF_P2
  movss    xmm7, DWORD [SSE_ONE]
  mulss    xmm2, xmm3               // Y * Z
  mulss    xmm5, xmm3               // 0.5 * Z
  mulss    xmm2, xmm3               // Y * (Z * Z)
  subss    xmm2, xmm5               // Y - 0.5 * Z
  movss    xmm5, DWORD [SSE_SINCOF_P0]
  addss    xmm2, xmm7               // (xmm2) Y := Y - 0.5 * Z + 1
  movss    xmm7, DWORD [SSE_SINCOF_P1]
  mulss    xmm5, xmm3               // SINCOF_P0 * Z
  addss    xmm5, xmm7               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  mulss    xmm5, xmm3               // Y2 * Z
  movss    xmm7, DWORD [SSE_SINCOF_P2]
  addss    xmm5, xmm7               // Y2 := Y2 * Z + SINCOF_P2
  mulss    xmm5, xmm3               // Y2 * Z
  mulss    xmm5, xmm0               // Y2 * (Z * X)
  addss    xmm5, xmm0               // (xmm5) Y2 := Y2 * (Z * X) + X
  movaps   xmm0, xmm2               // Y
  movaps   xmm3, xmm5               // Y2
  andps    xmm5, xmm4               // ((J and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm4, xmm2               // ((J and 2) = 0)? Yes: 0 , No: Y
  subss    xmm3, xmm5               // ((J and 2) = 0)? Yes: 0 , No: Y2
  subss    xmm0, xmm4               // ((J and 2) = 0)? Yes: Y , No: 0
  addps    xmm4, xmm5               // ((J and 2) = 0)? Yes: Y2, No: Y
  addps    xmm3, xmm0               // ((J and 2) = 0)? Yes: Y , No: Y2
  xorps    xmm4, xmm1               // Sin
  xorps    xmm3, xmm6               // Cos
  movss    [ASin], xmm4
  movss    [ACos], xmm3

  movdqa   xmm6, [rsp-24]            // restore xmm6/xmm7
  movdqa   xmm7, [rsp-40]
end;
{ Fast simultaneous sine and cosine of a 2-component vector (SSE2).
  Same algorithm as the scalar FastSinCos, performed on the low two lanes;
  constants are loaded 8 bytes wide with MOVLPS.
  In:   ARadians — per the file-header note, Delphi >= 10.3 Rio passes the
        address of the 8-byte record (MOVLPS path), older versions pass the
        value itself in the register (MOVQ path).
  Out:  ASin, ACos — written through the out-parameter pointers.
  Uses: xmm0-xmm7; non-volatile xmm6/xmm7 saved/restored below RSP.
  NOTE(review): Win64 defines no red zone — storage below RSP relies on this
  being a leaf routine; confirm against the compiler-generated frame. }
procedure FastSinCos(const ARadians: TVector2; out ASin, ACos: TVector2); assembler;
asm
  movdqa   [rsp-24], xmm6            // save non-volatile xmm6/xmm7 (see header note)
  movdqa   [rsp-40], xmm7

  {$IF RTLVersion >= 33}
  movlps   xmm0, [ARadians]
  {$ELSE}
  movq     xmm0, ARadians
  {$ENDIF}
  movlps   xmm2, QWORD [SSE_MASK_SIGN]
  movlps   xmm3, QWORD [SSE_MASK_ABS_VAL]
  movaps   xmm1, xmm0
  pand     xmm0, xmm3               // (xmm0) X := Abs(ARadians)
  pand     xmm1, xmm2               // (xmm1) SignBitSin
  movaps   xmm4, xmm0
  movlps   xmm5, QWORD [SSE_FOPI]
  movlps   xmm6, QWORD [SSE_INT_ONE]
  mulps    xmm4, xmm5
  movlps   xmm7, QWORD [SSE_INT_NOT_ONE]
  cvtps2dq xmm4, xmm4               // (xmm4) J := Round(X * FOPI) — cvtps2dq rounds per MXCSR (nearest by default), not truncation
  movlps   xmm5, QWORD [SSE_INT_FOUR]
  paddd    xmm4, xmm6
  pand     xmm4, xmm7               // (xmm4) J := (J + 1) and (not 1)
  movlps   xmm7, QWORD [SSE_INT_TWO]
  cvtdq2ps xmm2, xmm4               // (xmm2) Y := J
  movaps   xmm3, xmm4
  movaps   xmm6, xmm4               // (xmm6) J
  pand     xmm3, xmm5               // J and 4
  pand     xmm4, xmm7               // J and 2
  pxor     xmm5, xmm5
  pslld    xmm3, 29                 // (xmm3) SwapSignBitSin := (J and 4) shl 29
  movlps   xmm7, QWORD [SSE_PI_OVER_4]
  pcmpeqd  xmm4, xmm5               // (xmm4) PolyMask := ((J and 2) = 0)? Yes: $FFFFFFFF, No: $00000000
  mulps    xmm2, xmm7               // Y * Pi / 4
  movlps   xmm5, QWORD [SSE_INT_TWO]
  subps    xmm0, xmm2               // (xmm0) X := X - (Y * Pi / 4)
  psubd    xmm6, xmm5               // J - 2
  movlps   xmm7, QWORD [SSE_INT_FOUR]
  pxor     xmm1, xmm3               // (xmm1) SignBitSin := SignBitSin xor SwapSignBitSin
  andnps   xmm6, xmm7               // (not (J - 2)) and 4
  movaps   xmm3, xmm0
  pslld    xmm6, 29                 // (xmm6) SignBitCos := ((not (J - 2)) and 4) shl 29
  mulps    xmm3, xmm3               // (xmm3) Z := X * X
  movlps   xmm2, QWORD [SSE_COSCOF_P0]
  movlps   xmm5, QWORD [SSE_COSCOF_P1]
  movlps   xmm7, QWORD [SSE_COSCOF_P2]
  mulps    xmm2, xmm3               // COSCOF_P0 * Z
  addps    xmm2, xmm5               // Y := COSCOF_P0 * Z + COSCOF_P1
  movlps   xmm5, QWORD [SSE_ONE_HALF]
  mulps    xmm2, xmm3               // Y * Z
  addps    xmm2, xmm7               // Y := (Y * Z) + COSCOF_P2
  movlps   xmm7, QWORD [SSE_ONE]
  mulps    xmm2, xmm3               // Y * Z
  mulps    xmm5, xmm3               // 0.5 * Z
  mulps    xmm2, xmm3               // Y * (Z * Z)
  subps    xmm2, xmm5               // Y - 0.5 * Z
  movlps   xmm5, QWORD [SSE_SINCOF_P0]
  addps    xmm2, xmm7               // (xmm2) Y := Y - 0.5 * Z + 1
  movlps   xmm7, QWORD [SSE_SINCOF_P1]
  mulps    xmm5, xmm3               // SINCOF_P0 * Z
  addps    xmm5, xmm7               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  mulps    xmm5, xmm3               // Y2 * Z
  movlps   xmm7, QWORD [SSE_SINCOF_P2]
  addps    xmm5, xmm7               // Y2 := Y2 * Z + SINCOF_P2
  mulps    xmm5, xmm3               // Y2 * Z
  mulps    xmm5, xmm0               // Y2 * (Z * X)
  addps    xmm5, xmm0               // (xmm5) Y2 := Y2 * (Z * X) + X
  movaps   xmm0, xmm2               // Y
  movaps   xmm3, xmm5               // Y2
  andps    xmm5, xmm4               // ((J and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm4, xmm2               // ((J and 2) = 0)? Yes: 0 , No: Y
  subps    xmm3, xmm5               // ((J and 2) = 0)? Yes: 0 , No: Y2
  subps    xmm0, xmm4               // ((J and 2) = 0)? Yes: Y , No: 0
  addps    xmm4, xmm5               // ((J and 2) = 0)? Yes: Y2, No: Y
  addps    xmm3, xmm0               // ((J and 2) = 0)? Yes: Y , No: Y2
  xorps    xmm4, xmm1               // Sin
  xorps    xmm3, xmm6               // Cos
  movlps   [ASin], xmm4
  movlps   [ACos], xmm3

  movdqa   xmm6, [rsp-24]            // restore xmm6/xmm7
  movdqa   xmm7, [rsp-40]
end;
{ Fast simultaneous sine and cosine of a 3-component vector (SSE2).
  Same algorithm as the scalar FastSinCos on all four lanes; the 12-byte
  vector is assembled from an 8-byte + 4-byte load (MOVQ/MOVSS/MOVLHPS) and
  the results are written back the same way. The fourth lane is computed but
  discarded.
  In:   ARadians — 3 angles in radians, passed by reference.
  Out:  ASin, ACos — written through the out-parameter pointers.
  Uses: xmm0-xmm7; non-volatile xmm6/xmm7 saved/restored below RSP.
  NOTE(review): Win64 defines no red zone — storage below RSP relies on this
  being a leaf routine; confirm against the compiler-generated frame. }
procedure FastSinCos(const ARadians: TVector3; out ASin, ACos: TVector3); assembler;
asm
  movdqa   [rsp-24], xmm6            // save non-volatile xmm6/xmm7 (see header note)
  movdqa   [rsp-40], xmm7

  movq     xmm0, [ARadians]          // load X,Y
  movss    xmm1, DWORD [ARadians+8]  // load Z
  movlhps  xmm0, xmm1                // pack X,Y,Z(,0) into one register
  movups   xmm2, [SSE_MASK_SIGN]
  movups   xmm3, [SSE_MASK_ABS_VAL]
  movaps   xmm1, xmm0
  pand     xmm0, xmm3               // (xmm0) X := Abs(ARadians)
  pand     xmm1, xmm2               // (xmm1) SignBitSin
  movaps   xmm4, xmm0
  movups   xmm5, [SSE_FOPI]
  movups   xmm6, [SSE_INT_ONE]
  mulps    xmm4, xmm5
  movups   xmm7, [SSE_INT_NOT_ONE]
  cvtps2dq xmm4, xmm4               // (xmm4) J := Round(X * FOPI) — cvtps2dq rounds per MXCSR (nearest by default), not truncation
  movups   xmm5, [SSE_INT_FOUR]
  paddd    xmm4, xmm6
  pand     xmm4, xmm7               // (xmm4) J := (J + 1) and (not 1)
  movups   xmm7, [SSE_INT_TWO]
  cvtdq2ps xmm2, xmm4               // (xmm2) Y := J
  movaps   xmm3, xmm4
  movaps   xmm6, xmm4               // (xmm6) J
  pand     xmm3, xmm5               // J and 4
  pand     xmm4, xmm7               // J and 2
  pxor     xmm5, xmm5
  pslld    xmm3, 29                 // (xmm3) SwapSignBitSin := (J and 4) shl 29
  movups   xmm7, [SSE_PI_OVER_4]
  pcmpeqd  xmm4, xmm5               // (xmm4) PolyMask := ((J and 2) = 0)? Yes: $FFFFFFFF, No: $00000000
  mulps    xmm2, xmm7               // Y * Pi / 4
  movups   xmm5, [SSE_INT_TWO]
  subps    xmm0, xmm2               // (xmm0) X := X - (Y * Pi / 4)
  psubd    xmm6, xmm5               // J - 2
  movups   xmm7, [SSE_INT_FOUR]
  pxor     xmm1, xmm3               // (xmm1) SignBitSin := SignBitSin xor SwapSignBitSin
  andnps   xmm6, xmm7               // (not (J - 2)) and 4
  movaps   xmm3, xmm0
  pslld    xmm6, 29                 // (xmm6) SignBitCos := ((not (J - 2)) and 4) shl 29
  mulps    xmm3, xmm3               // (xmm3) Z := X * X
  movups   xmm2, [SSE_COSCOF_P0]
  movups   xmm5, [SSE_COSCOF_P1]
  movups   xmm7, [SSE_COSCOF_P2]
  mulps    xmm2, xmm3               // COSCOF_P0 * Z
  addps    xmm2, xmm5               // Y := COSCOF_P0 * Z + COSCOF_P1
  movups   xmm5, [SSE_ONE_HALF]
  mulps    xmm2, xmm3               // Y * Z
  addps    xmm2, xmm7               // Y := (Y * Z) + COSCOF_P2
  movups   xmm7, [SSE_ONE]
  mulps    xmm2, xmm3               // Y * Z
  mulps    xmm5, xmm3               // 0.5 * Z
  mulps    xmm2, xmm3               // Y * (Z * Z)
  subps    xmm2, xmm5               // Y - 0.5 * Z
  movups   xmm5, [SSE_SINCOF_P0]
  addps    xmm2, xmm7               // (xmm2) Y := Y - 0.5 * Z + 1
  movups   xmm7, [SSE_SINCOF_P1]
  mulps    xmm5, xmm3               // SINCOF_P0 * Z
  addps    xmm5, xmm7               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  mulps    xmm5, xmm3               // Y2 * Z
  movups   xmm7, [SSE_SINCOF_P2]
  addps    xmm5, xmm7               // Y2 := Y2 * Z + SINCOF_P2
  mulps    xmm5, xmm3               // Y2 * Z
  mulps    xmm5, xmm0               // Y2 * (Z * X)
  addps    xmm5, xmm0               // (xmm5) Y2 := Y2 * (Z * X) + X
  movaps   xmm0, xmm2               // Y
  movaps   xmm3, xmm5               // Y2
  andps    xmm5, xmm4               // ((J and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm4, xmm2               // ((J and 2) = 0)? Yes: 0 , No: Y
  subps    xmm3, xmm5               // ((J and 2) = 0)? Yes: 0 , No: Y2
  subps    xmm0, xmm4               // ((J and 2) = 0)? Yes: Y , No: 0
  addps    xmm4, xmm5               // ((J and 2) = 0)? Yes: Y2, No: Y
  addps    xmm3, xmm0               // ((J and 2) = 0)? Yes: Y , No: Y2
  xorps    xmm4, xmm1               // Sin
  xorps    xmm3, xmm6               // Cos
  movhlps  xmm5, xmm4                // third Sin lane to low position
  movhlps  xmm2, xmm3                // third Cos lane to low position
  movq     [ASin], xmm4
  movss    DWORD [ASin+8], xmm5
  movq     [ACos], xmm3
  movss    DWORD [ACos+8], xmm2

  movdqa   xmm6, [rsp-24]            // restore xmm6/xmm7
  movdqa   xmm7, [rsp-40]
end;
{ Fast simultaneous sine and cosine of a 4-component vector (SSE2).
  Same algorithm as the scalar FastSinCos applied to all four lanes with
  full-width unaligned loads/stores.
  In:   ARadians — 4 angles in radians, passed by reference.
  Out:  ASin, ACos — written through the out-parameter pointers.
  Uses: xmm0-xmm7; non-volatile xmm6/xmm7 saved/restored below RSP.
  NOTE(review): Win64 defines no red zone — storage below RSP relies on this
  being a leaf routine; confirm against the compiler-generated frame. }
procedure FastSinCos(const ARadians: TVector4; out ASin, ACos: TVector4); assembler;
asm
  movdqa   [rsp-24], xmm6            // save non-volatile xmm6/xmm7 (see header note)
  movdqa   [rsp-40], xmm7

  movups   xmm0, [ARadians]
  movups   xmm2, [SSE_MASK_SIGN]
  movups   xmm3, [SSE_MASK_ABS_VAL]
  movaps   xmm1, xmm0
  pand     xmm0, xmm3               // (xmm0) X := Abs(ARadians)
  pand     xmm1, xmm2               // (xmm1) SignBitSin
  movaps   xmm4, xmm0
  movups   xmm5, [SSE_FOPI]
  movups   xmm6, [SSE_INT_ONE]
  mulps    xmm4, xmm5
  movups   xmm7, [SSE_INT_NOT_ONE]
  cvtps2dq xmm4, xmm4               // (xmm4) J := Round(X * FOPI) — cvtps2dq rounds per MXCSR (nearest by default), not truncation
  movups   xmm5, [SSE_INT_FOUR]
  paddd    xmm4, xmm6
  pand     xmm4, xmm7               // (xmm4) J := (J + 1) and (not 1)
  movups   xmm7, [SSE_INT_TWO]
  cvtdq2ps xmm2, xmm4               // (xmm2) Y := J
  movaps   xmm3, xmm4
  movaps   xmm6, xmm4               // (xmm6) J
  pand     xmm3, xmm5               // J and 4
  pand     xmm4, xmm7               // J and 2
  pxor     xmm5, xmm5
  pslld    xmm3, 29                 // (xmm3) SwapSignBitSin := (J and 4) shl 29
  movups   xmm7, [SSE_PI_OVER_4]
  pcmpeqd  xmm4, xmm5               // (xmm4) PolyMask := ((J and 2) = 0)? Yes: $FFFFFFFF, No: $00000000
  mulps    xmm2, xmm7               // Y * Pi / 4
  movups   xmm5, [SSE_INT_TWO]
  subps    xmm0, xmm2               // (xmm0) X := X - (Y * Pi / 4)
  psubd    xmm6, xmm5               // J - 2
  movups   xmm7, [SSE_INT_FOUR]
  pxor     xmm1, xmm3               // (xmm1) SignBitSin := SignBitSin xor SwapSignBitSin
  andnps   xmm6, xmm7               // (not (J - 2)) and 4
  movaps   xmm3, xmm0
  pslld    xmm6, 29                 // (xmm6) SignBitCos := ((not (J - 2)) and 4) shl 29
  mulps    xmm3, xmm3               // (xmm3) Z := X * X
  movups   xmm2, [SSE_COSCOF_P0]
  movups   xmm5, [SSE_COSCOF_P1]
  movups   xmm7, [SSE_COSCOF_P2]
  mulps    xmm2, xmm3               // COSCOF_P0 * Z
  addps    xmm2, xmm5               // Y := COSCOF_P0 * Z + COSCOF_P1
  movups   xmm5, [SSE_ONE_HALF]
  mulps    xmm2, xmm3               // Y * Z
  addps    xmm2, xmm7               // Y := (Y * Z) + COSCOF_P2
  movups   xmm7, [SSE_ONE]
  mulps    xmm2, xmm3               // Y * Z
  mulps    xmm5, xmm3               // 0.5 * Z
  mulps    xmm2, xmm3               // Y * (Z * Z)
  subps    xmm2, xmm5               // Y - 0.5 * Z
  movups   xmm5, [SSE_SINCOF_P0]
  addps    xmm2, xmm7               // (xmm2) Y := Y - 0.5 * Z + 1
  movups   xmm7, [SSE_SINCOF_P1]
  mulps    xmm5, xmm3               // SINCOF_P0 * Z
  addps    xmm5, xmm7               // Y2 := SINCOF_P0 * Z + SINCOF_P1
  mulps    xmm5, xmm3               // Y2 * Z
  movups   xmm7, [SSE_SINCOF_P2]
  addps    xmm5, xmm7               // Y2 := Y2 * Z + SINCOF_P2
  mulps    xmm5, xmm3               // Y2 * Z
  mulps    xmm5, xmm0               // Y2 * (Z * X)
  addps    xmm5, xmm0               // (xmm5) Y2 := Y2 * (Z * X) + X
  movaps   xmm0, xmm2               // Y
  movaps   xmm3, xmm5               // Y2
  andps    xmm5, xmm4               // ((J and 2) = 0)? Yes: Y2, No: 0
  andnps   xmm4, xmm2               // ((J and 2) = 0)? Yes: 0 , No: Y
  subps    xmm3, xmm5               // ((J and 2) = 0)? Yes: 0 , No: Y2
  subps    xmm0, xmm4               // ((J and 2) = 0)? Yes: Y , No: 0
  addps    xmm4, xmm5               // ((J and 2) = 0)? Yes: Y2, No: Y
  addps    xmm3, xmm0               // ((J and 2) = 0)? Yes: Y , No: Y2
  xorps    xmm4, xmm1               // Sin
  xorps    xmm3, xmm6               // Cos
  movups   [ASin], xmm4
  movups   [ACos], xmm3

  movdqa   xmm6, [rsp-24]            // restore xmm6/xmm7
  movdqa   xmm7, [rsp-40]
end;
{ Fast scalar approximation of Exp(A) (SSE2).
  Scales A into IEEE-754 exponent space (12102203.16... = 2^23/Ln(2),
  1065353216 = bits of 1.0), clamps, splits the integer result into an
  exponent part (XU) and a mantissa part (XU2), and refines the mantissa with
  a degree-4 polynomial.
  In:   A in xmm0. Out: Result in xmm0.
  Uses: xmm0-xmm7; non-volatile xmm6/xmm7 saved/restored below RSP.
  NOTE(review): Win64 defines no red zone — storage below RSP relies on this
  being a leaf routine; confirm against the compiler-generated frame. }
function FastExp(const A: Single): Single; assembler;
asm
  movdqa   [rsp-24], xmm6            // save non-volatile xmm6/xmm7 (see header note)
  movdqa   [rsp-40], xmm7

  movss    xmm1, DWORD [SSE_EXP_A1]
  movss    xmm2, DWORD [SSE_EXP_A2]

  // Val := 12102203.1615614 * A + 1065353216.0
  mulss    xmm0, xmm1
  movss    xmm3, DWORD [SSE_EXP_CST]
  addss    xmm0, xmm2

  // if (Val >= EXP_CST) then Val := EXP_CST  (branchless clamp)
  movss    xmm1, xmm0
  cmpltss  xmm0, xmm3 // (Val < EXP_CST)? Yes: $FFFFFFFF, No: $00000000
  andps    xmm1, xmm0 // (Val < EXP_CST)? Yes: Val, No: 0
  andnps   xmm0, xmm3 // (Val < EXP_CST)? Yes: 0, No: EXP_CST
  orps     xmm0, xmm1 // (Val < EXP_CST)? Yes: Val, No: EXP_CST

  // IVal := Round(Val) — cvtps2dq rounds per MXCSR (nearest by default)
  xorps    xmm3, xmm3
  cvtps2dq xmm1, xmm0

  // if (IVal < 0) then I := 0  (branchless: mask by IVal > 0)
  movss    xmm2, DWORD [SSE_MASK_EXPONENT]
  movdqa   xmm0, xmm1 // IVal
  pcmpgtd  xmm1, xmm3 // (IVal > 0)? Yes: $FFFFFFFF, No: $00000000
  movss    xmm3, DWORD [SSE_MASK_FRACTION]
  pand     xmm0, xmm1 // (IVal > 0)? Yes: IVal, No: 0

  // XU.I := IVal and $7F800000
  movss    xmm4, DWORD [SSE_EXP_I1]
  movss    xmm1, xmm0
  pand     xmm0, xmm2 // XU.I / XU.S

  // XU2.I := (IVal and $007FFFFF) or $3F800000;
  pand     xmm1, xmm3
  movss    xmm6, DWORD [SSE_EXP_F5]
  por      xmm1, xmm4 // XU2.I / XU2.S

  //  Result := XU.S *
  //    ( 0.509964287281036376953125 + B *
  //    ( 0.3120158612728118896484375 + B *
  //    ( 0.1666135489940643310546875 + B *
  //    (-2.12528370320796966552734375e-3 + B *
  //      1.3534179888665676116943359375e-2))));
  movss    xmm5, DWORD [SSE_EXP_F4]
  movss    xmm7, xmm1

  mulss    xmm1, xmm6
  movss    xmm4, DWORD [SSE_EXP_F3]
  addss    xmm1, xmm5
  movss    xmm3, DWORD [SSE_EXP_F2]
  mulss    xmm1, xmm7
  movss    xmm2, DWORD [SSE_EXP_F1]
  addss    xmm1, xmm4
  mulss    xmm1, xmm7
  addss    xmm1, xmm3
  mulss    xmm1, xmm7
  addss    xmm1, xmm2
  mulss    xmm1, xmm0

  movss    xmm0, xmm1                // scalar result returns in xmm0

  movdqa   xmm6, [rsp-24]            // restore xmm6/xmm7
  movdqa   xmm7, [rsp-40]
end;
{ Fast 2-wide approximation of Exp(A) (SSE2).
  Same algorithm as the scalar FastExp applied to the low two lanes;
  constants loaded 8 bytes wide with MOVLPS.
  In:   A — per the file-header note, Delphi >= 10.3 Rio passes the address
        of the 8-byte record (MOVLPS path), older versions pass the value in
        the register (MOVQ path).
  Out:  Result — 8-byte record returned in RAX per the Win64 convention.
  Uses: xmm0-xmm7; non-volatile xmm6/xmm7 saved/restored below RSP.
  NOTE(review): Win64 defines no red zone — storage below RSP relies on this
  being a leaf routine; confirm against the compiler-generated frame. }
function FastExp(const A: TVector2): TVector2;
asm
  movdqa   [rsp-24], xmm6            // save non-volatile xmm6/xmm7 (see header note)
  movdqa   [rsp-40], xmm7

  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  movlps   xmm1, QWORD [SSE_EXP_A1]
  movlps   xmm2, QWORD [SSE_EXP_A2]

  // Val := 12102203.1615614 * A + 1065353216.0
  mulps    xmm0, xmm1
  movlps   xmm3, QWORD [SSE_EXP_CST]
  addps    xmm0, xmm2

  // if (Val >= EXP_CST) then Val := EXP_CST  (branchless clamp)
  movaps   xmm1, xmm0
  cmpltps  xmm0, xmm3 // (Val < EXP_CST)? Yes: $FFFFFFFF, No: $00000000
  andps    xmm1, xmm0 // (Val < EXP_CST)? Yes: Val, No: 0
  andnps   xmm0, xmm3 // (Val < EXP_CST)? Yes: 0, No: EXP_CST
  orps     xmm0, xmm1 // (Val < EXP_CST)? Yes: Val, No: EXP_CST

  // IVal := Round(Val) — cvtps2dq rounds per MXCSR (nearest by default)
  xorps    xmm3, xmm3
  cvtps2dq xmm1, xmm0

  // if (IVal < 0) then I := 0  (branchless: mask by IVal > 0)
  movlps   xmm2, QWORD [SSE_MASK_EXPONENT]
  movdqa   xmm0, xmm1 // IVal
  pcmpgtd  xmm1, xmm3 // (IVal > 0)? Yes: $FFFFFFFF, No: $00000000
  movlps   xmm3, QWORD [SSE_MASK_FRACTION]
  pand     xmm0, xmm1 // (IVal > 0)? Yes: IVal, No: 0

  // XU.I := IVal and $7F800000
  movlps   xmm4, QWORD [SSE_EXP_I1]
  movdqa   xmm1, xmm0
  pand     xmm0, xmm2 // XU.I / XU.S

  // XU2.I := (IVal and $007FFFFF) or $3F800000;
  pand     xmm1, xmm3
  movlps   xmm6, QWORD [SSE_EXP_F5]
  por      xmm1, xmm4 // XU2.I / XU2.S

  //  Result := XU.S *
  //    ( 0.509964287281036376953125 + B *
  //    ( 0.3120158612728118896484375 + B *
  //    ( 0.1666135489940643310546875 + B *
  //    (-2.12528370320796966552734375e-3 + B *
  //      1.3534179888665676116943359375e-2))));
  movlps   xmm5, QWORD [SSE_EXP_F4]
  movaps   xmm7, xmm1

  mulps    xmm1, xmm6
  movlps   xmm4, QWORD [SSE_EXP_F3]
  addps    xmm1, xmm5
  movlps   xmm3, QWORD [SSE_EXP_F2]
  mulps    xmm1, xmm7
  movlps   xmm2, QWORD [SSE_EXP_F1]
  addps    xmm1, xmm4
  mulps    xmm1, xmm7
  addps    xmm1, xmm3
  mulps    xmm1, xmm7
  addps    xmm1, xmm2
  mulps    xmm1, xmm0

  movq     rax, xmm1                 // 8-byte record result goes in RAX (see file header)

  movdqa   xmm6, [rsp-24]            // restore xmm6/xmm7
  movdqa   xmm7, [rsp-40]
end;
{ Fast 3-wide approximation of Exp(A) (SSE2).
  Same algorithm as the scalar FastExp on all four lanes; the 12-byte vector
  is assembled with MOVQ/MOVSS/MOVLHPS and stored back the same way (the
  fourth lane is computed but discarded).
  In:   A — 3 values, passed by reference.
  Out:  Result — written through the hidden result pointer.
  Uses: xmm0-xmm7; non-volatile xmm6/xmm7 saved/restored below RSP.
  NOTE(review): Win64 defines no red zone — storage below RSP relies on this
  being a leaf routine; confirm against the compiler-generated frame. }
function FastExp(const A: TVector3): TVector3;
asm
  movdqa   [rsp-24], xmm6            // save non-volatile xmm6/xmm7 (see header note)
  movdqa   [rsp-40], xmm7

  movq     xmm0, [A]                 // load X,Y
  movss    xmm1, DWORD [A+8]         // load Z
  movlhps  xmm0, xmm1                // pack X,Y,Z(,0)
  movups   xmm1, [SSE_EXP_A1]
  movups   xmm2, [SSE_EXP_A2]

  // Val := 12102203.1615614 * A + 1065353216.0
  mulps    xmm0, xmm1
  movups   xmm3, [SSE_EXP_CST]
  addps    xmm0, xmm2

  // if (Val >= EXP_CST) then Val := EXP_CST  (branchless clamp)
  movaps   xmm1, xmm0
  cmpltps  xmm0, xmm3 // (Val < EXP_CST)? Yes: $FFFFFFFF, No: $00000000
  andps    xmm1, xmm0 // (Val < EXP_CST)? Yes: Val, No: 0
  andnps   xmm0, xmm3 // (Val < EXP_CST)? Yes: 0, No: EXP_CST
  orps     xmm0, xmm1 // (Val < EXP_CST)? Yes: Val, No: EXP_CST

  // IVal := Round(Val) — cvtps2dq rounds per MXCSR (nearest by default)
  xorps    xmm3, xmm3
  cvtps2dq xmm1, xmm0

  // if (IVal < 0) then I := 0  (branchless: mask by IVal > 0)
  movups   xmm2, [SSE_MASK_EXPONENT]
  movdqa   xmm0, xmm1 // IVal
  pcmpgtd  xmm1, xmm3 // (IVal > 0)? Yes: $FFFFFFFF, No: $00000000
  movups   xmm3, [SSE_MASK_FRACTION]
  pand     xmm0, xmm1 // (IVal > 0)? Yes: IVal, No: 0

  // XU.I := IVal and $7F800000
  movups   xmm4, [SSE_EXP_I1]
  movdqa   xmm1, xmm0
  pand     xmm0, xmm2 // XU.I / XU.S

  // XU2.I := (IVal and $007FFFFF) or $3F800000;
  pand     xmm1, xmm3
  movups   xmm6, [SSE_EXP_F5]
  por      xmm1, xmm4 // XU2.I / XU2.S

  //  Result := XU.S *
  //    ( 0.509964287281036376953125 + B *
  //    ( 0.3120158612728118896484375 + B *
  //    ( 0.1666135489940643310546875 + B *
  //    (-2.12528370320796966552734375e-3 + B *
  //      1.3534179888665676116943359375e-2))));
  movups   xmm5, [SSE_EXP_F4]
  movaps   xmm7, xmm1

  mulps    xmm1, xmm6
  movups   xmm4, [SSE_EXP_F3]
  addps    xmm1, xmm5
  movups   xmm3, [SSE_EXP_F2]
  mulps    xmm1, xmm7
  movups   xmm2, [SSE_EXP_F1]
  addps    xmm1, xmm4
  mulps    xmm1, xmm7
  addps    xmm1, xmm3
  mulps    xmm1, xmm7
  addps    xmm1, xmm2
  mulps    xmm1, xmm0

  movhlps  xmm0, xmm1                // third lane to low position
  movq     [Result], xmm1
  movss    DWORD [Result+8], xmm0

  movdqa   xmm6, [rsp-24]            // restore xmm6/xmm7
  movdqa   xmm7, [rsp-40]
end;
{ Fast 4-wide approximation of Exp(A) (SSE2).
  Same algorithm as the scalar FastExp on all four lanes with full-width
  unaligned loads/stores.
  In:   A — 4 values, passed by reference.
  Out:  Result — written through the hidden result pointer.
  Uses: xmm0-xmm7; non-volatile xmm6/xmm7 saved/restored below RSP.
  NOTE(review): Win64 defines no red zone — storage below RSP relies on this
  being a leaf routine; confirm against the compiler-generated frame. }
function FastExp(const A: TVector4): TVector4;
asm
  movdqa   [rsp-24], xmm6            // save non-volatile xmm6/xmm7 (see header note)
  movdqa   [rsp-40], xmm7

  movups   xmm0, [A]
  movups   xmm1, [SSE_EXP_A1]
  movups   xmm2, [SSE_EXP_A2]

  // Val := 12102203.1615614 * A + 1065353216.0
  mulps    xmm0, xmm1
  movups   xmm3, [SSE_EXP_CST]
  addps    xmm0, xmm2

  // if (Val >= EXP_CST) then Val := EXP_CST  (branchless clamp)
  movaps   xmm1, xmm0
  cmpltps  xmm0, xmm3 // (Val < EXP_CST)? Yes: $FFFFFFFF, No: $00000000
  andps    xmm1, xmm0 // (Val < EXP_CST)? Yes: Val, No: 0
  andnps   xmm0, xmm3 // (Val < EXP_CST)? Yes: 0, No: EXP_CST
  orps     xmm0, xmm1 // (Val < EXP_CST)? Yes: Val, No: EXP_CST

  // IVal := Round(Val) — cvtps2dq rounds per MXCSR (nearest by default)
  xorps    xmm3, xmm3
  cvtps2dq xmm1, xmm0

  // if (IVal < 0) then I := 0  (branchless: mask by IVal > 0)
  movups   xmm2, [SSE_MASK_EXPONENT]
  movdqa   xmm0, xmm1 // IVal
  pcmpgtd  xmm1, xmm3 // (IVal > 0)? Yes: $FFFFFFFF, No: $00000000
  movups   xmm3, [SSE_MASK_FRACTION]
  pand     xmm0, xmm1 // (IVal > 0)? Yes: IVal, No: 0

  // XU.I := IVal and $7F800000
  movups   xmm4, [SSE_EXP_I1]
  movdqa   xmm1, xmm0
  pand     xmm0, xmm2 // XU.I / XU.S

  // XU2.I := (IVal and $007FFFFF) or $3F800000;
  pand     xmm1, xmm3
  movups   xmm6, [SSE_EXP_F5]
  por      xmm1, xmm4 // XU2.I / XU2.S

  //  Result := XU.S *
  //    ( 0.509964287281036376953125 + B *
  //    ( 0.3120158612728118896484375 + B *
  //    ( 0.1666135489940643310546875 + B *
  //    (-2.12528370320796966552734375e-3 + B *
  //      1.3534179888665676116943359375e-2))));
  movups   xmm5, [SSE_EXP_F4]
  movaps   xmm7, xmm1

  mulps    xmm1, xmm6
  movups   xmm4, [SSE_EXP_F3]
  addps    xmm1, xmm5
  movups   xmm3, [SSE_EXP_F2]
  mulps    xmm1, xmm7
  movups   xmm2, [SSE_EXP_F1]
  addps    xmm1, xmm4
  mulps    xmm1, xmm7
  addps    xmm1, xmm3
  mulps    xmm1, xmm7
  addps    xmm1, xmm2
  mulps    xmm1, xmm0

  movups   [Result], xmm1

  movdqa   xmm6, [rsp-24]            // restore xmm6/xmm7
  movdqa   xmm7, [rsp-40]
end;
{ Fast scalar approximation of Ln(A) (SSE2).
  Extracts the IEEE-754 exponent (bits shr 23) and normalizes the mantissa
  into [1,2) (X), then combines Exp * Ln(2) with a small polynomial in X.
  For A <= 0 the additive constant becomes NegInfinity, so the result is
  -INF/NaN-like rather than a trap.
  In:   A in xmm0. Out: Result in xmm0.
  Uses: xmm0-xmm7; non-volatile xmm6/xmm7 saved/restored below RSP.
  NOTE(review): Win64 defines no red zone — storage below RSP relies on this
  being a leaf routine; confirm against the compiler-generated frame. }
function FastLn(const A: Single): Single; assembler;
asm
  movdqa   [rsp-24], xmm6            // save non-volatile xmm6/xmm7 (see header note)
  movdqa   [rsp-40], xmm7

  xorps    xmm2, xmm2
  movss    xmm1, xmm0
  movss    xmm3, DWORD [SSE_LN_CST]
  movss    xmm4, DWORD [SSE_NEG_INFINITY]

  // Exp := Val.I shr 23   (integer shift of the float's bit pattern)
  psrld    xmm0, 23
  movss    xmm5, xmm1
  cvtdq2ps xmm0, xmm0 // xmm0=Exp

  // if (A > 0) then AddCst := -89.93423858 else AddCst := NegInfinity
  cmpnless xmm1, xmm2 // (A > 0)? Yes: $FFFFFFFF, No: $00000000
  movss    xmm2, DWORD [SSE_MASK_FRACTION]
  andps    xmm3, xmm1 // (A > 0)? Yes: -89.93423858, No: 0
  andnps   xmm1, xmm4 // (A > 0)? Yes: 0, No: NegInfinity
  movss    xmm4, DWORD [SSE_EXP_I1]
  orps     xmm1, xmm3 // (A > 0)? Yes: -89.93423858, No: NegInfinity

  // Val.I := (Val.I and $007FFFFF) or $3F800000   (mantissa -> [1,2))
  pand     xmm5, xmm2
  movss    xmm2, DWORD [SSE_LN_F5]
  por      xmm5, xmm4
  movss    xmm6, DWORD [SSE_LN_F3]
  movss    xmm3, xmm5 // xmm3=X
  mulss    xmm5, xmm5 // xmm5=X2

  movss    xmm4, xmm3
  movss    xmm7, DWORD [SSE_LN_F4]
  mulss    xmm4, xmm6
  mulss    xmm0, xmm2 // xmm0 = Exp * 0.69314718055995
  subss    xmm4, xmm7
  movss    xmm7, DWORD [SSE_LN_F2]
  movss    xmm6, xmm3
  mulss    xmm4, xmm5 // xmm4 = X2 * (0.024982445 * X - 0.24371102)
  subss    xmm6, xmm7
  movss    xmm2, DWORD [SSE_LN_F1]
  addss    xmm4, xmm6 // xmm4 = (X - 2.2744832) + X2 * (0.024982445 * X - 0.24371102)
  mulss    xmm3, xmm2
  mulss    xmm4, xmm5 // xmm4 = X2 * ((X - 2.2744832) + X2 * (0.024982445 * X - 0.24371102))
  addss    xmm3, xmm1 // xmm3 = (3.3977745 * X + AddCst)
  addss    xmm4, xmm0
  addss    xmm3, xmm4

  movss    xmm0, xmm3                // scalar result returns in xmm0

  movdqa   xmm6, [rsp-24]            // restore xmm6/xmm7
  movdqa   xmm7, [rsp-40]
end;
{ Fast 2-wide approximation of Ln(A) (SSE2).
  Same algorithm as the scalar FastLn on the low two lanes; constants loaded
  8 bytes wide with MOVLPS.
  In:   A — per the file-header note, Delphi >= 10.3 Rio passes the address
        of the 8-byte record (MOVLPS path), older versions pass the value in
        the register (MOVQ path).
  Out:  Result — 8-byte record returned in RAX per the Win64 convention.
  Uses: xmm0-xmm7; non-volatile xmm6/xmm7 saved/restored below RSP.
  NOTE(review): Win64 defines no red zone — storage below RSP relies on this
  being a leaf routine; confirm against the compiler-generated frame. }
function FastLn(const A: TVector2): TVector2; assembler;
asm
  movdqa   [rsp-24], xmm6            // save non-volatile xmm6/xmm7 (see header note)
  movdqa   [rsp-40], xmm7

  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  xorps    xmm2, xmm2
  movaps   xmm1, xmm0
  movlps   xmm3, QWORD [SSE_LN_CST]
  movlps   xmm4, QWORD [SSE_NEG_INFINITY]

  // Exp := Val.I shr 23   (integer shift of the float's bit pattern)
  psrld    xmm0, 23
  movaps   xmm5, xmm1
  cvtdq2ps xmm0, xmm0 // xmm0=Exp

  // if (A > 0) then AddCst := -89.93423858 else AddCst := NegInfinity
  cmpnleps xmm1, xmm2 // (A > 0)? Yes: $FFFFFFFF, No: $00000000
  movlps   xmm2, QWORD [SSE_MASK_FRACTION]
  andps    xmm3, xmm1 // (A > 0)? Yes: -89.93423858, No: 0
  andnps   xmm1, xmm4 // (A > 0)? Yes: 0, No: NegInfinity
  movlps   xmm4, QWORD [SSE_EXP_I1]
  orps     xmm1, xmm3 // (A > 0)? Yes: -89.93423858, No: NegInfinity

  // Val.I := (Val.I and $007FFFFF) or $3F800000   (mantissa -> [1,2))
  pand     xmm5, xmm2
  movlps   xmm2, QWORD [SSE_LN_F5]
  por      xmm5, xmm4
  movlps   xmm6, QWORD [SSE_LN_F3]
  movaps   xmm3, xmm5 // xmm3=X
  mulps    xmm5, xmm5 // xmm5=X2

  movaps   xmm4, xmm3
  movlps   xmm7, QWORD [SSE_LN_F4]
  mulps    xmm4, xmm6
  mulps    xmm0, xmm2 // xmm0 = Exp * 0.69314718055995
  subps    xmm4, xmm7
  movlps   xmm7, QWORD [SSE_LN_F2]
  movaps   xmm6, xmm3
  mulps    xmm4, xmm5 // xmm4 = X2 * (0.024982445 * X - 0.24371102)
  subps    xmm6, xmm7
  movlps   xmm2, QWORD [SSE_LN_F1]
  addps    xmm4, xmm6 // xmm4 = (X - 2.2744832) + X2 * (0.024982445 * X - 0.24371102)
  mulps    xmm3, xmm2
  mulps    xmm4, xmm5 // xmm4 = X2 * ((X - 2.2744832) + X2 * (0.024982445 * X - 0.24371102))
  addps    xmm3, xmm1 // xmm3 = (3.3977745 * X + AddCst)
  addps    xmm4, xmm0
  addps    xmm3, xmm4

  movq     rax, xmm3                 // 8-byte record result goes in RAX (see file header)

  movdqa   xmm6, [rsp-24]            // restore xmm6/xmm7
  movdqa   xmm7, [rsp-40]
end;
{ Approximates the natural logarithm of each component of a 3D vector using
  the SSE2 polynomial scheme: the float is split into exponent and mantissa,
  Exp * Ln(2) is added to a polynomial in the normalized mantissa.
  Returns NegInfinity for components <= 0 (see SSE_LN_CST/SSE_NEG_INFINITY). }
function FastLn(const A: TVector3): TVector3; assembler;
asm
  // NOTE(review): xmm6/xmm7 are callee-saved on Win64 and are spilled below
  // RSP without adjusting it; Win64 has no red zone — verify this is safe here.
  movdqa   [rsp-24], xmm6
  movdqa   [rsp-40], xmm7

  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  xorps    xmm2, xmm2
  movaps   xmm1, xmm0
  movups   xmm3, [SSE_LN_CST]
  movups   xmm4, [SSE_NEG_INFINITY]

  // Exp := Val.I shr 23
  psrld    xmm0, 23
  movaps   xmm5, xmm1
  cvtdq2ps xmm0, xmm0 // xmm0=Exp

  // if (A > 0) then AddCst := -89.93423858 else AddCst := NegInfinity
  cmpnleps xmm1, xmm2 // (A > 0)? Yes: $FFFFFFFF, No: $00000000
  movups   xmm2, [SSE_MASK_FRACTION]
  andps    xmm3, xmm1 // (A > 0)? Yes: -89.93423858, No: 0
  andnps   xmm1, xmm4 // (A > 0)? Yes: 0, No: NegInfinity
  movups   xmm4, [SSE_EXP_I1]
  orps     xmm1, xmm3 // (A > 0)? Yes: -89.93423858, No: NegInfinity

  // Val.I := (Val.I and $007FFFFF) or $3F800000
  pand     xmm5, xmm2
  movups   xmm2, [SSE_LN_F5]
  por      xmm5, xmm4
  movups   xmm6, [SSE_LN_F3]
  movaps   xmm3, xmm5 // xmm3=X
  mulps    xmm5, xmm5 // xmm5=X2

  movaps   xmm4, xmm3
  movups   xmm7, [SSE_LN_F4]
  mulps    xmm4, xmm6
  mulps    xmm0, xmm2 // xmm0 = Exp * 0.69314718055995
  subps    xmm4, xmm7
  movups   xmm7, [SSE_LN_F2]
  movaps   xmm6, xmm3
  mulps    xmm4, xmm5 // xmm4 = X2 * (0.024982445 * X - 0.24371102)
  subps    xmm6, xmm7
  movups   xmm2, [SSE_LN_F1]
  addps    xmm4, xmm6 // xmm4 = (X - 2.2744832) + X2 * (0.024982445 * X - 0.24371102)
  mulps    xmm3, xmm2
  mulps    xmm4, xmm5 // xmm4 = X2 * ((X - 2.2744832) + X2 * (0.024982445 * X - 0.24371102))
  addps    xmm3, xmm1 // xmm3 = (3.3977745 * X + AddCst)
  addps    xmm4, xmm0
  addps    xmm3, xmm4

  movhlps  xmm2, xmm3
  movq     [Result], xmm3
  movss    DWORD [Result+8], xmm2

  movdqa   xmm6, [rsp-24]
  movdqa   xmm7, [rsp-40]
end;

{ Approximates the natural logarithm of each component of a 4D vector.
  Same algorithm as the TVector3 overload, operating on all four lanes.
  Returns NegInfinity for components <= 0. }
function FastLn(const A: TVector4): TVector4; assembler;
asm
  // NOTE(review): xmm6/xmm7 are callee-saved on Win64 and are spilled below
  // RSP without adjusting it; Win64 has no red zone — verify this is safe here.
  movdqa   [rsp-24], xmm6
  movdqa   [rsp-40], xmm7

  movups   xmm0, [A]
  xorps    xmm2, xmm2
  movaps   xmm1, xmm0
  movups   xmm3, [SSE_LN_CST]
  movups   xmm4, [SSE_NEG_INFINITY]

  // Exp := Val.I shr 23
  psrld    xmm0, 23
  movaps   xmm5, xmm1
  cvtdq2ps xmm0, xmm0 // xmm0=Exp

  // if (A > 0) then AddCst := -89.93423858 else AddCst := NegInfinity
  cmpnleps xmm1, xmm2 // (A > 0)? Yes: $FFFFFFFF, No: $00000000
  movups   xmm2, [SSE_MASK_FRACTION]
  andps    xmm3, xmm1 // (A > 0)? Yes: -89.93423858, No: 0
  andnps   xmm1, xmm4 // (A > 0)? Yes: 0, No: NegInfinity
  movups   xmm4, [SSE_EXP_I1]
  orps     xmm1, xmm3 // (A > 0)? Yes: -89.93423858, No: NegInfinity

  // Val.I := (Val.I and $007FFFFF) or $3F800000
  pand     xmm5, xmm2
  movups   xmm2, [SSE_LN_F5]
  por      xmm5, xmm4
  movups   xmm6, [SSE_LN_F3]
  movaps   xmm3, xmm5 // xmm3=X
  mulps    xmm5, xmm5 // xmm5=X2

  movaps   xmm4, xmm3
  movups   xmm7, [SSE_LN_F4]
  mulps    xmm4, xmm6
  mulps    xmm0, xmm2 // xmm0 = Exp * 0.69314718055995
  subps    xmm4, xmm7
  movups   xmm7, [SSE_LN_F2]
  movaps   xmm6, xmm3
  mulps    xmm4, xmm5 // xmm4 = X2 * (0.024982445 * X - 0.24371102)
  subps    xmm6, xmm7
  movups   xmm2, [SSE_LN_F1]
  addps    xmm4, xmm6 // xmm4 = (X - 2.2744832) + X2 * (0.024982445 * X - 0.24371102)
  mulps    xmm3, xmm2
  mulps    xmm4, xmm5 // xmm4 = X2 * ((X - 2.2744832) + X2 * (0.024982445 * X - 0.24371102))
  addps    xmm3, xmm1 // xmm3 = (3.3977745 * X + AddCst)
  addps    xmm4, xmm0
  addps    xmm3, xmm4

  movups   [Result], xmm3

  movdqa   xmm6, [rsp-24]
  movdqa   xmm7, [rsp-40]
end;

{ Approximates Log2(A) for a scalar Single: the raw float bits (reinterpreted
  as an integer) give the exponent contribution, and a small rational
  correction in the normalized mantissa (forced to [0.5, 1.0) via $3F000000)
  refines the result. Uses only volatile registers (xmm0-xmm4). }
function FastLog2(const A: Single): Single; assembler;
asm
  movss    xmm2, DWORD [SSE_MASK_FRACTION]
  movss    xmm1, xmm0

  // MX.I := (VX.I and $007FFFFF) or $3F000000
  movss    xmm3, DWORD [SSE_LOG2_I1]
  pand     xmm0, xmm2
  cvtdq2ps xmm1, xmm1
  movss    xmm4, DWORD [SSE_LOG2_F1]
  por      xmm0, xmm3

  movss    xmm2, DWORD [SSE_LOG2_F2]
  mulss    xmm1, xmm4 // VX.I * 1.1920928955078125e-7
  movss    xmm3, DWORD [SSE_LOG2_F3]
  subss    xmm1, xmm2 // Result - 124.22551499
  mulss    xmm3, xmm0
  movss    xmm4, DWORD [SSE_LOG2_F5]
  subss    xmm1, xmm3 // Result - 124.22551499 - 1.498030302 * MX.S
  movss    xmm2, DWORD [SSE_LOG2_F4]
  addss    xmm0, xmm4
  divss    xmm2, xmm0
  subss    xmm1, xmm2 // Result - 124.22551499 - 1.498030302 * MX.S - 1.72587999 / (0.3520887068 + MX.S)

  movss    xmm0, xmm1
end;

{ Approximates Log2 of both components of a 2D vector; packed variant of the
  scalar FastLog2. Result is returned in RAX (8-byte record convention). }
function FastLog2(const A: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  movlps   xmm2, QWORD [SSE_MASK_FRACTION]
  movaps   xmm1, xmm0

  // MX.I := (VX.I and $007FFFFF) or $3F000000
  movlps   xmm3, QWORD [SSE_LOG2_I1]
  pand     xmm0, xmm2
  cvtdq2ps xmm1, xmm1
  movlps   xmm4, QWORD [SSE_LOG2_F1]
  por      xmm0, xmm3

  movlps   xmm2, QWORD [SSE_LOG2_F2]
  mulps    xmm1, xmm4 // VX.I * 1.1920928955078125e-7
  movlps   xmm3, QWORD [SSE_LOG2_F3]
  subps    xmm1, xmm2 // Result - 124.22551499
  mulps    xmm3, xmm0
  movlps   xmm4, QWORD [SSE_LOG2_F5]
  subps    xmm1, xmm3 // Result - 124.22551499 - 1.498030302 * MX.S
  movlps   xmm2, QWORD [SSE_LOG2_F4]
  addps    xmm0, xmm4
  divps    xmm2, xmm0
  subps    xmm1, xmm2 // Result - 124.22551499 - 1.498030302 * MX.S - 1.72587999 / (0.3520887068 + MX.S)

  movq     rax, xmm1
end;

{ Approximates Log2 of all three components of a 3D vector. The X/Y pair and
  the Z component are loaded separately and combined with MOVLHPS. }
function FastLog2(const A: TVector3): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  movups   xmm2, [SSE_MASK_FRACTION]
  movaps   xmm1, xmm0

  // MX.I := (VX.I and $007FFFFF) or $3F000000
  movups   xmm3, [SSE_LOG2_I1]
  pand     xmm0, xmm2
  cvtdq2ps xmm1, xmm1
  movups   xmm4, [SSE_LOG2_F1]
  por      xmm0, xmm3

  movups   xmm2, [SSE_LOG2_F2]
  mulps    xmm1, xmm4 // VX.I * 1.1920928955078125e-7
  movups   xmm3, [SSE_LOG2_F3]
  subps    xmm1, xmm2 // Result - 124.22551499
  mulps    xmm3, xmm0
  movups   xmm4, [SSE_LOG2_F5]
  subps    xmm1, xmm3 // Result - 124.22551499 - 1.498030302 * MX.S
  movups   xmm2, [SSE_LOG2_F4]
  addps    xmm0, xmm4
  divps    xmm2, xmm0
  subps    xmm1, xmm2 // Result - 124.22551499 - 1.498030302 * MX.S - 1.72587999 / (0.3520887068 + MX.S)

  movhlps  xmm0, xmm1
  movq     [Result], xmm1
  movss    DWORD [Result+8], xmm0
end;

{ Approximates Log2 of all four components of a 4D vector in one pass. }
function FastLog2(const A: TVector4): TVector4; assembler;
asm
  movups   xmm0, [A]
  movups   xmm2, [SSE_MASK_FRACTION]
  movaps   xmm1, xmm0

  // MX.I := (VX.I and $007FFFFF) or $3F000000
  movups   xmm3, [SSE_LOG2_I1]
  pand     xmm0, xmm2
  cvtdq2ps xmm1, xmm1
  movups   xmm4, [SSE_LOG2_F1]
  por      xmm0, xmm3

  movups   xmm2, [SSE_LOG2_F2]
  mulps    xmm1, xmm4 // VX.I * 1.1920928955078125e-7
  movups   xmm3, [SSE_LOG2_F3]
  subps    xmm1, xmm2 // Result - 124.22551499
  mulps    xmm3, xmm0
  movups   xmm4, [SSE_LOG2_F5]
  subps    xmm1, xmm3 // Result - 124.22551499 - 1.498030302 * MX.S
  movups   xmm2, [SSE_LOG2_F4]
  addps    xmm0, xmm4
  divps    xmm2, xmm0
  subps    xmm1, xmm2 // Result - 124.22551499 - 1.498030302 * MX.S - 1.72587999 / (0.3520887068 + MX.S)

  movups   [Result], xmm1
end;

{ Approximates 2^A for a scalar Single. Temporarily switches the MXCSR
  rounding mode to round-down so CVTPS2DQ yields Floor(A); the fractional
  part Z then feeds a small rational correction, and the result is assembled
  directly in the float's exponent bits via the (1 shl 23) scale. }
function FastExp2(const A: Single): Single; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Down (toward -infinity)
  stmxcsr  [OldFlags]
  mov      ecx, [OldFlags]
  xorps    xmm1, xmm1
  and      ecx, SSE_ROUND_MASK
  movss    xmm3, xmm0
  or       ecx, SSE_ROUND_DOWN
  movss    xmm5, xmm0
  mov      [NewFlags], ecx

  movss    xmm1, DWORD [SSE_EXP2_F1]
  ldmxcsr  [NewFlags]

  // Z := A - RoundDown(A)
  cvtps2dq xmm3, xmm3
  addss    xmm1, xmm5 // A + 121.2740575
  cvtdq2ps xmm3, xmm3
  movss    xmm2, DWORD [SSE_EXP2_F2]
  subss    xmm0, xmm3

  movss    xmm3, DWORD [SSE_EXP2_F3]
  movss    xmm4, DWORD [SSE_EXP2_F4]
  subss    xmm3, xmm0 // (4.84252568 - Z)
  mulss    xmm0, xmm4 // 1.49012907 * Z
  divss    xmm2, xmm3
  movss    xmm5, DWORD [SSE_EXP2_F5]
  addss    xmm1, xmm2 // A + 121.2740575 + 27.7280233 / (4.84252568 - Z)
  subss    xmm1, xmm0 // A + 121.2740575 + 27.7280233 / (4.84252568 - Z) - 1.49012907 * Z
  mulss    xmm1, xmm5 // (1 shl 23) * (A + 121.2740575 + 27.7280233 / (4.84252568 - Z) - 1.49012907 * Z)
  cvtps2dq xmm1, xmm1

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movss    xmm0, xmm1
end;

{ Approximates 2^A componentwise for a 2D vector; packed variant of the
  scalar FastExp2. MXCSR is switched to round-down and restored on exit. }
function FastExp2(const A: TVector2): TVector2; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Down (toward -infinity)
  stmxcsr  [OldFlags]
  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  mov      ecx, [OldFlags]
  xorps    xmm1, xmm1
  and      ecx, SSE_ROUND_MASK
  movaps   xmm3, xmm0
  or       ecx, SSE_ROUND_DOWN
  movaps   xmm5, xmm0
  mov      [NewFlags], ecx

  movlps   xmm1, QWORD [SSE_EXP2_F1]
  ldmxcsr  [NewFlags]

  // Z := A - RoundDown(A)
  cvtps2dq xmm3, xmm3
  addps    xmm1, xmm5 // A + 121.2740575
  cvtdq2ps xmm3, xmm3
  movlps   xmm2, QWORD [SSE_EXP2_F2]
  subps    xmm0, xmm3

  movlps   xmm3, QWORD [SSE_EXP2_F3]
  movlps   xmm4, QWORD [SSE_EXP2_F4]
  subps    xmm3, xmm0 // (4.84252568 - Z)
  mulps    xmm0, xmm4 // 1.49012907 * Z
  divps    xmm2, xmm3
  movlps   xmm5, QWORD [SSE_EXP2_F5]
  addps    xmm1, xmm2 // A + 121.2740575 + 27.7280233 / (4.84252568 - Z)
  subps    xmm1, xmm0 // A + 121.2740575 + 27.7280233 / (4.84252568 - Z) - 1.49012907 * Z
  mulps    xmm1, xmm5 // (1 shl 23) * (A + 121.2740575 + 27.7280233 / (4.84252568 - Z) - 1.49012907 * Z)
  cvtps2dq xmm1, xmm1

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movq     rax, xmm1
end;

{ Approximates 2^A componentwise for a 3D vector; packed variant of the
  scalar FastExp2. MXCSR is switched to round-down and restored on exit. }
function FastExp2(const A: TVector3): TVector3; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Down (toward -infinity)
  stmxcsr  [OldFlags]
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  mov      edx, [OldFlags]
  xorps    xmm1, xmm1
  and      edx, SSE_ROUND_MASK
  movaps   xmm3, xmm0
  or       edx, SSE_ROUND_DOWN
  movaps   xmm5, xmm0
  mov      [NewFlags], edx

  movups   xmm1, [SSE_EXP2_F1]
  ldmxcsr  [NewFlags]

  // Z := A - RoundDown(A)
  cvtps2dq xmm3, xmm3
  addps    xmm1, xmm5 // A + 121.2740575
  cvtdq2ps xmm3, xmm3
  movups   xmm2, [SSE_EXP2_F2]
  subps    xmm0, xmm3

  movups   xmm3, [SSE_EXP2_F3]
  movups   xmm4, [SSE_EXP2_F4]
  subps    xmm3, xmm0 // (4.84252568 - Z)
  mulps    xmm0, xmm4 // 1.49012907 * Z
  divps    xmm2, xmm3
  movups   xmm5, [SSE_EXP2_F5]
  addps    xmm1, xmm2 // A + 121.2740575 + 27.7280233 / (4.84252568 - Z)
  subps    xmm1, xmm0 // A + 121.2740575 + 27.7280233 / (4.84252568 - Z) - 1.49012907 * Z
  mulps    xmm1, xmm5 // (1 shl 23) * (A + 121.2740575 + 27.7280233 / (4.84252568 - Z) - 1.49012907 * Z)
  cvtps2dq xmm1, xmm1

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movhlps  xmm0, xmm1
  movq     [Result], xmm1
  movss    DWORD [Result+8], xmm0
end;

{ Approximates 2^A componentwise for a 4D vector; packed variant of the
  scalar FastExp2. MXCSR is switched to round-down and restored on exit. }
function FastExp2(const A: TVector4): TVector4; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Down (toward -infinity)
  stmxcsr  [OldFlags]
  movups   xmm0, [A]
  mov      edx, [OldFlags]
  xorps    xmm1, xmm1
  and      edx, SSE_ROUND_MASK
  movaps   xmm3, xmm0
  or       edx, SSE_ROUND_DOWN
  movaps   xmm5, xmm0
  mov      [NewFlags], edx

  movups   xmm1, [SSE_EXP2_F1]
  ldmxcsr  [NewFlags]

  // Z := A - RoundDown(A)
  cvtps2dq xmm3, xmm3
  addps    xmm1, xmm5 // A + 121.2740575
  cvtdq2ps xmm3, xmm3
  movups   xmm2, [SSE_EXP2_F2]
  subps    xmm0, xmm3

  movups   xmm3, [SSE_EXP2_F3]
  movups   xmm4, [SSE_EXP2_F4]
  subps    xmm3, xmm0 // (4.84252568 - Z)
  mulps    xmm0, xmm4 // 1.49012907 * Z
  divps    xmm2, xmm3
  movups   xmm5, [SSE_EXP2_F5]
  addps    xmm1, xmm2 // A + 121.2740575 + 27.7280233 / (4.84252568 - Z)
  subps    xmm1, xmm0 // A + 121.2740575 + 27.7280233 / (4.84252568 - Z) - 1.49012907 * Z
  mulps    xmm1, xmm5 // (1 shl 23) * (A + 121.2740575 + 27.7280233 / (4.84252568 - Z) - 1.49012907 * Z)
  cvtps2dq xmm1, xmm1

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movups   [Result], xmm1
end;

{ Common Functions }

{ Absolute value of a scalar; delegates to System.Abs. }
function Abs(const A: Single): Single;
begin
  Result := System.Abs(A);
end;

{ Componentwise absolute value of a 2D vector (plain Pascal; no SIMD). }
function Abs(const A: TVector2): TVector2;
begin
  Result.Init(System.Abs(A.X), System.Abs(A.Y));
end;

{ Componentwise absolute value of a 3D vector: clears each sign bit by
  ANDing with SSE_MASK_ABS_VAL. X/Y and Z are handled separately. }
function Abs(const A: TVector3): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movups   xmm2, [SSE_MASK_ABS_VAL]
  andps    xmm0, xmm2
  pand     xmm1, xmm2
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Componentwise absolute value of a 4D vector: one packed AND with
  SSE_MASK_ABS_VAL clears all four sign bits. }
function Abs(const A: TVector4): TVector4; assembler;
asm
  movups   xmm0, [A]
  movups   xmm1, [SSE_MASK_ABS_VAL]
  andps    xmm0, xmm1
  movups   [Result], xmm0
end;

{ Returns -1, 0 or +1 depending on the sign of A: copies A's sign bit onto
  the constant 1.0, then zeroes the result when A = 0. Branch-free. }
function Sign(const A: Single): Single; assembler;
asm
  movss    xmm1, DWORD [SSE_ONE]
  movss    xmm2, xmm0
  movss    xmm3, DWORD [SSE_MASK_SIGN]

  andps    xmm0, xmm3 // (A < 0)? Yes: $80000000, No: $00000000
  xorps    xmm4, xmm4
  orps     xmm0, xmm1 // (A < 0)? Yes: -1, No: 1
  cmpneqss xmm2, xmm4 // (A = 0)? Yes: $00000000, No: $FFFFFFFF
  andps    xmm0, xmm2 // (A = 0)? Yes: 0, No: -1 or 1
end;

{ Componentwise Sign for a 2D vector (-1, 0 or +1 per lane); packed variant
  of the scalar Sign. Result returned in RAX (8-byte record convention). }
function Sign(const A: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  movlps   xmm1, QWORD [SSE_ONE]
  movaps   xmm2, xmm0
  movlps   xmm3, QWORD [SSE_MASK_SIGN]

  andps    xmm0, xmm3 // (A < 0)? Yes: $80000000, No: $00000000
  xorps    xmm4, xmm4
  orps     xmm0, xmm1 // (A < 0)? Yes: -1, No: 1
  cmpneqps xmm2, xmm4 // (A = 0)? Yes: $00000000, No: $FFFFFFFF
  andps    xmm0, xmm2 // (A = 0)? Yes: 0, No: -1 or 1
  movq     rax, xmm0
end;

{ Componentwise Sign for a 3D vector (-1, 0 or +1 per lane). }
function Sign(const A: TVector3): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  movups   xmm1, [SSE_ONE]
  movaps   xmm2, xmm0
  movups   xmm3, [SSE_MASK_SIGN]

  andps    xmm0, xmm3 // (A < 0)? Yes: $80000000, No: $00000000
  xorps    xmm4, xmm4
  orps     xmm0, xmm1 // (A < 0)? Yes: -1, No: 1
  cmpneqps xmm2, xmm4 // (A = 0)? Yes: $00000000, No: $FFFFFFFF
  andps    xmm0, xmm2 // (A = 0)? Yes: 0, No: -1 or 1
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Componentwise Sign for a 4D vector (-1, 0 or +1 per lane). }
function Sign(const A: TVector4): TVector4; assembler;
asm
  movups   xmm0, [A]
  movups   xmm1, [SSE_ONE]
  movaps   xmm2, xmm0
  movups   xmm3, [SSE_MASK_SIGN]

  andps    xmm0, xmm3 // (A < 0)? Yes: $80000000, No: $00000000
  xorps    xmm4, xmm4
  orps     xmm0, xmm1 // (A < 0)? Yes: -1, No: 1
  cmpneqps xmm2, xmm4 // (A = 0)? Yes: $00000000, No: $FFFFFFFF
  andps    xmm0, xmm2 // (A = 0)? Yes: 0, No: -1 or 1
  movups   [Result], xmm0
end;

{ Scalar Floor; delegates to System.Math.Floor. The disabled SSE variant
  below is kept for reference. }
function Floor(const A: Single): Integer;
begin
  Result := System.Math.Floor(A);
end;
{function Floor(const A: Single): Integer; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Down
  stmxcsr  [OldFlags]
  mov      ecx, [OldFlags]
  and      ecx, SSE_ROUND_MASK
  or       ecx, SSE_ROUND_DOWN
  mov      [NewFlags], ecx
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movd     eax, xmm0
end;}

{ Componentwise Floor of a 2D vector to a 2D integer vector: CVTPS2DQ with
  MXCSR temporarily set to round-down; the previous mode is restored. }
function Floor(const A: TVector2): TIVector2; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Down
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_DOWN
  mov      [NewFlags], eax
  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movq     rax, xmm0
end;

{ Componentwise Floor of a 3D vector to a 3D integer vector (round-down
  MXCSR mode, restored on exit). }
function Floor(const A: TVector3): TIVector3; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Down
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_DOWN
  mov      [NewFlags], eax
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Componentwise Floor of a 4D vector to a 4D integer vector (round-down
  MXCSR mode, restored on exit). }
function Floor(const A: TVector4): TIVector4; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Down
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_DOWN
  mov      [NewFlags], eax
  movups   xmm0, [A]
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movups   [Result], xmm0
end;

{ Scalar Trunc; delegates to System.Trunc. The disabled SSE variant below is
  kept for reference. }
function Trunc(const A: Single): Integer;
begin
  Result := System.Trunc(A);
end;

{function Trunc(const A: Single): Integer; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      ecx, [OldFlags]
  and      ecx, SSE_ROUND_MASK
  or       ecx, SSE_ROUND_TRUNC
  mov      [NewFlags], ecx
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movd     eax, xmm0
end;}

{ Componentwise Trunc of a 2D vector to a 2D integer vector (truncate MXCSR
  mode, restored on exit). }
function Trunc(const A: TVector2): TIVector2; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_TRUNC
  mov      [NewFlags], eax
  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movq     rax, xmm0
end;

{ Componentwise Trunc of a 3D vector to a 3D integer vector (truncate MXCSR
  mode, restored on exit). }
function Trunc(const A: TVector3): TIVector3; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_TRUNC
  mov      [NewFlags], eax
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Componentwise Trunc of a 4D vector to a 4D integer vector (truncate MXCSR
  mode, restored on exit). }
function Trunc(const A: TVector4): TIVector4; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_TRUNC
  mov      [NewFlags], eax
  movups   xmm0, [A]
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movups   [Result], xmm0
end;

{ Scalar Round; delegates to System.Round. }
function Round(const A: Single): Integer;
begin
  Result := System.Round(A);
end;

{ Componentwise Round of a 2D vector to a 2D integer vector. CVTPS2DQ uses
  the current MXCSR mode, which defaults to round-to-nearest. }
function Round(const A: TVector2): TIVector2; assembler;
asm
  // Rounding mode defaults to round-to-nearest
  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  cvtps2dq xmm0, xmm0
  movq     rax, xmm0
end;

{ Componentwise Round of a 3D vector to a 3D integer vector (default
  round-to-nearest MXCSR mode). }
function Round(const A: TVector3): TIVector3; assembler;
asm
  // Rounding mode defaults to round-to-nearest
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  cvtps2dq xmm0, xmm0
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Componentwise Round of a 4D vector to a 4D integer vector (default
  round-to-nearest MXCSR mode). }
function Round(const A: TVector4): TIVector4; assembler;
asm
  // Rounding mode defaults to round-to-nearest
  movups   xmm0, [A]
  cvtps2dq xmm0, xmm0
  movups   [Result], xmm0
end;

{ Scalar Ceil; delegates to System.Math.Ceil. The disabled SSE variant below
  is kept for reference. }
function Ceil(const A: Single): Integer;
begin
  Result := System.Math.Ceil(A);
end;
{function Ceil(const A: Single): Integer; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Up
  stmxcsr  [OldFlags]
  mov      ecx, [OldFlags]
  and      ecx, SSE_ROUND_MASK
  or       ecx, SSE_ROUND_UP
  mov      [NewFlags], ecx
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movd     eax, xmm0
end;}

{ Componentwise Ceil of a 2D vector to a 2D integer vector: CVTPS2DQ with
  MXCSR temporarily set to round-up; the previous mode is restored. }
function Ceil(const A: TVector2): TIVector2; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Up
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_UP
  mov      [NewFlags], eax
  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movq     rax, xmm0
end;

{ Componentwise Ceil of a 3D vector to a 3D integer vector (round-up MXCSR
  mode, restored on exit). }
function Ceil(const A: TVector3): TIVector3; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Up
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_UP
  mov      [NewFlags], eax
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Componentwise Ceil of a 4D vector to a 4D integer vector (round-up MXCSR
  mode, restored on exit). }
function Ceil(const A: TVector4): TIVector4; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Round Up
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_UP
  mov      [NewFlags], eax
  movups   xmm0, [A]
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movups   [Result], xmm0
end;

{ Scalar fractional part; delegates to System.Frac. The disabled SSE variant
  below is kept for reference. }
function Frac(const A: Single): Single;
begin
  Result := System.Frac(A);
end;
{function Frac(const A: Single): Single; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      ecx, [OldFlags]
  and      ecx, SSE_ROUND_MASK
  or       ecx, SSE_ROUND_TRUNC
  mov      [NewFlags], ecx
  movss    xmm1, xmm0
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0
  ldmxcsr  [OldFlags]
  cvtdq2ps xmm0, xmm0
  subss    xmm1, xmm0 // A - Trunc(A)

  movss    xmm0, xmm1
end;}

{ Componentwise fractional part of a 2D vector: A - Trunc(A), computed via a
  round-trip CVTPS2DQ/CVTDQ2PS with MXCSR temporarily set to truncate. }
function Frac(const A: TVector2): TVector2; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      edx, [OldFlags]
  and      edx, SSE_ROUND_MASK
  or       edx, SSE_ROUND_TRUNC
  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  mov      [NewFlags], edx
  movaps   xmm1, xmm0
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0
  ldmxcsr  [OldFlags]
  cvtdq2ps xmm0, xmm0
  subps    xmm1, xmm0 // A - Trunc(A)

  movq     rax, xmm1
end;

{ Componentwise fractional part of a 3D vector: A - Trunc(A) using a
  truncating CVTPS2DQ/CVTDQ2PS round-trip; MXCSR restored on exit. }
function Frac(const A: TVector3): TVector3; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_TRUNC
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  mov      [NewFlags], eax
  movaps   xmm1, xmm0
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0
  ldmxcsr  [OldFlags]
  cvtdq2ps xmm0, xmm0
  subps    xmm1, xmm0 // A - Trunc(A)

  movhlps  xmm0, xmm1
  movq     [Result], xmm1
  movss    DWORD [Result+8], xmm0
end;

{ Componentwise fractional part of a 4D vector: A - Trunc(A) using a
  truncating CVTPS2DQ/CVTDQ2PS round-trip; MXCSR restored on exit. }
function Frac(const A: TVector4): TVector4; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_TRUNC
  movups   xmm0, [A]
  mov      [NewFlags], eax
  movaps   xmm1, xmm0
  ldmxcsr  [NewFlags]

  cvtps2dq xmm0, xmm0
  ldmxcsr  [OldFlags]
  cvtdq2ps xmm0, xmm0
  subps    xmm1, xmm0 // A - Trunc(A)

  movups   [Result], xmm1
end;

{ Scalar floating-point modulo: A - (B * Trunc(A / B)). The disabled SSE
  variant below is kept for reference. }
function FMod(const A, B: Single): Single;
begin
  Result := A - (B * Trunc(A / B));
end;
{function FMod(const A, B: Single): Single; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      edx, [OldFlags]
  movss    xmm2, xmm0
  and      edx, SSE_ROUND_MASK
  movss    xmm3, xmm1
  or       edx, SSE_ROUND_TRUNC
  divss    xmm2, xmm3 // A / B
  mov      [NewFlags], edx
  ldmxcsr  [NewFlags]

  cvtps2dq xmm2, xmm2
  cvtdq2ps xmm2, xmm2 // Trunc(A / B)
  mulss    xmm2, xmm1
  subss    xmm0, xmm2 // A - (B * Trunc(A / B))

  // Restore rounding mode
  ldmxcsr  [OldFlags]
end;}

{ Componentwise A mod B for a 2D vector and a scalar divisor: B is splatted
  to both lanes, then A - (B * Trunc(A / B)) is evaluated with MXCSR
  temporarily set to truncate. }
function FMod(const A: TVector2; const B: Single): TVector2; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  stmxcsr  [OldFlags]
  mov      ecx, [OldFlags]
  shufps   xmm1, xmm1, $00 // Replicate B
  and      ecx, SSE_ROUND_MASK
  movaps   xmm2, xmm0
  or       ecx, SSE_ROUND_TRUNC
  movaps   xmm3, xmm1
  mov      [NewFlags], ecx
  divps    xmm2, xmm3 // A / B
  ldmxcsr  [NewFlags]

  cvtps2dq xmm2, xmm2
  cvtdq2ps xmm2, xmm2 // Trunc(A / B)
  mulps    xmm2, xmm1
  subps    xmm0, xmm2 // A - (B * Trunc(A / B))

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movq     rax, xmm0
end;

{ Componentwise A mod B for two 2D vectors: A - (B * Trunc(A / B)) per lane,
  with MXCSR temporarily set to truncate. }
function FMod(const A, B: TVector2): TVector2; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}
  stmxcsr  [OldFlags]
  {$IF RTLVersion >= 33}
  movlps   xmm1, [B]
  {$ELSE}
  movq     xmm1, B
  {$ENDIF}
  mov      edx, [OldFlags]
  movaps   xmm2, xmm0
  and      edx, SSE_ROUND_MASK
  movaps   xmm3, xmm1
  or       edx, SSE_ROUND_TRUNC
  divps    xmm2, xmm3 // A / B
  mov      [NewFlags], edx
  ldmxcsr  [NewFlags]

  cvtps2dq xmm2, xmm2
  cvtdq2ps xmm2, xmm2 // Trunc(A / B)
  mulps    xmm2, xmm1
  subps    xmm0, xmm2 // A - (B * Trunc(A / B))

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movq     rax, xmm0
end;

{ Componentwise A mod B for a 3D vector and a scalar divisor: B is splatted
  to all lanes, then A - (B * Trunc(A / B)) with truncating MXCSR mode. }
function FMod(const A: TVector3; const B: Single): TVector3; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  movss    xmm1, B
  stmxcsr  [OldFlags]
  mov      edx, [OldFlags]
  shufps   xmm1, xmm1, $00 // Replicate B
  and      edx, SSE_ROUND_MASK
  movaps   xmm2, xmm0
  or       edx, SSE_ROUND_TRUNC
  movaps   xmm3, xmm1
  mov      [NewFlags], edx
  divps    xmm2, xmm3 // A / B
  ldmxcsr  [NewFlags]

  cvtps2dq xmm2, xmm2
  cvtdq2ps xmm2, xmm2 // Trunc(A / B)
  mulps    xmm2, xmm1
  subps    xmm0, xmm2 // A - (B * Trunc(A / B))

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Componentwise A mod B for two 3D vectors, delegated to the scalar FMod per
  component. The disabled SSE variant below is kept for reference. }
function FMod(const A, B: TVector3): TVector3;
begin
  Result.Init(Neslib.FastMath.FMod(A.X, B.X), Neslib.FastMath.FMod(A.Y, B.Y), Neslib.FastMath.FMod(A.Z, B.Z));
end;
{function FMod(const A, B: TVector3): TVector3; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  stmxcsr  [OldFlags]
  movq     xmm1, [B]
  movss    xmm2, DWORD [B+8]
  movlhps  xmm1, xmm2
  mov      edx, [OldFlags]
  movaps   xmm2, xmm0
  and      edx, SSE_ROUND_MASK
  movaps   xmm3, xmm1
  or       edx, SSE_ROUND_TRUNC
  divps    xmm2, xmm3 // A / B
  mov      [NewFlags], edx
  ldmxcsr  [NewFlags]

  cvtps2dq xmm2, xmm2
  cvtdq2ps xmm2, xmm2 // Trunc(A / B)
  mulps    xmm2, xmm1
  subps    xmm0, xmm2 // A - (B * Trunc(A / B))

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;}

{ Component-wise remainder of a 4D vector and a scalar:
  Result := A - (B * Trunc(A / B)).
  MXCSR is temporarily switched to round-toward-zero so that CVTPS2DQ
  truncates; the caller's rounding mode is restored before returning.
  Note the instruction interleaving: the DIVPS executes before the new
  MXCSR takes effect, which is fine because only the CVTPS2DQ needs the
  truncating mode. }
function FMod(const A: TVector4; const B: Single): TVector4; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  movups   xmm0, [A]
  movss    xmm1, B
  stmxcsr  [OldFlags]
  mov      edx, [OldFlags]
  shufps   xmm1, xmm1, $00 // Replicate B
  and      edx, SSE_ROUND_MASK
  movaps   xmm2, xmm0
  or       edx, SSE_ROUND_TRUNC
  movaps   xmm3, xmm1
  mov      [NewFlags], edx
  divps    xmm2, xmm3 // A / B
  ldmxcsr  [NewFlags]

  cvtps2dq xmm2, xmm2
  cvtdq2ps xmm2, xmm2 // Trunc(A / B)
  mulps    xmm2, xmm1
  subps    xmm0, xmm2 // A - (B * Trunc(A / B))

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movups   [Result], xmm0
end;

{ Component-wise remainder of two 4D vectors, same truncation technique
  as the scalar-B overload above. }
function FMod(const A, B: TVector4): TVector4; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  // Set rounding mode to Truncate
  movups   xmm0, [A]
  stmxcsr  [OldFlags]
  movups   xmm1, [B]
  mov      edx, [OldFlags]
  movaps   xmm2, xmm0
  and      edx, SSE_ROUND_MASK
  movaps   xmm3, xmm1
  or       edx, SSE_ROUND_TRUNC
  divps    xmm2, xmm3 // A / B
  mov      [NewFlags], edx
  ldmxcsr  [NewFlags]

  cvtps2dq xmm2, xmm2
  cvtdq2ps xmm2, xmm2 // Trunc(A / B)
  mulps    xmm2, xmm1
  subps    xmm0, xmm2 // A - (B * Trunc(A / B))

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movups   [Result], xmm0
end;
2678

2679
{ Splits A into an integral part (returned in B, truncated toward zero)
  and a fractional part (the function result, same sign as A). }
function ModF(const A: Single; out B: Integer): Single;
begin
  Result := Frac(A);
  B := Trunc(A);
end;
2684
{function ModF(const A: Single; out B: Integer): Single; assembler;
2685
var
2686
  OldFlags, NewFlags: UInt32;
2687
asm
2688
  // Set rounding mode to Truncate
2689
  stmxcsr  [OldFlags]
2690
  mov      ecx, [OldFlags]
2691
  and      ecx, SSE_ROUND_MASK
2692
  or       ecx, SSE_ROUND_TRUNC
2693
  mov      [NewFlags], ecx
2694
  ldmxcsr  [NewFlags]
2695

2696
  movss    xmm1, xmm0
2697
  cvtps2dq xmm0, xmm0
2698
  movss    [B], xmm0  // B = Trunc(A)
2699
  cvtdq2ps xmm0, xmm0
2700
  subss    xmm1, xmm0 // A - Trunc(A)
2701

2702
  // Restore rounding mode
2703
  ldmxcsr  [OldFlags]
2704

2705
  movss    xmm0, xmm1
2706
end;}
2707

2708
{ Vector ModF: B receives Trunc(A) per component (toward zero), the result
  is the fractional part A - Trunc(A). MXCSR is switched to truncating
  rounding around the CVTPS2DQ and restored afterwards.
  From Delphi 10.3 Rio on, an 8-byte TVector2 parameter is passed by
  reference instead of by value in a register (see header note), hence the
  RTLVersion conditional on the load. The 8-byte result is returned in RAX. }
function ModF(const A: TVector2; out B: TIVector2): TVector2; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  {$IF RTLVersion >= 33}
  movlps   xmm0, [A]
  {$ELSE}
  movq     xmm0, A
  {$ENDIF}

  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_TRUNC
  mov      [NewFlags], eax
  ldmxcsr  [NewFlags]

  movaps   xmm1, xmm0
  cvtps2dq xmm0, xmm0
  movlps   [B], xmm0  // B = Trunc(A)
  cvtdq2ps xmm0, xmm0
  subps    xmm1, xmm0 // A - Trunc(A)

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movq     rax, xmm1
end;

{ 3D ModF: the three floats are gathered into one XMM register
  (low quad via MOVQ, Z via MOVSS + MOVLHPS) and scattered back the same
  way on exit. }
function ModF(const A: TVector3; out B: TIVector3): TVector3; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1

  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_TRUNC
  mov      [NewFlags], eax
  ldmxcsr  [NewFlags]

  movaps   xmm1, xmm0
  cvtps2dq xmm0, xmm0
  movhlps  xmm2, xmm0
  movq     [B], xmm0  // B = Trunc(A)
  movd     DWORD [B+8], xmm2
  cvtdq2ps xmm0, xmm0
  subps    xmm1, xmm0 // A - Trunc(A)

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movhlps  xmm0, xmm1
  movq     [Result], xmm1
  movss    DWORD [Result+8], xmm0
end;

{ 4D ModF: full-register loads/stores (MOVUPS, unaligned-safe). }
function ModF(const A: TVector4; out B: TIVector4): TVector4; assembler;
var
  OldFlags, NewFlags: UInt32;
asm
  movups   xmm0, [A]

  // Set rounding mode to Truncate
  stmxcsr  [OldFlags]
  mov      eax, [OldFlags]
  and      eax, SSE_ROUND_MASK
  or       eax, SSE_ROUND_TRUNC
  mov      [NewFlags], eax
  ldmxcsr  [NewFlags]

  movaps   xmm1, xmm0
  cvtps2dq xmm0, xmm0
  movups   [B], xmm0  // B = Trunc(A)
  cvtdq2ps xmm0, xmm0
  subps    xmm1, xmm0 // A - Trunc(A)

  // Restore rounding mode
  ldmxcsr  [OldFlags]

  movups   [Result], xmm1
end;
2795

2796
{ Component-wise Min overloads (MINPS).
  TVector2 results fit in 8 bytes and are returned in RAX; TVector3/4
  results are written through the Result pointer. The scalar-B variants
  replicate B to all lanes with SHUFPS before the compare. }

{ Min of 2D vector and scalar. B arrives in XMM1. }
function Min(const A: TVector2; const B: Single): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [A]
  {$ELSE}
  movq   xmm0, A
  {$ENDIF}
  shufps xmm1, xmm1, $00 // Replicate B
  minps  xmm0, xmm1
  movq   rax, xmm0
end;

{ Min of two 2D vectors. }
function Min(const A, B: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [A]
  movlps xmm1, [B]
  {$ELSE}
  movq   xmm0, A
  movq   xmm1, B
  {$ENDIF}
  minps  xmm0, xmm1
  movq   rax, xmm0
end;

{ Min of 3D vector and scalar. B is in XMM2 here (slot shifted by the
  hidden Result pointer); only XYZ of the result are stored. }
function Min(const A: TVector3; const B: Single): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  shufps   xmm2, xmm2, $00 // Replicate B
  minps    xmm0, xmm2
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Min of two 3D vectors. }
function Min(const A, B: TVector3): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  movq     xmm1, [B]
  movss    xmm2, DWORD [B+8]
  movlhps  xmm1, xmm2
  minps    xmm0, xmm1
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Min of 4D vector and scalar (B in XMM2, see 3D variant). }
function Min(const A: TVector4; const B: Single): TVector4; assembler;
asm
  movups xmm0, [A]
  shufps xmm2, xmm2, $00 // Replicate B
  minps  xmm0, xmm2
  movups [Result], xmm0
end;

{ Min of two 4D vectors. }
function Min(const A, B: TVector4): TVector4; assembler;
asm
  movups xmm0, [A]
  movups xmm1, [B]
  minps  xmm0, xmm1
  movups [Result], xmm0
end;
2862

2863
{ Component-wise Max overloads (MAXPS) — mirror images of the Min
  overloads above; see those for the parameter/result conventions. }

{ Max of 2D vector and scalar. }
function Max(const A: TVector2; const B: Single): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [A]
  {$ELSE}
  movq   xmm0, A
  {$ENDIF}
  shufps xmm1, xmm1, $00 // Replicate B
  maxps  xmm0, xmm1
  movq   rax, xmm0
end;

{ Max of two 2D vectors. }
function Max(const A, B: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [A]
  movlps xmm1, [B]
  {$ELSE}
  movq   xmm0, A
  movq   xmm1, B
  {$ENDIF}
  maxps  xmm0, xmm1
  movq   rax, xmm0
end;

{ Max of 3D vector and scalar (B in XMM2). }
function Max(const A: TVector3; const B: Single): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  shufps   xmm2, xmm2, $00 // Replicate B
  maxps    xmm0, xmm2
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Max of two 3D vectors. }
function Max(const A, B: TVector3): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  movq     xmm1, [B]
  movss    xmm2, DWORD [B+8]
  movlhps  xmm1, xmm2
  maxps    xmm0, xmm1
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Max of 4D vector and scalar (B in XMM2). }
function Max(const A: TVector4; const B: Single): TVector4; assembler;
asm
  movups xmm0, [A]
  shufps xmm2, xmm2, $00 // Replicate B
  maxps  xmm0, xmm2
  movups [Result], xmm0
end;

{ Max of two 4D vectors. }
function Max(const A, B: TVector4): TVector4; assembler;
asm
  movups xmm0, [A]
  movups xmm1, [B]
  maxps  xmm0, xmm1
  movups [Result], xmm0
end;
2929

2930
{ EnsureRange overloads: clamp A into [AMin, AMax] per component via
  MAXPS (lower bound) followed by MINPS (upper bound). }

{ Scalar clamp: A/AMin/AMax arrive in XMM0/1/2, result stays in XMM0. }
function EnsureRange(const A, AMin, AMax: Single): Single; assembler;
asm
  maxss  xmm0, xmm1
  minss  xmm0, xmm2
end;

{ Clamp 2D vector to scalar bounds (replicated to both lanes). }
function EnsureRange(const A: TVector2; const AMin, AMax: Single): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [A]
  {$ELSE}
  movq   xmm0, A
  {$ENDIF}
  shufps xmm1, xmm1, $00 // Replicate AMin
  shufps xmm2, xmm2, $00 // Replicate AMax
  maxps  xmm0, xmm1
  minps  xmm0, xmm2
  movq   rax, xmm0
end;

{ Clamp 2D vector to per-component vector bounds. }
function EnsureRange(const A, AMin, AMax: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [A]
  movlps xmm1, [AMin]
  movlps xmm2, [AMax]
  {$ELSE}
  movq   xmm0, A
  movq   xmm1, AMin
  movq   xmm2, AMax
  {$ENDIF}
  maxps  xmm0, xmm1
  minps  xmm0, xmm2
  movq   rax, xmm0
end;

{ Clamp 3D vector to scalar bounds (bounds in XMM2/XMM3, shifted by the
  hidden Result pointer). }
function EnsureRange(const A: TVector3; const AMin, AMax: Single): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  shufps   xmm2, xmm2, $00 // Replicate AMin
  shufps   xmm3, xmm3, $00 // Replicate AMax
  maxps    xmm0, xmm2
  minps    xmm0, xmm3
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Clamp 3D vector to per-component vector bounds. }
function EnsureRange(const A, AMin, AMax: TVector3): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  movq     xmm1, [AMin]
  movss    xmm2, DWORD [AMin+8]
  movlhps  xmm1, xmm2
  movq     xmm2, [AMax]
  movss    xmm3, DWORD [AMax+8]
  movlhps  xmm2, xmm3
  maxps    xmm0, xmm1
  minps    xmm0, xmm2
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Clamp 4D vector to scalar bounds. }
function EnsureRange(const A: TVector4; const AMin, AMax: Single): TVector4; assembler;
asm
  movups xmm0, [A]
  shufps xmm2, xmm2, $00 // Replicate AMin
  shufps xmm3, xmm3, $00 // Replicate AMax
  maxps  xmm0, xmm2
  minps  xmm0, xmm3
  movups [Result], xmm0
end;

{ Clamp 4D vector to per-component vector bounds. }
function EnsureRange(const A, AMin, AMax: TVector4): TVector4; assembler;
asm
  movups xmm0, [A]
  movups xmm1, [AMin]
  movups xmm2, [AMax]
  maxps  xmm0, xmm1
  minps  xmm0, xmm2
  movups [Result], xmm0
end;
3017

3018
{ Linear interpolation between 2D vectors with a scalar weight:
  delegates to the scalar Mix per component. }
function Mix(const A, B: TVector2; const T: Single): TVector2;
begin
  Result.Init(Mix(A.X, B.X, T), Mix(A.Y, B.Y, T));
end;

{ Linear interpolation with per-component weights. }
function Mix(const A, B, T: TVector2): TVector2;
begin
  Result.Init(Mix(A.X, B.X, T.X), Mix(A.Y, B.Y, T.Y));
end;
3027

3028
{ SSE2 linear interpolation: Result = A + T * (B - A), per component. }

{ 3D mix with scalar weight (T in XMM3, slot after the hidden Result
  pointer, A and B). }
function Mix(const A, B: TVector3; const T: Single): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  movq     xmm1, [B]
  movss    xmm2, DWORD [B+8]
  movlhps  xmm1, xmm2
  shufps   xmm3, xmm3, $00 // Replicate T
  subps    xmm1, xmm0
  mulps    xmm1, xmm3
  addps    xmm0, xmm1 // A + (T * (B - A))
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ 3D mix with per-component weights. }
function Mix(const A, B, T: TVector3): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  movq     xmm1, [B]
  movss    xmm2, DWORD [B+8]
  movlhps  xmm1, xmm2
  movq     xmm2, [T]
  movss    xmm3, DWORD [T+8]
  movlhps  xmm2, xmm3
  subps    xmm1, xmm0
  mulps    xmm1, xmm2
  addps    xmm0, xmm1 // A + (T * (B - A))
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ 4D mix with scalar weight. }
function Mix(const A, B: TVector4; const T: Single): TVector4; assembler;
asm
  movups xmm0, [A]
  movups xmm1, [B]
  shufps xmm3, xmm3, $00 // Replicate T
  subps  xmm1, xmm0
  mulps  xmm1, xmm3
  addps  xmm0, xmm1 // A + (T * (B - A))
  movups [Result], xmm0
end;

{ 4D mix with per-component weights. }
function Mix(const A, B, T: TVector4): TVector4; assembler;
asm
  movups xmm0, [A]
  movups xmm1, [B]
  movups xmm2, [T]
  subps  xmm1, xmm0
  mulps  xmm1, xmm2
  addps  xmm0, xmm1 // A + (T * (B - A))
  movups [Result], xmm0
end;
3085

3086
{ GLSL-style Step: 1.0 per component where A >= AEdge, else 0.0.
  CMPNLTPS builds an all-ones/all-zeros mask which is then ANDed with a
  vector of 1.0 constants (SSE_ONE). }

{ Scalar edge, 2D vector (AEdge in XMM0, replicated). }
function Step(const AEdge: Single; const A: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps   xmm1, [A]
  {$ELSE}
  movq     xmm1, A
  {$ENDIF}
  shufps   xmm0, xmm0, $00 // Replicate AEdge
  movlps   xmm2, QWORD [SSE_ONE]
  cmpnltps xmm1, xmm0      // (A >= AEdge)? Yes: $FFFFFFFF, No: $00000000
  andps    xmm1, xmm2      // (A >= AEdge)? Yes: 1, No: 0
  movq     rax, xmm1
end;

{ Vector edge, 2D vector. }
function Step(const AEdge, A: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps   xmm0, [AEdge]
  movlps   xmm1, [A]
  {$ELSE}
  movq     xmm0, AEdge
  movq     xmm1, A
  {$ENDIF}
  movlps   xmm2, QWORD [SSE_ONE]
  cmpnltps xmm1, xmm0 // (A >= AEdge)? Yes: $FFFFFFFF, No: $00000000
  andps    xmm1, xmm2 // (A >= AEdge)? Yes: 1, No: 0
  movq     rax, xmm1
end;

{ Scalar edge, 3D vector (AEdge in XMM1, slot after the hidden Result
  pointer). }
function Step(const AEdge: Single; const A: TVector3): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm2, DWORD [A+8]
  movlhps  xmm0, xmm2
  shufps   xmm1, xmm1, $00 // Replicate AEdge
  movups   xmm2, [SSE_ONE]
  cmpnltps xmm0, xmm1      // (A >= AEdge)? Yes: $FFFFFFFF, No: $00000000
  andps    xmm0, xmm2      // (A >= AEdge)? Yes: 1, No: 0
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ Vector edge, 3D vector. }
function Step(const AEdge, A: TVector3): TVector3; assembler;
asm
  movq     xmm0, [AEdge]
  movss    xmm1, DWORD [AEdge+8]
  movlhps  xmm0, xmm1
  movq     xmm1, [A]
  movss    xmm2, DWORD [A+8]
  movlhps  xmm1, xmm2
  movups   xmm2, [SSE_ONE]
  cmpnltps xmm1, xmm0 // (A >= AEdge)? Yes: $FFFFFFFF, No: $00000000
  andps    xmm1, xmm2 // (A >= AEdge)? Yes: 1, No: 0
  movhlps  xmm0, xmm1
  movq     [Result], xmm1
  movss    DWORD [Result+8], xmm0
end;

{ Scalar edge, 4D vector. }
function Step(const AEdge: Single; const A: TVector4): TVector4; assembler;
asm
  movups   xmm0, [A]
  shufps   xmm1, xmm1, $00 // Replicate AEdge
  movups   xmm2, [SSE_ONE]
  cmpnltps xmm0, xmm1      // (A >= AEdge)? Yes: $FFFFFFFF, No: $00000000
  andps    xmm0, xmm2      // (A >= AEdge)? Yes: 1, No: 0
  movups   [Result], xmm0
end;
3154

3155
{ Vector edge, 4D vector: 1.0 per component where A >= AEdge, else 0.0.
  Fixed: added the `assembler` directive for consistency with every other
  pure-BASM overload in this file (the body is inline assembly only). }
function Step(const AEdge, A: TVector4): TVector4; assembler;
asm
  movups   xmm0, [AEdge]
  movups   xmm1, [A]
  movups   xmm2, [SSE_ONE]
  cmpnltps xmm1, xmm0 // (A >= AEdge)? Yes: $FFFFFFFF, No: $00000000
  andps    xmm1, xmm2 // (A >= AEdge)? Yes: 1, No: 0
  movups   [Result], xmm1
end;
3164

3165
{ SmoothStep Pascal fallbacks: delegate to the scalar SmoothStep per
  component (Hermite interpolation clamped to [AEdge0, AEdge1]). }

{ Scalar edges, 2D vector. }
function SmoothStep(const AEdge0, AEdge1: Single; const A: TVector2): TVector2;
begin
  Result.Init(SmoothStep(AEdge0, AEdge1, A.X), SmoothStep(AEdge0, AEdge1, A.Y));
end;

{ Vector edges, 2D vector. }
function SmoothStep(const AEdge0, AEdge1, A: TVector2): TVector2;
begin
  Result.Init(SmoothStep(AEdge0.X, AEdge1.X, A.X), SmoothStep(AEdge0.Y, AEdge1.Y, A.Y));
end;

{ Scalar edges, 3D vector (the SSE2 version that follows in this file is
  disabled/commented out). }
function SmoothStep(const AEdge0, AEdge1: Single; const A: TVector3): TVector3;
begin
  Result.Init(SmoothStep(AEdge0, AEdge1, A.X), SmoothStep(AEdge0, AEdge1, A.Y), SmoothStep(AEdge0, AEdge1, A.Z));
end;
3179
{function SmoothStep(const AEdge0, AEdge1: Single; const A: TVector3): TVector3; assembler;
3180
asm
3181
  movdqa   [rsp-24], xmm6
3182
  movdqa   [rsp-40], xmm7
3183

3184
  movq     xmm0, [A]
3185
  movss    xmm3, DWORD [A+8]
3186
  movlhps  xmm0, xmm3
3187
  shufps   xmm1, xmm1, $00 // Replicate AEdge0
3188
  shufps   xmm2, xmm2, $00 // Replicate AEdge1
3189
  movaps   xmm3, xmm0
3190
  movaps   xmm4, xmm0
3191
  movaps   xmm5, xmm0
3192
  movups   xmm6, [SSE_ONE]
3193

3194
  cmpnltps xmm3, xmm1 // (A >= AEdge0)? Yes: $FFFFFFFF, No: $00000000
3195
  cmpleps  xmm4, xmm2 // (A <= AEdge1)? Yes: $FFFFFFFF, No: $00000000
3196
  subps    xmm2, xmm1
3197
  movaps   xmm5, xmm4
3198
  subps    xmm0, xmm1
3199
  andnps   xmm5, xmm6 // (A >  AEdge1)? Yes: 1.0, No: 0.0
3200

3201
  movups   xmm6, [SSE_TWO]
3202
  divps    xmm0, xmm2 // Temp := (A - AEdge0) / (AEdge1 - AEdge0)
3203
  movups   xmm7, [SSE_THREE]
3204
  mulps    xmm6, xmm0 // 2 * Temp
3205
  subps    xmm7, xmm6 // 3 - (2 * Temp)
3206
  mulps    xmm7, xmm0
3207
  mulps    xmm7, xmm0 // Result := Temp * Temp * (3 - (2 * Temp))
3208
  andps    xmm7, xmm3 // (A < AEdge0)? Yes: 0, No: Result
3209
  andps    xmm7, xmm4 // (A > AEdge1)? Yes: 0, No: Result
3210
  orps     xmm7, xmm5 // (A > AEdge1)? Yes: 1, No: Result
3211

3212
  movhlps  xmm6, xmm7
3213
  movq     [Result], xmm7
3214
  movss    DWORD [Result+8], xmm6
3215

3216
  movdqa   xmm6, [rsp-24]
3217
  movdqa   xmm7, [rsp-40]
3218
end;}
3219

3220
{ SmoothStep with per-component 3D edges:
  Temp := (A - AEdge0) / (AEdge1 - AEdge0);
  Result := Temp * Temp * (3 - 2 * Temp), forced to 0 below AEdge0 and to
  1 above AEdge1 via compare masks.
  NOTE(review): XMM6/XMM7 are callee-saved in the MS x64 ABI and are
  spilled to [rsp-24]/[rsp-40] without adjusting RSP. Those slots are
  16-byte aligned (RSP ends in 8 on entry), but Win64 has no red zone --
  confirm storage below RSP is safe here; the sibling overloads use the
  same pattern. }
function SmoothStep(const AEdge0, AEdge1, A: TVector3): TVector3; assembler;
asm
  movdqa   [rsp-24], xmm6
  movdqa   [rsp-40], xmm7

  movq     xmm2, [A]
  movss    xmm3, DWORD [A+8]
  movlhps  xmm2, xmm3
  movq     xmm0, [AEdge0]
  movss    xmm1, DWORD [AEdge0+8]
  movlhps  xmm0, xmm1
  movq     xmm1, [AEdge1]
  movss    xmm3, DWORD [AEdge1+8]
  movlhps  xmm1, xmm3

  movaps   xmm3, xmm2
  movaps   xmm4, xmm2
  movaps   xmm5, xmm2
  movups   xmm6, [SSE_ONE]

  cmpnltps xmm3, xmm0 // (A >= AEdge0)? Yes: $FFFFFFFF, No: $00000000
  cmpleps  xmm4, xmm1 // (A <= AEdge1)? Yes: $FFFFFFFF, No: $00000000
  subps    xmm1, xmm0
  movaps   xmm5, xmm4
  subps    xmm2, xmm0
  andnps   xmm5, xmm6 // (A >  AEdge1)? Yes: 1.0, No: 0.0

  movups   xmm6, [SSE_TWO]
  divps    xmm2, xmm1 // Temp := (A - AEdge0) / (AEdge1 - AEdge0)
  movups   xmm7, [SSE_THREE]
  mulps    xmm6, xmm2 // 2 * Temp
  subps    xmm7, xmm6 // 3 - (2 * Temp)
  mulps    xmm7, xmm2
  mulps    xmm7, xmm2 // Result := Temp * Temp * (3 - (2 * Temp))
  andps    xmm7, xmm3 // (A < AEdge0)? Yes: 0, No: Result
  andps    xmm7, xmm4 // (A > AEdge1)? Yes: 0, No: Result
  orps     xmm7, xmm5 // (A > AEdge1)? Yes: 1, No: Result

  movhlps  xmm6, xmm7
  movq     [Result], xmm7
  movss    DWORD [Result+8], xmm6

  movdqa   xmm6, [rsp-24]
  movdqa   xmm7, [rsp-40]
end;
3265

3266
{ SmoothStep with scalar edges over a 4D vector: Hermite interpolation
  Temp * Temp * (3 - 2 * Temp), forced to 0 below AEdge0 and to 1 above
  AEdge1 via compare masks.
  Fixed: added the `assembler` directive for consistency with the other
  pure-BASM overloads in this file.
  NOTE(review): XMM6/XMM7 are callee-saved in the MS x64 ABI and are
  spilled to [rsp-24]/[rsp-40] without adjusting RSP (Win64 has no red
  zone) -- confirm this is safe; the sibling overloads use the same
  pattern. }
function SmoothStep(const AEdge0, AEdge1: Single; const A: TVector4): TVector4; assembler;
asm
  movdqa   [rsp-24], xmm6
  movdqa   [rsp-40], xmm7

  movups   xmm0, [A]
  shufps   xmm1, xmm1, $00 // Replicate AEdge0
  shufps   xmm2, xmm2, $00 // Replicate AEdge1
  movaps   xmm3, xmm0
  movaps   xmm4, xmm0
  movaps   xmm5, xmm0
  movups   xmm6, [SSE_ONE]

  cmpnltps xmm3, xmm1 // (A >= AEdge0)? Yes: $FFFFFFFF, No: $00000000
  cmpleps  xmm4, xmm2 // (A <= AEdge1)? Yes: $FFFFFFFF, No: $00000000
  subps    xmm2, xmm1
  movaps   xmm5, xmm4
  subps    xmm0, xmm1
  andnps   xmm5, xmm6 // (A >  AEdge1)? Yes: 1.0, No: 0.0

  movups   xmm6, [SSE_TWO]
  divps    xmm0, xmm2 // Temp := (A - AEdge0) / (AEdge1 - AEdge0)
  movups   xmm7, [SSE_THREE]
  mulps    xmm6, xmm0 // 2 * Temp
  subps    xmm7, xmm6 // 3 - (2 * Temp)
  mulps    xmm7, xmm0
  mulps    xmm7, xmm0 // Result := Temp * Temp * (3 - (2 * Temp))
  andps    xmm7, xmm3 // (A < AEdge0)? Yes: 0, No: Result
  andps    xmm7, xmm4 // (A > AEdge1)? Yes: 0, No: Result
  orps     xmm7, xmm5 // (A > AEdge1)? Yes: 1, No: Result

  movups   [Result], xmm7

  movdqa   xmm6, [rsp-24]
  movdqa   xmm7, [rsp-40]
end;
3302

3303
{ SmoothStep with per-component 4D edges; same algorithm and masking as
  the 3D overload above.
  NOTE(review): XMM6/XMM7 (callee-saved on Win64) are spilled below RSP
  without adjusting it; Win64 has no red zone -- confirm safety. }
function SmoothStep(const AEdge0, AEdge1, A: TVector4): TVector4; assembler;
asm
  movdqa   [rsp-24], xmm6
  movdqa   [rsp-40], xmm7

  movups   xmm2, [A]
  movups   xmm0, [AEdge0]
  movups   xmm1, [AEdge1]
  movaps   xmm3, xmm2
  movaps   xmm4, xmm2
  movaps   xmm5, xmm2
  movups   xmm6, [SSE_ONE]

  cmpnltps xmm3, xmm0 // (A >= AEdge0)? Yes: $FFFFFFFF, No: $00000000
  cmpleps  xmm4, xmm1 // (A <= AEdge1)? Yes: $FFFFFFFF, No: $00000000
  subps    xmm1, xmm0
  movaps   xmm5, xmm4
  subps    xmm2, xmm0
  andnps   xmm5, xmm6 // (A >  AEdge1)? Yes: 1.0, No: 0.0

  movups   xmm6, [SSE_TWO]
  divps    xmm2, xmm1 // Temp := (A - AEdge0) / (AEdge1 - AEdge0)
  movups   xmm7, [SSE_THREE]
  mulps    xmm6, xmm2 // 2 * Temp
  subps    xmm7, xmm6 // 3 - (2 * Temp)
  mulps    xmm7, xmm2
  mulps    xmm7, xmm2 // Result := Temp * Temp * (3 - (2 * Temp))
  andps    xmm7, xmm3 // (A < AEdge0)? Yes: 0, No: Result
  andps    xmm7, xmm4 // (A > AEdge1)? Yes: 0, No: Result
  orps     xmm7, xmm5 // (A > AEdge1)? Yes: 1, No: Result

  movups   [Result], xmm7

  movdqa   xmm6, [rsp-24]
  movdqa   xmm7, [rsp-40]
end;
3339

3340
{ Fused multiply-add emulation: Result = (A * B) + C per component.
  Implemented as separate MULPS + ADDPS (SSE2 has no true FMA, so the
  intermediate product is rounded). }

{ 2D FMA; 8-byte result returned in RAX. }
function FMA(const A, B, C: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [A]
  movlps xmm1, [B]
  movlps xmm2, [C]
  {$ELSE}
  movq   xmm0, A
  movq   xmm1, B
  movq   xmm2, C
  {$ENDIF}
  mulps  xmm0, xmm1
  addps  xmm0, xmm2
  movq   rax, xmm0
end;

{ 3D FMA. }
function FMA(const A, B, C: TVector3): TVector3; assembler;
asm
  movq     xmm0, [A]
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1
  movq     xmm1, [B]
  movss    xmm2, DWORD [B+8]
  movlhps  xmm1, xmm2
  movq     xmm2, [C]
  movss    xmm3, DWORD [C+8]
  movlhps  xmm2, xmm3
  mulps    xmm0, xmm1
  addps    xmm0, xmm2
  movhlps  xmm1, xmm0
  movq     [Result], xmm0
  movss    DWORD [Result+8], xmm1
end;

{ 4D FMA. }
function FMA(const A, B, C: TVector4): TVector4; assembler;
asm
  movups xmm0, [A]
  movups xmm1, [B]
  movups xmm2, [C]
  mulps  xmm0, xmm1
  addps  xmm0, xmm2
  movups [Result], xmm0
end;
3383

3384
{ Matrix functions }
3385

3386
{$IFDEF FM_COLUMN_MAJOR}
3387
{ Outer product, column-major storage: element (row i, col j) = C.i * R.j,
  so each stored column j equals C * R.j. Implemented by swapping the
  roles of C and R relative to the row-major versions in the $ELSE branch.
  (Comments corrected: the registers hold R where the old comments said C,
  and vice versa.) }
function OuterProduct(const C, R: TVector2): TMatrix2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [R]     // # # R.Y R.X
  movlps xmm1, [C]     // # # C.Y C.X
  {$ELSE}
  movq   xmm0, R       // # # R.Y R.X
  movq   xmm1, C       // # # C.Y C.X
  {$ENDIF}

  shufps xmm0, xmm0, $50 // R.Y R.Y R.X R.X
  shufps xmm1, xmm1, $44 // C.Y C.X C.Y C.X

  mulps  xmm1, xmm0      // (C.Y*R.Y) (C.X*R.Y) (C.Y*R.X) (C.X*R.X)

  // Store as matrix
  movups [Result], xmm1
end;

{ 3x3 outer product, column-major: column j = C * R.j. }
function OuterProduct(const C, R: TVector3): TMatrix3; assembler;
asm
  movq     xmm0, [C]
  movss    xmm1, DWORD [C+8]
  movlhps  xmm0, xmm1
  movq     xmm1, [R]
  movss    xmm2, DWORD [R+8]
  movlhps  xmm1, xmm2
  movaps   xmm2, xmm1
  movaps   xmm3, xmm1

  shufps   xmm1, xmm1, $00 // R.X (4x)
  shufps   xmm2, xmm2, $55 // R.Y (4x)
  shufps   xmm3, xmm3, $AA // R.Z (4x)

  mulps    xmm1, xmm0      // C * R.X
  mulps    xmm2, xmm0      // C * R.Y
  mulps    xmm3, xmm0      // C * R.Z

  // Store as matrix
  movhlps  xmm0, xmm1
  movhlps  xmm4, xmm2
  movhlps  xmm5, xmm3
  movq     [Result+$00], xmm1
  movss    DWORD [Result+$08], xmm0
  movq     [Result+$0C], xmm2
  movss    DWORD [Result+$14], xmm4
  movq     [Result+$18], xmm3
  movss    DWORD [Result+$20], xmm5
end;

{ 4x4 outer product, column-major: column j = C * R.j. }
function OuterProduct(const C, R: TVector4): TMatrix4; assembler;
asm
  movups xmm0, [C]
  movups xmm1, [R]
  movaps xmm2, xmm1
  movaps xmm3, xmm1
  movaps xmm4, xmm1

  shufps xmm1, xmm1, $00 // R.X (4x)
  shufps xmm2, xmm2, $55 // R.Y (4x)
  shufps xmm3, xmm3, $AA // R.Z (4x)
  shufps xmm4, xmm4, $FF // R.W (4x)

  mulps  xmm1, xmm0      // C * R.X
  mulps  xmm2, xmm0      // C * R.Y
  mulps  xmm3, xmm0      // C * R.Z
  mulps  xmm4, xmm0      // C * R.W

  // Store as matrix
  movups DQWORD [Result + $00], xmm1
  movups DQWORD [Result + $10], xmm2
  movups DQWORD [Result + $20], xmm3
  movups DQWORD [Result + $30], xmm4
end;
3461
{$ELSE}
3462
{ Outer product, row-major storage: element (row i, col j) = C.i * R.j,
  so each stored row i equals C.i * R.
  (Shuffle comments corrected to match the actual SHUFPS lane order.) }
function OuterProduct(const C, R: TVector2): TMatrix2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [C]     // # # C.Y C.X
  movlps xmm1, [R]     // # # R.Y R.X
  {$ELSE}
  movq   xmm0, C       // # # C.Y C.X
  movq   xmm1, R       // # # R.Y R.X
  {$ENDIF}

  shufps xmm0, xmm0, $50 // C.Y C.Y C.X C.X
  shufps xmm1, xmm1, $44 // R.Y R.X R.Y R.X

  mulps  xmm1, xmm0      // (C.Y*R.Y) (C.Y*R.X) (C.X*R.Y) (C.X*R.X)

  // Store as matrix
  movups [Result], xmm1
end;

{ 3x3 outer product, row-major: row i = C.i * R. }
function OuterProduct(const C, R: TVector3): TMatrix3; assembler;
asm
  movq     xmm0, [R]
  movss    xmm1, DWORD [R+8]
  movlhps  xmm0, xmm1
  movq     xmm1, [C]
  movss    xmm2, DWORD [C+8]
  movlhps  xmm1, xmm2
  movaps   xmm2, xmm1
  movaps   xmm3, xmm1

  shufps   xmm1, xmm1, $00 // C.X (4x)
  shufps   xmm2, xmm2, $55 // C.Y (4x)
  shufps   xmm3, xmm3, $AA // C.Z (4x)

  mulps    xmm1, xmm0      // R * C.X
  mulps    xmm2, xmm0      // R * C.Y
  mulps    xmm3, xmm0      // R * C.Z

  // Store as matrix
  movhlps  xmm0, xmm1
  movhlps  xmm4, xmm2
  movhlps  xmm5, xmm3
  movq     [Result+$00], xmm1
  movss    DWORD [Result+$08], xmm0
  movq     [Result+$0C], xmm2
  movss    DWORD [Result+$14], xmm4
  movq     [Result+$18], xmm3
  movss    DWORD [Result+$20], xmm5
end;

{ 4x4 outer product, row-major: row i = C.i * R. }
function OuterProduct(const C, R: TVector4): TMatrix4; assembler;
asm
  movups xmm0, [R]
  movups xmm1, [C]
  movaps xmm2, xmm1
  movaps xmm3, xmm1
  movaps xmm4, xmm1

  shufps xmm1, xmm1, $00 // C.X (4x)
  shufps xmm2, xmm2, $55 // C.Y (4x)
  shufps xmm3, xmm3, $AA // C.Z (4x)
  shufps xmm4, xmm4, $FF // C.W (4x)

  mulps  xmm1, xmm0      // R * C.X
  mulps  xmm2, xmm0      // R * C.Y
  mulps  xmm3, xmm0      // R * C.Z
  mulps  xmm4, xmm0      // R * C.W

  // Store as matrix
  movups DQWORD [Result + $00], xmm1
  movups DQWORD [Result + $10], xmm2
  movups DQWORD [Result + $20], xmm3
  movups DQWORD [Result + $30], xmm4
end;
3536
{$ENDIF}
3537

3538
{ TVector2 }
3539

3540
{ Vector + scalar: adds B to both components. }
class operator TVector2.Add(const A: TVector2; const B: Single): TVector2;
begin
  Result.X := A.X + B;
  Result.Y := A.Y + B;
end;

{ Scalar + vector: adds A to both components. }
class operator TVector2.Add(const A: Single; const B: TVector2): TVector2;
begin
  Result.X := A + B.X;
  Result.Y := A + B.Y;
end;

{ Component-wise vector addition. }
class operator TVector2.Add(const A, B: TVector2): TVector2;
begin
  Result.X := A.X + B.X;
  Result.Y := A.Y + B.Y;
end;
3557

3558
{ Euclidean distance to AOther: |Self - AOther|. }
function TVector2.Distance(const AOther: TVector2): Single;
begin
  Result := (Self - AOther).Length;
end;

{ Squared distance to AOther (avoids the square root). }
function TVector2.DistanceSquared(const AOther: TVector2): Single;
begin
  Result := (Self - AOther).LengthSquared;
end;
3567

3568
{ Vector / scalar: B is replicated to both lanes, then DIVPS.
  8-byte result returned in RAX. From Delphi 10.3 Rio on, the TVector2
  parameter arrives by reference (see header note), hence the conditional. }
class operator TVector2.Divide(const A: TVector2; const B: Single): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [A]
  {$ELSE}
  movq   xmm0, A
  {$ENDIF}
  shufps xmm1, xmm1, 0
  divps  xmm0, xmm1
  movq   rax, xmm0
end;

{ Scalar / vector: A is replicated, then divided by B per component. }
class operator TVector2.Divide(const A: Single; const B: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm1, [B]
  {$ELSE}
  movq   xmm1, B
  {$ENDIF}
  shufps xmm0, xmm0, 0
  divps  xmm0, xmm1
  movq   rax, xmm0
end;

{ Component-wise vector division. }
class operator TVector2.Divide(const A, B: TVector2): TVector2; assembler;
asm
  {$IF RTLVersion >= 33}
  movlps xmm0, [A]
  movlps xmm1, [B]
  {$ELSE}
  movq   xmm0, A
  movq   xmm1, B
  {$ENDIF}
  divps  xmm0, xmm1
  movq   rax, xmm0
end;
3604

3605
{ Dot product: X*AOther.X + Y*AOther.Y. }
function TVector2.Dot(const AOther: TVector2): Single;
begin
  Result := (X * AOther.X) + (Y * AOther.Y);
end;

{ GLSL-style faceforward: returns Self unchanged if NRef and I point in
  opposing directions (NRef.Dot(I) < 0), otherwise the negated vector. }
function TVector2.FaceForward(const I, NRef: TVector2): TVector2;
begin
  if (NRef.Dot(I) < 0) then
    Result := Self
  else
    Result := -Self;
end;
3617

3618
{ Euclidean length: Sqrt(X� + Y�). }
function TVector2.GetLength: Single;
begin
  Result := Sqrt((X * X) + (Y * Y));
end;

{ Squared length: X� + Y� (no square root). }
function TVector2.GetLengthSquared: Single;
begin
  Result := (X * X) + (Y * Y);
end;
3627

3628
{ Vector * scalar: scales both components by B. }
class operator TVector2.Multiply(const A: TVector2; const B: Single): TVector2;
begin
  Result.X := A.X * B;
  Result.Y := A.Y * B;
end;

{ Scalar * vector: scales both components by A. }
class operator TVector2.Multiply(const A: Single; const B: TVector2): TVector2;
begin
  Result.X := A * B.X;
  Result.Y := A * B.Y;
end;

{ Component-wise (Hadamard) vector multiplication. }
class operator TVector2.Multiply(const A, B: TVector2): TVector2;
begin
  Result.X := A.X * B.X;
  Result.Y := A.Y * B.Y;
end;
3645

3646
{ Fast approximate normalization using RSQRTPS (reciprocal square root
  estimate, ~12 bits of precision): Self * (1 / Sqrt(Dot(Self, Self))).
  Self is left unmodified; the 8-byte result is returned in RAX. }
function TVector2.NormalizeFast: TVector2; assembler;
asm
  movlps  xmm0, [Self]    // Y X
  movaps  xmm2, xmm0
  mulps   xmm0, xmm0      // Y*Y X*X
  pshufd  xmm1, xmm0, $01 // X*X Y*Y
  addps   xmm0, xmm1      // (X*X+Y*Y) (2x)
  rsqrtps xmm0, xmm0      // (1 / Sqrt(X*X + Y*Y)) (4x)
  mulps   xmm0, xmm2      // A * (1 / Sqrt(Dot(A, A)))
  movq    rax, xmm0
end;
3657

3658
{ GLSL-style reflection of Self around normal N:
  Self - 2 * Dot(N, Self) * N. N is assumed normalized -- TODO confirm
  (matches the GLSL reflect contract). }
function TVector2.Reflect(const N: TVector2): TVector2;
begin
  Result := Self - ((2 * N.Dot(Self)) * N);
end;

{ GLSL-style refraction of Self through a surface with normal N and ratio
  of indices of refraction Eta. Returns the zero vector (via Init) on
  total internal reflection (K < 0). }
function TVector2.Refract(const N: TVector2; const Eta: Single): TVector2;
var
  D, K: Single;
begin
  D := N.Dot(Self);
  K := 1 - Eta * Eta * (1 - D * D);
  if (K < 0) then
    Result.Init
  else
    Result := (Eta * Self) - ((Eta * D + Sqrt(K)) * N);
end;
3674

3675
{ In-place variant of NormalizeFast: overwrites Self with the
  RSQRTPS-approximated unit vector. }
procedure TVector2.SetNormalizedFast; assembler;
asm
  movlps  xmm0, [Self]    // Y X
  movaps  xmm2, xmm0
  mulps   xmm0, xmm0      // Y*Y X*X
  pshufd  xmm1, xmm0, $01 // X*X Y*Y
  addps   xmm0, xmm1      // (X*X+Y*Y) (2x)
  rsqrtps xmm0, xmm0      // (1 / Sqrt(X*X + Y*Y)) (4x)
  mulps   xmm0, xmm2      // A * (1 / Sqrt(Dot(A, A)))
  movlps  [Self], xmm0
end;
3686

3687
{ Vector - scalar: subtracts B from both components. }
class operator TVector2.Subtract(const A: TVector2; const B: Single): TVector2;
begin
  Result.X := A.X - B;
  Result.Y := A.Y - B;
end;

{ Scalar - vector: subtracts each component from A. }
class operator TVector2.Subtract(const A: Single; const B: TVector2): TVector2;
begin
  Result.X := A - B.X;
  Result.Y := A - B.Y;
end;

{ Component-wise vector subtraction. }
class operator TVector2.Subtract(const A, B: TVector2): TVector2;
begin
  Result.X := A.X - B.X;
  Result.Y := A.Y - B.Y;
end;
3704

3705
{ TVector3 }
3706

3707
{ TVector3 + Single: adds B to every component. A and Result are reference
  parameters; B (Single) arrives in XMM2. The 12-byte vector is handled as
  an 8-byte MOVQ (X,Y) plus a 4-byte MOVSS (Z). }
class operator TVector3.Add(const A: TVector3; const B: Single): TVector3; assembler;
asm
  movq   xmm0, [A]      // Load 3 floating-point values (X,Y here, Z below)
  movss  xmm1, DWORD [A+8]
  shufps xmm2, xmm2, 0  // Replicate B
  addps  xmm0, xmm2     // A + B
  addss  xmm1, xmm2
  movq   [Result], xmm0 // Store result
  movss  DWORD [Result+8], xmm1
end;
3717

3718
{ Single + TVector3: adds A to every component of B.
  B and Result are reference parameters; A (Single) arrives in XMM1. }
class operator TVector3.Add(const A: Single; const B: TVector3): TVector3; assembler;
asm
  movq   xmm0, [B]              // X,Y of B
  movss  xmm2, DWORD [B+8]      // Z of B
  shufps xmm1, xmm1, 0          // Replicate A
  addps  xmm0, xmm1
  addss  xmm2, xmm1
  movq   [Result], xmm0
  movss  DWORD [Result+8], xmm2
end;
3728

3729
{ Component-wise sum of two 3D vectors (all parameters by reference). }
class operator TVector3.Add(const A, B: TVector3): TVector3; assembler;
asm
  movq   xmm0, [A]              // X,Y of A
  movss  xmm1, DWORD [A+8]      // Z of A
  movq   xmm2, [B]
  movss  xmm3, DWORD [B+8]
  addps  xmm0, xmm2
  addss  xmm1, xmm3
  movq   [Result], xmm0
  movss  DWORD [Result+8], xmm1
end;
3740

3741
{ Euclidean distance between Self and AOther: Length(Self - AOther).
  Each 12-byte vector is assembled into one XMM register (MOVQ for X,Y and
  MOVSS for Z, merged via MOVLHPS), then the squared components are summed
  with two shuffle/add steps and finished with a scalar square root. }
function TVector3.Distance(const AOther: TVector3): Single; assembler;
asm
  movq    xmm0, [Self]          // 0 0 Y X
  movss   xmm1, DWORD [Self+8]  // 0 0 0 Z
  movq    xmm2, [AOther]
  movss   xmm3, DWORD [AOther+8]
  movlhps xmm0, xmm1            // 0 Z Y X
  movlhps xmm2, xmm3
  subps   xmm0, xmm2 // A - B

  // (A - B).Length
  mulps   xmm0, xmm0            // squared components
  pshufd  xmm1, xmm0, $0E       // bring upper pair down
  addps   xmm0, xmm1
  pshufd  xmm1, xmm0, $01
  addss   xmm0, xmm1            // horizontal sum in lane 0
  sqrtss  xmm0, xmm0            // result returned in XMM0
end;
3759

3760
{ Squared distance between Self and AOther — identical to Distance but
  without the final SQRTSS (cheaper when only comparing distances). }
function TVector3.DistanceSquared(const AOther: TVector3): Single; assembler;
asm
  movq    xmm0, [Self]          // 0 0 Y X
  movss   xmm1, DWORD [Self+8]  // 0 0 0 Z
  movq    xmm2, [AOther]
  movss   xmm3, DWORD [AOther+8]
  movlhps xmm0, xmm1            // 0 Z Y X
  movlhps xmm2, xmm3
  subps   xmm0, xmm2 // A - B

  // (A - B).LengthSquared (horizontal sum of squares)
  mulps   xmm0, xmm0
  pshufd  xmm1, xmm0, $0E
  addps   xmm0, xmm1
  pshufd  xmm1, xmm0, $01
  addss   xmm0, xmm1            // result returned in XMM0
end;
3777

3778
{ Divides each component of A by scalar B.
  Computes the reciprocal once (one division + three multiplications)
  rather than dividing per component — matches the original behavior. }
class operator TVector3.Divide(const A: TVector3; const B: Single): TVector3;
var
  Reciprocal: Single;
begin
  Reciprocal := 1 / B;
  Result.Z := A.Z * Reciprocal;
  Result.Y := A.Y * Reciprocal;
  Result.X := A.X * Reciprocal;
end;
3787
{class operator TVector3.Divide(const A: TVector3; const B: Single): TVector3; assembler;
3788
asm
3789
  movq   xmm0, [A]
3790
  movss  xmm1, DWORD [A+8]
3791
  shufps xmm2, xmm2, 0
3792
  divps  xmm0, xmm2
3793
  divss  xmm1, xmm2
3794
  movq   [Result], xmm0
3795
  movss  DWORD [Result+8], xmm1
3796
end;}
3797

3798
{ Single / TVector3: divides scalar A by each component of B.
  B and Result are reference parameters; A (Single) arrives in XMM1,
  and is copied to XMM3 before being replicated for the packed divide. }
class operator TVector3.Divide(const A: Single; const B: TVector3): TVector3; assembler;
asm
  movq   xmm0, [B]              // X,Y of B
  movss  xmm2, DWORD [B+8]      // Z of B
  movss  xmm3, xmm1             // scalar copy of A for the Z divide
  shufps xmm1, xmm1, 0          // replicate A
  divps  xmm1, xmm0             // A / (X,Y)
  divss  xmm3, xmm2             // A / Z
  movq   [Result], xmm1
  movss  DWORD [Result+8], xmm3
end;
3809

3810
{ Component-wise quotient of two 3D vectors. }
class operator TVector3.Divide(const A, B: TVector3): TVector3; assembler;
asm
  movq   xmm0, [A]              // X,Y of A
  movss  xmm1, DWORD [A+8]      // Z of A
  movq   xmm2, [B]
  movss  xmm3, DWORD [B+8]
  divps  xmm0, xmm2
  divss  xmm1, xmm3
  movq   [Result], xmm0
  movss  DWORD [Result+8], xmm1
end;
3821

3822
{ Cross product Self x AOther:
    (Y*Bz - By*Z, Z*Bx - Bz*X, X*By - Bx*Y) }
function TVector3.Cross(const AOther: TVector3): TVector3;
begin
  // Each component only reads Self/AOther, so store order is free.
  Result.Z := (X * AOther.Y) - (AOther.X * Y);
  Result.Y := (Z * AOther.X) - (AOther.Z * X);
  Result.X := (Y * AOther.Z) - (AOther.Y * Z);
end;
3828

3829
{ Dot (inner) product of Self and AOther. }
function TVector3.Dot(const AOther: TVector3): Single;
begin
  // Accumulate left-to-right, matching the original expression's
  // evaluation order exactly.
  Result := X * AOther.X;
  Result := Result + (Y * AOther.Y);
  Result := Result + (Z * AOther.Z);
end;
3833

3834
{ GLSL-style faceforward: returns Self unchanged when NRef points against I
  (NRef.Dot(I) < 0), otherwise returns -Self.
  Fix: removed the stray 'assembler' directive. This overload has a plain
  Pascal begin/end body, and 'assembler' is only valid for routines
  implemented entirely as an asm block. }
function TVector3.FaceForward(const I, NRef: TVector3): TVector3;
begin
  if (NRef.Dot(I) < 0) then
    Result := Self
  else
    Result := -Self;
end;
3841

3842
{ Length of the vector: Sqrt(X*X + Y*Y + Z*Z). Result returned in XMM0.
  (Lane comments below corrected: after MOVLHPS the register holds
  0 Z Y X, and PSHUFD $0E duplicates lane 0 into the upper lanes.) }
function TVector3.GetLength: Single; assembler;
asm
  movq    xmm0, [Self]    // 0 0 Y X
  movss   xmm1, DWORD [Self+8]  // 0 0 0 Z
  movlhps xmm0, xmm1      // 0 Z Y X
  mulps   xmm0, xmm0      //  0  Z*Z Y*Y X*X
  pshufd  xmm1, xmm0, $0E // X*X X*X  0  Z*Z
  addps   xmm0, xmm1      //     #         #     (Y*Y)     (X*X+Z*Z)
  pshufd  xmm1, xmm0, $01 // (X*X+Z*Z) (X*X+Z*Z) (X*X+Z*Z) (Y*Y)
  addss   xmm0, xmm1      // (X*X + Y*Y + Z*Z)
  sqrtss  xmm0, xmm0      // Sqrt(X*X + Y*Y + Z*Z)
end;
3854

3855
{ Squared length of the vector (avoids the square root of GetLength). }
function TVector3.GetLengthSquared: Single;
begin
  // Accumulate left-to-right, preserving the original evaluation order.
  Result := X * X;
  Result := Result + (Y * Y);
  Result := Result + (Z * Z);
end;
3859
{function TVector3.GetLengthSquared: Single; assembler;
3860
asm
3861
  movq    xmm0, [Self]    // 0 0 Y X
3862
  movss   xmm1, DWORD [Self+8]  // 0 0 0 Z
3863
  movlhps xmm0, xmm1      // 0 Z Y Z
3864
  mulps   xmm0, xmm0      //  0  Z*Z Y*Y X*X
3865
  pshufd  xmm1, xmm0, $0E // Y*Y X*X  0  Z*Z
3866
  addps   xmm0, xmm1      //     #         #     (Y*Y)     (X*X+Z*Z)
3867
  pshufd  xmm1, xmm0, $01 // (X*X+Z*Z) (X*X+Z*Z) (X*X+Z*Z) (Y*Y)
3868
  addss   xmm0, xmm1      // (X*X + Y*Y + Z*Z)
3869
end;}
3870

3871
{ TVector3 * Single: scales each component of A by B (B arrives in XMM2). }
class operator TVector3.Multiply(const A: TVector3; const B: Single): TVector3; assembler;
asm
  movq   xmm0, [A]              // X,Y of A
  movss  xmm1, DWORD [A+8]      // Z of A
  shufps xmm2, xmm2, 0          // replicate B
  mulps  xmm0, xmm2
  mulss  xmm1, xmm2
  movq   [Result], xmm0
  movss  DWORD [Result+8], xmm1
end;
3881

3882
{ Single * TVector3: scales each component of B by A (A arrives in XMM1). }
class operator TVector3.Multiply(const A: Single; const B: TVector3): TVector3; assembler;
asm
  movq   xmm0, [B]              // X,Y of B
  movss  xmm2, DWORD [B+8]      // Z of B
  shufps xmm1, xmm1, 0          // replicate A
  mulps  xmm0, xmm1
  mulss  xmm2, xmm1
  movq   [Result], xmm0
  movss  DWORD [Result+8], xmm2
end;
3892

3893
{ Component-wise (Hadamard) product of two 3D vectors. }
class operator TVector3.Multiply(const A, B: TVector3): TVector3; assembler;
asm
  movq   xmm0, [A]              // X,Y of A
  movss  xmm1, DWORD [A+8]      // Z of A
  movq   xmm2, [B]
  movss  xmm3, DWORD [B+8]
  mulps  xmm0, xmm2
  mulss  xmm1, xmm3
  movq   [Result], xmm0
  movss  DWORD [Result+8], xmm1
end;
3904

3905
{ Unary minus: negates each component. }
class operator TVector3.Negative(const A: TVector3): TVector3;
begin
  Result.Z := -A.Z;
  Result.Y := -A.Y;
  Result.X := -A.X;
end;
3911
{class operator TVector3.Negative(const A: TVector3): TVector3; assembler;
3912
asm
3913
  movups xmm0, [SSE_MASK_SIGN] // Load mask with 4 sign (upper) bits
3914
  movq   xmm1, [A]
3915
  movss  xmm2, DWORD [A+8]
3916
  xorps  xmm1, xmm0            // Flip sign bit
3917
  xorps  xmm2, xmm0
3918
  movq   [Result], xmm1
3919
  movss  DWORD [Result+8], xmm2
3920
end;}
3921

3922
{ Approximate unit-length copy of Self using RSQRTPS (~12-bit precision).
  (First MOVLHPS lane comment corrected: the register holds 0 Z Y X.) }
function TVector3.NormalizeFast: TVector3; assembler;
asm
  movq    xmm0, [Self]    // 0 0 Y X
  movss   xmm1, DWORD [Self+8]  // 0 0 0 Z
  movlhps xmm0, xmm1      // 0 Z Y X
  movaps  xmm2, xmm0      // keep the original vector

  // Dot(A, A)
  mulps   xmm0, xmm0      //  0  Z*Z Y*Y X*X
  pshufd  xmm1, xmm0, $4E // Y*Y X*X  0  Z*Z
  addps   xmm0, xmm1      //   (Y*Y) (X*X+Z*Z) (Y*Y) (X*X+Z*Z)
  pshufd  xmm1, xmm0, $11 // (X*X+Z*Z) (Y*Y) (X*X+Z*Z) (Y*Y)
  addps   xmm0, xmm1      // (X*X + Y*Y + Z*Z) (4x)

  rsqrtps xmm0, xmm0      // (1 / Sqrt(X*X + Y*Y + Z*Z)) (4x)
  mulps   xmm0, xmm2      // A * (1 / Sqrt(Dot(A, A)))
  movhlps xmm1, xmm0      // move Z into the low lane of XMM1
  movq    [Result], xmm0
  movss   DWORD [Result+8], xmm1
end;
3942

3943
{ Reflects Self around normal N: Self - 2 * Dot(N, Self) * N.
  SSE_TWO is a file-level constant holding 2.0 in all four lanes.
  The Dot is broadcast to all lanes via the two PSHUFD/ADDPS pairs. }
function TVector3.Reflect(const N: TVector3): TVector3; assembler;
asm
  movq     xmm0, [Self]           // X,Y of Self
  movss    xmm2, DWORD [Self+8]   // Z of Self
  movq     xmm1, [N]
  movss    xmm3, DWORD [N+8]
  movlhps  xmm0, xmm2             // 0 Z Y X
  movlhps  xmm1, xmm3
  movaps   xmm2, xmm0             // keep Self
  movups   xmm3, [SSE_TWO]

  // Dot(N, I)
  mulps    xmm0, xmm1
  mulps    xmm3, xmm1 // N * 2
  pshufd   xmm1, xmm0, $4E
  addps    xmm0, xmm1
  pshufd   xmm1, xmm0, $11
  addps    xmm0, xmm1             // Dot(N, I) in all lanes

  // (2 * Dot(N, I)) * N
  mulps    xmm0, xmm3

  // I - ((2 * Dot(N, I)) * N)
  subps    xmm2, xmm0
  movhlps  xmm3, xmm2             // Z into low lane
  movq     [Result], xmm2
  movss    DWORD [Result+8], xmm3
end;
3971

3972
{ GLSL-style refraction of Self through normal N with index ratio Eta
  (Eta arrives in XMM3). Returns the zero vector on total internal
  reflection (K < 0).
  NOTE(review): XMM6/XMM7 (callee-saved on Win64) are spilled BELOW RSP.
  The addresses are 16-byte aligned (RSP mod 16 = 8 on entry), but Win64
  defines no red zone, so data below RSP is not formally protected —
  confirm this is acceptable for this leaf routine.
  NOTE(review): 'movups xmm1, [N]' in the K >= 0 path loads 16 bytes from
  a 12-byte TVector3, reading 4 bytes past the record — verify N can never
  sit at the very end of an allocation/page. }
function TVector3.Refract(const N: TVector3; const Eta: Single): TVector3; assembler;
asm
  movdqa   [rsp-24], xmm6    // spill callee-saved XMM6/XMM7 (16-byte aligned)
  movdqa   [rsp-40], xmm7

  movq     xmm0, [Self]
  movss    xmm2, DWORD [Self+8]
  movq     xmm1, [N]
  movss    xmm4, DWORD [N+8]
  movlhps  xmm0, xmm2        // 0 Z Y X of Self
  movlhps  xmm1, xmm4        // 0 Z Y X of N
  movups   xmm7, xmm0        // keep I (= Self)
  movss    xmm2, DWORD [SSE_ONE]

  // D := Dot(N, I)
  mulps    xmm0, xmm1
  movss    xmm4, xmm2 // 1
  pshufd   xmm1, xmm0, $4E
  movss    xmm5, xmm3 // Eta
  addps    xmm0, xmm1
  mulss    xmm5, xmm5 // Eta * Eta
  pshufd   xmm1, xmm0, $11
  addss    xmm0, xmm1

  // K := 1 - Eta * Eta * (1 - D * D)
  movss    xmm6, xmm0  // D
  mulss    xmm0, xmm0  // D * D
  subss    xmm4, xmm0  // 1 - D * D
  mulss    xmm4, xmm5  // Eta * Eta * (1 - D * D)
  xorps    xmm5, xmm5  // 0
  subss    xmm2, xmm4  // K := 1 - Eta * Eta * (1 - D * D)

  // if (K < 0) then
  comiss   xmm2, xmm5

  jb       @KLessThanZero

  // K >= 0
  mulss    xmm6, xmm3    // Eta * D
  shufps   xmm3, xmm3, 0 // Replicate Eta (4x)
  mulps    xmm7, xmm3    // Eta * I
  sqrtss   xmm2, xmm2    // Sqrt(K)
  addss    xmm6, xmm2    // Eta * D + Sqrt(K)
  shufps   xmm6, xmm6, 0 // Replicate Eta * D + Sqrt(K) (4x)
  movups   xmm1, [N]     // 16-byte load from 12-byte vector (see note above)
  mulps    xmm6, xmm1    // ((Eta * D + Sqrt(K)) * N)
  subps    xmm7, xmm6    // (Eta * I) - ((Eta * D + Sqrt(K)) * N)
  movhlps  xmm0, xmm7
  movq     [Result], xmm7
  movss    DWORD [Result+8], xmm0
  jmp      @Finish

@KLessThanZero:
  // K < 0: total internal reflection, Result := (0, 0, 0)
  movlhps  xmm6, xmm5
  movq     [Result], xmm5
  movss    DWORD [Result+8], xmm6

@Finish:
  movdqa   xmm6, [rsp-24]    // restore callee-saved registers
  movdqa   xmm7, [rsp-40]
end;
4034

4035
{ In-place variant of NormalizeFast: approximately normalizes Self
  using RSQRTPS (~12-bit precision) and writes the result back to Self.
  (First MOVLHPS lane comment corrected: the register holds 0 Z Y X.) }
procedure TVector3.SetNormalizedFast; assembler;
asm
  movq    xmm0, [Self]    // 0 0 Y X
  movss   xmm1, DWORD [Self+8]  // 0 0 0 Z
  movlhps xmm0, xmm1      // 0 Z Y X
  movaps  xmm2, xmm0      // keep the original vector

  // Dot(A, A)
  mulps   xmm0, xmm0      //  0  Z*Z Y*Y X*X
  pshufd  xmm1, xmm0, $4E // Y*Y X*X  0  Z*Z
  addps   xmm0, xmm1      //   (Y*Y) (X*X+Z*Z) (Y*Y) (X*X+Z*Z)
  pshufd  xmm1, xmm0, $11 // (X*X+Z*Z) (Y*Y) (X*X+Z*Z) (Y*Y)
  addps   xmm0, xmm1      // (X*X + Y*Y + Z*Z) (4x)

  rsqrtps xmm0, xmm0      // (1 / Sqrt(X*X + Y*Y + Z*Z)) (4x)
  mulps   xmm0, xmm2      // A * (1 / Sqrt(Dot(A, A)))
  movhlps xmm1, xmm0      // Z into low lane
  movq    [Self], xmm0
  movss   DWORD [Self+8], xmm1
end;
4055

4056
{ TVector3 - Single: subtracts B from each component (B arrives in XMM2). }
class operator TVector3.Subtract(const A: TVector3; const B: Single): TVector3; assembler;
asm
  movq   xmm0, [A]              // X,Y of A
  movss  xmm1, DWORD [A+8]      // Z of A
  shufps xmm2, xmm2, 0          // replicate B
  subps  xmm0, xmm2
  subss  xmm1, xmm2
  movq   [Result], xmm0
  movss  DWORD [Result+8], xmm1
end;
4066

4067
{ Single - TVector3: subtracts each component of B from A
  (A arrives in XMM1; a scalar copy is kept in XMM3 for the Z lane). }
class operator TVector3.Subtract(const A: Single; const B: TVector3): TVector3; assembler;
asm
  movq   xmm4, [B]              // X,Y of B
  movss  xmm2, DWORD [B+8]      // Z of B
  movss  xmm3, xmm1             // scalar copy of A
  shufps xmm1, xmm1, 0          // replicate A
  subps  xmm1, xmm4             // A - (X,Y)
  subss  xmm3, xmm2             // A - Z
  movq   [Result], xmm1
  movss  DWORD [Result+8], xmm3
end;
4078

4079
{ Component-wise difference of two 3D vectors. }
class operator TVector3.Subtract(const A, B: TVector3): TVector3; assembler;
asm
  movq   xmm0, [A]              // X,Y of A
  movss  xmm1, DWORD [A+8]      // Z of A
  movq   xmm2, [B]
  movss  xmm3, DWORD [B+8]
  subps  xmm0, xmm2
  subss  xmm1, xmm3
  movq   [Result], xmm0
  movss  DWORD [Result+8], xmm1
end;
4090

4091
{ TVector4 }
4092

4093
{ TVector4 + Single: adds B to every component. A TVector4 is exactly one
  XMM register wide, so a single packed add suffices (B arrives in XMM2). }
class operator TVector4.Add(const A: TVector4; const B: Single): TVector4; assembler;
asm
  movups xmm0, [A]      // Load 4 floating-point values
  shufps xmm2, xmm2, 0  // Replicate B
  addps  xmm0, xmm2     // A + B
  movups [Result], xmm0 // Store result
end;
4100

4101
{ Single + TVector4: adds A to every component of B (A arrives in XMM1). }
class operator TVector4.Add(const A: Single; const B: TVector4): TVector4; assembler;
asm
  movups xmm0, [B]
  shufps xmm1, xmm1, 0  // replicate A
  addps  xmm0, xmm1
  movups [Result], xmm0
end;
4108

4109
{ Component-wise sum of two 4D vectors. }
class operator TVector4.Add(const A, B: TVector4): TVector4; assembler;
asm
  movups xmm0, [A]
  movups xmm1, [B]
  addps  xmm0, xmm1
  movups [Result], xmm0
end;
4116

4117
{ Euclidean distance between Self and AOther: Length(Self - AOther).
  Horizontal sum of the four squared components via two shuffle/add steps. }
function TVector4.Distance(const AOther: TVector4): Single; assembler;
asm
  movups xmm0, [Self]
  movups xmm1, [AOther]
  subps  xmm0, xmm1 // A - B

  // (A - B).Length
  mulps  xmm0, xmm0
  pshufd xmm1, xmm0, $0E
  addps  xmm0, xmm1
  pshufd xmm1, xmm0, $01
  addss  xmm0, xmm1
  sqrtss xmm0, xmm0     // result returned in XMM0
end;
4131

4132
{ Squared distance between Self and AOther — Distance without the SQRTSS. }
function TVector4.DistanceSquared(const AOther: TVector4): Single; assembler;
asm
  movups xmm0, [Self]
  movups xmm1, [AOther]
  subps  xmm0, xmm1 // A - B

  // (A - B).LengthSquared
  mulps  xmm0, xmm0
  pshufd xmm1, xmm0, $0E
  addps  xmm0, xmm1
  pshufd xmm1, xmm0, $01
  addss  xmm0, xmm1     // result returned in XMM0
end;
4145

4146
{ TVector4 / Single: divides each component by B (B arrives in XMM2). }
class operator TVector4.Divide(const A: TVector4; const B: Single): TVector4; assembler;
asm
  movups xmm0, [A]
  shufps xmm2, xmm2, 0  // replicate B
  divps  xmm0, xmm2
  movups [Result], xmm0
end;
4153

4154
{ Single / TVector4: divides A by each component of B (A arrives in XMM1). }
class operator TVector4.Divide(const A: Single; const B: TVector4): TVector4; assembler;
asm
  movups xmm0, [B]
  shufps xmm1, xmm1, 0  // replicate A
  divps  xmm1, xmm0
  movups [Result], xmm1
end;
4161

4162
{ Component-wise quotient of two 4D vectors. }
class operator TVector4.Divide(const A, B: TVector4): TVector4; assembler;
asm
  movups xmm0, [A]
  movups xmm1, [B]
  divps  xmm0, xmm1
  movups [Result], xmm0
end;
4169

4170
{ Dot (inner) product of Self and AOther. }
function TVector4.Dot(const AOther: TVector4): Single;
begin
  // Accumulate left-to-right, preserving the original evaluation order.
  Result := X * AOther.X;
  Result := Result + (Y * AOther.Y);
  Result := Result + (Z * AOther.Z);
  Result := Result + (W * AOther.W);
end;
4174

4175
{ GLSL-style faceforward: returns Self when Dot(NRef, I) < 0, otherwise
  -Self. Implemented branch-free: the comparison builds an all-ones mask,
  which is ANDed with the sign-bit mask and XORed into Self. }
function TVector4.FaceForward(const I, NRef: TVector4): TVector4; assembler;
asm
  movups   xmm0, [Self]
  movups   xmm1, [I]
  movups   xmm2, [NRef]
  xorps    xmm3, xmm3 // 0
  movups   xmm4, [SSE_MASK_SIGN]

  // Dot(NRef, I)
  mulps    xmm2, xmm1
  pshufd   xmm1, xmm2, $4E
  addps    xmm2, xmm1
  pshufd   xmm1, xmm2, $11
  addps    xmm2, xmm1

  // Dot(NRef, I) >= 0?  Yes: $FFFFFFFF, No: $00000000
  cmpnltps xmm2, xmm3
  andps    xmm2, xmm4 // Yes: $80000000, No: $00000000

  // Flip sign of N if (Dot(NRef, I) >= 0)
  xorps    xmm0, xmm2
  movups   [Result], xmm0
end;
4198

4199
{ Length of the vector: Sqrt(X*X + Y*Y + Z*Z + W*W). Returned in XMM0. }
function TVector4.GetLength: Single; assembler;
asm
  movups xmm0, [Self]    // W Z Y X
  mulps  xmm0, xmm0      // W*W Z*Z Y*Y X*X
  pshufd xmm1, xmm0, $0E // Y*Y X*X W*W Z*Z
  addps  xmm0, xmm1      //     #         #     (Y*Y+W*W) (X*X+Z*Z)
  pshufd xmm1, xmm0, $01 // (X*X+Z*Z) (X*X+Z*Z) (X*X+Z*Z) (Y*Y+W*W)
  addss  xmm0, xmm1      // (X*X + Y*Y + Z*Z + W*W)
  sqrtss xmm0, xmm0      // Sqrt(X*X + Y*Y + Z*Z + W*W)
end;
4209

4210
{ Squared length of the vector (avoids the square root of GetLength). }
function TVector4.GetLengthSquared: Single;
begin
  // Accumulate left-to-right, preserving the original evaluation order.
  Result := X * X;
  Result := Result + (Y * Y);
  Result := Result + (Z * Z);
  Result := Result + (W * W);
end;
4214
{function TVector4.GetLengthSquared: Single; assembler;
4215
asm
4216
  movups xmm0, [Self]    // W Z Y X
4217
  mulps  xmm0, xmm0      // W*W Z*Z Y*Y X*X
4218
  pshufd xmm1, xmm0, $0E // Y*Y X*X W*W Z*Z
4219
  addps  xmm0, xmm1      //     #         #     (Y*Y+W*W) (X*X+Z*Z)
4220
  pshufd xmm1, xmm0, $01 // (X*X+Z*Z) (X*X+Z*Z) (X*X+Z*Z) (Y*Y+W*W)
4221
  addss  xmm0, xmm1      // (X*X + Y*Y + Z*Z + W*W)
4222
end;}
4223

4224
{ TVector4 * Single: scales each component by B (B arrives in XMM2). }
class operator TVector4.Multiply(const A: TVector4; const B: Single): TVector4; assembler;
asm
  movups xmm0, [A]
  shufps xmm2, xmm2, 0  // replicate B
  mulps  xmm0, xmm2
  movups [Result], xmm0
end;
4231

4232
{ Single * TVector4: scales each component of B by A (A arrives in XMM1). }
class operator TVector4.Multiply(const A: Single; const B: TVector4): TVector4; assembler;
asm
  movups xmm0, [B]
  shufps xmm1, xmm1, 0  // replicate A
  mulps  xmm1, xmm0
  movups [Result], xmm1
end;
4239

4240
{ Component-wise (Hadamard) product of two 4D vectors. }
class operator TVector4.Multiply(const A, B: TVector4): TVector4; assembler;
asm
  movups xmm0, [A]
  movups xmm1, [B]
  mulps  xmm0, xmm1
  movups [Result], xmm0
end;
4247

4248
{ Unary minus: flips the sign bit of all four components via XOR with
  the SSE_MASK_SIGN constant (four copies of $80000000). }
class operator TVector4.Negative(const A: TVector4): TVector4; assembler;
asm
  movups xmm0, [SSE_MASK_SIGN] // Load mask with 4 sign (upper) bits
  movups xmm1, [A]
  xorps  xmm0, xmm1            // Flip sign bit
  movups [Result], xmm0
end;
4255

4256
{ Approximate unit-length copy of Self using RSQRTPS (~12-bit precision). }
function TVector4.NormalizeFast: TVector4;
asm
  movups  xmm0, [Self]    // W Z Y X
  movaps  xmm2, xmm0      // keep the original vector

  // Dot(A, A)
  mulps   xmm0, xmm0      // W*W Z*Z Y*Y X*X
  pshufd  xmm1, xmm0, $4E // Y*Y X*X W*W Z*Z
  addps   xmm0, xmm1      // (Y*Y+W*W) (X*X+Z*Z) (Y*Y+W*W) (X*X+Z*Z)
  pshufd  xmm1, xmm0, $11 // (X*X+Z*Z) (Y*Y+W*W) (X*X+Z*Z) (Y*Y+W*W)
  addps   xmm0, xmm1      // (X*X + Y*Y + Z*Z + W*W) (4x)

  rsqrtps xmm0, xmm0      // (1 / Sqrt(X*X + Y*Y + Z*Z + W*W)) (4x)
  mulps   xmm0, xmm2      // A * (1 / Sqrt(Dot(A, A)))
  movups  [Result], xmm0
end;
4272

4273
{ Reflects Self around normal N: Self - 2 * Dot(N, Self) * N.
  SSE_TWO is a file-level constant holding 2.0 in all four lanes. }
function TVector4.Reflect(const N: TVector4): TVector4; assembler;
asm
  movups   xmm0, [Self]
  movups   xmm1, [N]
  movaps   xmm2, xmm0   // keep Self
  movups   xmm3, [SSE_TWO]

  // Dot(N, I)
  mulps    xmm0, xmm1
  mulps    xmm3, xmm1 // N * 2
  pshufd   xmm1, xmm0, $4E
  addps    xmm0, xmm1
  pshufd   xmm1, xmm0, $11
  addps    xmm0, xmm1   // Dot(N, I) in all lanes

  // (2 * Dot(N, I)) * N
  mulps    xmm0, xmm3

  // I - ((2 * Dot(N, I)) * N)
  subps    xmm2, xmm0
  movups   [Result], xmm2
end;
4295

4296
{ GLSL-style refraction of Self through normal N with index ratio Eta
  (Eta arrives in XMM3). Returns the zero vector on total internal
  reflection (K < 0).
  NOTE(review): XMM6/XMM7 (callee-saved on Win64) are spilled BELOW RSP.
  The addresses are 16-byte aligned (RSP mod 16 = 8 on entry), but Win64
  defines no red zone, so data below RSP is not formally protected —
  confirm this is acceptable for this leaf routine. }
function TVector4.Refract(const N: TVector4; const Eta: Single): TVector4; assembler;
asm
  movdqa   [rsp-24], xmm6    // spill callee-saved XMM6/XMM7 (16-byte aligned)
  movdqa   [rsp-40], xmm7

  movups   xmm0, [Self]
  movups   xmm1, [N]
  movups   xmm7, xmm0        // keep I (= Self)
  movss    xmm2, DWORD [SSE_ONE]

  // D := Dot(N, I)
  mulps    xmm0, xmm1
  movss    xmm4, xmm2 // 1
  pshufd   xmm1, xmm0, $4E
  movss    xmm5, xmm3 // Eta
  addps    xmm0, xmm1
  mulss    xmm5, xmm5 // Eta * Eta
  pshufd   xmm1, xmm0, $11
  addss    xmm0, xmm1

  // K := 1 - Eta * Eta * (1 - D * D)
  movss    xmm6, xmm0  // D
  mulss    xmm0, xmm0  // D * D
  subss    xmm4, xmm0  // 1 - D * D
  mulss    xmm4, xmm5  // Eta * Eta * (1 - D * D)
  xorps    xmm5, xmm5  // 0
  subss    xmm2, xmm4  // K := 1 - Eta * Eta * (1 - D * D)

  // if (K < 0) then
  comiss   xmm2, xmm5

  jb       @KLessThanZero

  // K >= 0
  mulss    xmm6, xmm3    // Eta * D
  shufps   xmm3, xmm3, 0 // Replicate Eta (4x)
  mulps    xmm7, xmm3    // Eta * I
  sqrtss   xmm2, xmm2    // Sqrt(K)
  addss    xmm6, xmm2    // Eta * D + Sqrt(K)
  shufps   xmm6, xmm6, 0 // Replicate Eta * D + Sqrt(K) (4x)
  movups   xmm1, [N]
  mulps    xmm6, xmm1    // ((Eta * D + Sqrt(K)) * N)
  subps    xmm7, xmm6    // (Eta * I) - ((Eta * D + Sqrt(K)) * N)
  movups   [Result], xmm7
  jmp      @Finish

@KLessThanZero:
  // K < 0: total internal reflection, Result := Vector4(0, 0, 0, 0)
  movups   [Result], xmm5

@Finish:
  movdqa   xmm6, [rsp-24]    // restore callee-saved registers
  movdqa   xmm7, [rsp-40]
end;
4350

4351
{ In-place variant of NormalizeFast: approximately normalizes Self
  using RSQRTPS (~12-bit precision) and writes the result back to Self. }
procedure TVector4.SetNormalizedFast; assembler;
asm
  movups  xmm0, [Self]    // W Z Y X
  movaps  xmm2, xmm0      // keep the original vector

  // Dot(A, A)
  mulps   xmm0, xmm0      // W*W Z*Z Y*Y X*X
  pshufd  xmm1, xmm0, $4E // Y*Y X*X W*W Z*Z
  addps   xmm0, xmm1      // (Y*Y+W*W) (X*X+Z*Z) (Y*Y+W*W) (X*X+Z*Z)
  pshufd  xmm1, xmm0, $11 // (X*X+Z*Z) (Y*Y+W*W) (X*X+Z*Z) (Y*Y+W*W)
  addps   xmm0, xmm1      // (X*X + Y*Y + Z*Z + W*W) (4x)

  rsqrtps xmm0, xmm0      // (1 / Sqrt(X*X + Y*Y + Z*Z + W*W)) (4x)
  mulps   xmm0, xmm2      // A * (1 / Sqrt(Dot(A, A)))
  movups  [Self], xmm0
end;
4367

4368
{ TVector4 - Single: subtracts B from each component (B arrives in XMM2). }
class operator TVector4.Subtract(const A: TVector4; const B: Single): TVector4; assembler;
asm
  movups xmm0, [A]
  shufps xmm2, xmm2, 0  // replicate B
  subps  xmm0, xmm2
  movups [Result], xmm0
end;
4375

4376
{ Single - TVector4: subtracts each component of B from A
  (A arrives in XMM1). }
class operator TVector4.Subtract(const A: Single; const B: TVector4): TVector4; assembler;
asm
  movups xmm0, [B]
  shufps xmm1, xmm1, 0  // replicate A
  subps  xmm1, xmm0
  movups [Result], xmm1
end;
4383

4384
{ Component-wise difference of two 4D vectors. }
class operator TVector4.Subtract(const A, B: TVector4): TVector4; assembler;
asm
  movups xmm0, [A]
  movups xmm1, [B]
  subps  xmm0, xmm1
  movups [Result], xmm0
end;
4391

4392
{ TQuaternion }
4393

4394
{ Component-wise sum of two quaternions (one XMM register each). }
class operator TQuaternion.Add(const A, B: TQuaternion): TQuaternion;
asm
  movups xmm0, [A]
  movups xmm1, [B]
  addps  xmm0, xmm1
  movups [Result], xmm0
end;
4401

4402
{ Quaternion norm: Sqrt(X*X + Y*Y + Z*Z + W*W). Returned in XMM0. }
function TQuaternion.GetLength: Single;
asm
  movups xmm0, [Self]    // W Z Y X
  mulps  xmm0, xmm0      // W*W Z*Z Y*Y X*X
  pshufd xmm1, xmm0, $0E // Y*Y X*X W*W Z*Z
  addps  xmm0, xmm1      //     #         #     (Y*Y+W*W) (X*X+Z*Z)
  pshufd xmm1, xmm0, $01 // (X*X+Z*Z) (X*X+Z*Z) (X*X+Z*Z) (Y*Y+W*W)
  addss  xmm0, xmm1      // (X*X + Y*Y + Z*Z + W*W)
  sqrtss xmm0, xmm0      // Sqrt(X*X + Y*Y + Z*Z + W*W)
end;
4412

4413
{ Squared quaternion norm — GetLength without the final SQRTSS. }
function TQuaternion.GetLengthSquared: Single;
asm
  movups xmm0, [Self]    // W Z Y X
  mulps  xmm0, xmm0      // W*W Z*Z Y*Y X*X
  pshufd xmm1, xmm0, $0E // Y*Y X*X W*W Z*Z
  addps  xmm0, xmm1      //     #         #     (Y*Y+W*W) (X*X+Z*Z)
  pshufd xmm1, xmm0, $01 // (X*X+Z*Z) (X*X+Z*Z) (X*X+Z*Z) (Y*Y+W*W)
  addss  xmm0, xmm1      // (X*X + Y*Y + Z*Z + W*W)
end;
4422

4423
{ Quaternion * scalar: scales each component by B (B arrives in XMM2). }
class operator TQuaternion.Multiply(const A: TQuaternion; const B: Single): TQuaternion;
asm
  movups xmm0, [A]
  shufps xmm2, xmm2, 0  // replicate B
  mulps  xmm0, xmm2
  movups [Result], xmm0
end;
4430

4431
{ Scalar * quaternion: scales each component of B by A (A arrives in XMM1). }
class operator TQuaternion.Multiply(const A: Single; const B: TQuaternion): TQuaternion;
asm
  movups xmm0, [B]
  shufps xmm1, xmm1, 0  // replicate A
  mulps  xmm1, xmm0
  movups [Result], xmm1
end;
4438

4439
{ Hamilton product A * B (quaternion multiplication; not commutative).
  NOTE: floating-point addition/subtraction is not associative — keep the
  term order exactly as written. }
class operator TQuaternion.Multiply(const A, B: TQuaternion): TQuaternion;
begin
  Result.X := (A.W * B.X) + (A.X * B.W) + (A.Y * B.Z) - (A.Z * B.Y);
  Result.Y := (A.W * B.Y) + (A.Y * B.W) + (A.Z * B.X) - (A.X * B.Z);
  Result.Z := (A.W * B.Z) + (A.Z * B.W) + (A.X * B.Y) - (A.Y * B.X);
  Result.W := (A.W * B.W) - (A.X * B.X) - (A.Y * B.Y) - (A.Z * B.Z);
end;
4446

4447
{ Approximate unit quaternion using RSQRTPS (~12-bit precision). }
function TQuaternion.NormalizeFast: TQuaternion;
asm
  movups  xmm0, [Self]    // W Z Y X
  movaps  xmm2, xmm0      // keep the original quaternion

  // Dot(A, A)
  mulps   xmm0, xmm0      // W*W Z*Z Y*Y X*X
  pshufd  xmm1, xmm0, $4E // Y*Y X*X W*W Z*Z
  addps   xmm0, xmm1      // (Y*Y+W*W) (X*X+Z*Z) (Y*Y+W*W) (X*X+Z*Z)
  pshufd  xmm1, xmm0, $11 // (X*X+Z*Z) (Y*Y+W*W) (X*X+Z*Z) (Y*Y+W*W)
  addps   xmm0, xmm1      // (X*X + Y*Y + Z*Z + W*W) (4x)

  rsqrtps xmm0, xmm0      // (1 / Sqrt(X*X + Y*Y + Z*Z + W*W)) (4x)
  mulps   xmm0, xmm2      // A * (1 / Sqrt(Dot(A, A)))
  movups  [Result], xmm0
end;
4463

4464
{ In-place variant of NormalizeFast: approximately normalizes Self
  using RSQRTPS (~12-bit precision) and writes the result back to Self. }
procedure TQuaternion.SetNormalizedFast;
asm
  movups  xmm0, [Self]    // W Z Y X
  movaps  xmm2, xmm0      // keep the original quaternion

  // Dot(A, A)
  mulps   xmm0, xmm0      // W*W Z*Z Y*Y X*X
  pshufd  xmm1, xmm0, $4E // Y*Y X*X W*W Z*Z
  addps   xmm0, xmm1      // (Y*Y+W*W) (X*X+Z*Z) (Y*Y+W*W) (X*X+Z*Z)
  pshufd  xmm1, xmm0, $11 // (X*X+Z*Z) (Y*Y+W*W) (X*X+Z*Z) (Y*Y+W*W)
  addps   xmm0, xmm1      // (X*X + Y*Y + Z*Z + W*W) (4x)

  rsqrtps xmm0, xmm0      // (1 / Sqrt(X*X + Y*Y + Z*Z + W*W)) (4x)
  mulps   xmm0, xmm2      // A * (1 / Sqrt(Dot(A, A)))
  movups  [Self], xmm0
end;
4480

4481
{ TMatrix 2 }
4482

4483
{ TMatrix2 + Single: a 2x2 single matrix is exactly 16 bytes, so it fits
  in one XMM register (B arrives in XMM2). }
class operator TMatrix2.Add(const A: TMatrix2; const B: Single): TMatrix2; assembler;
asm
  movups xmm1, [A]              // Load matrix
  shufps xmm2, xmm2, 0          // Replicate B
  addps  xmm1, xmm2             // Add B
  movups [Result], xmm1
end;
4490

4491
{ Single + TMatrix2: adds A to every element (A arrives in XMM1). }
class operator TMatrix2.Add(const A: Single; const B: TMatrix2): TMatrix2; assembler;
asm
  movups xmm0, [B]              // Load matrix
  shufps xmm1, xmm1, 0          // Replicate A
  addps  xmm0, xmm1             // Add A
  movups [Result], xmm0
end;
4498

4499
{ Element-wise sum of two 2x2 matrices. }
class operator TMatrix2.Add(const A, B: TMatrix2): TMatrix2; assembler;
asm
  movups xmm0, [A]   // Load A
  movups xmm1, [B]   // Load B
  addps  xmm0, xmm1  // Add
  movups [Result], xmm0
end;
4506

4507
{ Element-wise (component-wise) product of Self and AOther — this is NOT
  matrix multiplication (see the Multiply operator for that). }
function TMatrix2.CompMult(const AOther: TMatrix2): TMatrix2; assembler;
asm
  movups xmm0, [Self]
  movups xmm1, [AOther]

  // Component-wise multiplication
  mulps  xmm0, xmm1

  // Store result
  movups [Result], xmm0
end;
4518

4519
{ TMatrix2 / Single: divides every element by B (B arrives in XMM2). }
class operator TMatrix2.Divide(const A: TMatrix2; const B: Single): TMatrix2; assembler;
asm
  movups xmm1, [A]              // Load matrix
  shufps xmm2, xmm2, 0          // Replicate B
  divps  xmm1, xmm2             // Divide by B
  movups [Result], xmm1
end;
4526

4527
{ Single / TMatrix2: divides A by every element of B (A arrives in XMM1). }
class operator TMatrix2.Divide(const A: Single; const B: TMatrix2): TMatrix2; assembler;
asm
  movups xmm0, [B]              // Load matrix
  shufps xmm1, xmm1, 0          // Replicate A
  divps  xmm1, xmm0             // Divide A by B
  movups [Result], xmm1
end;
4534

4535
{ TMatrix2 * Single: scales every element by B (B arrives in XMM2). }
class operator TMatrix2.Multiply(const A: TMatrix2; const B: Single): TMatrix2; assembler;
asm
  movups xmm1, [A]              // Load matrix
  shufps xmm2, xmm2, 0          // Replicate B
  mulps  xmm1, xmm2             // Multiply
  movups [Result], xmm1
end;
4542

4543
{ Single * TMatrix2: scales every element of B by A (A arrives in XMM1). }
class operator TMatrix2.Multiply(const A: Single; const B: TMatrix2): TMatrix2; assembler;
asm
  movups xmm0, [B]              // Load matrix
  shufps xmm1, xmm1, 0          // Replicate A
  mulps  xmm0, xmm1             // Multiply
  movups [Result], xmm0
end;
4550

4551
{ Vector * matrix: Result.X is the dot of A with B.M[0],
  Result.Y the dot of A with B.M[1]. }
class operator TMatrix2.Multiply(const A: TVector2; const B: TMatrix2): TVector2;
begin
  Result.X := (A.X * B.M[0,0]) + (A.Y * B.M[0,1]);
  Result.Y := (A.X * B.M[1,0]) + (A.Y * B.M[1,1]);
end;
4556

4557
{ Matrix * vector: Result.X is the dot of B with column (M[0,0], M[1,0]),
  Result.Y the dot of B with column (M[0,1], M[1,1]). }
class operator TMatrix2.Multiply(const A: TMatrix2; const B: TVector2): TVector2;
begin
  Result.X := (A.M[0,0] * B.X) + (A.M[1,0] * B.Y);
  Result.Y := (A.M[0,1] * B.X) + (A.M[1,1] * B.Y);
end;
4562

4563
{ 2x2 matrix product A * B. Each result element keeps the original
  two-term sum (same operand order, so identical float results). }
class operator TMatrix2.Multiply(const A, B: TMatrix2): TMatrix2;
begin
  // The four element stores are independent; computed here in
  // row-major write order of the second index first.
  Result.M[1,1] := (A.M[0,1] * B.M[1,0]) + (A.M[1,1] * B.M[1,1]);
  Result.M[1,0] := (A.M[0,0] * B.M[1,0]) + (A.M[1,0] * B.M[1,1]);
  Result.M[0,1] := (A.M[0,1] * B.M[0,0]) + (A.M[1,1] * B.M[0,1]);
  Result.M[0,0] := (A.M[0,0] * B.M[0,0]) + (A.M[1,0] * B.M[0,1]);
end;
4570

4571
{ Unary minus: flips the sign bit of all four elements via XOR with
  the SSE_MASK_SIGN constant. }
class operator TMatrix2.Negative(const A: TMatrix2): TMatrix2; assembler;
asm
  movups xmm0, [SSE_MASK_SIGN]  // Load mask with 4 sign (upper) bits
  movups xmm1, [A]              // Load matrix
  xorps  xmm1, xmm0             // Flip sign bits
  movups [Result], xmm1
end;
4578

4579
{ Transposes Self in place (delegates to the copying Transpose). }
procedure TMatrix2.SetTransposed;
begin
  Self := Transpose;
end;
4583

4584
{ TMatrix2 - Single: subtracts B from every element (B arrives in XMM2). }
class operator TMatrix2.Subtract(const A: TMatrix2; const B: Single): TMatrix2; assembler;
asm
  movups xmm1, [A]              // Load matrix
  shufps xmm2, xmm2, 0          // Replicate B
  subps  xmm1, xmm2             // Subtract B
  movups [Result], xmm1
end;
4591

4592
{ Single - TMatrix2: subtracts every element of B from A
  (A arrives in XMM1). }
class operator TMatrix2.Subtract(const A: Single; const B: TMatrix2): TMatrix2; assembler;
asm
  movups xmm0, [B]              // Load matrix
  shufps xmm1, xmm1, 0          // Replicate A
  subps  xmm1, xmm0             // Subtract B
  movups [Result], xmm1
end;
4599

4600
{ Element-wise difference of two 2x2 matrices. }
class operator TMatrix2.Subtract(const A, B: TMatrix2): TMatrix2; assembler;
asm
  movups xmm0, [A]   // Load A
  movups xmm1, [B]   // Load B
  subps  xmm0, xmm1  // Subtract
  movups [Result], xmm0
end;
4607

4608
{ Returns the transpose of Self: diagonal elements are copied,
  off-diagonal elements are swapped. }
function TMatrix2.Transpose: TMatrix2;
begin
  // Diagonal stays in place.
  Result.M[0,0] := M[0,0];
  Result.M[1,1] := M[1,1];
  // Off-diagonal elements swap.
  Result.M[0,1] := M[1,0];
  Result.M[1,0] := M[0,1];
end;
4616

4617
{ TMatrix3 }
4618

4619
{ TMatrix3 + Single: the 9 elements (36 bytes) are processed as two
  packed 4-lane chunks plus one trailing scalar (B arrives in XMM2). }
class operator TMatrix3.Add(const A: TMatrix3; const B: Single): TMatrix3; assembler;
asm
  movups xmm1, DQWORD [A + $00] // Load 3 rows
  shufps xmm2, xmm2, 0          // Replicate B
  movups xmm3, DQWORD [A + $10]
  movss  xmm4, DWORD [A + $20]
  addps  xmm1, xmm2             // Add B to each row
  addps  xmm3, xmm2
  addss  xmm4, xmm2
  movups DQWORD [Result + $00], xmm1
  movups DQWORD [Result + $10], xmm3
  movss  DWORD [Result + $20], xmm4
end;
4632

4633
{ Single + TMatrix3: adds A to every element (A arrives in XMM1). }
class operator TMatrix3.Add(const A: Single; const B: TMatrix3): TMatrix3; assembler;
asm
  movups xmm0, DQWORD [B + $00] // Load 3 rows
  shufps xmm1, xmm1, 0          // Replicate A
  movups xmm2, DQWORD [B + $10]
  movss  xmm3, DWORD [B + $20]
  addps  xmm0, xmm1             // Add A to each row
  addps  xmm2, xmm1
  addss  xmm3, xmm1
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm2
  movss  DWORD [Result + $20], xmm3
end;
4646

4647
{ Element-wise sum of two 3x3 matrices (two packed adds + one scalar). }
class operator TMatrix3.Add(const A, B: TMatrix3): TMatrix3; assembler;
asm
  movups xmm0, DQWORD [A + $00] // Load 3 rows of A
  movups xmm1, DQWORD [A + $10]
  movss  xmm2, DWORD [A + $20]
  movups xmm4, DQWORD [B + $00] // Load 3 rows of B
  movups xmm5, DQWORD [B + $10]
  movss  xmm3, DWORD [B + $20]
  addps  xmm0, xmm4             // Add rows
  addps  xmm1, xmm5
  addss  xmm2, xmm3
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm1
  movss  DWORD [Result + $20], xmm2
end;
4662

4663
{ Element-wise (component-wise) product of Self and AOther — this is NOT
  matrix multiplication. }
function TMatrix3.CompMult(const AOther: TMatrix3): TMatrix3; assembler;
asm
  movups xmm0, DQWORD[Self + $00]   // Self[0]
  movups xmm1, DQWORD[Self + $10]   // Self[1]
  movss  xmm2, DWORD[Self + $20]   // Self[2]
  movups xmm4, DQWORD[AOther + $00] // AOther[0]
  movups xmm5, DQWORD[AOther + $10] // AOther[1]
  movss  xmm3, DWORD[AOther + $20] // AOther[2]

  // Component-wise multiplication
  mulps  xmm0, xmm4
  mulps  xmm1, xmm5
  mulss  xmm2, xmm3

  // Store result
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm1
  movss  DWORD [Result + $20], xmm2
end;
4682

4683
{ Single / TMatrix3: divides A by every element of B. A (in XMM1) is
  replicated, then copied so each of the three divides has its own
  dividend register. }
class operator TMatrix3.Divide(const A: Single; const B: TMatrix3): TMatrix3; assembler;
asm
  movups xmm4, DQWORD [B + $00] // Load 3 rows
  shufps xmm1, xmm1, 0          // Replicate A
  movups xmm5, DQWORD [B + $10]
  movaps xmm0, xmm1             // copies of the replicated A
  movaps xmm2, xmm1
  movss  xmm3, DWORD [B + $20]
  divps  xmm1, xmm4             // Divide A by each row
  divps  xmm0, xmm5
  divss  xmm2, xmm3
  movups DQWORD [Result + $00], xmm1
  movups DQWORD [Result + $10], xmm0
  movss  DWORD [Result + $20], xmm2
end;
4698

4699
class operator TMatrix3.Divide(const A: TMatrix3; const B: Single): TMatrix3; assembler;
{ Matrix-by-scalar division: Result[i] := A[i] / B for all 9 elements.
  B arrives in xmm2 (third argument slot of the Win64 convention). }
asm
  shufps xmm2, xmm2, 0               // Broadcast B to all four lanes
  movups xmm0, DQWORD [A + $00]
  movups xmm1, DQWORD [A + $10]
  movss  xmm3, DWORD [A + $20]
  divps  xmm0, xmm2                  // elements 0..3 / B
  divps  xmm1, xmm2                  // elements 4..7 / B
  divss  xmm3, xmm2                  // element 8 / B
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm1
  movss  DWORD [Result + $20], xmm3
end;
4712

4713
class operator TMatrix3.Multiply(const A: Single; const B: TMatrix3): TMatrix3; assembler;
{ Scalar-matrix product: Result[i] := A * B[i] for all 9 elements.
  A arrives in xmm1 (third argument slot of the Win64 convention). }
asm
  shufps xmm1, xmm1, 0               // Broadcast A to all four lanes
  movups xmm4, DQWORD [B + $00]
  movups xmm5, DQWORD [B + $10]
  movss  xmm0, DWORD [B + $20]
  mulps  xmm4, xmm1                  // A * elements 0..3
  mulps  xmm5, xmm1                  // A * elements 4..7
  mulss  xmm0, xmm1                  // A * element 8
  movups DQWORD [Result + $00], xmm4
  movups DQWORD [Result + $10], xmm5
  movss  DWORD [Result + $20], xmm0
end;
4726

4727
class operator TMatrix3.Multiply(const A: TMatrix3; const B: Single): TMatrix3; assembler;
{ Matrix-scalar product: Result[i] := A[i] * B for all 9 elements.
  B arrives in xmm2 (third argument slot of the Win64 convention). }
asm
  shufps xmm2, xmm2, 0               // Broadcast B to all four lanes
  movups xmm4, DQWORD [A + $00]
  movups xmm5, DQWORD [A + $10]
  movss  xmm0, DWORD [A + $20]
  mulps  xmm4, xmm2                  // elements 0..3 * B
  mulps  xmm5, xmm2                  // elements 4..7 * B
  mulss  xmm0, xmm2                  // element 8 * B
  movups DQWORD [Result + $00], xmm4
  movups DQWORD [Result + $10], xmm5
  movss  DWORD [Result + $20], xmm0
end;
4740

4741
{$IFDEF FM_COLUMN_MAJOR}
4742
class operator TMatrix3.Multiply(const A: TMatrix3; const B: TVector3): TVector3; assembler;
{ Column-major matrix * vector: Result := sum over i of B[i] * column A[i].
  Each 3-float row/column is assembled from a QWORD load (2 floats) plus a
  scalar load merged with MOVLHPS. }
asm
  // Spill xmm6 below RSP: this is a leaf function and rsp-24 is 16-byte
  // aligned here (rsp ends in 8 on entry). xmm6 is callee-saved on Win64.
  // NOTE(review): Win64 has no formal red zone; this relies on the function
  // making no calls — confirm against the rest of the library.
  movdqa  [rsp-24], xmm6

  movq    xmm0, [B]               // Load vector B: X Y
  movss   xmm1, DWORD [B+8]       // Z
  movlhps xmm0, xmm1              // # Z Y X

  movq    xmm4, QWORD [A + $00]   // A column 0 (floats at $00..$08)
  movss   xmm1, DWORD [A + $08]
  movlhps xmm4, xmm1

  movaps  xmm1, xmm0
  movaps  xmm2, xmm0
  shufps  xmm0, xmm0, $00         // Broadcast B.X
  shufps  xmm1, xmm1, $55         // Broadcast B.Y
  shufps  xmm2, xmm2, $AA         // Broadcast B.Z

  movq    xmm5, QWORD [A + $0C]   // A column 1 (floats at $0C..$14)
  movss   xmm3, DWORD [A + $14]
  movlhps xmm5, xmm3

  movq    xmm6, QWORD [A + $18]   // A column 2 (floats at $18..$20)
  movss   xmm3, DWORD [A + $20]
  movlhps xmm6, xmm3

  mulps   xmm0, xmm4              // B.X * column 0
  mulps   xmm1, xmm5              // B.Y * column 1
  mulps   xmm2, xmm6              // B.Z * column 2
  addps   xmm0, xmm1              // Sum the three scaled columns
  addps   xmm0, xmm2
  movhlps xmm1, xmm0              // Bring Z component into low lane
  movq    [Result], xmm0          // Store X, Y
  movss   DWORD [Result+8], xmm1  // Store Z

  movdqa  xmm6, [rsp-24]          // Restore xmm6
end;
4779

4780
class operator TMatrix3.Multiply(const A: TVector3; const B: TMatrix3): TVector3; assembler;
{ Column-major vector * matrix: Result[i] := dot(A, column B[i]).
  Computes the three per-column products and then transposes them so the
  lane-wise sums become the three dot products. }
asm
  // Spill callee-saved xmm6 below RSP (leaf function; rsp-24 is 16-aligned
  // since rsp ends in 8 on entry).
  movdqa   [rsp-24], xmm6

  movq     xmm0, [A]               // Load vector A: # Z Y X
  movss    xmm1, DWORD [A+8]
  movlhps  xmm0, xmm1

  movq     xmm4, QWORD [B + $00]   // B column 0
  movss    xmm1, DWORD [B + $08]
  movlhps  xmm4, xmm1

  movaps   xmm1, xmm0              // Three copies of A (not broadcast:
  movaps   xmm2, xmm0              // the transpose below does the reduction)

  movq     xmm5, QWORD [B + $0C]   // B column 1
  movss    xmm6, DWORD [B + $14]
  movlhps  xmm5, xmm6

  movq     xmm6, QWORD [B + $18]   // B column 2
  movss    xmm3, DWORD [B + $20]
  movlhps  xmm6, xmm3

  mulps    xmm0, xmm4              // A * column 0 (element-wise)
  mulps    xmm1, xmm5              // A * column 1
  mulps    xmm2, xmm6              // A * column 2
  xorps    xmm3, xmm3              // Zero row for the 4th transpose input

  { Transpose xmm0-xmm2 so each register holds one component of all products }
  movaps   xmm4, xmm2
  unpcklps xmm2, xmm3
  unpckhps xmm4, xmm3

  movaps   xmm3, xmm0
  unpcklps xmm0, xmm1
  unpckhps xmm3, xmm1

  movaps   xmm1, xmm0
  unpcklpd xmm0, xmm2
  unpckhpd xmm1, xmm2

  unpcklpd xmm3, xmm4

  addps    xmm0, xmm1              // Lane-wise add = the three dot products
  addps    xmm0, xmm3
  movhlps  xmm1, xmm0              // Bring Z into low lane
  movq     [Result], xmm0          // Store X, Y
  movss    DWORD [Result+8], xmm1  // Store Z

  movdqa   xmm6, [rsp-24]          // Restore xmm6
end;
4831

4832
class operator TMatrix3.Multiply(const A, B: TMatrix3): TMatrix3; assembler;
{ Column-major matrix * matrix. Code below consists of 3 Vector*Matrix
  calculations: each column of B (broadcast per component) is multiplied
  against the three columns of A, which stay cached in xmm4-xmm6. }
asm
  // Spill callee-saved xmm6 below RSP (leaf function; rsp-24 is 16-aligned).
  movdqa  [rsp-24], xmm6

  movq    xmm0, QWORD [B + $00]   // B column 0
  movss   xmm1, DWORD [B + $08]
  movlhps xmm0, xmm1

  movq    xmm4, QWORD [A + $00]   // A column 0 (kept for all 3 passes)
  movss   xmm1, DWORD [A + $08]
  movlhps xmm4, xmm1

  movaps  xmm1, xmm0
  movaps  xmm2, xmm0
  shufps  xmm0, xmm0, $00         // Broadcast B[0].X
  shufps  xmm1, xmm1, $55         // Broadcast B[0].Y
  shufps  xmm2, xmm2, $AA         // Broadcast B[0].Z

  movq    xmm5, QWORD [A + $0C]   // A column 1 (kept)
  movss   xmm3, DWORD [A + $14]
  movlhps xmm5, xmm3

  movq    xmm6, QWORD [A + $18]   // A column 2 (kept)
  movss   xmm3, DWORD [A + $20]
  movlhps xmm6, xmm3

  mulps   xmm0, xmm4              // Result column 0 = A * B[0]
  mulps   xmm1, xmm5
  mulps   xmm2, xmm6
  addps   xmm0, xmm1
  addps   xmm0, xmm2
  movhlps xmm1, xmm0
  movq    QWORD [Result + $00], xmm0
  movss   DWORD [Result + $08], xmm1

  movq    xmm0, QWORD [B + $0C]   // B column 1
  movss   xmm1, DWORD [B + $14]
  movlhps xmm0, xmm1

  movaps  xmm1, xmm0
  movaps  xmm2, xmm0
  shufps  xmm0, xmm0, $00         // Broadcast B[1].X/Y/Z
  shufps  xmm1, xmm1, $55
  shufps  xmm2, xmm2, $AA
  mulps   xmm0, xmm4              // Result column 1 = A * B[1]
  mulps   xmm1, xmm5
  mulps   xmm2, xmm6
  addps   xmm0, xmm1
  addps   xmm0, xmm2
  movhlps xmm1, xmm0
  movq    QWORD [Result + $0C], xmm0
  movss   DWORD [Result + $14], xmm1

  movq    xmm0, QWORD [B + $18]   // B column 2
  movss   xmm1, DWORD [B + $20]
  movlhps xmm0, xmm1

  movaps  xmm1, xmm0
  movaps  xmm2, xmm0
  shufps  xmm0, xmm0, $00         // Broadcast B[2].X/Y/Z
  shufps  xmm1, xmm1, $55
  shufps  xmm2, xmm2, $AA
  mulps   xmm0, xmm4              // Result column 2 = A * B[2]
  mulps   xmm1, xmm5
  mulps   xmm2, xmm6
  addps   xmm0, xmm1
  addps   xmm0, xmm2
  movhlps xmm1, xmm0
  movq    QWORD [Result + $18], xmm0
  movss   DWORD [Result + $20], xmm1

  movdqa  xmm6, [rsp-24]          // Restore xmm6
end;
4906
{$ELSE}
4907
class operator TMatrix3.Multiply(const A: TMatrix3; const B: TVector3): TVector3; assembler;
{ Row-major matrix * vector: Result[i] := dot(row A[i], B).
  Computes the three per-row products, then transposes so lane-wise sums
  yield the dot products. }
asm
  // Spill callee-saved xmm6 below RSP (leaf function; rsp-24 is 16-aligned
  // since rsp ends in 8 on entry).
  movdqa   [rsp-24], xmm6

  movq     xmm0, [B]              // Load vector
  movss    xmm1, DWORD [B+8]
  movlhps  xmm0, xmm1             // # B.Z B.Y B.X

  movq     xmm4, QWORD [A + $00]  // Load 3 rows
  movss    xmm1, DWORD [A + $08]
  movlhps  xmm4, xmm1

  movaps   xmm1, xmm0             // Three copies of B (the transpose below
  movaps   xmm2, xmm0             // does the reduction, no broadcast needed)

  movq     xmm5, QWORD [A + $0C]  // Row 1
  movss    xmm6, DWORD [A + $14]
  movlhps  xmm5, xmm6

  movq     xmm6, QWORD [A + $18]  // Row 2
  movss    xmm3, DWORD [A + $20]
  movlhps  xmm6, xmm3

  mulps    xmm0, xmm4             // ###, (A02 * B.Z), (A01 * B.Y), (A00 * B.X)
  mulps    xmm1, xmm5             // ###, (A12 * B.Z), (A11 * B.Y), (A10 * B.X)
  mulps    xmm2, xmm6             // ###, (A22 * B.Z), (A21 * B.Y), (A20 * B.X)
  xorps    xmm3, xmm3             // 000 (4th transpose input)

  { Transpose xmm0-xmm2 }
  movaps   xmm4, xmm2
  unpcklps xmm2, xmm3             // 000 P21 000 P20
  unpckhps xmm4, xmm3             // 000 ### 000 P22

  movaps   xmm3, xmm0
  unpcklps xmm0, xmm1             // P11 P01 P10 P00
  unpckhps xmm3, xmm1             // ### ### P12 P02

  movaps   xmm1, xmm0
  unpcklpd xmm0, xmm2             // 000 P20 P10 P00
  unpckhpd xmm1, xmm2             // 000 P21 P11 P01

  unpcklpd xmm3, xmm4             // 000 P22 P12 P02

  addps    xmm0, xmm1             // Add rows: the three dot products
  addps    xmm0, xmm3
  movhlps  xmm1, xmm0             // Bring Z into low lane
  movq     [Result], xmm0         // Store X, Y
  movss    DWORD [Result+8], xmm1 // Store Z

  movdqa   xmm6, [rsp-24]         // Restore xmm6
end;
4958

4959
class operator TMatrix3.Multiply(const A: TVector3; const B: TMatrix3): TVector3; assembler;
{ Row-major vector * matrix: Result := A.X*row B[0] + A.Y*row B[1] + A.Z*row B[2]. }
asm
  // Spill callee-saved xmm6 below RSP (leaf function; rsp-24 is 16-aligned).
  movdqa  [rsp-24], xmm6

  movq    xmm0, [A]              // Load vector
  movss   xmm1, DWORD [A+8]
  movlhps xmm0, xmm1             // # A.Z A.Y A.X

  movq    xmm4, QWORD [B + $00]  // Load 3 rows
  movss   xmm1, DWORD [B + $08]
  movlhps xmm4, xmm1

  movaps  xmm1, xmm0
  movaps  xmm2, xmm0
  shufps  xmm0, xmm0, $00        // Ax Ax Ax Ax
  shufps  xmm1, xmm1, $55        // Ay Ay Ay Ay
  shufps  xmm2, xmm2, $AA        // Az Az Az Az

  movq    xmm5, QWORD [B + $0C]  // Row 1
  movss   xmm3, DWORD [B + $14]
  movlhps xmm5, xmm3

  movq    xmm6, QWORD [B + $18]  // Row 2
  movss   xmm3, DWORD [B + $20]
  movlhps xmm6, xmm3

  mulps   xmm0, xmm4             // Ax * row 0
  mulps   xmm1, xmm5             // Ay * row 1
  mulps   xmm2, xmm6             // Az * row 2
  addps   xmm0, xmm1             // Add scaled rows
  addps   xmm0, xmm2
  movhlps xmm1, xmm0             // Bring Z into low lane
  movq    [Result], xmm0         // Store X, Y
  movss   DWORD [Result+8], xmm1 // Store Z

  movdqa  xmm6, [rsp-24]         // Restore xmm6
end;
4996

4997
class operator TMatrix3.Multiply(const A, B: TMatrix3): TMatrix3; assembler;
{ Row-major matrix * matrix. Code below consists of 3 Vector*Matrix
  calculations: each row of A is multiplied against B, whose three rows
  stay cached in xmm4-xmm6 across all passes. }
asm
  // Spill callee-saved xmm6 below RSP (leaf function; rsp-24 is 16-aligned).
  movdqa  [rsp-24], xmm6

  { A.R[0] * B }
  movq    xmm0, QWORD [A + $00]   // Row 0 of A
  movss   xmm1, DWORD [A + $08]
  movlhps xmm0, xmm1

  movq    xmm4, QWORD [B + $00]   // Row 0 of B (kept for all 3 passes)
  movss   xmm1, DWORD [B + $08]
  movlhps xmm4, xmm1

  movaps  xmm1, xmm0
  movaps  xmm2, xmm0
  shufps  xmm0, xmm0, $00         // Broadcast A00
  shufps  xmm1, xmm1, $55         // Broadcast A01
  shufps  xmm2, xmm2, $AA         // Broadcast A02

  movq    xmm5, QWORD [B + $0C]   // Row 1 of B (kept)
  movss   xmm3, DWORD [B + $14]
  movlhps xmm5, xmm3

  movq    xmm6, QWORD [B + $18]   // Row 2 of B (kept)
  movss   xmm3, DWORD [B + $20]
  movlhps xmm6, xmm3

  mulps   xmm0, xmm4              // Result row 0 = A00*B[0] + A01*B[1] + A02*B[2]
  mulps   xmm1, xmm5
  mulps   xmm2, xmm6
  addps   xmm0, xmm1
  addps   xmm0, xmm2
  movhlps xmm1, xmm0
  movq    QWORD [Result + $00], xmm0
  movss   DWORD [Result + $08], xmm1

  { A.R[1] * B }
  movq    xmm0, QWORD [A + $0C]   // Row 1 of A
  movss   xmm1, DWORD [A + $14]
  movlhps xmm0, xmm1

  movaps  xmm1, xmm0
  movaps  xmm2, xmm0
  shufps  xmm0, xmm0, $00         // Broadcast A10/A11/A12
  shufps  xmm1, xmm1, $55
  shufps  xmm2, xmm2, $AA
  mulps   xmm0, xmm4              // Result row 1
  mulps   xmm1, xmm5
  mulps   xmm2, xmm6
  addps   xmm0, xmm1
  addps   xmm0, xmm2
  movhlps xmm1, xmm0
  movq    QWORD [Result + $0C], xmm0
  movss   DWORD [Result + $14], xmm1

  { A.R[2] * B }
  movq    xmm0, QWORD [A + $18]   // Row 2 of A
  movss   xmm1, DWORD [A + $20]
  movlhps xmm0, xmm1

  movaps  xmm1, xmm0
  movaps  xmm2, xmm0
  shufps  xmm0, xmm0, $00         // Broadcast A20/A21/A22
  shufps  xmm1, xmm1, $55
  shufps  xmm2, xmm2, $AA
  mulps   xmm0, xmm4              // Result row 2
  mulps   xmm1, xmm5
  mulps   xmm2, xmm6
  addps   xmm0, xmm1
  addps   xmm0, xmm2
  movhlps xmm1, xmm0
  movq    QWORD [Result + $18], xmm0
  movss   DWORD [Result + $20], xmm1

  movdqa  xmm6, [rsp-24]          // Restore xmm6
end;
5074
{$ENDIF}
5075

5076
class operator TMatrix3.Negative(const A: TMatrix3): TMatrix3; assembler;
{ Result := -A: flip the sign bit of all 9 elements by XOR'ing with
  $80000000 (see SSE_MASK_SIGN).
  Fixed: the element-8 XOR used the integer-domain PXOR in the middle of a
  float-domain XORPS sequence; the result is bit-identical, but XORPS is
  used for all three now for consistency and to avoid a possible SSE
  domain-crossing penalty. }
asm
  movups xmm0, [SSE_MASK_SIGN]  // Load mask with 4 sign (upper) bits
  movups xmm1, DQWORD [A + $00] // Load 3 rows
  movups xmm2, DQWORD [A + $10]
  movss  xmm3, DWORD [A + $20]
  xorps  xmm1, xmm0             // Flip sign bits of each element in each row
  xorps  xmm2, xmm0
  xorps  xmm3, xmm0             // was PXOR; XORPS gives the same bits
  movups DQWORD [Result + $00], xmm1
  movups DQWORD [Result + $10], xmm2
  movss  DWORD [Result + $20], xmm3
end;
5089

5090
procedure TMatrix3.SetTransposed; assembler;
{ In-place transpose: swap the three off-diagonal element pairs.
  Element [R,C] of the 3x3 matrix lives at byte offset R*$0C + C*4. }
asm
  movss  xmm0, DWORD [Self + $04]   // [0,1]
  movss  xmm1, DWORD [Self + $0C]   // [1,0]
  movss  DWORD [Self + $0C], xmm0
  movss  DWORD [Self + $04], xmm1

  movss  xmm2, DWORD [Self + $08]   // [0,2]
  movss  xmm3, DWORD [Self + $18]   // [2,0]
  movss  DWORD [Self + $18], xmm2
  movss  DWORD [Self + $08], xmm3

  movss  xmm4, DWORD [Self + $14]   // [1,2]
  movss  xmm5, DWORD [Self + $1C]   // [2,1]
  movss  DWORD [Self + $1C], xmm4
  movss  DWORD [Self + $14], xmm5
end;
5110

5111
class operator TMatrix3.Subtract(const A: TMatrix3; const B: Single): TMatrix3; assembler;
{ Matrix-minus-scalar: Result[i] := A[i] - B for all 9 elements.
  B arrives in xmm2 (third argument slot of the Win64 convention). }
asm
  shufps xmm2, xmm2, 0               // Broadcast B to all four lanes
  movups xmm0, DQWORD [A + $00]
  movups xmm1, DQWORD [A + $10]
  movss  xmm3, DWORD [A + $20]
  subps  xmm0, xmm2                  // elements 0..3 - B
  subps  xmm1, xmm2                  // elements 4..7 - B
  subss  xmm3, xmm2                  // element 8 - B
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm1
  movss  DWORD [Result + $20], xmm3
end;
5124

5125
class operator TMatrix3.Subtract(const A: Single; const B: TMatrix3): TMatrix3; assembler;
{ Scalar-minus-matrix: Result[i] := A - B[i] for all 9 elements.
  A arrives in xmm1.
  Fixed: the previous version loaded B's last element into xmm6, which the
  Win64 ABI (and the note at the top of this file) requires to be preserved
  across calls; it was clobbered without being saved. The volatile xmm3 is
  used instead, matching the sibling operators (Add, Divide, Multiply). }
asm
  movups xmm4, DQWORD [B + $00] // Load 3 rows
  shufps xmm1, xmm1, 0          // Replicate A
  movups xmm5, DQWORD [B + $10]
  movaps xmm0, xmm1
  movaps xmm2, xmm1
  movss  xmm3, DWORD [B + $20]  // was xmm6 (callee-saved) — now volatile xmm3
  subps  xmm1, xmm4             // Subtract each row from A
  subps  xmm0, xmm5
  subss  xmm2, xmm3
  movups DQWORD [Result + $00], xmm1
  movups DQWORD [Result + $10], xmm0
  movss  DWORD [Result + $20], xmm2
end;
5140

5141
class operator TMatrix3.Subtract(const A, B: TMatrix3): TMatrix3; assembler;
{ Element-wise difference: Result[i] := A[i] - B[i] for all 9 elements.
  Fixed: the previous version loaded B's last element into xmm6, which the
  Win64 ABI (and the note at the top of this file) requires to be preserved;
  it was clobbered without being saved. The volatile xmm3 is used instead,
  matching TMatrix3.Add. }
asm
  movups xmm0, DQWORD [A + $00] // Load 3 rows of A
  movups xmm1, DQWORD [A + $10]
  movss  xmm2, DWORD [A + $20]
  movups xmm4, DQWORD [B + $00] // Load 3 rows of B
  movups xmm5, DQWORD [B + $10]
  movss  xmm3, DWORD [B + $20]  // was xmm6 (callee-saved) — now volatile xmm3
  subps  xmm0, xmm4             // Subtract rows
  subps  xmm1, xmm5
  subss  xmm2, xmm3
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm1
  movss  DWORD [Result + $20], xmm2
end;
5156

5157
function TMatrix3.Transpose: TMatrix3; assembler;
{ Returns the transposed matrix. Reads one column of Self at a time and
  writes it out as a row of Result. Element [R,C] is at offset R*$0C + C*4. }
asm
  movss xmm0, DWORD [Self + $00]    // column 0: [0,0] [1,0] [2,0]
  movss xmm1, DWORD [Self + $0C]
  movss xmm2, DWORD [Self + $18]
  movss DWORD [Result + $00], xmm0  // -> row 0 of Result
  movss DWORD [Result + $04], xmm1
  movss DWORD [Result + $08], xmm2

  movss xmm0, DWORD [Self + $04]    // column 1: [0,1] [1,1] [2,1]
  movss xmm1, DWORD [Self + $10]
  movss xmm2, DWORD [Self + $1C]
  movss DWORD [Result + $0C], xmm0  // -> row 1 of Result
  movss DWORD [Result + $10], xmm1
  movss DWORD [Result + $14], xmm2

  movss xmm0, DWORD [Self + $08]    // column 2: [0,2] [1,2] [2,2]
  movss xmm1, DWORD [Self + $14]
  movss xmm2, DWORD [Self + $20]
  movss DWORD [Result + $18], xmm0  // -> row 2 of Result
  movss DWORD [Result + $1C], xmm1
  movss DWORD [Result + $20], xmm2
end;
5183

5184
{ TMatrix 4 }
5185

5186
class operator TMatrix4.Add(const A: TMatrix4; const B: Single): TMatrix4; assembler;
{ Matrix-plus-scalar: Result[i] := A[i] + B for all 16 elements.
  B arrives in xmm2 (third argument slot of the Win64 convention). }
asm
  shufps xmm2, xmm2, 0               // Broadcast B to all four lanes
  movups xmm0, DQWORD [A + $00]      // Load the 4 rows
  movups xmm1, DQWORD [A + $10]
  movups xmm3, DQWORD [A + $20]
  movups xmm4, DQWORD [A + $30]
  addps  xmm0, xmm2                  // Add B to every row
  addps  xmm1, xmm2
  addps  xmm3, xmm2
  addps  xmm4, xmm2
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm1
  movups DQWORD [Result + $20], xmm3
  movups DQWORD [Result + $30], xmm4
end;
5202

5203
class operator TMatrix4.Add(const A: Single; const B: TMatrix4): TMatrix4; assembler;
{ Scalar-plus-matrix: Result[i] := A + B[i] for all 16 elements.
  A arrives in xmm1 (third argument slot of the Win64 convention). }
asm
  shufps xmm1, xmm1, 0               // Broadcast A to all four lanes
  movups xmm2, DQWORD [B + $00]      // Load the 4 rows
  movups xmm3, DQWORD [B + $10]
  movups xmm4, DQWORD [B + $20]
  movups xmm5, DQWORD [B + $30]
  addps  xmm2, xmm1                  // Add A to every row
  addps  xmm3, xmm1
  addps  xmm4, xmm1
  addps  xmm5, xmm1
  movups DQWORD [Result + $00], xmm2
  movups DQWORD [Result + $10], xmm3
  movups DQWORD [Result + $20], xmm4
  movups DQWORD [Result + $30], xmm5
end;
5219

5220
class operator TMatrix4.Add(const A, B: TMatrix4): TMatrix4; assembler;
{ Element-wise sum of two 4x4 matrices, processed one 16-byte row at a time
  to keep register pressure low. }
asm
  movups xmm0, DQWORD [A + $00]      // Row 0
  movups xmm1, DQWORD [B + $00]
  addps  xmm0, xmm1
  movups DQWORD [Result + $00], xmm0

  movups xmm2, DQWORD [A + $10]      // Row 1
  movups xmm3, DQWORD [B + $10]
  addps  xmm2, xmm3
  movups DQWORD [Result + $10], xmm2

  movups xmm4, DQWORD [A + $20]      // Row 2
  movups xmm5, DQWORD [B + $20]
  addps  xmm4, xmm5
  movups DQWORD [Result + $20], xmm4

  movups xmm0, DQWORD [A + $30]      // Row 3
  movups xmm1, DQWORD [B + $30]
  addps  xmm0, xmm1
  movups DQWORD [Result + $30], xmm0
end;
5239

5240
function TMatrix4.CompMult(const AOther: TMatrix4): TMatrix4; assembler;
{ Component-wise (Hadamard) product of Self and AOther, processed one
  16-byte row at a time. }
asm
  movups xmm0, DQWORD [Self + $00]   // Row 0
  movups xmm1, DQWORD [AOther + $00]
  mulps  xmm0, xmm1
  movups DQWORD [Result + $00], xmm0

  movups xmm2, DQWORD [Self + $10]   // Row 1
  movups xmm3, DQWORD [AOther + $10]
  mulps  xmm2, xmm3
  movups DQWORD [Result + $10], xmm2

  movups xmm4, DQWORD [Self + $20]   // Row 2
  movups xmm5, DQWORD [AOther + $20]
  mulps  xmm4, xmm5
  movups DQWORD [Result + $20], xmm4

  movups xmm0, DQWORD [Self + $30]   // Row 3
  movups xmm1, DQWORD [AOther + $30]
  mulps  xmm0, xmm1
  movups DQWORD [Result + $30], xmm0
end;
5263

5264
class operator TMatrix4.Divide(const A: Single; const B: TMatrix4): TMatrix4; assembler;
{ Scalar-by-matrix division: Result[i] := A / B[i] for all 16 elements.
  A arrives in xmm1; four working copies are needed because divps destroys
  its destination operand. }
asm
  shufps xmm1, xmm1, 0               // Broadcast A to all four lanes
  movaps xmm0, xmm1
  movaps xmm2, xmm1
  movaps xmm3, xmm1
  movups xmm4, DQWORD [B + $00]
  movups xmm5, DQWORD [B + $10]
  divps  xmm0, xmm4                  // A / row 0
  divps  xmm1, xmm5                  // A / row 1
  movups xmm4, DQWORD [B + $20]
  movups xmm5, DQWORD [B + $30]
  divps  xmm2, xmm4                  // A / row 2
  divps  xmm3, xmm5                  // A / row 3
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm1
  movups DQWORD [Result + $20], xmm2
  movups DQWORD [Result + $30], xmm3
end;
5283

5284
class operator TMatrix4.Divide(const A: TMatrix4; const B: Single): TMatrix4; assembler;
{ Matrix-by-scalar division: Result[i] := A[i] / B for all 16 elements.
  B arrives in xmm2. True division is used instead of multiplying by a
  reciprocal obtained with RCPPS, because RCPPS is only an approximation
  and would cost accuracy. }
asm
  shufps xmm2, xmm2, 0               // Broadcast B to all four lanes
  movups xmm0, DQWORD [A + $00]      // Load the 4 rows
  movups xmm1, DQWORD [A + $10]
  movups xmm3, DQWORD [A + $20]
  movups xmm4, DQWORD [A + $30]
  divps  xmm0, xmm2                  // Divide every row by B
  divps  xmm1, xmm2
  divps  xmm3, xmm2
  divps  xmm4, xmm2
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm1
  movups DQWORD [Result + $20], xmm3
  movups DQWORD [Result + $30], xmm4
end;
5301

5302
function TMatrix4.Inverse: TMatrix4; assembler;
{ Computes the inverse via cofactor expansion (adjugate / determinant).
  Fixed: the prologue saved xmm6 into BOTH the X6 and X7 slots
  ("movdqa [Stack.X7], xmm6"), while the epilogue restores xmm7 from X7 —
  so the caller's xmm7, a callee-saved register under the Win64 ABI, was
  silently destroyed. The prologue now saves xmm7 into X7.
  NOTE(review): movdqa requires [Stack] to be 16-byte aligned — presumably
  guaranteed by TVector4's declared alignment; confirm if TStack layout
  ever changes. }
type
  TStack = record
    case Byte of
      0: (WorkSpace: array [0..7] of TVector4);
      1: (F0, F1, F2, F3, F4, F5, X6, X7: TVector4);
  end;
var
  Stack: TStack;
asm
  movdqa [Stack.X6], xmm6
  movdqa [Stack.X7], xmm7      // FIX: was xmm6; xmm7 must be preserved

  movups xmm1, DQWORD[Self + $10] // M[1]
  movups xmm2, DQWORD[Self + $20] // M[2]
  movups xmm3, DQWORD[Self + $30] // M[3]

  //  C00 := (A.M[2,2] * A.M[3,3]) - (A.M[3,2] * A.M[2,3]);
  //  C02 := (A.M[1,2] * A.M[3,3]) - (A.M[3,2] * A.M[1,3]);
  //  C03 := (A.M[1,2] * A.M[2,3]) - (A.M[2,2] * A.M[1,3]);
  //  F0 := Vector4(C00, C00, C02, C03);
  movaps xmm5, xmm2            // M[2]
  movaps xmm7, xmm2            // M[2]
  movaps xmm0, xmm3            // M[3]
  movaps xmm6, xmm3            // M[3]
  shufps xmm6, xmm2, $AA       // M22 M22 M32 M32
  shufps xmm0, xmm2, $FF       // M23 M23 M33 M33
  shufps xmm7, xmm1, $FF       // M13 M13 M23 M23
  pshufd xmm4, xmm0, $80       // M23 M33 M33 M33
  shufps xmm5, xmm1, $AA       // M12 M12 M22 M22
  pshufd xmm0, xmm6, $80       // M22 M32 M32 M32
  mulps  xmm5, xmm4            // (M12 * M23) (M12 * M33) (M22 * M33) (M22 * M33)
  mulps  xmm7, xmm0            // (M22 * M13) (M32 * M13) (M32 * M23) (M32 * M23)
  subps  xmm5, xmm7            // C03=(M12*M23)-(M22*M13), C02=(M12*M33)-(M32*M13), C00=(M22*M33)-(M32*M23), C00=(M22*M33)-(M32*M23)
  movups [Stack.F0], xmm5

  //  C04 := (A.M[2,1] * A.M[3,3]) - (A.M[3,1] * A.M[2,3]);
  //  C06 := (A.M[1,1] * A.M[3,3]) - (A.M[3,1] * A.M[1,3]);
  //  C07 := (A.M[1,1] * A.M[2,3]) - (A.M[2,1] * A.M[1,3]);
  //  F1 := Vector4(C04, C04, C06, C07);
  movaps xmm5, xmm2            // M[2]
  movaps xmm7, xmm2            // M[2]
  movaps xmm0, xmm3            // M[3]
  movaps xmm6, xmm3            // M[3]
  shufps xmm6, xmm2, $55       // M21 M21 M31 M31
  shufps xmm0, xmm2, $FF       // M23 M23 M33 M33
  shufps xmm7, xmm1, $FF       // M13 M13 M23 M23
  pshufd xmm4, xmm0, $80       // M23 M33 M33 M33
  shufps xmm5, xmm1, $55       // M11 M11 M21 M21
  pshufd xmm0, xmm6, $80       // M21 M31 M31 M31
  mulps  xmm5, xmm4            // (M11 * M23) (M11 * M33) (M21 * M33) (M21 * M33)
  mulps  xmm7, xmm0            // (M21 * M13) (M31 * M13) (M31 * M23) (M31 * M23)
  subps  xmm5, xmm7            // C07=(M11*M23)-(M21*M13), C06=(M11*M33)-(M31*M13), C04=(M21*M33)-(M31*M23), C04=(M21*M33)-(M31*M23)
  movups [Stack.F1], xmm5

  //  C08 := (A.M[2,1] * A.M[3,2]) - (A.M[3,1] * A.M[2,2]);
  //  C10 := (A.M[1,1] * A.M[3,2]) - (A.M[3,1] * A.M[1,2]);
  //  C11 := (A.M[1,1] * A.M[2,2]) - (A.M[2,1] * A.M[1,2]);
  //  F2 := Vector4(C08, C08, C10, C11);
  movaps xmm5, xmm2            // M[2]
  movaps xmm7, xmm2            // M[2]
  movaps xmm0, xmm3            // M[3]
  movaps xmm6, xmm3            // M[3]
  shufps xmm6, xmm2, $55       // M21 M21 M31 M31
  shufps xmm0, xmm2, $AA       // M22 M22 M32 M32
  shufps xmm7, xmm1, $AA       // M12 M12 M22 M22
  pshufd xmm4, xmm0, $80       // M22 M32 M32 M32
  shufps xmm5, xmm1, $55       // M11 M11 M21 M21
  pshufd xmm0, xmm6, $80       // M21 M31 M31 M31
  mulps  xmm5, xmm4            // (M11 * M22) (M11 * M32) (M21 * M32) (M21 * M32)
  mulps  xmm7, xmm0            // (M21 * M12) (M31 * M12) (M31 * M22) (M32 * M22)
  subps  xmm5, xmm7            // C11=(M11*M22)-(M21*M12), C10=(M11*M32)-(M31*M12), C08=(M21*M32)-(M31*M22), C08=(M21*M32)-(M31*M22)
  movups [Stack.F2], xmm5

  //  C12 := (A.M[2,0] * A.M[3,3]) - (A.M[3,0] * A.M[2,3]);
  //  C14 := (A.M[1,0] * A.M[3,3]) - (A.M[3,0] * A.M[1,3]);
  //  C15 := (A.M[1,0] * A.M[2,3]) - (A.M[2,0] * A.M[1,3]);
  //  F3 := Vector4(C12, C12, C14, C15);
  movaps xmm5, xmm2            // M[2]
  movaps xmm7, xmm2            // M[2]
  movaps xmm0, xmm3            // M[3]
  movaps xmm6, xmm3            // M[3]
  shufps xmm6, xmm2, $00       // M20 M20 M30 M30
  shufps xmm0, xmm2, $FF       // M23 M23 M33 M33
  shufps xmm7, xmm1, $FF       // M13 M13 M23 M23
  pshufd xmm4, xmm0, $80       // M23 M33 M33 M33
  shufps xmm5, xmm1, $00       // M10 M10 M20 M20
  pshufd xmm0, xmm6, $80       // M20 M30 M30 M30
  mulps  xmm5, xmm4            // (M10 * M23) (M10 * M33) (M20 * M33) (M20 * M33)
  mulps  xmm7, xmm0            // (M20 * M13) (M30 * M13) (M30 * M23) (M30 * M23)
  subps  xmm5, xmm7            // C15=(M10*M23)-(M20*M13), C14=(M10*M33)-(M30*M13), C12=(M20*M33)-(M30*M23), C12=(M20*M33)-(M30*M23)
  movups [Stack.F3], xmm5

  //  C16 := (A.M[2,0] * A.M[3,2]) - (A.M[3,0] * A.M[2,2]);
  //  C18 := (A.M[1,0] * A.M[3,2]) - (A.M[3,0] * A.M[1,2]);
  //  C19 := (A.M[1,0] * A.M[2,2]) - (A.M[2,0] * A.M[1,2]);
  //  F4 := Vector4(C16, C16, C18, C19);
  movaps xmm5, xmm2            // M[2]
  movaps xmm7, xmm2            // M[2]
  movaps xmm0, xmm3            // M[3]
  movaps xmm6, xmm3            // M[3]
  shufps xmm6, xmm2, $00       // M20 M20 M30 M30
  shufps xmm0, xmm2, $AA       // M22 M22 M32 M32
  shufps xmm7, xmm1, $AA       // M12 M12 M22 M22
  pshufd xmm4, xmm0, $80       // M22 M32 M32 M32
  shufps xmm5, xmm1, $00       // M10 M10 M20 M20
  pshufd xmm0, xmm6, $80       // M20 M30 M30 M30
  mulps  xmm5, xmm4            // (M10 * M22) (M10 * M32) (M20 * M32) (M20 * M32)
  mulps  xmm7, xmm0            // (M20 * M12) (M30 * M12) (M30 * M22) (M30 * M22)
  subps  xmm5, xmm7            // C19=(M10*M22)-(M20*M12), C18=(M10*M32)-(M30*M12), C16=(M20*M32)-(M30*M22), C16=(M20*M32)-(M30*M22)
  movups [Stack.F4], xmm5

  //  C20 := (A.M[2,0] * A.M[3,1]) - (A.M[3,0] * A.M[2,1]);
  //  C22 := (A.M[1,0] * A.M[3,1]) - (A.M[3,0] * A.M[1,1]);
  //  C23 := (A.M[1,0] * A.M[2,1]) - (A.M[2,0] * A.M[1,1]);
  //  F5 := Vector4(C20, C20, C22, C23);
  movaps xmm5, xmm2            // M[2]
  movaps xmm7, xmm2            // M[2]
  movaps xmm0, xmm3            // M[3]
  movaps xmm6, xmm3            // M[3]
  shufps xmm6, xmm2, $00       // M20 M20 M30 M30
  shufps xmm0, xmm2, $55       // M21 M21 M31 M31
  shufps xmm7, xmm1, $55       // M11 M11 M21 M21
  pshufd xmm4, xmm0, $80       // M21 M31 M31 M31
  shufps xmm5, xmm1, $00       // M10 M10 M20 M20
  pshufd xmm0, xmm6, $80       // M20 M30 M30 M30
  mulps  xmm5, xmm4            // (M10 * M21) (M10 * M31) (M20 * M31) (M20 * M31)
  mulps  xmm7, xmm0            // (M20 * M11) (M30 * M11) (M30 * M21) (M30 * M21)
  subps  xmm5, xmm7            // C23=(M10*M21)-(M20*M11), C22=(M10*M31)-(M30*M11), C20=(M20*M31)-(M30*M21), C20=(M20*M31)-(M30*M21)
  movups [Stack.F5], xmm5

  //  V0 := Vector4(A.M[1,0], A.M[0,0], A.M[0,0], A.M[0,0]);
  //  V1 := Vector4(A.M[1,1], A.M[0,1], A.M[0,1], A.M[0,1]);
  //  V2 := Vector4(A.M[1,2], A.M[0,2], A.M[0,2], A.M[0,2]);
  //  V3 := Vector4(A.M[1,3], A.M[0,3], A.M[0,3], A.M[0,3]);
  movups xmm0, DQWORD[Self + $00] // M[0]
  movaps xmm4, xmm1            // M[1]
  movaps xmm5, xmm1            // M[1]
  movaps xmm6, xmm1            // M[1]
  movaps xmm7, xmm1            // M[1]

  shufps xmm4, xmm0, $00       // M00 M00 M10 M10
  shufps xmm5, xmm0, $55       // M01 M01 M11 M11
  shufps xmm6, xmm0, $AA       // M02 M02 M12 M12
  shufps xmm7, xmm0, $FF       // M03 M03 M13 M13

  pshufd xmm4, xmm4, $A8       // V0=M00 M00 M00 M10
  pshufd xmm5, xmm5, $A8       // V1=M01 M01 M01 M11
  pshufd xmm6, xmm6, $A8       // V2=M02 M02 M02 M12
  pshufd xmm7, xmm7, $A8       // V3=M03 M03 M03 M13

  //  I0 := (V1 * F0) - (V2 * F1) + (V3 * F2);
  //  I1 := (V0 * F0) - (V2 * F3) + (V3 * F4);
  //  I2 := (V0 * F1) - (V1 * F3) + (V3 * F5);
  //  I3 := (V0 * F2) - (V1 * F4) + (V2 * F5);
  movaps xmm0, xmm5            // V1
  movaps xmm1, xmm6            // V2
  movaps xmm2, xmm7            // V3
  mulps  xmm0, [Stack.F0]      // V1 * F0
  mulps  xmm1, [Stack.F1]      // V2 * F1
  mulps  xmm2, [Stack.F2]      // V3 * F2
  subps  xmm0, xmm1            // (V1 * F0) - (V2 * F1)
  movaps xmm1, xmm4            // V0
  addps  xmm0, xmm2            // I0=(V1 * F0) - (V2 * F1) + (V3 * F2)

  movaps xmm2, xmm6            // V2
  movaps xmm3, xmm7            // V3
  mulps  xmm1, [Stack.F0]      // V0 * F0
  mulps  xmm2, [Stack.F3]      // V2 * F3
  mulps  xmm3, [Stack.F4]      // V3 * F4
  subps  xmm1, xmm2            // (V0 * F0) - (V2 * F3)
  movaps xmm2, xmm4            // V0
  addps  xmm1, xmm3            // I1=(V0 * F0) - (V2 * F3) + (V3 * F4)

  movaps xmm3, xmm5            // V1
  mulps  xmm2, [Stack.F1]      // V0 * F1
  mulps  xmm3, [Stack.F3]      // V1 * F3
  mulps  xmm7, [Stack.F5]      // V3 * F5
  subps  xmm2, xmm3            // (V0 * F1) - (V1 * F3)
  mulps  xmm4, [Stack.F2]      // V0 * F2
  addps  xmm2, xmm7            // I2=(V0 * F1) - (V1 * F3) + (V3 * F5)

  mulps  xmm5, [Stack.F4]      // V1 * F4
  mulps  xmm6, [Stack.F5]      // V2 * F5
  subps  xmm4, xmm5            // (V0 * F2) - (V1 * F4)
  addps  xmm4, xmm6            // I3=(V0 * F2) - (V1 * F4) + (V2 * F5)

  //  SA := Vector4(+1, -1, +1, -1);
  //  SB := Vector4(-1, +1, -1, +1);
  //  Inv := Matrix4(I0 * SA, I1 * SB, I2 * SA, I3 * SB);

  movups xmm6, [SSE_MASK_PNPN] // SA
  movups xmm7, [SSE_MASK_NPNP] // SB
  xorps  xmm0, xmm6            // Inv[0] = I0 * SA
  xorps  xmm1, xmm7            // Inv[1] = I1 * SB
  xorps  xmm2, xmm6            // Inv[2] = I2 * SA
  xorps  xmm4, xmm7            // Inv[3] = I3 * SB

  //  Row := Vector4(Inv[0,0], Inv[1,0], Inv[2,0], Inv[3,0]);
  movaps   xmm3, xmm0
  movaps   xmm5, xmm2
  movaps   xmm6, xmm1

  unpcklps xmm3, xmm1          // Inv[1,1] Inv[0,1] Inv[1,0] Inv[0,0]
  unpcklps xmm5, xmm4          // Inv[3,1] Inv[2,1] Inv[3,0] Inv[2,0]
  movups   xmm6, DQWORD[Self + $00] // A.C[0]
  movlhps  xmm3, xmm5          // Inv[3,0] Inv[2,0] Inv[1,0] Inv[0,0]

  //  Dot := A.C[0] * Row;
  mulps    xmm3, xmm6          // Dot.W  Dot.Z  Dot.Y  Dot.X

  //  OneOverDeterminant := 1 / ((Dot.X + Dot.Y) + (Dot.Z + Dot.W));
  pshufd   xmm6, xmm3, $4E     // Dot.Y  Dot.X  Dot.W  Dot.Z
  addps    xmm3, xmm6          // W+Y Z+X Y+W X+Z
  pshufd   xmm6, xmm3, $11     // X+Z Y+X X+Z Y+W
  movups   xmm5, [SSE_ONE]     // 1.0 (4x)
  addps    xmm3, xmm6          // X+Y+Z+W (4x)
  divps    xmm5, xmm3          // OneOverDeterminant (4x)

  //  Result := Inv * OneOverDeterminant;
  mulps    xmm0, xmm5
  mulps    xmm1, xmm5
  mulps    xmm2, xmm5
  mulps    xmm4, xmm5

  movups   DQWORD[Result + $00], xmm0
  movups   DQWORD[Result + $10], xmm1
  movups   DQWORD[Result + $20], xmm2
  movups   DQWORD[Result + $30], xmm4

  movdqa   xmm6, [Stack.X6]    // Restore callee-saved registers
  movdqa   xmm7, [Stack.X7]
end;
5535

5536
class operator TMatrix4.Multiply(const A: Single; const B: TMatrix4): TMatrix4; assembler;
{ Scalar-matrix product: Result[i] := A * B[i] for all 16 elements.
  A arrives in xmm1 (third argument slot of the Win64 convention). }
asm
  shufps xmm1, xmm1, 0               // Broadcast A to all four lanes
  movups xmm2, DQWORD [B + $00]      // Load the 4 rows
  movups xmm3, DQWORD [B + $10]
  movups xmm4, DQWORD [B + $20]
  movups xmm5, DQWORD [B + $30]
  mulps  xmm2, xmm1                  // Scale every row by A
  mulps  xmm3, xmm1
  mulps  xmm4, xmm1
  mulps  xmm5, xmm1
  movups DQWORD [Result + $00], xmm2
  movups DQWORD [Result + $10], xmm3
  movups DQWORD [Result + $20], xmm4
  movups DQWORD [Result + $30], xmm5
end;
5552

5553
class operator TMatrix4.Multiply(const A: TMatrix4; const B: Single): TMatrix4; assembler;
{ Matrix-scalar product: Result[i] := A[i] * B for all 16 elements.
  B arrives in xmm2 (third argument slot of the Win64 convention). }
asm
  shufps xmm2, xmm2, 0               // Broadcast B to all four lanes
  movups xmm0, DQWORD [A + $00]      // Load the 4 rows
  movups xmm1, DQWORD [A + $10]
  movups xmm3, DQWORD [A + $20]
  movups xmm4, DQWORD [A + $30]
  mulps  xmm0, xmm2                  // Scale every row by B
  mulps  xmm1, xmm2
  mulps  xmm3, xmm2
  mulps  xmm4, xmm2
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm1
  movups DQWORD [Result + $20], xmm3
  movups DQWORD [Result + $30], xmm4
end;
5569

5570
{$IFDEF FM_COLUMN_MAJOR}
5571
class operator TMatrix4.Multiply(const A: TMatrix4; const B: TVector4): TVector4; assembler;
asm
  { Matrix * vector (FM_COLUMN_MAJOR build): broadcast each component of B
    and accumulate the four 16-byte rows of A weighted by them.
    Result = A.R[0]*B.X + A.R[1]*B.Y + A.R[2]*B.Z + A.R[3]*B.W
    (in this build the stored rows act as columns — presumably; confirm
    against the row-major variant below). }
  movups xmm0, [B]                // Load vector B
  movups xmm4, DQWORD [A + $00]   // Row 0 of A
  movaps xmm1, xmm0
  movaps xmm2, xmm0
  movaps xmm3, xmm0
  shufps xmm0, xmm0, $00          // Bx Bx Bx Bx
  shufps xmm1, xmm1, $55          // By By By By
  shufps xmm2, xmm2, $AA          // Bz Bz Bz Bz
  shufps xmm3, xmm3, $FF          // Bw Bw Bw Bw
  movups xmm5, DQWORD [A + $10]   // Row 1 of A
  mulps  xmm0, xmm4               // Row0 * Bx
  mulps  xmm1, xmm5               // Row1 * By
  movups xmm4, DQWORD [A + $20]   // Row 2 of A (xmm4 reused)
  movups xmm5, DQWORD [A + $30]   // Row 3 of A (xmm5 reused)
  mulps  xmm2, xmm4               // Row2 * Bz
  mulps  xmm3, xmm5               // Row3 * Bw
  addps  xmm0, xmm1               // Sum the four weighted rows
  addps  xmm2, xmm3
  addps  xmm0, xmm2
  movups [Result], xmm0
end;
5594

5595
class operator TMatrix4.Multiply(const A: TVector4; const B: TMatrix4): TVector4; assembler;
asm
  { Vector * matrix (FM_COLUMN_MAJOR build): multiply A component-wise with
    each 16-byte row of B, transpose the four products, and sum — yielding
    the four dot products dot(A, B.R[i]) in one register. }
  movups   xmm0, [A]
  movups   xmm4, DQWORD [B + $00]
  movaps   xmm1, xmm0
  movaps   xmm2, xmm0
  movaps   xmm3, xmm0
  movups   xmm5, DQWORD [B + $10]
  mulps    xmm0, xmm4             // A * Row0 (component-wise)
  mulps    xmm1, xmm5             // A * Row1
  movups   xmm4, DQWORD [B + $20]
  movups   xmm5, DQWORD [B + $30]
  mulps    xmm2, xmm4             // A * Row2
  mulps    xmm3, xmm5             // A * Row3

  { Transpose xmm0-xmm3 (4x4 unpck ladder) so each register holds the same
    lane of all four products }
  movaps   xmm4, xmm2
  unpcklps xmm2, xmm3
  unpckhps xmm4, xmm3

  movaps   xmm3, xmm0
  unpcklps xmm0, xmm1
  unpckhps xmm3, xmm1

  movaps   xmm1, xmm0
  unpcklpd xmm0, xmm2
  unpckhpd xmm1, xmm2

  movaps   xmm2, xmm3
  unpcklpd xmm2, xmm4
  unpckhpd xmm3, xmm4

  addps    xmm0, xmm1             // Horizontal sums = the four dot products
  addps    xmm2, xmm3
  addps    xmm0, xmm2
  movups   [Result], xmm0
end;
5632

5633
class operator TMatrix4.Multiply(const A, B: TMatrix4): TMatrix4; assembler;
{ Code below consists of 4 Vector*Matrix calculations: each 16-byte row of
  the result is a linear combination of A's rows weighted by the
  corresponding row of B. The rows of A stay cached in XMM4-XMM7. }
asm
  { Save callee-saved XMM6/XMM7 below RSP. Per the file header, RSP ends in
    8 on entry, so rsp-24 and rsp-40 are 16-byte aligned as MOVDQA needs.
    NOTE(review): Win64 defines no red zone, so memory below RSP may in
    principle be clobbered asynchronously — confirm this is acceptable for
    this leaf routine (no calls are made while the data lives there). }
  movdqa [rsp-24], xmm6
  movdqa [rsp-40], xmm7

  { Result row 0 = A rows weighted by B row 0 }
  movups xmm0, DQWORD [B + $00]
  movups xmm4, DQWORD [A + $00]   // Cache the four rows of A...
  movaps xmm1, xmm0
  movaps xmm2, xmm0
  movaps xmm3, xmm0
  shufps xmm0, xmm0, $00          // B00 broadcast
  shufps xmm1, xmm1, $55          // B01 broadcast
  shufps xmm2, xmm2, $AA          // B02 broadcast
  shufps xmm3, xmm3, $FF          // B03 broadcast
  movups xmm5, DQWORD [A + $10]   // ...in XMM4-XMM7 for all four passes
  movups xmm6, DQWORD [A + $20]
  movups xmm7, DQWORD [A + $30]
  mulps  xmm0, xmm4
  mulps  xmm1, xmm5
  mulps  xmm2, xmm6
  mulps  xmm3, xmm7
  addps  xmm0, xmm1
  addps  xmm2, xmm3
  addps  xmm0, xmm2
  movups DQWORD [Result + $00], xmm0

  { Result row 1 = A rows weighted by B row 1 }
  movups xmm0, DQWORD [B + $10]
  movaps xmm1, xmm0
  movaps xmm2, xmm0
  movaps xmm3, xmm0
  shufps xmm0, xmm0, $00
  shufps xmm1, xmm1, $55
  shufps xmm2, xmm2, $AA
  shufps xmm3, xmm3, $FF
  mulps  xmm0, xmm4
  mulps  xmm1, xmm5
  mulps  xmm2, xmm6
  mulps  xmm3, xmm7
  addps  xmm0, xmm1
  addps  xmm2, xmm3
  addps  xmm0, xmm2
  movups DQWORD [Result + $10], xmm0

  { Result row 2 = A rows weighted by B row 2 }
  movups xmm0, DQWORD [B + $20]
  movaps xmm1, xmm0
  movaps xmm2, xmm0
  movaps xmm3, xmm0
  shufps xmm0, xmm0, $00
  shufps xmm1, xmm1, $55
  shufps xmm2, xmm2, $AA
  shufps xmm3, xmm3, $FF
  mulps  xmm0, xmm4
  mulps  xmm1, xmm5
  mulps  xmm2, xmm6
  mulps  xmm3, xmm7
  addps  xmm0, xmm1
  addps  xmm2, xmm3
  addps  xmm0, xmm2
  movups DQWORD [Result + $20], xmm0

  { Result row 3 = A rows weighted by B row 3 }
  movups xmm0, DQWORD [B + $30]
  movaps xmm1, xmm0
  movaps xmm2, xmm0
  movaps xmm3, xmm0
  shufps xmm0, xmm0, $00
  shufps xmm1, xmm1, $55
  shufps xmm2, xmm2, $AA
  shufps xmm3, xmm3, $FF
  mulps  xmm0, xmm4
  mulps  xmm1, xmm5
  mulps  xmm2, xmm6
  mulps  xmm3, xmm7
  addps  xmm0, xmm1
  addps  xmm2, xmm3
  addps  xmm0, xmm2
  movups DQWORD [Result + $30], xmm0

  { Restore callee-saved XMM registers }
  movdqa xmm6, [rsp-24]
  movdqa xmm7, [rsp-40]
end;
5714
{$ELSE}
5715
class operator TMatrix4.Multiply(const A: TMatrix4; const B: TVector4): TVector4; assembler;
asm
  { Matrix * vector (row-major build): multiply B component-wise with each
    row of A, transpose the four products, and sum — Result[i] is the dot
    product of A.R[i] with B. }
  movups   xmm0, [B]              // Load vector
  movups   xmm4, DQWORD [A + $00] // Load 4 rows
  movaps   xmm1, xmm0
  movaps   xmm2, xmm0
  movaps   xmm3, xmm0
  movups   xmm5, DQWORD [A + $10]
  mulps    xmm0, xmm4             // (Ax * B00), (Ay * B01), (Az * B02), (Aw * B03)
  mulps    xmm1, xmm5             // (Ax * B10), (Ay * B11), (Az * B12), (Aw * B13)
  movups   xmm4, DQWORD [A + $20]
  movups   xmm5, DQWORD [A + $30]
  mulps    xmm2, xmm4             // (Ax * B20), (Ay * B21), (Az * B22), (Aw * B23)
  mulps    xmm3, xmm5             // (Ax * B30), (Ay * B31), (Az * B32), (Aw * B33)

  { Transpose xmm0-xmm3 so each register holds one lane of all four
    products; adding them then performs the four horizontal sums at once }
  movaps   xmm4, xmm2
  unpcklps xmm2, xmm3             // B32 B22 B33 B23
  unpckhps xmm4, xmm3             // B30 B20 B31 B21

  movaps   xmm3, xmm0
  unpcklps xmm0, xmm1             // B12 B02 B13 B03
  unpckhps xmm3, xmm1             // B10 B00 B11 B01

  movaps   xmm1, xmm0
  unpcklpd xmm0, xmm2             // B33 B23 B13 B03
  unpckhpd xmm1, xmm2             // B32 B22 B12 B02

  movaps   xmm2, xmm3
  unpcklpd xmm2, xmm4             // B31 B21 B11 B01
  unpckhpd xmm3, xmm4             // B30 B20 B10 B00

  addps    xmm0, xmm1             // Add rows
  addps    xmm2, xmm3
  addps    xmm0, xmm2
  movups   [Result], xmm0
end;
5752

5753
class operator TMatrix4.Multiply(const A: TVector4; const B: TMatrix4): TVector4; assembler;
asm
  { Vector * matrix (row-major build): accumulate the rows of B weighted by
    the components of A:
    Result = B.R[0]*A.X + B.R[1]*A.Y + B.R[2]*A.Z + B.R[3]*A.W.
    Summation order matches the pairwise scheme (r0+r1) + (r2+r3). }
  movups xmm3, [A]              // Keep the raw vector in xmm3
  movaps xmm0, xmm3
  shufps xmm0, xmm0, $00        // Ax Ax Ax Ax
  movups xmm4, DQWORD [B + $00]
  mulps  xmm0, xmm4             // Row0 * Ax
  movaps xmm1, xmm3
  shufps xmm1, xmm1, $55        // Ay Ay Ay Ay
  movups xmm4, DQWORD [B + $10]
  mulps  xmm1, xmm4             // Row1 * Ay
  addps  xmm0, xmm1
  movaps xmm2, xmm3
  shufps xmm2, xmm2, $AA        // Az Az Az Az
  movups xmm4, DQWORD [B + $20]
  mulps  xmm2, xmm4             // Row2 * Az
  shufps xmm3, xmm3, $FF        // Aw Aw Aw Aw (xmm3 no longer needed raw)
  movups xmm4, DQWORD [B + $30]
  mulps  xmm3, xmm4             // Row3 * Aw
  addps  xmm2, xmm3
  addps  xmm0, xmm2
  movups [Result], xmm0
end;
5776

5777
class operator TMatrix4.Multiply(const A, B: TMatrix4): TMatrix4; assembler;
{ Code below consists of 4 Vector*Matrix calculations: Result.R[i] is the
  linear combination of B's rows weighted by the components of A.R[i].
  The rows of B stay cached in XMM4-XMM7 across all four passes. }
asm
  { Save callee-saved XMM6/XMM7 below RSP. Per the file header, RSP ends in
    8 on entry, so rsp-24 and rsp-40 are 16-byte aligned as MOVDQA needs.
    NOTE(review): Win64 defines no red zone, so memory below RSP may in
    principle be clobbered asynchronously — confirm this is acceptable for
    this leaf routine (no calls are made while the data lives there). }
  movdqa [rsp-24], xmm6
  movdqa [rsp-40], xmm7

  { A.R[0] * B }
  movups xmm0, DQWORD [A + $00]
  movups xmm4, DQWORD [B + $00]   // Cache the four rows of B...
  movaps xmm1, xmm0
  movaps xmm2, xmm0
  movaps xmm3, xmm0
  shufps xmm0, xmm0, $00          // A00 broadcast
  shufps xmm1, xmm1, $55          // A01 broadcast
  shufps xmm2, xmm2, $AA          // A02 broadcast
  shufps xmm3, xmm3, $FF          // A03 broadcast
  movups xmm5, DQWORD [B + $10]   // ...in XMM4-XMM7 for all four passes
  movups xmm6, DQWORD [B + $20]
  movups xmm7, DQWORD [B + $30]
  mulps  xmm0, xmm4
  mulps  xmm1, xmm5
  mulps  xmm2, xmm6
  mulps  xmm3, xmm7
  addps  xmm0, xmm1
  addps  xmm2, xmm3
  addps  xmm0, xmm2
  movups DQWORD [Result + $00], xmm0

  { A.R[1] * B }
  movups xmm0, DQWORD [A + $10]
  movaps xmm1, xmm0
  movaps xmm2, xmm0
  movaps xmm3, xmm0
  shufps xmm0, xmm0, $00
  shufps xmm1, xmm1, $55
  shufps xmm2, xmm2, $AA
  shufps xmm3, xmm3, $FF
  mulps  xmm0, xmm4
  mulps  xmm1, xmm5
  mulps  xmm2, xmm6
  mulps  xmm3, xmm7
  addps  xmm0, xmm1
  addps  xmm2, xmm3
  addps  xmm0, xmm2
  movups DQWORD [Result + $10], xmm0

  { A.R[2] * B }
  movups xmm0, DQWORD [A + $20]
  movaps xmm1, xmm0
  movaps xmm2, xmm0
  movaps xmm3, xmm0
  shufps xmm0, xmm0, $00
  shufps xmm1, xmm1, $55
  shufps xmm2, xmm2, $AA
  shufps xmm3, xmm3, $FF
  mulps  xmm0, xmm4
  mulps  xmm1, xmm5
  mulps  xmm2, xmm6
  mulps  xmm3, xmm7
  addps  xmm0, xmm1
  addps  xmm2, xmm3
  addps  xmm0, xmm2
  movups DQWORD [Result + $20], xmm0

  { A.R[3] * B }
  movups xmm0, DQWORD [A + $30]
  movaps xmm1, xmm0
  movaps xmm2, xmm0
  movaps xmm3, xmm0
  shufps xmm0, xmm0, $00
  shufps xmm1, xmm1, $55
  shufps xmm2, xmm2, $AA
  shufps xmm3, xmm3, $FF
  mulps  xmm0, xmm4
  mulps  xmm1, xmm5
  mulps  xmm2, xmm6
  mulps  xmm3, xmm7
  addps  xmm0, xmm1
  addps  xmm2, xmm3
  addps  xmm0, xmm2
  movups DQWORD [Result + $30], xmm0

  { Restore callee-saved XMM registers }
  movdqa xmm6, [rsp-24]
  movdqa xmm7, [rsp-40]
end;
5862
{$ENDIF}
5863

5864
class operator TMatrix4.Negative(const A: TMatrix4): TMatrix4; assembler;
asm
  { Negate every element by XOR-flipping its IEEE-754 sign bit. }
  movups xmm4, [SSE_MASK_SIGN]       // $80000000 in all 4 lanes
  movups xmm0, DQWORD [A + $00]
  movups xmm1, DQWORD [A + $10]
  xorps  xmm0, xmm4
  xorps  xmm1, xmm4
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm1
  movups xmm2, DQWORD [A + $20]
  movups xmm3, DQWORD [A + $30]
  xorps  xmm2, xmm4
  xorps  xmm3, xmm4
  movups DQWORD [Result + $20], xmm2
  movups DQWORD [Result + $30], xmm3
end;
5880

5881
procedure TMatrix4.SetInversed;
begin
  { Replace this matrix in place with its inverse. }
  Self := Self.Inverse;
end;
5885

5886
procedure TMatrix4.SetTransposed;
begin
  { Replace this matrix in place with its transpose. }
  Self := Self.Transpose;
end;
5890

5891
class operator TMatrix4.Subtract(const A: TMatrix4; const B: Single): TMatrix4; assembler;
asm
  { Subtract scalar B from every element of A.
    B arrives in XMM2 (MS x64 slot-positional argument passing, as relied
    upon by the original implementation). }
  shufps xmm2, xmm2, 0               // B B B B
  movups xmm0, DQWORD [A + $00]      // Row 0
  movups xmm1, DQWORD [A + $10]      // Row 1
  subps  xmm0, xmm2
  subps  xmm1, xmm2
  movups DQWORD [Result + $00], xmm0
  movups DQWORD [Result + $10], xmm1
  movups xmm3, DQWORD [A + $20]      // Row 2
  movups xmm4, DQWORD [A + $30]      // Row 3
  subps  xmm3, xmm2
  subps  xmm4, xmm2
  movups DQWORD [Result + $20], xmm3
  movups DQWORD [Result + $30], xmm4
end;
5907

5908
class operator TMatrix4.Subtract(const A: Single; const B: TMatrix4): TMatrix4; assembler;
asm
  { Subtract every element of B from scalar A.
    A arrives in XMM1 (MS x64 slot-positional argument passing, as relied
    upon by the original implementation). }
  shufps xmm1, xmm1, 0               // A A A A
  movaps xmm0, xmm1
  movups xmm4, DQWORD [B + $00]
  subps  xmm0, xmm4                  // A - Row 0
  movups DQWORD [Result + $00], xmm0
  movaps xmm0, xmm1
  movups xmm4, DQWORD [B + $10]
  subps  xmm0, xmm4                  // A - Row 1
  movups DQWORD [Result + $10], xmm0
  movaps xmm0, xmm1
  movups xmm4, DQWORD [B + $20]
  subps  xmm0, xmm4                  // A - Row 2
  movups DQWORD [Result + $20], xmm0
  movups xmm4, DQWORD [B + $30]
  subps  xmm1, xmm4                  // A - Row 3 (broadcast no longer needed)
  movups DQWORD [Result + $30], xmm1
end;
5927

5928
class operator TMatrix4.Subtract(const A, B: TMatrix4): TMatrix4; assembler;
asm
  { Element-wise difference A - B, processed one 4-float row at a time. }
  movups xmm0, DQWORD [A + $00]
  movups xmm1, DQWORD [B + $00]
  subps  xmm0, xmm1                  // Row 0
  movups DQWORD [Result + $00], xmm0
  movups xmm2, DQWORD [A + $10]
  movups xmm3, DQWORD [B + $10]
  subps  xmm2, xmm3                  // Row 1
  movups DQWORD [Result + $10], xmm2
  movups xmm0, DQWORD [A + $20]
  movups xmm1, DQWORD [B + $20]
  subps  xmm0, xmm1                  // Row 2
  movups DQWORD [Result + $20], xmm0
  movups xmm2, DQWORD [A + $30]
  movups xmm3, DQWORD [B + $30]
  subps  xmm2, xmm3                  // Row 3
  movups DQWORD [Result + $30], xmm2
end;
5947

5948
function TMatrix4.Transpose: TMatrix4; assembler;
asm
  { Returns the transpose of Self using the classic SSE 4x4 shuffle ladder:
    two rounds of single-precision unpacks followed by double-precision
    unpacks rearrange the 16 elements without scalar moves. }
  movups   xmm0, DQWORD[Self + $00] // A03 A02 A01 A00
  movups   xmm1, DQWORD[Self + $10] // A13 A12 A11 A10
  movups   xmm2, DQWORD[Self + $20] // A23 A22 A21 A20
  movups   xmm3, DQWORD[Self + $30] // A33 A32 A31 A30

  movaps   xmm4, xmm2
  unpcklps xmm2, xmm3               // A31 A21 A30 A20
  unpckhps xmm4, xmm3               // A33 A23 A32 A22

  movaps   xmm3, xmm0
  unpcklps xmm0, xmm1               // A11 A01 A10 A00
  unpckhps xmm3, xmm1               // A13 A03 A12 A02

  movaps   xmm1, xmm0
  unpcklpd xmm0, xmm2               // A30 A20 A10 A00
  unpckhpd xmm1, xmm2               // A31 A21 A11 A01

  movaps   xmm2, xmm3
  unpcklpd xmm2, xmm4               // A32 A22 A12 A02
  unpckhpd xmm3, xmm4               // A33 A23 A13 A03

  movups   DQWORD[Result + $00], xmm0
  movups   DQWORD[Result + $10], xmm1
  movups   DQWORD[Result + $20], xmm2
  movups   DQWORD[Result + $30], xmm3
end;
5976

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.