VECTORIZATION ON X86
Roberto A. Vitillo (LBNL)

10/23/2013 Software & Computing workshop
Why do we care?

2.6 GHz SB-EP CPU, 8 cores, 8 add + 8 mul FLOPs/cycle/core:

• multi-threaded and vectorized:    166.4 GFLOPS  (100% of theoretical maximum)
• multi-threaded, not vectorized:    20.8 GFLOPS  (12.5%)
• serial and vectorized:             20.8 GFLOPS  (12.5%)
• serial, not vectorized:             2.6 GFLOPS  (1.6%)
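A quick sanity check of how those numbers scale (the factor-of-8 decomposition into SIMD lanes and cores is my reading of the table, not spelled out on the slide):

    2.6 GFLOPS (serial, scalar) x 8 SIMD lanes = 20.8 GFLOPS
    20.8 GFLOPS x 8 cores = 166.4 GFLOPS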
Single Instruction, Multiple Data

• processor throughput is increased by handling multiple data in parallel (MMX, SSE, AVX, ...)
• a vector of data is packed in one large register and handled in one operation
• SIMD instructions are energy-efficient
• exploiting SIMD is even more important for the Xeon Phi and future architectures

[image: blogs.intel.com]
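To make the "one large register, one operation" point concrete, here is a minimal sketch of my own (not from the slides) that adds eight floats per iteration with AVX intrinsics; the function and array names are invented for illustration, and n is assumed to be a multiple of 8:

    #include <immintrin.h>

    // Adds eight single-precision floats per iteration using one 256-bit AVX register.
    // Assumes n is a multiple of 8; remainder handling is omitted for brevity.
    void add_arrays(float *a, const float *b, int n) {
        for (int i = 0; i < n; i += 8) {
            __m256 va = _mm256_loadu_ps(a + i);   // pack 8 floats into one register
            __m256 vb = _mm256_loadu_ps(b + i);
            __m256 vr = _mm256_add_ps(va, vb);    // one instruction, 8 additions
            _mm256_storeu_ps(a + i, vr);
        }
    }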
Vector sizes keep increasing but programmers continue to use simple abstractions of hardware registers.

Dart (2013):

    void multiplyMatrices(Float32x4List A, Float32x4List B, Float32x4List R) {
      var a0 = A[0];
      var a1 = A[1];
      var a2 = A[2];
      var a3 = A[3];

      var b0 = B[0];
      R[0] = b0.xxxx * a0 + b0.yyyy * a1 + b0.zzzz * a2 + b0.wwww * a3;
      var b1 = B[1];
      R[1] = b1.xxxx * a0 + b1.yyyy * a1 + b1.zzzz * a2 + b1.wwww * a3;
      var b2 = B[2];
      R[2] = b2.xxxx * a0 + b2.yyyy * a1 + b2.zzzz * a2 + b2.wwww * a3;
      var b3 = B[3];
      R[3] = b3.xxxx * a0 + b3.yyyy * a1 + b3.zzzz * a2 + b3.wwww * a3;
    }

Doom 3 (2005):

    #ifdef ID_WIN_X86_SSE2_INTRIN

    const __m128 mask_keep_last = __m128c( _mm_set_epi32( 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 ) );

    for ( int i = 0; i < numJoints; i += 2, inFloats1 += 2 * 12, inFloats2 += 2 * 12, outFloats += 2 * 12 ) {
        __m128 m1a0 = _mm_load_ps( inFloats1 + 0 * 12 + 0 );
        __m128 m1b0 = _mm_load_ps( inFloats1 + 0 * 12 + 4 );
        __m128 m1c0 = _mm_load_ps( inFloats1 + 0 * 12 + 8 );
        __m128 m1a1 = _mm_load_ps( inFloats1 + 1 * 12 + 0 );
        __m128 m1b1 = _mm_load_ps( inFloats1 + 1 * 12 + 4 );
        __m128 m1c1 = _mm_load_ps( inFloats1 + 1 * 12 + 8 );

        __m128 m2a0 = _mm_load_ps( inFloats2 + 0 * 12 + 0 );
        __m128 m2b0 = _mm_load_ps( inFloats2 + 0 * 12 + 4 );
        __m128 m2c0 = _mm_load_ps( inFloats2 + 0 * 12 + 8 );
        __m128 m2a1 = _mm_load_ps( inFloats2 + 1 * 12 + 0 );
        __m128 m2b1 = _mm_load_ps( inFloats2 + 1 * 12 + 4 );
        __m128 m2c1 = _mm_load_ps( inFloats2 + 1 * 12 + 8 );

        __m128 tj0 = _mm_and_ps( m1a0, mask_keep_last );
        __m128 tk0 = _mm_and_ps( m1b0, mask_keep_last );
        __m128 tl0 = _mm_and_ps( m1c0, mask_keep_last );
        __m128 tj1 = _mm_and_ps( m1a1, mask_keep_last );
        __m128 tk1 = _mm_and_ps( m1b1, mask_keep_last );
        ...
Technology comparison:

• Autovectorization. PROS: requires minimal code changes. CONS: unreliable*.
• Intel SIMD Directives. PROS: requires minimal code changes. CONS: might change program meaning; GCC mainline support missing.
• OpenCL. PROS: clear conceptual model. CONS: unreliable*; requires code restructuring.
• Vectorized libraries, e.g. MKL, Eigen. PROS: programmer doesn't need to know low-level details. CONS: work well for specific use cases.
• Cilk Plus Array Notation. PROS: mostly reliable. CONS: GCC mainline support missing.
• ISPC. PROS: mostly reliable, clear conceptual model. CONS: proper Xeon Phi support missing; requires code restructuring.

* unless significant time is invested inspecting the generated assembly and deciphering compiler messages
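For a flavor of the Cilk Plus Array Notation entry above, an array-section statement lets the compiler map a whole-array operation onto vector instructions. This is a generic illustration of mine, not code from the slides, and it needs a Cilk Plus-capable compiler such as ICC of that era:

    // Cilk Plus array notation: a[i] += b[i] for i in [0, n), expressed as one
    // array-section statement that the compiler is free to vectorize.
    void add(float *a, const float *b, int n) {
        a[0:n] += b[0:n];
    }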
Addressing modes

[diagram: array of structures (X Y Z X Y Z ...) vs structure of arrays (X X ... Y Y ... Z Z ...)]

• SSEx and AVX1 do not support strided access patterns and gather-scatter accesses, which forces the compiler to generate scalar instructions
• even when "fancy" access patterns are supported (e.g. IMCI and AVX2), a significant penalty is paid
• convert your arrays of structures to structures of arrays (see the sketch below)
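A minimal illustration of that last bullet; the struct and field names are made up for this sketch:

    // Array of structures (AoS): x, y, z of one point sit next to each other,
    // so loading "all x" is a strided access.
    struct PointAoS { float x, y, z; };
    struct PointAoS points_aos[1024];

    // Structure of arrays (SoA): each component is contiguous,
    // so vector loads of x[i..i+7] are unit-stride.
    struct PointsSoA {
        float x[1024];
        float y[1024];
        float z[1024];
    };
    struct PointsSoA points_soa;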
Memory alignment [image: cac.cornell.edu]

• unaligned memory access may generate scalar instructions for otherwise vectorizable code
• vectorized loads from unaligned memory suffer from a penalty
• align data in memory to the width of the SIMD unit
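One way to get that alignment in C, sketched under the assumption that 32 bytes (the AVX register width) is the target; C11 aligned_alloc is used here, but posix_memalign or _mm_malloc would do as well:

    #include <stdlib.h>

    // Allocate n doubles aligned to a 32-byte boundary (the AVX SIMD width).
    // Note: aligned_alloc requires the size to be a multiple of the alignment.
    double *alloc_aligned_doubles(size_t n) {
        size_t bytes = n * sizeof(double);
        size_t rounded = (bytes + 31) & ~(size_t)31;  // round up to 32 bytes
        return aligned_alloc(32, rounded);
    }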
    void foo(double *a, double *b)
    {
        for(int i = 0; i < SIZE; i++)
        {
            a[i] += b[i];
        }
    }

Assumptions: SIZE is a multiple of the vector lane width; the alignment of the arrays is unknown to the compiler.

what we would like:

    1a8:  vmovapd ymm0,YMMWORD PTR [rdi+rax*1]
    1ad:  vaddpd  ymm0,ymm0,YMMWORD PTR [rsi+rax*1]
    1b2:  vmovapd YMMWORD PTR [rdi+rax*1],ymm0
    1b7:  add     rax,0x20
    1bb:  cmp     rax,0x186a0
    1c1:  jne     1a8 <foo2+0x8>

what we actually get with autovectorization: GCC has to check whether the arrays overlap; if they do, scalar addition is performed, otherwise the loop is partially vectorized.

    0:   lea     rax,[rsi+0x20]
    4:   cmp     rdi,rax
    7:   jb      4b <foo+0x4b>
    9:   xor     eax,eax
    b:   nop     DWORD PTR [rax+rax*1+0x0]
    10:  vmovupd xmm0,XMMWORD PTR [rsi+rax*1]
    15:  vmovupd xmm1,XMMWORD PTR [rdi+rax*1]
    1a:  vinsertf128 ymm0,ymm0,XMMWORD PTR [rsi+rax*1+0x10],0x1
    22:  vinsertf128 ymm1,ymm1,XMMWORD PTR [rdi+rax*1+0x10],0x1
    2a:  vaddpd  ymm0,ymm1,ymm0
    2e:  vmovupd XMMWORD PTR [rdi+rax*1],xmm0
    33:  vextractf128 XMMWORD PTR [rdi+rax*1+0x10],ymm0,0x1
    3b:  add     rax,0x20
    3f:  cmp     rax,0x186a0
    45:  jne     10 <foo+0x10>
    47:  vzeroupper
    4a:  ret
    4b:  lea     rax,[rdi+0x20]
    4f:  cmp     rsi,rax
    52:  jae     9 <foo+0x9>
    54:  xor     eax,eax
    56:  nop     WORD PTR cs:[rax+rax*1+0x0]
    60:  vmovsd  xmm0,QWORD PTR [rdi+rax*1]
    65:  vaddsd  xmm0,xmm0,QWORD PTR [rsi+rax*1]
    6a:  vmovsd  QWORD PTR [rdi+rax*1],xmm0
    6f:  add     rax,0x8
    73:  cmp     rax,0x186a0
    79:  jne     60 <foo+0x60>
    void foo(double * restrict a, double * restrict b)
    {
        double *x = __builtin_assume_aligned(a, 16);
        double *y = __builtin_assume_aligned(b, 16);

        for(int i = 0; i < SIZE; i++)
        {
            x[i] += y[i];
        }
    }

    1a8:  vmovapd ymm0,YMMWORD PTR [rdi+rax*1]
    1ad:  vaddpd  ymm0,ymm0,YMMWORD PTR [rsi+rax*1]
    1b2:  vmovapd YMMWORD PTR [rdi+rax*1],ymm0
    1b7:  add     rax,0x20
    1bb:  cmp     rax,0x186a0
    1c1:  jne     1a8 <foo2+0x8>

GCC finally generates optimal code.

Be ready to babysit the compiler with options and directives if you want to use autovectorization.
SIMD directives in ICC help in loop nests with no dependencies and branches:

• HPC-friendly code
• Xeon Phi is well supported as advertised

Suggested pragmas:

• #pragma unroll(N)
• #pragma vector aligned
• #pragma vector nontemporal
• #pragma simd assert

    for (i=0; i<count; i++){
      for (y=1; y < height-1; y++) {
        int c = 1 + y*WIDTHP+1;
        int n = c-WIDTHP;
        int s = c+WIDTHP;
        int e = c+1;
        int w = c-1;
        int nw = n-1;
        int ne = n+1;
        int sw = s-1;
        int se = s+1;
    #pragma simd
    #pragma vector nontemporal
        for (x=1; x < width-1; x++) {
          fout[c] = diag * fin[nw] +
                    diag * fin[ne] +
                    diag * fin[sw] +
                    diag * fin[se] +
                    next * fin[w] +
                    next * fin[e] +
                    next * fin[n] +
                    next * fin[s] +
                    ctr  * fin[c];

          // increment to next location
          c++;n++;s++;e++;w++;nw++;ne++;sw++;se++;
        }
      }
      REAL *ftmp = fin;
      fin = fout;
      fout = ftmp;
    }
How GPUs solved the problem

    __global__ void mxm(float *X, float *Y){
      const int i = blockIdx.x*blockDim.x + threadIdx.x;

      if(X[i] != 0)
        X[i] = X[i] - Y[i];
      else
        Y[i] = X[i] * Y[i];
    }

Even though scalar code is generated, the hardware implicitly vectorizes it (SIMT). Instead of branching, instructions are predicated:

    /*0058*/        FSETP.NEU.AND P0, PT, R2, RZ, PT;
    /*0060*/  @!P0  FMUL R3, R2, R0;
    /*0068*/  @P0   FADD R0, R2, -R0;
    /*0070*/  @!P0  ST.E [R6], R3;
    /*0078*/  @P0   ST.E [R4], R0;
    /*0080*/        EXIT ;

Scalar loads/stores in GPUs are automatically coalesced into vector loads/stores by the hardware.

Predicated instructions are generated for simple branching constructs.
    ...
    if(...){        // PUSH
        if(...){    // PUSH
            ...
        }           // POP
    }else{
        ...
    }               // POP
    ...

Branch Synchronization Stack (BSS)

• entries consist of a mask (1 bit per lane) that determines which lane commits its results
• comes in handy for nested branching constructs

Instruction markers

• push/pop a mask on the BSS when a branch diverges into / converges from multiple execution paths

In other words, writing a vectorizing compiler for a GPU is an easier task than writing one for a CPU.
How Intel is learning from GPUs

Opmask registers are expressed using the notation "k1" through "k7".

Instructions supporting conditional vector operation are expressed by attaching the notation {k[1-7]} next to the destination operand.

E.g.: VADDPS zmm1 {k1}{z}, zmm2, zmm3
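The same masking is exposed through intrinsics. A minimal sketch of my own (not from the slides) using _mm512_mask_add_ps, which performs the addition only in lanes whose mask bit is set and keeps src elsewhere:

    #include <immintrin.h>

    // Adds b to a only in the lanes selected by mask; the other lanes keep src.
    // Requires an AVX-512 capable CPU and compiler flags (e.g. -mavx512f).
    __m512 masked_add(__m512 src, __mmask16 mask, __m512 a, __m512 b) {
        // Same spirit as: VADDPS zmm1 {k1}, zmm2, zmm3
        return _mm512_mask_add_ps(src, mask, a, b);
    }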
Let’s emulate a GPU in software

The Intel SPMD Program Compiler (ISPC) extends a C-based language with "single program, multiple data" (SPMD) constructs.

An ISPC program describes the behavior of a single program instance:

• even though a "gang" of them is in reality being executed
• gang size is usually no more than 2-4x the native SIMD width of the machine

For CUDA aficionados:

• an ISPC program is similar to a CUDA thread
• an ISPC gang is similar to a CUDA warp
Execution of an SPMD program with a gang size of 4

Observations:

• diverging control flow reduces the utilization of vector instructions
• vectorization adds masking overhead
Notes:

• a uniform variable is shared among program instances; each program instance has a private instance of a non-uniform (a.k.a. varying) variable
• export makes a function available to be called from application code
• foreach expresses a parallel computation
• the ispc function is called like any other function from the C/C++ application

simple.ispc, compiled with ispc:

    export void simple(uniform float vin[], uniform float vout[],
                       uniform int count) {
        foreach (index = 0 ... count) {
            float v = vin[index];
            if (v < 3.)
                v = v * v;
            else
                v = sqrt(v);
            vout[index] = v;
        }
    }

main.c, compiled with GCC:

    #include <stdio.h>
    #include "simple.h"

    int main() {
        float vin[16], vout[16];
        for (int i = 0; i < 16; ++i)
            vin[i] = i;
        simple(vin, vout, 16);
        for (int i = 0; i < 16; ++i)
            printf("%d: simple(%f) = %f\n", i, vin[i], vout[i]);
    }
gang size of 8:

    foreach(k = 0 ... 6){
        int i = k * 7;
        print("%\n", i);
        double* dR = P[i];
        double* dA = P[i+3];
        ...
    }

Prints [0, 7, 14, 21, 28, 35, ((42)), ((49))]; the last two values come from inactive program instances.
    export void foo(uniform float * uniform A, uniform int n){
        foreach(i = 0 ... n){
            A[i*8] *= A[i*8];
        }
    }
ISPC vs GCC xGEMM

    inline void mxm(uniform float * uniform A,
                    uniform float * uniform B,
                    uniform float * uniform C,
                    uniform int M,
                    uniform int N,
                    uniform int K,
                    uniform int nmat,
                    int idx)
    {
        for(uniform int i = 0; i < M; i++){
            for(uniform int j = 0; j < N; j++){
                float sum = 0;
                for(uniform int k = 0; k < K; k++){
                    sum += A[i*K*nmat + k*nmat + idx] * B[k*N*nmat + j*nmat + idx];
                }
                C[i*N*nmat + j*nmat + idx] = sum;
            }
        }
    }

    export void gemm(uniform float * uniform A,
                     uniform float * uniform B,
                     uniform float * uniform C,
                     uniform int M,
                     uniform int N,
                     uniform int K,
                     uniform int nmat)
    {
        foreach(i = 0 ... nmat){
            mxm(A, B, C, M, N, K, nmat, i);
        }
    }

[bar chart: xGEMM 5x5 speedup over 1000 matrices (GCC 4.8 -O3, Ivy Bridge); the two panels, ISPC vs GCC SGEMM and DGEMM, show speedups of 7.6x and 3.7x]
    export void startFilter(uniform KalmanFilter * uniform filter,
                            uniform KalmanFilterParameter * uniform param){
        foreach(i = 0 ... filter->ntracks){
            filterTrack(filter, param, i);
        }
    }

    inline void filterTrack(uniform KalmanFilter * uniform filter,
                            uniform KalmanFilterParameter * uniform param,
                            int i){
        ...
        for(uniform int h = 0; h < param->max_hit_count; h++){
            if(h >= param->hit_count[i])
                continue;
            predictState(filter, param, h, i);
            predictCovariance(filter, param, h, i);
            if(param->hits[h].is2Dim[i]){
                ...
                correctGain2D(filter, i);
                correctState2D(filter, i);
                correctCovariance2D(filter, i);
            }else{
                ...
                correctGain1D(filter, i);
                correctState1D(filter, i);
                correctCovariance1D(filter, i);
            }
        }
        ...
    }

[bar chart: Toy KalmanFilter speedup (double), Ivy Bridge; scalar 1x, ISPC 5.3x]

The same technique can be used for the Multi-Track Fitter (see S. Fleischmann's presentation).
What did we learn

Use vectorized math libraries like MKL or Eigen wherever you can.

On Xeons, use ISPC to vectorize your code:

• compatible across different compilers, architectures and operating systems

On Xeons / Xeon Phi, use SIMD directives and Cilk Plus to vectorize your code:

• if you can use the ICC compiler
• if you have "nice" loop nests (HPC-like code)
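As a flavor of the library route, a minimal Eigen sketch of my own (not from the slides): Eigen's expression templates emit SIMD code for element-wise array operations without any intrinsics in user code.

    #include <Eigen/Dense>

    // Element-wise multiply-add over whole arrays; Eigen vectorizes this
    // internally (SSE/AVX depending on compile flags such as -march=native).
    Eigen::ArrayXf fma_arrays(const Eigen::ArrayXf& a,
                              const Eigen::ArrayXf& b,
                              const Eigen::ArrayXf& c) {
        return a * b + c;
    }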