Mirror of https://github.com/minio/minio.git, synced 2025-11-09 21:49:46 -05:00
A full restructure
0    pkg/crypto/sha256/TODO     Normal file
261  pkg/crypto/sha256/asm.S    Normal file
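The Go glue that selects and invokes these transforms is not part of this diff. As a minimal sketch of how the routines defined below could be reached from Go, assuming the .S files are assembled via cgo and using a hypothetical blockAVX wrapper (names other than sha256_transform_avx are illustrative):

package sha256

// void sha256_transform_avx(void *input_data, unsigned int digest[8], unsigned long long num_blks);
import "C"

import "unsafe"

// blockAVX is a hypothetical wrapper: it feeds whole 64-byte blocks to the
// AVX transform and leaves any trailing partial block to the caller.
func blockAVX(digest *[8]uint32, p []byte) {
	n := len(p) / 64 // number of complete 64-byte blocks
	if n == 0 {
		return
	}
	C.sha256_transform_avx(
		unsafe.Pointer(&p[0]),
		(*C.uint)(unsafe.Pointer(&digest[0])),
		C.ulonglong(n),
	)
}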
@@ -0,0 +1,261 @@
#ifndef ENTRY
#define ENTRY(name) \
.globl name ; \
.align 4,0x90 ; \
name:
#endif

#ifndef END
#define END(name) \
.size name, .-name
#endif

#ifndef ENDPROC
#define ENDPROC(name) \
.type name, @function ; \
END(name)
#endif

#define NUM_INVALID 100

#define TYPE_R32 0
#define TYPE_R64 1
#define TYPE_XMM 2
#define TYPE_INVALID 100

.macro R32_NUM opd r32
\opd = NUM_INVALID
.ifc \r32,%eax
\opd = 0
.endif
.ifc \r32,%ecx
\opd = 1
.endif
.ifc \r32,%edx
\opd = 2
.endif
.ifc \r32,%ebx
\opd = 3
.endif
.ifc \r32,%esp
\opd = 4
.endif
.ifc \r32,%ebp
\opd = 5
.endif
.ifc \r32,%esi
\opd = 6
.endif
.ifc \r32,%edi
\opd = 7
.endif
#ifdef X86_64
.ifc \r32,%r8d
\opd = 8
.endif
.ifc \r32,%r9d
\opd = 9
.endif
.ifc \r32,%r10d
\opd = 10
.endif
.ifc \r32,%r11d
\opd = 11
.endif
.ifc \r32,%r12d
\opd = 12
.endif
.ifc \r32,%r13d
\opd = 13
.endif
.ifc \r32,%r14d
\opd = 14
.endif
.ifc \r32,%r15d
\opd = 15
.endif
#endif
.endm

.macro R64_NUM opd r64
\opd = NUM_INVALID
#ifdef X86_64
.ifc \r64,%rax
\opd = 0
.endif
.ifc \r64,%rcx
\opd = 1
.endif
.ifc \r64,%rdx
\opd = 2
.endif
.ifc \r64,%rbx
\opd = 3
.endif
.ifc \r64,%rsp
\opd = 4
.endif
.ifc \r64,%rbp
\opd = 5
.endif
.ifc \r64,%rsi
\opd = 6
.endif
.ifc \r64,%rdi
\opd = 7
.endif
.ifc \r64,%r8
\opd = 8
.endif
.ifc \r64,%r9
\opd = 9
.endif
.ifc \r64,%r10
\opd = 10
.endif
.ifc \r64,%r11
\opd = 11
.endif
.ifc \r64,%r12
\opd = 12
.endif
.ifc \r64,%r13
\opd = 13
.endif
.ifc \r64,%r14
\opd = 14
.endif
.ifc \r64,%r15
\opd = 15
.endif
#endif
.endm

.macro XMM_NUM opd xmm
\opd = NUM_INVALID
.ifc \xmm,%xmm0
\opd = 0
.endif
.ifc \xmm,%xmm1
\opd = 1
.endif
.ifc \xmm,%xmm2
\opd = 2
.endif
.ifc \xmm,%xmm3
\opd = 3
.endif
.ifc \xmm,%xmm4
\opd = 4
.endif
.ifc \xmm,%xmm5
\opd = 5
.endif
.ifc \xmm,%xmm6
\opd = 6
.endif
.ifc \xmm,%xmm7
\opd = 7
.endif
.ifc \xmm,%xmm8
\opd = 8
.endif
.ifc \xmm,%xmm9
\opd = 9
.endif
.ifc \xmm,%xmm10
\opd = 10
.endif
.ifc \xmm,%xmm11
\opd = 11
.endif
.ifc \xmm,%xmm12
\opd = 12
.endif
.ifc \xmm,%xmm13
\opd = 13
.endif
.ifc \xmm,%xmm14
\opd = 14
.endif
.ifc \xmm,%xmm15
\opd = 15
.endif
.endm

.macro TYPE type reg
R32_NUM reg_type_r32 \reg
R64_NUM reg_type_r64 \reg
XMM_NUM reg_type_xmm \reg
.if reg_type_r64 <> NUM_INVALID
\type = TYPE_R64
.elseif reg_type_r32 <> NUM_INVALID
\type = TYPE_R32
.elseif reg_type_xmm <> NUM_INVALID
\type = TYPE_XMM
.else
\type = TYPE_INVALID
.endif
.endm

.macro PFX_OPD_SIZE
.byte 0x66
.endm

.macro PFX_REX opd1 opd2 W=0
.if ((\opd1 | \opd2) & 8) || \W
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
.endif
.endm

.macro MODRM mod opd1 opd2
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
.endm

.macro PSHUFB_XMM xmm1 xmm2
XMM_NUM pshufb_opd1 \xmm1
XMM_NUM pshufb_opd2 \xmm2
PFX_OPD_SIZE
PFX_REX pshufb_opd1 pshufb_opd2
.byte 0x0f, 0x38, 0x00
MODRM 0xc0 pshufb_opd1 pshufb_opd2
.endm

.macro PCLMULQDQ imm8 xmm1 xmm2
XMM_NUM clmul_opd1 \xmm1
XMM_NUM clmul_opd2 \xmm2
PFX_OPD_SIZE
PFX_REX clmul_opd1 clmul_opd2
.byte 0x0f, 0x3a, 0x44
MODRM 0xc0 clmul_opd1 clmul_opd2
.byte \imm8
.endm

.macro PEXTRD imm8 xmm gpr
R32_NUM extrd_opd1 \gpr
XMM_NUM extrd_opd2 \xmm
PFX_OPD_SIZE
PFX_REX extrd_opd1 extrd_opd2
.byte 0x0f, 0x3a, 0x16
MODRM 0xc0 extrd_opd1 extrd_opd2
.byte \imm8
.endm

.macro MOVQ_R64_XMM opd1 opd2
TYPE movq_r64_xmm_opd1_type \opd1
.if movq_r64_xmm_opd1_type == TYPE_XMM
XMM_NUM movq_r64_xmm_opd1 \opd1
R64_NUM movq_r64_xmm_opd2 \opd2
.else
R64_NUM movq_r64_xmm_opd1 \opd1
XMM_NUM movq_r64_xmm_opd2 \opd2
.endif
PFX_OPD_SIZE
PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
.if movq_r64_xmm_opd1_type == TYPE_XMM
.byte 0x0f, 0x7e
.else
.byte 0x0f, 0x6e
.endif
MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
.endm
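The macros above hand-assemble SSSE3/CLMUL instructions byte by byte so that older assemblers can still build the file. As an illustration (not part of the commit), this Go sketch reproduces the PFX_REX and MODRM arithmetic; the register numbers follow the XMM_NUM table, and the printed bytes are what PSHUFB_XMM %xmm10, %xmm3 expands to:

package main

import "fmt"

// rex mirrors PFX_REX: a REX prefix is emitted only when an extended
// register (number >= 8) or a 64-bit operand (W=1) is involved.
func rex(opd1, opd2, w int) (byte, bool) {
	if (opd1|opd2)&8 != 0 || w != 0 {
		return byte(0x40 | (opd1&8)>>3 | (opd2&8)>>1 | w<<3), true
	}
	return 0, false
}

// modrm mirrors MODRM: mod bits plus r/m (opd1) and reg (opd2) fields.
func modrm(mod, opd1, opd2 int) byte {
	return byte(mod | opd1&7 | (opd2&7)<<3)
}

func main() {
	// PSHUFB_XMM %xmm10, %xmm3: opd1 = 10 (source), opd2 = 3 (destination).
	enc := []byte{0x66} // PFX_OPD_SIZE
	if r, ok := rex(10, 3, 0); ok {
		enc = append(enc, r)
	}
	enc = append(enc, 0x0f, 0x38, 0x00, modrm(0xc0, 10, 3))
	fmt.Printf("% x\n", enc) // 66 41 0f 38 00 da
}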
493  pkg/crypto/sha256/sha256-avx-asm.S    Normal file
@@ -0,0 +1,493 @@
########################################################################
# Implement fast SHA-256 with AVX1 instructions. (x86_64)
#
# Copyright (C) 2013 Intel Corporation.
#
# Authors:
# James Guilford <james.guilford@intel.com>
# Kirk Yap <kirk.s.yap@intel.com>
# Tim Chen <tim.c.chen@linux.intel.com>
#
# This software is available to you under a choice of one of two
# licenses. You may choose to be licensed under the terms of the GNU
# General Public License (GPL) Version 2, available from the file
# COPYING in the main directory of this source tree, or the
# OpenIB.org BSD license below:
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
########################################################################
#
# This code is described in an Intel White-Paper:
# "Fast SHA-256 Implementations on Intel Architecture Processors"
#
# To find it, surf to http://www.intel.com/p/en_US/embedded
# and search for that title.
#
########################################################################
# This code schedules 1 block at a time, with 4 lanes per block
########################################################################

#include "asm.S"

## assume buffers not aligned
#define VMOVDQ vmovdqu

################################ Define Macros

# addm [mem], reg
# Add reg to mem using reg-mem add and store
.macro addm p1 p2
add \p1, \p2
mov \p2, \p1
.endm


.macro MY_ROR p1 p2
shld $(32-(\p1)), \p2, \p2
.endm
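MY_ROR realises a rotate-right by n as SHLD of a register with itself by (32-n). A tiny Go check of that identity (illustrative only, not part of the commit):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Shifting a value left by (32-n) while shifting in its own top bits
	// (SHLD with both operands equal) is exactly a right rotate by n.
	x, n := uint32(0x6a09e667), 7
	fmt.Println(bits.RotateLeft32(x, 32-n) == bits.RotateLeft32(x, -n)) // true
}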
################################

# COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
# Load xmm with mem and byte swap each dword
.macro COPY_XMM_AND_BSWAP p1 p2 p3
VMOVDQ \p2, \p1
vpshufb \p3, \p1, \p1
.endm
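The byte swap matters because SHA-256 consumes the message as big-endian 32-bit words. The scalar equivalent of COPY_XMM_AND_BSWAP is simply a big-endian load, as this illustrative Go snippet shows:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// COPY_XMM_AND_BSWAP turns four little-endian dwords into big-endian
	// message words; two of them are shown here for a sample buffer.
	block := []byte{0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68}
	w0 := binary.BigEndian.Uint32(block[0:4])
	w1 := binary.BigEndian.Uint32(block[4:8])
	fmt.Printf("%08x %08x\n", w0, w1) // 61626364 65666768
}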
################################

X0 = %xmm4
X1 = %xmm5
X2 = %xmm6
X3 = %xmm7

XTMP0 = %xmm0
XTMP1 = %xmm1
XTMP2 = %xmm2
XTMP3 = %xmm3
XTMP4 = %xmm8
XFER = %xmm9
XTMP5 = %xmm11

SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA
SHUF_DC00 = %xmm12 # shuffle xDxC -> DC00
BYTE_FLIP_MASK = %xmm13

NUM_BLKS = %rdx # 3rd arg
CTX = %rsi # 2nd arg
INP = %rdi # 1st arg

SRND = %rdi # clobbers INP
c = %ecx
d = %r8d
e = %edx
TBL = %rbp
a = %eax
b = %ebx

f = %r9d
g = %r10d
h = %r11d

y0 = %r13d
y1 = %r14d
y2 = %r15d


_INP_END_SIZE = 8
_INP_SIZE = 8
_XFER_SIZE = 16
_XMM_SAVE_SIZE = 0

_INP_END = 0
_INP = _INP_END + _INP_END_SIZE
_XFER = _INP + _INP_SIZE
_XMM_SAVE = _XFER + _XFER_SIZE
STACK_SIZE = _XMM_SAVE + _XMM_SAVE_SIZE

# rotate_Xs
# Rotate values of symbols X0...X3
.macro rotate_Xs
X_ = X0
X0 = X1
X1 = X2
X2 = X3
X3 = X_
.endm

# ROTATE_ARGS
# Rotate values of symbols a...h
.macro ROTATE_ARGS
TMP_ = h
h = g
g = f
f = e
e = d
d = c
c = b
b = a
a = TMP_
.endm

.macro FOUR_ROUNDS_AND_SCHED
## compute s0 four at a time and s1 two at a time
## compute W[-16] + W[-7] 4 at a time

mov e, y0 # y0 = e
MY_ROR (25-11), y0 # y0 = e >> (25-11)
mov a, y1 # y1 = a
vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
MY_ROR (22-13), y1 # y1 = a >> (22-13)
xor e, y0 # y0 = e ^ (e >> (25-11))
mov f, y2 # y2 = f
MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
xor a, y1 # y1 = a ^ (a >> (22-13)
xor g, y2 # y2 = f^g
vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
and e, y2 # y2 = (f^g)&e
MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
## compute s0
vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
xor g, y2 # y2 = CH = ((f^g)&e)^g
MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
add y0, y2 # y2 = S1 + CH
add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
mov a, y0 # y0 = a
add y2, h # h = h + S1 + CH + k + w
mov a, y2 # y2 = a
vpsrld $7, XTMP1, XTMP2
or c, y0 # y0 = a|c
add h, d # d = d + h + S1 + CH + k + w
and c, y2 # y2 = a&c
vpslld $(32-7), XTMP1, XTMP3
and b, y0 # y0 = (a|c)&b
add y1, h # h = h + S1 + CH + k + w + S0
vpor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
ROTATE_ARGS
mov a, y1 # y1 = a
MY_ROR (25-11), y0 # y0 = e >> (25-11)
xor e, y0 # y0 = e ^ (e >> (25-11))
mov f, y2 # y2 = f
MY_ROR (22-13), y1 # y1 = a >> (22-13)
vpsrld $18, XTMP1, XTMP2 #
xor a, y1 # y1 = a ^ (a >> (22-13)
MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
xor g, y2 # y2 = f^g
vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
and e, y2 # y2 = (f^g)&e
MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
vpslld $(32-18), XTMP1, XTMP1
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
xor g, y2 # y2 = CH = ((f^g)&e)^g
vpxor XTMP1, XTMP3, XTMP3 #
add y0, y2 # y2 = S1 + CH
add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
vpxor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR
mov a, y0 # y0 = a
add y2, h # h = h + S1 + CH + k + w
mov a, y2 # y2 = a
vpxor XTMP4, XTMP3, XTMP1 # XTMP1 = s0
or c, y0 # y0 = a|c
add h, d # d = d + h + S1 + CH + k + w
and c, y2 # y2 = a&c
## compute low s1
vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
and b, y0 # y0 = (a|c)&b
add y1, h # h = h + S1 + CH + k + w + S0
vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
ROTATE_ARGS
mov e, y0 # y0 = e
mov a, y1 # y1 = a
MY_ROR (25-11), y0 # y0 = e >> (25-11)
xor e, y0 # y0 = e ^ (e >> (25-11))
MY_ROR (22-13), y1 # y1 = a >> (22-13)
mov f, y2 # y2 = f
xor a, y1 # y1 = a ^ (a >> (22-13)
MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
xor g, y2 # y2 = f^g
vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xBxA}
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
and e, y2 # y2 = (f^g)&e
vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xBxA}
MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
xor g, y2 # y2 = CH = ((f^g)&e)^g
MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
vpxor XTMP3, XTMP2, XTMP2 #
add y0, y2 # y2 = S1 + CH
MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
vpxor XTMP2, XTMP4, XTMP4 # XTMP4 = s1 {xBxA}
mov a, y0 # y0 = a
add y2, h # h = h + S1 + CH + k + w
mov a, y2 # y2 = a
vpshufb SHUF_00BA, XTMP4, XTMP4 # XTMP4 = s1 {00BA}
or c, y0 # y0 = a|c
add h, d # d = d + h + S1 + CH + k + w
and c, y2 # y2 = a&c
vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
and b, y0 # y0 = (a|c)&b
add y1, h # h = h + S1 + CH + k + w + S0
## compute high s1
vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
ROTATE_ARGS
mov e, y0 # y0 = e
MY_ROR (25-11), y0 # y0 = e >> (25-11)
mov a, y1 # y1 = a
MY_ROR (22-13), y1 # y1 = a >> (22-13)
xor e, y0 # y0 = e ^ (e >> (25-11))
mov f, y2 # y2 = f
MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC}
xor a, y1 # y1 = a ^ (a >> (22-13)
xor g, y2 # y2 = f^g
vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xDxC}
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
and e, y2 # y2 = (f^g)&e
MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xDxC}
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
xor g, y2 # y2 = CH = ((f^g)&e)^g
vpxor XTMP3, XTMP2, XTMP2
MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
add y0, y2 # y2 = S1 + CH
add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
vpxor XTMP2, XTMP5, XTMP5 # XTMP5 = s1 {xDxC}
mov a, y0 # y0 = a
add y2, h # h = h + S1 + CH + k + w
mov a, y2 # y2 = a
vpshufb SHUF_DC00, XTMP5, XTMP5 # XTMP5 = s1 {DC00}
or c, y0 # y0 = a|c
add h, d # d = d + h + S1 + CH + k + w
and c, y2 # y2 = a&c
vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]}
and b, y0 # y0 = (a|c)&b
add y1, h # h = h + S1 + CH + k + w + S0
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
ROTATE_ARGS
rotate_Xs
.endm
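FOUR_ROUNDS_AND_SCHED interleaves four rounds with the computation of the next four message-schedule words. For reference, the scalar recurrence it vectorises is the standard SHA-256 schedule; a Go rendering (a sketch, not part of the commit):

package main

import (
	"fmt"
	"math/bits"
)

// extend computes W[16..63] from the 16 message words of one block, the same
// recurrence the vector code evaluates four lanes at a time:
//   s0 = (W[t-15] ror 7) ^ (W[t-15] ror 18) ^ (W[t-15] >> 3)
//   s1 = (W[t-2] ror 17) ^ (W[t-2] ror 19) ^ (W[t-2] >> 10)
//   W[t] = W[t-16] + s0 + W[t-7] + s1
func extend(w *[64]uint32) {
	for t := 16; t < 64; t++ {
		s0 := bits.RotateLeft32(w[t-15], -7) ^ bits.RotateLeft32(w[t-15], -18) ^ (w[t-15] >> 3)
		s1 := bits.RotateLeft32(w[t-2], -17) ^ bits.RotateLeft32(w[t-2], -19) ^ (w[t-2] >> 10)
		w[t] = w[t-16] + s0 + w[t-7] + s1
	}
}

func main() {
	var w [64]uint32
	w[0] = 0x61626380 // first word of the padded block for "abc"
	w[15] = 24        // message length in bits
	extend(&w)
	fmt.Printf("%08x\n", w[16]) // 61626380
}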
## input is [rsp + _XFER + %1 * 4]
.macro DO_ROUND round
mov e, y0 # y0 = e
MY_ROR (25-11), y0 # y0 = e >> (25-11)
mov a, y1 # y1 = a
xor e, y0 # y0 = e ^ (e >> (25-11))
MY_ROR (22-13), y1 # y1 = a >> (22-13)
mov f, y2 # y2 = f
xor a, y1 # y1 = a ^ (a >> (22-13)
MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
xor g, y2 # y2 = f^g
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
and e, y2 # y2 = (f^g)&e
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
xor g, y2 # y2 = CH = ((f^g)&e)^g
add y0, y2 # y2 = S1 + CH
MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
offset = \round * 4 + _XFER #
add offset(%rsp), y2 # y2 = k + w + S1 + CH
mov a, y0 # y0 = a
add y2, h # h = h + S1 + CH + k + w
mov a, y2 # y2 = a
or c, y0 # y0 = a|c
add h, d # d = d + h + S1 + CH + k + w
and c, y2 # y2 = a&c
and b, y0 # y0 = (a|c)&b
add y1, h # h = h + S1 + CH + k + w + S0
or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c)
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
ROTATE_ARGS
.endm
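DO_ROUND performs the same round without scheduling. The S0/S1/CH/MAJ terms in its comments correspond to the scalar form below; the assembly's ((f^g)&e)^g and ((a|c)&b)|(a&c) are bit-identical to the textbook CH and MAJ used here (a sketch for comparison only):

package main

import (
	"fmt"
	"math/bits"
)

// round applies one SHA-256 round to the working variables a..h with round
// constant k and schedule word w, returning the rotated state (as ROTATE_ARGS does).
func round(a, b, c, d, e, f, g, h, k, w uint32) (uint32, uint32, uint32, uint32, uint32, uint32, uint32, uint32) {
	S1 := bits.RotateLeft32(e, -6) ^ bits.RotateLeft32(e, -11) ^ bits.RotateLeft32(e, -25)
	ch := (e & f) ^ (^e & g) // == ((f^g)&e)^g
	t1 := h + S1 + ch + k + w
	S0 := bits.RotateLeft32(a, -2) ^ bits.RotateLeft32(a, -13) ^ bits.RotateLeft32(a, -22)
	maj := (a & b) ^ (a & c) ^ (b & c) // == ((a|c)&b)|(a&c)
	t2 := S0 + maj
	return t1 + t2, a, b, c, d + t1, e, f, g
}

func main() {
	a, b, c, d, e, f, g, h := round(
		0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
		0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
		0x428a2f98, 0x61626380)
	fmt.Printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", a, b, c, d, e, f, g, h)
}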
########################################################################
## void sha256_transform_avx(void *input_data, UINT32 digest[8], UINT64 num_blks)
## arg 1 : pointer to input data
## arg 2 : pointer to digest
## arg 3 : Num blocks
########################################################################
.text
ENTRY(sha256_transform_avx)
.align 32
pushq %rbx
pushq %rbp
pushq %r13
pushq %r14
pushq %r15
pushq %r12

mov %rsp, %r12
subq $STACK_SIZE, %rsp # allocate stack space
and $~15, %rsp # align stack pointer

shl $6, NUM_BLKS # convert to bytes
jz done_hash
add INP, NUM_BLKS # pointer to end of data
mov NUM_BLKS, _INP_END(%rsp)

## load initial digest
mov 4*0(CTX), a
mov 4*1(CTX), b
mov 4*2(CTX), c
mov 4*3(CTX), d
mov 4*4(CTX), e
mov 4*5(CTX), f
mov 4*6(CTX), g
mov 4*7(CTX), h

vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
vmovdqa _SHUF_00BA(%rip), SHUF_00BA
vmovdqa _SHUF_DC00(%rip), SHUF_DC00
loop0:
lea K256(%rip), TBL

## byte swap first 16 dwords
COPY_XMM_AND_BSWAP X0, 0*16(INP), BYTE_FLIP_MASK
COPY_XMM_AND_BSWAP X1, 1*16(INP), BYTE_FLIP_MASK
COPY_XMM_AND_BSWAP X2, 2*16(INP), BYTE_FLIP_MASK
COPY_XMM_AND_BSWAP X3, 3*16(INP), BYTE_FLIP_MASK

mov INP, _INP(%rsp)

## schedule 48 input dwords, by doing 3 rounds of 16 each
mov $3, SRND
.align 16
loop1:
vpaddd (TBL), X0, XFER
vmovdqa XFER, _XFER(%rsp)
FOUR_ROUNDS_AND_SCHED

vpaddd 1*16(TBL), X0, XFER
vmovdqa XFER, _XFER(%rsp)
FOUR_ROUNDS_AND_SCHED

vpaddd 2*16(TBL), X0, XFER
vmovdqa XFER, _XFER(%rsp)
FOUR_ROUNDS_AND_SCHED

vpaddd 3*16(TBL), X0, XFER
vmovdqa XFER, _XFER(%rsp)
add $4*16, TBL
FOUR_ROUNDS_AND_SCHED

sub $1, SRND
jne loop1

mov $2, SRND
loop2:
vpaddd (TBL), X0, XFER
vmovdqa XFER, _XFER(%rsp)
DO_ROUND 0
DO_ROUND 1
DO_ROUND 2
DO_ROUND 3

vpaddd 1*16(TBL), X1, XFER
vmovdqa XFER, _XFER(%rsp)
add $2*16, TBL
DO_ROUND 0
DO_ROUND 1
DO_ROUND 2
DO_ROUND 3

vmovdqa X2, X0
vmovdqa X3, X1

sub $1, SRND
jne loop2

addm (4*0)(CTX),a
addm (4*1)(CTX),b
addm (4*2)(CTX),c
addm (4*3)(CTX),d
addm (4*4)(CTX),e
addm (4*5)(CTX),f
addm (4*6)(CTX),g
addm (4*7)(CTX),h

mov _INP(%rsp), INP
add $64, INP
cmp _INP_END(%rsp), INP
jne loop0

done_hash:

mov %r12, %rsp

popq %r12
popq %r15
popq %r14
popq %r13
popq %rbp
popq %rbx
ret
ENDPROC(sha256_transform_avx)

.data
.align 64
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2

PSHUFFLE_BYTE_FLIP_MASK:
.octa 0x0c0d0e0f08090a0b0405060700010203

# shuffle xBxA -> 00BA
_SHUF_00BA:
.octa 0xFFFFFFFFFFFFFFFF0b0a090803020100

# shuffle xDxC -> DC00
_SHUF_DC00:
.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
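K256 is the standard SHA-256 round-constant table: the first 32 bits of the fractional parts of the cube roots of the first 64 primes. A quick illustrative Go check of the leading entries against the table above:

package main

import (
	"fmt"
	"math"
)

func main() {
	// The first eight primes should reproduce the first eight K256 entries.
	primes := []float64{2, 3, 5, 7, 11, 13, 17, 19}
	for _, p := range primes {
		c := math.Cbrt(p)
		k := uint32((c - math.Floor(c)) * (1 << 32))
		fmt.Printf("%08x ", k) // 428a2f98 71374491 b5c0fbcf e9b5dba5 ...
	}
	fmt.Println()
}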
770  pkg/crypto/sha256/sha256-avx2-asm.S   Normal file
@@ -0,0 +1,770 @@
|
||||
########################################################################
|
||||
# Implement fast SHA-256 with AVX2 instructions. (x86_64)
|
||||
#
|
||||
# Copyright (C) 2013 Intel Corporation.
|
||||
#
|
||||
# Authors:
|
||||
# James Guilford <james.guilford@intel.com>
|
||||
# Kirk Yap <kirk.s.yap@intel.com>
|
||||
# Tim Chen <tim.c.chen@linux.intel.com>
|
||||
#
|
||||
# This software is available to you under a choice of one of two
|
||||
# licenses. You may choose to be licensed under the terms of the GNU
|
||||
# General Public License (GPL) Version 2, available from the file
|
||||
# COPYING in the main directory of this source tree, or the
|
||||
# OpenIB.org BSD license below:
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or
|
||||
# without modification, are permitted provided that the following
|
||||
# conditions are met:
|
||||
#
|
||||
# - Redistributions of source code must retain the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
#
|
||||
# - Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials
|
||||
# provided with the distribution.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
#
|
||||
########################################################################
|
||||
#
|
||||
# This code is described in an Intel White-Paper:
|
||||
# "Fast SHA-256 Implementations on Intel Architecture Processors"
|
||||
#
|
||||
# To find it, surf to http://www.intel.com/p/en_US/embedded
|
||||
# and search for that title.
|
||||
#
|
||||
########################################################################
|
||||
# This code schedules 2 blocks at a time, with 4 lanes per block
|
||||
########################################################################
|
||||
|
||||
#include "asm.S"
|
||||
|
||||
## assume buffers not aligned
|
||||
#define VMOVDQ vmovdqu
|
||||
|
||||
################################ Define Macros
|
||||
|
||||
# addm [mem], reg
|
||||
# Add reg to mem using reg-mem add and store
|
||||
.macro addm p1 p2
|
||||
add \p1, \p2
|
||||
mov \p2, \p1
|
||||
.endm
|
||||
|
||||
################################
|
||||
|
||||
X0 = %ymm4
|
||||
X1 = %ymm5
|
||||
X2 = %ymm6
|
||||
X3 = %ymm7
|
||||
|
||||
# XMM versions of above
|
||||
XWORD0 = %xmm4
|
||||
XWORD1 = %xmm5
|
||||
XWORD2 = %xmm6
|
||||
XWORD3 = %xmm7
|
||||
|
||||
XTMP0 = %ymm0
|
||||
XTMP1 = %ymm1
|
||||
XTMP2 = %ymm2
|
||||
XTMP3 = %ymm3
|
||||
XTMP4 = %ymm8
|
||||
XFER = %ymm9
|
||||
XTMP5 = %ymm11
|
||||
|
||||
SHUF_00BA = %ymm10 # shuffle xBxA -> 00BA
|
||||
SHUF_DC00 = %ymm12 # shuffle xDxC -> DC00
|
||||
BYTE_FLIP_MASK = %ymm13
|
||||
|
||||
X_BYTE_FLIP_MASK = %xmm13 # XMM version of BYTE_FLIP_MASK
|
||||
|
||||
NUM_BLKS = %rdx # 3rd arg
|
||||
CTX = %rsi # 2nd arg
|
||||
INP = %rdi # 1st arg
|
||||
c = %ecx
|
||||
d = %r8d
|
||||
e = %edx # clobbers NUM_BLKS
|
||||
y3 = %edi # clobbers INP
|
||||
|
||||
|
||||
TBL = %rbp
|
||||
SRND = CTX # SRND is same register as CTX
|
||||
|
||||
a = %eax
|
||||
b = %ebx
|
||||
f = %r9d
|
||||
g = %r10d
|
||||
h = %r11d
|
||||
old_h = %r11d
|
||||
|
||||
T1 = %r12d
|
||||
y0 = %r13d
|
||||
y1 = %r14d
|
||||
y2 = %r15d
|
||||
|
||||
|
||||
_XFER_SIZE = 2*64*4 # 2 blocks, 64 rounds, 4 bytes/round
|
||||
_XMM_SAVE_SIZE = 0
|
||||
_INP_END_SIZE = 8
|
||||
_INP_SIZE = 8
|
||||
_CTX_SIZE = 8
|
||||
_RSP_SIZE = 8
|
||||
|
||||
_XFER = 0
|
||||
_XMM_SAVE = _XFER + _XFER_SIZE
|
||||
_INP_END = _XMM_SAVE + _XMM_SAVE_SIZE
|
||||
_INP = _INP_END + _INP_END_SIZE
|
||||
_CTX = _INP + _INP_SIZE
|
||||
_RSP = _CTX + _CTX_SIZE
|
||||
STACK_SIZE = _RSP + _RSP_SIZE
|
||||
|
||||
# rotate_Xs
|
||||
# Rotate values of symbols X0...X3
|
||||
.macro rotate_Xs
|
||||
X_ = X0
|
||||
X0 = X1
|
||||
X1 = X2
|
||||
X2 = X3
|
||||
X3 = X_
|
||||
.endm
|
||||
|
||||
# ROTATE_ARGS
|
||||
# Rotate values of symbols a...h
|
||||
.macro ROTATE_ARGS
|
||||
old_h = h
|
||||
TMP_ = h
|
||||
h = g
|
||||
g = f
|
||||
f = e
|
||||
e = d
|
||||
d = c
|
||||
c = b
|
||||
b = a
|
||||
a = TMP_
|
||||
.endm
|
||||
|
||||
.macro FOUR_ROUNDS_AND_SCHED disp
|
||||
################################### RND N + 0 ############################
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
|
||||
addl \disp(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]# y1 = (e >> 6)# S1
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
add h, d # d = k + w + h + d # --
|
||||
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
vpsrld $7, XTMP1, XTMP2
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
vpslld $(32-7), XTMP1, XTMP3
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
vpor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7
|
||||
|
||||
vpsrld $18, XTMP1, XTMP2
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 1 ############################
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
offset = \disp + 1*4
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
|
||||
vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add h, d # d = k + w + h + d # --
|
||||
|
||||
vpslld $(32-18), XTMP1, XTMP1
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
|
||||
vpxor XTMP1, XTMP3, XTMP3
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
|
||||
vpxor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7 ^ W[-15] ror 18
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
vpxor XTMP4, XTMP3, XTMP1 # XTMP1 = s0
|
||||
vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
|
||||
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 2 ############################
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
offset = \disp + 2*4
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
|
||||
vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA}
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
mov f, y2 # y2 = f # CH
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA}
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
vpxor XTMP3, XTMP2, XTMP2
|
||||
add h, d # d = k + w + h + d # --
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
vpxor XTMP2, XTMP4, XTMP4 # XTMP4 = s1 {xBxA}
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
|
||||
vpshufb SHUF_00BA, XTMP4, XTMP4 # XTMP4 = s1 {00BA}
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a ,T1 # T1 = (a >> 2) # S0
|
||||
vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
|
||||
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1,h # h = k + w + h + S0 # --
|
||||
add y2,d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
add y2,h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
|
||||
add y3,h # h = t1 + S0 + MAJ # --
|
||||
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 3 ############################
|
||||
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
offset = \disp + 3*4
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
|
||||
vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC}
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
|
||||
vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC}
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add h, d # d = k + w + h + d # --
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
|
||||
vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC}
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
|
||||
vpxor XTMP3, XTMP2, XTMP2
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
vpxor XTMP2, XTMP5, XTMP5 # XTMP5 = s1 {xDxC}
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
vpshufb SHUF_DC00, XTMP5, XTMP5 # XTMP5 = s1 {DC00}
|
||||
|
||||
vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]}
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
ROTATE_ARGS
|
||||
rotate_Xs
|
||||
.endm
|
||||
|
||||
.macro DO_4ROUNDS disp
|
||||
################################### RND N + 0 ###########################
|
||||
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
addl \disp(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 1 ###########################
|
||||
|
||||
add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add y3, old_h # h = t1 + S0 + MAJ # --
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
offset = 4*1 + \disp
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 2 ##############################
|
||||
|
||||
add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add y3, old_h # h = t1 + S0 + MAJ # --
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
offset = 4*2 + \disp
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
################################### RND N + 3 ###########################
|
||||
|
||||
add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
mov f, y2 # y2 = f # CH
|
||||
rorx $25, e, y0 # y0 = e >> 25 # S1A
|
||||
rorx $11, e, y1 # y1 = e >> 11 # S1B
|
||||
xor g, y2 # y2 = f^g # CH
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1
|
||||
rorx $6, e, y1 # y1 = (e >> 6) # S1
|
||||
and e, y2 # y2 = (f^g)&e # CH
|
||||
add y3, old_h # h = t1 + S0 + MAJ # --
|
||||
|
||||
xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1
|
||||
rorx $13, a, T1 # T1 = a >> 13 # S0B
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
|
||||
rorx $22, a, y1 # y1 = a >> 22 # S0A
|
||||
mov a, y3 # y3 = a # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0
|
||||
rorx $2, a, T1 # T1 = (a >> 2) # S0
|
||||
offset = 4*3 + \disp
|
||||
addl offset(%rsp, SRND), h # h = k + w + h # --
|
||||
or c, y3 # y3 = a|c # MAJA
|
||||
|
||||
xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
|
||||
mov a, T1 # T1 = a # MAJB
|
||||
and b, y3 # y3 = (a|c)&b # MAJA
|
||||
and c, T1 # T1 = a&c # MAJB
|
||||
add y0, y2 # y2 = S1 + CH # --
|
||||
|
||||
|
||||
add h, d # d = k + w + h + d # --
|
||||
or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ
|
||||
add y1, h # h = k + w + h + S0 # --
|
||||
|
||||
add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
|
||||
|
||||
|
||||
add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
|
||||
|
||||
add y3, h # h = t1 + S0 + MAJ # --
|
||||
|
||||
ROTATE_ARGS
|
||||
|
||||
.endm
|
||||
|
||||
########################################################################
|
||||
## void sha256_transform_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks)
|
||||
## arg 1 : pointer to input data
|
||||
## arg 2 : pointer to digest
|
||||
## arg 3 : Num blocks
|
||||
########################################################################
|
||||
.text
|
||||
ENTRY(sha256_transform_rorx)
|
||||
.align 32
|
||||
pushq %rbx
|
||||
pushq %rbp
|
||||
pushq %r12
|
||||
pushq %r13
|
||||
pushq %r14
|
||||
pushq %r15
|
||||
|
||||
mov %rsp, %rax
|
||||
subq $STACK_SIZE, %rsp
|
||||
and $-32, %rsp # align rsp to 32 byte boundary
|
||||
mov %rax, _RSP(%rsp)
|
||||
|
||||
|
||||
shl $6, NUM_BLKS # convert to bytes
|
||||
jz done_hash
|
||||
lea -64(INP, NUM_BLKS), NUM_BLKS # pointer to last block
|
||||
mov NUM_BLKS, _INP_END(%rsp)
|
||||
|
||||
cmp NUM_BLKS, INP
|
||||
je only_one_block
|
||||
|
||||
## load initial digest
|
||||
mov (CTX), a
|
||||
mov 4*1(CTX), b
|
||||
mov 4*2(CTX), c
|
||||
mov 4*3(CTX), d
|
||||
mov 4*4(CTX), e
|
||||
mov 4*5(CTX), f
|
||||
mov 4*6(CTX), g
|
||||
mov 4*7(CTX), h
|
||||
|
||||
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
|
||||
vmovdqa _SHUF_00BA(%rip), SHUF_00BA
|
||||
vmovdqa _SHUF_DC00(%rip), SHUF_DC00
|
||||
|
||||
mov CTX, _CTX(%rsp)
|
||||
|
||||
loop0:
|
||||
lea K256(%rip), TBL
|
||||
|
||||
## Load first 16 dwords from two blocks
|
||||
VMOVDQ 0*32(INP),XTMP0
|
||||
VMOVDQ 1*32(INP),XTMP1
|
||||
VMOVDQ 2*32(INP),XTMP2
|
||||
VMOVDQ 3*32(INP),XTMP3
|
||||
|
||||
## byte swap data
|
||||
vpshufb BYTE_FLIP_MASK, XTMP0, XTMP0
|
||||
vpshufb BYTE_FLIP_MASK, XTMP1, XTMP1
|
||||
vpshufb BYTE_FLIP_MASK, XTMP2, XTMP2
|
||||
vpshufb BYTE_FLIP_MASK, XTMP3, XTMP3
|
||||
|
||||
## transpose data into high/low halves
|
||||
vperm2i128 $0x20, XTMP2, XTMP0, X0
|
||||
vperm2i128 $0x31, XTMP2, XTMP0, X1
|
||||
vperm2i128 $0x20, XTMP3, XTMP1, X2
|
||||
vperm2i128 $0x31, XTMP3, XTMP1, X3
|
||||
|
||||
last_block_enter:
|
||||
add $64, INP
|
||||
mov INP, _INP(%rsp)
|
||||
|
||||
## schedule 48 input dwords, by doing 3 rounds of 12 each
|
||||
xor SRND, SRND
|
||||
|
||||
.align 16
|
||||
loop1:
|
||||
vpaddd 0*32(TBL, SRND), X0, XFER
|
||||
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
|
||||
FOUR_ROUNDS_AND_SCHED _XFER + 0*32
|
||||
|
||||
vpaddd 1*32(TBL, SRND), X0, XFER
|
||||
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
|
||||
FOUR_ROUNDS_AND_SCHED _XFER + 1*32
|
||||
|
||||
vpaddd 2*32(TBL, SRND), X0, XFER
|
||||
vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
|
||||
FOUR_ROUNDS_AND_SCHED _XFER + 2*32
|
||||
|
||||
vpaddd 3*32(TBL, SRND), X0, XFER
|
||||
vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
|
||||
FOUR_ROUNDS_AND_SCHED _XFER + 3*32
|
||||
|
||||
add $4*32, SRND
|
||||
cmp $3*4*32, SRND
|
||||
jb loop1
|
||||
|
||||
loop2:
|
||||
## Do last 16 rounds with no scheduling
|
||||
vpaddd 0*32(TBL, SRND), X0, XFER
|
||||
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
|
||||
DO_4ROUNDS _XFER + 0*32
|
||||
vpaddd 1*32(TBL, SRND), X1, XFER
|
||||
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
|
||||
DO_4ROUNDS _XFER + 1*32
|
||||
add $2*32, SRND
|
||||
|
||||
vmovdqa X2, X0
|
||||
vmovdqa X3, X1
|
||||
|
||||
cmp $4*4*32, SRND
|
||||
jb loop2
|
||||
|
||||
mov _CTX(%rsp), CTX
|
||||
mov _INP(%rsp), INP
|
||||
|
||||
addm (4*0)(CTX),a
|
||||
addm (4*1)(CTX),b
|
||||
addm (4*2)(CTX),c
|
||||
addm (4*3)(CTX),d
|
||||
addm (4*4)(CTX),e
|
||||
addm (4*5)(CTX),f
|
||||
addm (4*6)(CTX),g
|
||||
addm (4*7)(CTX),h
|
||||
|
||||
cmp _INP_END(%rsp), INP
|
||||
ja done_hash
|
||||
|
||||
#### Do second block using previously scheduled results
|
||||
xor SRND, SRND
|
||||
.align 16
|
||||
loop3:
|
||||
DO_4ROUNDS _XFER + 0*32 + 16
|
||||
DO_4ROUNDS _XFER + 1*32 + 16
|
||||
add $2*32, SRND
|
||||
cmp $4*4*32, SRND
|
||||
jb loop3
|
||||
|
||||
mov _CTX(%rsp), CTX
|
||||
mov _INP(%rsp), INP
|
||||
add $64, INP
|
||||
|
||||
addm (4*0)(CTX),a
|
||||
addm (4*1)(CTX),b
|
||||
addm (4*2)(CTX),c
|
||||
addm (4*3)(CTX),d
|
||||
addm (4*4)(CTX),e
|
||||
addm (4*5)(CTX),f
|
||||
addm (4*6)(CTX),g
|
||||
addm (4*7)(CTX),h
|
||||
|
||||
cmp _INP_END(%rsp), INP
|
||||
jb loop0
|
||||
ja done_hash
|
||||
|
||||
do_last_block:
|
||||
#### do last block
|
||||
lea K256(%rip), TBL
|
||||
|
||||
VMOVDQ 0*16(INP),XWORD0
|
||||
VMOVDQ 1*16(INP),XWORD1
|
||||
VMOVDQ 2*16(INP),XWORD2
|
||||
VMOVDQ 3*16(INP),XWORD3
|
||||
|
||||
vpshufb X_BYTE_FLIP_MASK, XWORD0, XWORD0
|
||||
vpshufb X_BYTE_FLIP_MASK, XWORD1, XWORD1
|
||||
vpshufb X_BYTE_FLIP_MASK, XWORD2, XWORD2
|
||||
vpshufb X_BYTE_FLIP_MASK, XWORD3, XWORD3
|
||||
|
||||
jmp last_block_enter
|
||||
|
||||
only_one_block:
|
||||
|
||||
## load initial digest
|
||||
mov (4*0)(CTX),a
|
||||
mov (4*1)(CTX),b
|
||||
mov (4*2)(CTX),c
|
||||
mov (4*3)(CTX),d
|
||||
mov (4*4)(CTX),e
|
||||
mov (4*5)(CTX),f
|
||||
mov (4*6)(CTX),g
|
||||
mov (4*7)(CTX),h
|
||||
|
||||
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
|
||||
vmovdqa _SHUF_00BA(%rip), SHUF_00BA
|
||||
vmovdqa _SHUF_DC00(%rip), SHUF_DC00
|
||||
|
||||
mov CTX, _CTX(%rsp)
|
||||
jmp do_last_block
|
||||
|
||||
done_hash:
|
||||
|
||||
mov _RSP(%rsp), %rsp
|
||||
|
||||
popq %r15
|
||||
popq %r14
|
||||
popq %r13
|
||||
popq %r12
|
||||
popq %rbp
|
||||
popq %rbx
|
||||
ret
|
||||
ENDPROC(sha256_transform_rorx)
|
||||
|
||||
.data
|
||||
.align 64
|
||||
K256:
|
||||
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
|
||||
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
|
||||
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
|
||||
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
|
||||
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
|
||||
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
|
||||
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
|
||||
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
|
||||
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
|
||||
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
|
||||
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
|
||||
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
|
||||
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
|
||||
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
|
||||
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
|
||||
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
|
||||
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
|
||||
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
|
||||
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
|
||||
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
|
||||
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
|
||||
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
|
||||
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
|
||||
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
|
||||
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
|
||||
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
|
||||
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
|
||||
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
|
||||
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
|
||||
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
|
||||
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
|
||||
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
|
||||
|
||||
PSHUFFLE_BYTE_FLIP_MASK:
|
||||
.octa 0x0c0d0e0f08090a0b0405060700010203,0x0c0d0e0f08090a0b0405060700010203
|
||||
|
||||
# shuffle xBxA -> 00BA
|
||||
_SHUF_00BA:
|
||||
.octa 0xFFFFFFFFFFFFFFFF0b0a090803020100,0xFFFFFFFFFFFFFFFF0b0a090803020100
|
||||
|
||||
# shuffle xDxC -> DC00
|
||||
_SHUF_DC00:
|
||||
.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF
|
||||
506  pkg/crypto/sha256/sha256-ssse3-asm.S  Normal file
@@ -0,0 +1,506 @@
|
||||
########################################################################
|
||||
# Implement fast SHA-256 with SSSE3 instructions. (x86_64)
|
||||
#
|
||||
# Copyright (C) 2013 Intel Corporation.
|
||||
#
|
||||
# Authors:
|
||||
# James Guilford <james.guilford@intel.com>
|
||||
# Kirk Yap <kirk.s.yap@intel.com>
|
||||
# Tim Chen <tim.c.chen@linux.intel.com>
|
||||
#
|
||||
# This software is available to you under a choice of one of two
|
||||
# licenses. You may choose to be licensed under the terms of the GNU
|
||||
# General Public License (GPL) Version 2, available from the file
|
||||
# COPYING in the main directory of this source tree, or the
|
||||
# OpenIB.org BSD license below:
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or
|
||||
# without modification, are permitted provided that the following
|
||||
# conditions are met:
|
||||
#
|
||||
# - Redistributions of source code must retain the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
#
|
||||
# - Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials
|
||||
# provided with the distribution.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
#
|
||||
########################################################################
|
||||
#
|
||||
# This code is described in an Intel White-Paper:
|
||||
# "Fast SHA-256 Implementations on Intel Architecture Processors"
|
||||
#
|
||||
# To find it, surf to http://www.intel.com/p/en_US/embedded
|
||||
# and search for that title.
|
||||
#
|
||||
########################################################################
|
||||
|
||||
#include "asm.S"
|
||||
|
||||
## assume buffers not aligned
|
||||
#define MOVDQ movdqu
|
||||
|
||||
################################ Define Macros
|
||||
|
||||
# addm [mem], reg
|
||||
# Add reg to mem using reg-mem add and store
|
||||
.macro addm p1 p2
|
||||
add \p1, \p2
|
||||
mov \p2, \p1
|
||||
.endm
|
||||
|
||||
################################
|
||||
|
||||
# COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
|
||||
# Load xmm with mem and byte swap each dword
|
||||
.macro COPY_XMM_AND_BSWAP p1 p2 p3
|
||||
MOVDQ \p2, \p1
|
||||
pshufb \p3, \p1
|
||||
.endm
|
||||
|
||||
################################
|
||||
|
||||
X0 = %xmm4
|
||||
X1 = %xmm5
|
||||
X2 = %xmm6
|
||||
X3 = %xmm7
|
||||
|
||||
XTMP0 = %xmm0
|
||||
XTMP1 = %xmm1
|
||||
XTMP2 = %xmm2
|
||||
XTMP3 = %xmm3
|
||||
XTMP4 = %xmm8
|
||||
XFER = %xmm9
|
||||
|
||||
SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA
|
||||
SHUF_DC00 = %xmm11 # shuffle xDxC -> DC00
|
||||
BYTE_FLIP_MASK = %xmm12
|
||||
|
||||
NUM_BLKS = %rdx # 3rd arg
|
||||
CTX = %rsi # 2nd arg
|
||||
INP = %rdi # 1st arg
|
||||
|
||||
SRND = %rdi # clobbers INP
|
||||
c = %ecx
|
||||
d = %r8d
|
||||
e = %edx
|
||||
TBL = %rbp
|
||||
a = %eax
|
||||
b = %ebx
|
||||
|
||||
f = %r9d
|
||||
g = %r10d
|
||||
h = %r11d
|
||||
|
||||
y0 = %r13d
|
||||
y1 = %r14d
|
||||
y2 = %r15d
|
||||
|
||||
|
||||
|
||||
_INP_END_SIZE = 8
|
||||
_INP_SIZE = 8
|
||||
_XFER_SIZE = 16
|
||||
_XMM_SAVE_SIZE = 0
|
||||
|
||||
_INP_END = 0
|
||||
_INP = _INP_END + _INP_END_SIZE
|
||||
_XFER = _INP + _INP_SIZE
|
||||
_XMM_SAVE = _XFER + _XFER_SIZE
|
||||
STACK_SIZE = _XMM_SAVE + _XMM_SAVE_SIZE
|
||||
|
||||
# rotate_Xs
|
||||
# Rotate values of symbols X0...X3
|
||||
.macro rotate_Xs
|
||||
X_ = X0
|
||||
X0 = X1
|
||||
X1 = X2
|
||||
X2 = X3
|
||||
X3 = X_
|
||||
.endm
|
||||
|
||||
# ROTATE_ARGS
|
||||
# Rotate values of symbols a...h
|
||||
.macro ROTATE_ARGS
|
||||
TMP_ = h
|
||||
h = g
|
||||
g = f
|
||||
f = e
|
||||
e = d
|
||||
d = c
|
||||
c = b
|
||||
b = a
|
||||
a = TMP_
|
||||
.endm
|
||||
|
||||
.macro FOUR_ROUNDS_AND_SCHED
|
||||
## compute s0 four at a time and s1 two at a time
|
||||
## compute W[-16] + W[-7] 4 at a time
|
||||
movdqa X3, XTMP0
|
||||
mov e, y0 # y0 = e
|
||||
ror $(25-11), y0 # y0 = e >> (25-11)
|
||||
mov a, y1 # y1 = a
|
||||
palignr $4, X2, XTMP0 # XTMP0 = W[-7]
|
||||
ror $(22-13), y1 # y1 = a >> (22-13)
|
||||
xor e, y0 # y0 = e ^ (e >> (25-11))
|
||||
mov f, y2 # y2 = f
|
||||
ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
|
||||
movdqa X1, XTMP1
|
||||
xor a, y1 # y1 = a ^ (a >> (22-13)
|
||||
xor g, y2 # y2 = f^g
|
||||
paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
|
||||
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
|
||||
and e, y2 # y2 = (f^g)&e
|
||||
ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
|
||||
## compute s0
|
||||
palignr $4, X0, XTMP1 # XTMP1 = W[-15]
|
||||
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
|
||||
ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
|
||||
xor g, y2 # y2 = CH = ((f^g)&e)^g
|
||||
movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
|
||||
ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
add y0, y2 # y2 = S1 + CH
add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
mov a, y0 # y0 = a
add y2, h # h = h + S1 + CH + k + w
mov a, y2 # y2 = a
pslld $(32-7), XTMP1 #
or c, y0 # y0 = a|c
add h, d # d = d + h + S1 + CH + k + w
and c, y2 # y2 = a&c
psrld $7, XTMP2 #
and b, y0 # y0 = (a|c)&b
add y1, h # h = h + S1 + CH + k + w + S0
por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7
or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
#
ROTATE_ARGS #
movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
mov e, y0 # y0 = e
mov a, y1 # y1 = a
movdqa XTMP3, XTMP4 # XTMP4 = W[-15]
ror $(25-11), y0 # y0 = e >> (25-11)
xor e, y0 # y0 = e ^ (e >> (25-11))
mov f, y2 # y2 = f
ror $(22-13), y1 # y1 = a >> (22-13)
pslld $(32-18), XTMP3 #
xor a, y1 # y1 = a ^ (a >> (22-13))
ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
xor g, y2 # y2 = f^g
psrld $18, XTMP2 #
ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
and e, y2 # y2 = (f^g)&e
ror $6, y0 # y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
pxor XTMP3, XTMP1
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
xor g, y2 # y2 = CH = ((f^g)&e)^g
psrld $3, XTMP4 # XTMP4 = W[-15] >> 3
add y0, y2 # y2 = S1 + CH
add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
pxor XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 ^ W[-15] ror 18
mov a, y0 # y0 = a
add y2, h # h = h + S1 + CH + k + w
mov a, y2 # y2 = a
pxor XTMP4, XTMP1 # XTMP1 = s0
or c, y0 # y0 = a|c
add h, d # d = d + h + S1 + CH + k + w
and c, y2 # y2 = a&c
## compute low s1
pshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
and b, y0 # y0 = (a|c)&b
add y1, h # h = h + S1 + CH + k + w + S0
paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ

ROTATE_ARGS
movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {BBAA}
mov e, y0 # y0 = e
mov a, y1 # y1 = a
ror $(25-11), y0 # y0 = e >> (25-11)
movdqa XTMP2, XTMP4 # XTMP4 = W[-2] {BBAA}
xor e, y0 # y0 = e ^ (e >> (25-11))
ror $(22-13), y1 # y1 = a >> (22-13)
mov f, y2 # y2 = f
xor a, y1 # y1 = a ^ (a >> (22-13))
ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA}
xor g, y2 # y2 = f^g
psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA}
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
and e, y2 # y2 = (f^g)&e
psrld $10, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
xor g, y2 # y2 = CH = ((f^g)&e)^g
ror $6, y0 # y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
pxor XTMP3, XTMP2
add y0, y2 # y2 = S1 + CH
ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
pxor XTMP2, XTMP4 # XTMP4 = s1 {xBxA}
mov a, y0 # y0 = a
add y2, h # h = h + S1 + CH + k + w
mov a, y2 # y2 = a
pshufb SHUF_00BA, XTMP4 # XTMP4 = s1 {00BA}
or c, y0 # y0 = a|c
add h, d # d = d + h + S1 + CH + k + w
and c, y2 # y2 = a&c
paddd XTMP4, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
and b, y0 # y0 = (a|c)&b
add y1, h # h = h + S1 + CH + k + w + S0
## compute high s1
pshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {BBAA}
or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
#
ROTATE_ARGS #
movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {DDCC}
mov e, y0 # y0 = e
ror $(25-11), y0 # y0 = e >> (25-11)
mov a, y1 # y1 = a
movdqa XTMP2, X0 # X0 = W[-2] {DDCC}
ror $(22-13), y1 # y1 = a >> (22-13)
xor e, y0 # y0 = e ^ (e >> (25-11))
mov f, y2 # y2 = f
ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC}
xor a, y1 # y1 = a ^ (a >> (22-13))
xor g, y2 # y2 = f^g
psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC}
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
and e, y2 # y2 = (f^g)&e
ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
psrld $10, X0 # X0 = W[-2] >> 10 {DDCC}
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
ror $6, y0 # y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
xor g, y2 # y2 = CH = ((f^g)&e)^g
pxor XTMP3, XTMP2 #
ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
add y0, y2 # y2 = S1 + CH
add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
pxor XTMP2, X0 # X0 = s1 {xDxC}
mov a, y0 # y0 = a
add y2, h # h = h + S1 + CH + k + w
mov a, y2 # y2 = a
pshufb SHUF_DC00, X0 # X0 = s1 {DC00}
or c, y0 # y0 = a|c
add h, d # d = d + h + S1 + CH + k + w
and c, y2 # y2 = a&c
paddd XTMP0, X0 # X0 = {W[3], W[2], W[1], W[0]}
and b, y0 # y0 = (a|c)&b
add y1, h # h = h + S1 + CH + k + w + S0
or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ

ROTATE_ARGS
rotate_Xs
.endm
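For reference, the quantities named in the register comments above (and in DO_ROUND below) are the standard SHA-256 functions from FIPS 180-4. The staged `ror`/`xor` sequences simply build each triple-rotate incrementally, which is why intermediate comments show shift amounts like (25-11) and (11-6). Spelled out:

% SHA-256 round and message-schedule functions referenced by the comments
\begin{aligned}
S0 &= \Sigma_0(a) = \mathrm{ROTR}^{2}(a) \oplus \mathrm{ROTR}^{13}(a) \oplus \mathrm{ROTR}^{22}(a)\\
S1 &= \Sigma_1(e) = \mathrm{ROTR}^{6}(e) \oplus \mathrm{ROTR}^{11}(e) \oplus \mathrm{ROTR}^{25}(e)\\
CH &= \mathrm{Ch}(e,f,g) = (e \wedge f) \oplus (\lnot e \wedge g) = ((f \oplus g) \wedge e) \oplus g\\
MAJ &= \mathrm{Maj}(a,b,c) = (a \wedge b) \oplus (a \wedge c) \oplus (b \wedge c) = ((a \vee c) \wedge b) \vee (a \wedge c)\\
s0 &= \sigma_0(w) = \mathrm{ROTR}^{7}(w) \oplus \mathrm{ROTR}^{18}(w) \oplus \mathrm{SHR}^{3}(w)\\
s1 &= \sigma_1(w) = \mathrm{ROTR}^{17}(w) \oplus \mathrm{ROTR}^{19}(w) \oplus \mathrm{SHR}^{10}(w)\\
W_t &= \sigma_1(W_{t-2}) + W_{t-7} + \sigma_0(W_{t-15}) + W_{t-16} \pmod{2^{32}}
\end{aligned}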

## input is [rsp + _XFER + %1 * 4]
.macro DO_ROUND round
mov e, y0 # y0 = e
ror $(25-11), y0 # y0 = e >> (25-11)
mov a, y1 # y1 = a
xor e, y0 # y0 = e ^ (e >> (25-11))
ror $(22-13), y1 # y1 = a >> (22-13)
mov f, y2 # y2 = f
xor a, y1 # y1 = a ^ (a >> (22-13))
ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
xor g, y2 # y2 = f^g
xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
and e, y2 # y2 = (f^g)&e
xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
ror $6, y0 # y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
xor g, y2 # y2 = CH = ((f^g)&e)^g
add y0, y2 # y2 = S1 + CH
ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
offset = \round * 4 + _XFER
add offset(%rsp), y2 # y2 = k + w + S1 + CH
mov a, y0 # y0 = a
add y2, h # h = h + S1 + CH + k + w
mov a, y2 # y2 = a
or c, y0 # y0 = a|c
add h, d # d = d + h + S1 + CH + k + w
and c, y2 # y2 = a&c
and b, y0 # y0 = (a|c)&b
add y1, h # h = h + S1 + CH + k + w + S0
or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
ROTATE_ARGS
.endm

########################################################################
## void sha256_transform_ssse3(void *input_data, UINT32 digest[8], UINT64 num_blks)
## arg 1 : pointer to input data
## arg 2 : pointer to digest
## arg 3 : Num blocks
########################################################################
.text
ENTRY(sha256_transform_ssse3)
.align 32
pushq %rbx
pushq %rbp
pushq %r13
pushq %r14
pushq %r15
pushq %r12

mov %rsp, %r12
subq $STACK_SIZE, %rsp
and $~15, %rsp

shl $6, NUM_BLKS # convert to bytes
jz done_hash
add INP, NUM_BLKS
mov NUM_BLKS, _INP_END(%rsp) # pointer to end of data

## load initial digest
mov 4*0(CTX), a
mov 4*1(CTX), b
mov 4*2(CTX), c
mov 4*3(CTX), d
mov 4*4(CTX), e
mov 4*5(CTX), f
mov 4*6(CTX), g
mov 4*7(CTX), h

movdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
movdqa _SHUF_00BA(%rip), SHUF_00BA
movdqa _SHUF_DC00(%rip), SHUF_DC00

loop0:
lea K256(%rip), TBL

## byte swap first 16 dwords
COPY_XMM_AND_BSWAP X0, 0*16(INP), BYTE_FLIP_MASK
COPY_XMM_AND_BSWAP X1, 1*16(INP), BYTE_FLIP_MASK
COPY_XMM_AND_BSWAP X2, 2*16(INP), BYTE_FLIP_MASK
COPY_XMM_AND_BSWAP X3, 3*16(INP), BYTE_FLIP_MASK

mov INP, _INP(%rsp)

## schedule 48 input dwords, by doing 3 rounds of 16 each
mov $3, SRND
.align 16
loop1:
movdqa (TBL), XFER
paddd X0, XFER
movdqa XFER, _XFER(%rsp)
FOUR_ROUNDS_AND_SCHED

movdqa 1*16(TBL), XFER
paddd X0, XFER
movdqa XFER, _XFER(%rsp)
FOUR_ROUNDS_AND_SCHED

movdqa 2*16(TBL), XFER
paddd X0, XFER
movdqa XFER, _XFER(%rsp)
FOUR_ROUNDS_AND_SCHED

movdqa 3*16(TBL), XFER
paddd X0, XFER
movdqa XFER, _XFER(%rsp)
add $4*16, TBL
FOUR_ROUNDS_AND_SCHED

sub $1, SRND
jne loop1

mov $2, SRND
loop2:
paddd (TBL), X0
movdqa X0, _XFER(%rsp)
DO_ROUND 0
DO_ROUND 1
DO_ROUND 2
DO_ROUND 3
paddd 1*16(TBL), X1
movdqa X1, _XFER(%rsp)
add $2*16, TBL
DO_ROUND 0
DO_ROUND 1
DO_ROUND 2
DO_ROUND 3

movdqa X2, X0
movdqa X3, X1

sub $1, SRND
jne loop2

addm (4*0)(CTX),a
addm (4*1)(CTX),b
addm (4*2)(CTX),c
addm (4*3)(CTX),d
addm (4*4)(CTX),e
addm (4*5)(CTX),f
addm (4*6)(CTX),g
addm (4*7)(CTX),h

mov _INP(%rsp), INP
add $64, INP
cmp _INP_END(%rsp), INP
jne loop0

done_hash:

mov %r12, %rsp

popq %r12
popq %r15
popq %r14
popq %r13
popq %rbp
popq %rbx

ret
ENDPROC(sha256_transform_ssse3)
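ENDPROC closes the transform. As a reading aid, here is a minimal pure-Go sketch of the same per-block computation: byte-swap 16 dwords, extend the message schedule with s0/s1, run 64 rounds (one DO_ROUND each), and fold the working variables back into the digest (the addm macro), once per 64-byte block as in loop0. It is illustrative only; the package and function names are not part of this commit.

// Reference sketch of what sha256_transform_ssse3 computes per block.
package sha256ref

import "encoding/binary"

var k = [64]uint32{
	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
}

// transform processes len(p)/64 complete blocks, mirroring the
// loop0/loop1/loop2 structure of the assembly routine.
func transform(digest *[8]uint32, p []byte) {
	rotr := func(x uint32, n uint) uint32 { return x>>n | x<<(32-n) }
	for len(p) >= 64 {
		var w [64]uint32
		for i := 0; i < 16; i++ { // byte swap first 16 dwords
			w[i] = binary.BigEndian.Uint32(p[4*i:])
		}
		for i := 16; i < 64; i++ { // message schedule (s0, s1)
			s0 := rotr(w[i-15], 7) ^ rotr(w[i-15], 18) ^ w[i-15]>>3
			s1 := rotr(w[i-2], 17) ^ rotr(w[i-2], 19) ^ w[i-2]>>10
			w[i] = w[i-16] + s0 + w[i-7] + s1
		}
		a, b, c, d, e, f, g, h := digest[0], digest[1], digest[2], digest[3], digest[4], digest[5], digest[6], digest[7]
		for i := 0; i < 64; i++ { // one DO_ROUND per iteration
			S1 := rotr(e, 6) ^ rotr(e, 11) ^ rotr(e, 25)
			ch := (e & f) ^ (^e & g)
			t1 := h + S1 + ch + k[i] + w[i]
			S0 := rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22)
			maj := (a & b) ^ (a & c) ^ (b & c)
			t2 := S0 + maj
			h, g, f, e, d, c, b, a = g, f, e, d+t1, c, b, a, t1+t2 // ROTATE_ARGS
		}
		digest[0] += a // addm: fold working variables back into the digest
		digest[1] += b
		digest[2] += c
		digest[3] += d
		digest[4] += e
		digest[5] += f
		digest[6] += g
		digest[7] += h
		p = p[64:]
	}
}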

.data
.align 64
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2

PSHUFFLE_BYTE_FLIP_MASK:
.octa 0x0c0d0e0f08090a0b0405060700010203

# shuffle xBxA -> 00BA
_SHUF_00BA:
.octa 0xFFFFFFFFFFFFFFFF0b0a090803020100

# shuffle xDxC -> DC00
_SHUF_DC00:
.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
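That ends asm.S. The file exports a single routine with the argument layout documented above ENTRY: input pointer, 8-dword digest, block count. Below is a hedged sketch of how a cgo caller might drive it once the object file is linked into the package; the wrapper name blockSSSE3 is hypothetical and the prototype simply mirrors that comment block.

package sha256

// void sha256_transform_ssse3(void *input_data, unsigned int digest[8], unsigned long long num_blks);
import "C"

import "unsafe"

// blockSSSE3 is a hypothetical wrapper: it hashes len(p)/64 complete
// 64-byte blocks from p into digest via the assembly routine.
func blockSSSE3(digest *[8]uint32, p []byte) {
	blocks := uint64(len(p) / 64)
	if blocks == 0 {
		return // nothing to do; the routine itself also branches to done_hash on zero
	}
	C.sha256_transform_ssse3(
		unsafe.Pointer(&p[0]),                 // arg 1: pointer to input data
		(*C.uint)(unsafe.Pointer(&digest[0])), // arg 2: pointer to digest
		C.ulonglong(blocks),                   // arg 3: number of 64-byte blocks
	)
}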
59
pkg/crypto/sha256/sha256.go
Normal file
@@ -0,0 +1,59 @@
// +build amd64

package sha256

// #include <stdint.h>
// void sha256_transform_avx (uint8_t *input_data, uint32_t digest[8], uint64_t num_blks);
// void sha256_transform_ssse3 (uint8_t *input_data, uint32_t digest[8], uint64_t num_blks);
// void sha256_transform_rorx (uint8_t *input_data, uint32_t digest[8], uint64_t num_blks);
// #define SHA256_DIGEST_SIZE 32
// #define SHA256_BLOCK_SIZE 64
// #define SHA256_H0 0x6a09e667UL
// #define SHA256_H1 0xbb67ae85UL
// #define SHA256_H2 0x3c6ef372UL
// #define SHA256_H3 0xa54ff53aUL
// #define SHA256_H4 0x510e527fUL
// #define SHA256_H5 0x9b05688cUL
// #define SHA256_H6 0x1f83d9abUL
// #define SHA256_H7 0x5be0cd19UL
import "C"
import (
	gosha256 "crypto/sha256"
	"io"
)

/*
func Sha256(buffer []byte) ([]uint32, error) {

	if cpu.HasSSE41() {
		C.sha256_transform_ssse3()
		return 0, nil
	}

	if cpu.HasAVX() {
		C.sha256_transform_avx()
		return 0, nil
	}

	if cpu.HasAVX2() {
		C.sha256_transform_rorx()
		return 0, nil
	}
}
*/

func Sum(reader io.Reader) ([]byte, error) {
	hash := gosha256.New()
	var err error
	for err == nil {
		length := 0
		byteBuffer := make([]byte, 1024*1024)
		length, err = reader.Read(byteBuffer)
		byteBuffer = byteBuffer[0:length]
		hash.Write(byteBuffer)
	}
	if err != io.EOF {
		return nil, err
	}
	return hash.Sum(nil), nil
}
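Sum streams the reader through the standard library's crypto/sha256 in 1 MiB chunks and returns the raw 32-byte digest. A small usage sketch follows; the import path assumes this repository layout and the file path is illustrative.

// Example: hash a file with Sum and print the hex digest.
package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"os"

	"github.com/minio/minio/pkg/crypto/sha256"
)

func main() {
	f, err := os.Open("testdata/object.bin") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	digest, err := sha256.Sum(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hex.EncodeToString(digest)) // 64 hex characters
}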
23
pkg/crypto/sha256/sha256_test.go
Normal file
@@ -0,0 +1,23 @@
package sha256

import (
	"bytes"
	"encoding/hex"
	"testing"

	. "gopkg.in/check.v1"
)

func Test(t *testing.T) { TestingT(t) }

type MySuite struct{}

var _ = Suite(&MySuite{})

func (s *MySuite) TestSha256Stream(c *C) {
	testString := []byte("Test string")
	expectedHash, _ := hex.DecodeString("a3e49d843df13c2e2a7786f6ecd7e0d184f45d718d1ac1a8a63e570466e489dd")
	hash, err := Sum(bytes.NewBuffer(testString))
	c.Assert(err, IsNil)
	c.Assert(bytes.Equal(expectedHash, hash), Equals, true)
}