/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _UAPI__ASM_SIGCONTEXT_H
#define _UAPI__ASM_SIGCONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/types.h>

/*
 * Signal context structure - contains all info to do with the state
 * before the signal handler was invoked.
 */
struct sigcontext {
	__u64 fault_address;
	/* AArch64 registers */
	__u64 regs[31];
	__u64 sp;
	__u64 pc;
	__u64 pstate;
	/* 4K reserved for FP/SIMD state and future expansion */
	__u8 __reserved[4096] __attribute__((__aligned__(16)));
};

/*
 * Allocation of __reserved[]:
 * (Note: records do not necessarily occur in the order shown here.)
 *
 *	size		description
 *
 *	0x210		fpsimd_context
 *	 0x10		esr_context
 *	0x8a0		sve_context (vl <= 64) (optional)
 *	 0x20		extra_context (optional)
 *	 0x10		terminator (null _aarch64_ctx)
 *
 *	0x510		(reserved for future allocation)
 *
 * New records that can exceed this space need to be opt-in for userspace, so
 * that an expanded signal frame is not generated unexpectedly.  The mechanism
 * for opting in will depend on the extension that generates each new record.
 * The above table documents the maximum set and sizes of records that can be
 * generated when userspace does not opt in for any such extension.
 */
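
/*
 * Illustrative sketch (not part of this header): the sizes in the table
 * above account for the whole of __reserved[]:
 * 0x210 + 0x10 + 0x8a0 + 0x20 + 0x10 + 0x510 == 0x1000 == 4096 bytes.
 * A standalone C11 test program could check that arithmetic against the
 * structure definition with a static assertion like the one below; the
 * assertion is an example only, not part of the ABI.
 */
_Static_assert(0x210 + 0x10 + 0x8a0 + 0x20 + 0x10 + 0x510 ==
	       sizeof(((struct sigcontext *)0)->__reserved),
	       "__reserved[] allocation table must cover all 4096 bytes");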

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct _aarch64_ctx {
	__u32 magic;
	__u32 size;
};

#define FPSIMD_MAGIC	0x46508001

struct fpsimd_context {
	struct _aarch64_ctx head;
	__u32 fpsr;
	__u32 fpcr;
	__uint128_t vregs[32];
};

/* ESR_EL1 context */
#define ESR_MAGIC	0x45535201

struct esr_context {
	struct _aarch64_ctx head;
	__u64 esr;
};
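
/*
 * Illustrative sketch (not part of this header): a SA_SIGINFO signal
 * handler can walk the _aarch64_ctx chain in sigcontext.__reserved[] to
 * locate a record by magic.  The helper name find_record() is hypothetical;
 * the struct sigcontext would typically come from the mcontext of the
 * ucontext_t passed to the handler, and a standalone program would
 * #include <stddef.h> for NULL.
 */
static struct _aarch64_ctx *find_record(struct sigcontext *sc, __u32 magic)
{
	__u8 *p = sc->__reserved;
	__u8 *end = sc->__reserved + sizeof(sc->__reserved);

	while (p + sizeof(struct _aarch64_ctx) <= end) {
		struct _aarch64_ctx *head = (struct _aarch64_ctx *)p;

		if (head->magic == 0)	/* null terminator: end of records */
			break;
		if (head->magic == magic)
			return head;
		if (head->size < sizeof(*head) || head->size > (__u64)(end - p))
			break;		/* malformed record: stop walking */
		p += head->size;
	}

	return NULL;	/* not found */
}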

/*
 * extra_context: describes extra space in the signal frame for
 * additional structures that don't fit in sigcontext.__reserved[].
 *
 * Note:
 *
 * 1) fpsimd_context, esr_context and extra_context must be placed in
 * sigcontext.__reserved[] if present.  They cannot be placed in the
 * extra space.  Any other record can be placed either in the extra
 * space or in sigcontext.__reserved[], unless otherwise specified in
 * this file.
 *
 * 2) There must not be more than one extra_context.
 *
 * 3) If extra_context is present, it must be followed immediately in
 * sigcontext.__reserved[] by the terminating null _aarch64_ctx.
 *
 * 4) The extra space to which datap points must start at the first
 * 16-byte aligned address immediately after the terminating null
 * _aarch64_ctx that follows the extra_context structure in
 * __reserved[].  The extra space may overrun the end of __reserved[],
 * as indicated by a sufficiently large value for the size field.
 *
 * 5) The extra space must itself be terminated with a null
 * _aarch64_ctx.
 */
#define EXTRA_MAGIC	0x45585401

struct extra_context {
	struct _aarch64_ctx head;
	__u64 datap; /* 16-byte aligned pointer to extra space cast to __u64 */
	__u32 size; /* size in bytes of the extra space */
	__u32 __reserved[3];
};
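
/*
 * Illustrative sketch (not part of this header): once an extra_context
 * record has been located (e.g. with a walker like the find_record()
 * example above), further records are searched for in the extra space it
 * describes, which holds the same null-terminated _aarch64_ctx chain
 * (notes 3 to 5 above).  extra_data_bounds() is a hypothetical helper.
 */
static void extra_data_bounds(const struct extra_context *extra,
			      __u8 **start, __u8 **end)
{
	/* datap is a 16-byte aligned userspace address stored as __u64. */
	*start = (__u8 *)(unsigned long)extra->datap;
	*end = *start + extra->size;

	/* The same record walk used for __reserved[] applies to this range. */
}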

#define SVE_MAGIC	0x53564501

struct sve_context {
	struct _aarch64_ctx head;
	__u16 vl;
	__u16 __reserved[3];
};

#endif /* !__ASSEMBLY__ */

/*
 * The SVE architecture leaves space for future expansion of the
 * vector length beyond its initial architectural limit of 2048 bits
 * (16 quadwords).
 *
 * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
 * terminology.
 */
#define SVE_VQ_BYTES		16	/* number of bytes per quadword */

#define SVE_VQ_MIN		1
#define SVE_VQ_MAX		512

#define SVE_VL_MIN		(SVE_VQ_MIN * SVE_VQ_BYTES)
#define SVE_VL_MAX		(SVE_VQ_MAX * SVE_VQ_BYTES)

#define SVE_NUM_ZREGS		32
#define SVE_NUM_PREGS		16

#define sve_vl_valid(vl) \
	((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
#define sve_vq_from_vl(vl)	((vl) / SVE_VQ_BYTES)
#define sve_vl_from_vq(vq)	((vq) * SVE_VQ_BYTES)
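
/*
 * Illustrative sketch (not part of this header): the helpers above convert
 * between a vector length in bytes (VL) and in quadwords (VQ).  For
 * 256-bit vectors, vl == 32, so sve_vq_from_vl(32) == 2 and
 * sve_vl_from_vq(2) == 32.  vl_to_vq_checked() is a hypothetical helper,
 * usable in a standalone C program, showing the intended
 * validate-then-convert usage.
 */
static int vl_to_vq_checked(unsigned int vl, unsigned int *vq)
{
	if (!sve_vl_valid(vl))
		return -1;	/* not a multiple of 16 in [16, 8192] */

	*vq = sve_vq_from_vl(vl);
	return 0;
}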

/*
 * If the SVE registers are currently live for the thread at signal delivery,
 * sve_context.head.size >=
 *	SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl))
 * and the register data may be accessed using the SVE_SIG_*() macros.
 *
 * If sve_context.head.size <
 *	SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)),
 * the SVE registers were not live for the thread and no register data
 * is included: in this case, the SVE_SIG_*() macros should not be
 * used except for this check.
 *
 * The same convention applies when returning from a signal: a caller
 * will need to remove or resize the sve_context block if it wants to
 * make the SVE registers live when they were previously non-live or
 * vice-versa.  This may require the caller to allocate fresh
 * memory and/or move other context blocks in the signal frame.
 *
 * Changing the vector length during signal return is not permitted:
 * sve_context.vl must equal the thread's current vector length when
 * doing a sigreturn.
 *
 *
 * Note: for all these macros, the "vq" argument denotes the SVE
 * vector length in quadwords (i.e., units of 128 bits).
 *
 * The correct way to obtain vq is to use sve_vq_from_vl(vl).  The
 * result is valid if and only if sve_vl_valid(vl) is true.  This is
 * guaranteed for a struct sve_context written by the kernel.
 *
 *
 * Additional macros describe the contents and layout of the payload.
 * For each, SVE_SIG_x_OFFSET(args) is the start offset relative to
 * the start of struct sve_context, and SVE_SIG_x_SIZE(args) is the
 * size in bytes:
 *
 *	x	type				description
 *	-	----				-----------
 *	REGS					the entire SVE context
 *
 *	ZREGS	__uint128_t[SVE_NUM_ZREGS][vq]	all Z-registers
 *	ZREG	__uint128_t[vq]			individual Z-register Zn
 *
 *	PREGS	uint16_t[SVE_NUM_PREGS][vq]	all P-registers
 *	PREG	uint16_t[vq]			individual P-register Pn
 *
 *	FFR	uint16_t[vq]			first-fault status register
 *
 * Additional data might be appended in the future.
 */

#define SVE_SIG_ZREG_SIZE(vq)	((__u32)(vq) * SVE_VQ_BYTES)
#define SVE_SIG_PREG_SIZE(vq)	((__u32)(vq) * (SVE_VQ_BYTES / 8))
#define SVE_SIG_FFR_SIZE(vq)	SVE_SIG_PREG_SIZE(vq)

#define SVE_SIG_REGS_OFFSET					\
	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))	\
		/ SVE_VQ_BYTES * SVE_VQ_BYTES)

#define SVE_SIG_ZREGS_OFFSET	SVE_SIG_REGS_OFFSET
#define SVE_SIG_ZREG_OFFSET(vq, n) \
	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
#define SVE_SIG_ZREGS_SIZE(vq) \
	(SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)

#define SVE_SIG_PREGS_OFFSET(vq) \
	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
#define SVE_SIG_PREG_OFFSET(vq, n) \
	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
#define SVE_SIG_PREGS_SIZE(vq) \
	(SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))

#define SVE_SIG_FFR_OFFSET(vq) \
	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))

#define SVE_SIG_REGS_SIZE(vq) \
	(SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)

#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
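
/*
 * Illustrative sketch (not part of this header): applying the liveness
 * check documented above and copying Z-register n out of a live
 * sve_context using the SVE_SIG_*() macros.  read_zreg() is a hypothetical
 * helper; a standalone program would #include <string.h> for memcpy() and
 * pass an out[] buffer with room for at least vq __uint128_t entries.
 */
static int read_zreg(const struct sve_context *sve, unsigned int n,
		     __uint128_t *out)
{
	unsigned int vq;

	if (!sve_vl_valid(sve->vl) || n >= SVE_NUM_ZREGS)
		return -1;

	vq = sve_vq_from_vl(sve->vl);

	/* Register data is only present if the record is large enough. */
	if (sve->head.size < SVE_SIG_CONTEXT_SIZE(vq))
		return -1;	/* SVE state was not live: no payload */

	/* SVE_SIG_*_OFFSET() is relative to the start of struct sve_context. */
	memcpy(out, (const char *)sve + SVE_SIG_ZREG_OFFSET(vq, n),
	       SVE_SIG_ZREG_SIZE(vq));
	return 0;
}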


#endif /* _UAPI__ASM_SIGCONTEXT_H */