/*
 * Copyright (c) 2017-2018 Douglas Gilbert.
 * All rights reserved.
 * Use of this source code is governed by a BSD-style
 * license that can be found in the BSD_LICENSE file.
 *
 * The code to use the NVMe Management Interface (MI) SES pass-through
 * was provided by WDC in November 2017.
 */

/*
 * Copyright 2017, Western Digital Corporation
 *
 * Written by Berck Nash
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the BSD_LICENSE file.
 *
 * Based on the NVM-Express command line utility, which bore the following
 * notice:
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 *                   MA 02110-1301, USA.
 */

/* sg_pt_linux_nvme version 1.04 20180115 */


#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#define __STDC_FORMAT_MACROS 1
#include <inttypes.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>      /* to define 'major' */
#ifndef major
#include <sys/types.h>
#endif


#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <linux/major.h>

#include "sg_pt.h"
#include "sg_lib.h"
#include "sg_linux_inc.h"
#include "sg_pt_linux.h"
#include "sg_unaligned.h"

#define SCSI_INQUIRY_OPC     0x12
#define SCSI_REPORT_LUNS_OPC 0xa0
#define SCSI_TEST_UNIT_READY_OPC  0x0
#define SCSI_REQUEST_SENSE_OPC  0x3
#define SCSI_SEND_DIAGNOSTIC_OPC  0x1d
#define SCSI_RECEIVE_DIAGNOSTIC_OPC  0x1c
#define SCSI_MAINT_IN_OPC  0xa3
#define SCSI_REP_SUP_OPCS_OPC  0xc
#define SCSI_REP_SUP_TMFS_OPC  0xd

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define LOW_POWER_COND_ON_ASC  0x5e     /* ASCQ=0 */
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16


static inline bool is_aligned(const void * pointer, size_t byte_count)
{
    return ((sg_uintptr_t)pointer % byte_count) == 0;
}


#if defined(__GNUC__) || defined(__clang__)
static int pr2ws(const char * fmt, ...)
        __attribute__ ((format (printf, 1, 2)));
#else
static int pr2ws(const char * fmt, ...);
#endif


static int
pr2ws(const char * fmt, ...)
{
    va_list args;
    int n;

    va_start(args, fmt);
    n = vfprintf(sg_warnings_strm ? sg_warnings_strm : stderr, fmt, args);
    va_end(args);
    return n;
}

#if (HAVE_NVME && (! IGNORE_NVME))

/* This trims the given NVMe block device name in Linux (e.g.
 * /dev/nvme0n1p5) to the name of its associated char device (e.g.
 * /dev/nvme0). On success true is returned and the char device name is
 * placed in 'b' (as long as b_len is sufficient). Otherwise false is
 * returned. */
bool
sg_get_nvme_char_devname(const char * nvme_block_devname, uint32_t b_len,
                         char * b)
{
    uint32_t n, tlen;
    const char * cp;
    char buff[8];

    if ((NULL == b) || (b_len < 5))
        return false;   /* degenerate cases */
    cp = strstr(nvme_block_devname, "nvme");
    if (NULL == cp)
        return false;   /* expected to find "nvme" in given name */
    if (1 != sscanf(cp, "nvme%u", &n))
        return false;   /* didn't find valid "nvme<number>" */
    snprintf(buff, sizeof(buff), "%u", n);
    tlen = (cp - nvme_block_devname) + 4 + strlen(buff);
    if ((tlen + 1) > b_len)
        return false;           /* b isn't long enough to fit output */
    memcpy(b, nvme_block_devname, tlen);
    b[tlen] = '\0';
    return true;
}

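/* Example (illustrative only, not called by this file): passing
 * "/dev/nvme0n1p5" in nvme_block_devname yields "/dev/nvme0" in 'b' and a
 * true return; a name that does not contain "nvme<number>" returns false
 * and leaves 'b' unchanged.
 *
 *     char cdev[32];
 *
 *     if (sg_get_nvme_char_devname("/dev/nvme0n1p5", sizeof(cdev), cdev))
 *         printf("char device: %s\n", cdev);      // prints /dev/nvme0
 */
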
static void
build_sense_buffer(bool desc, uint8_t *buf, uint8_t skey, uint8_t asc,
                   uint8_t ascq)
{
    if (desc) {
        buf[0] = 0x72;  /* descriptor, current */
        buf[1] = skey;
        buf[2] = asc;
        buf[3] = ascq;
        buf[7] = 0;
    } else {
        buf[0] = 0x70;  /* fixed, current */
        buf[2] = skey;
        buf[7] = 0xa;   /* Assumes length is 18 bytes */
        buf[12] = asc;
        buf[13] = ascq;
    }
}

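/* For reference: with desc=false, skey=0x5 (ILLEGAL REQUEST), asc=0x24 and
 * ascq=0x0 the fixed format response starts:
 *     70 00 05 00 00 00 00 0a 00 00 00 00 24 00 ...
 * while with desc=true the descriptor format response starts:
 *     72 05 24 00 00 00 00 00
 * Callers here memset() the buffer to zero before calling this function,
 * which supplies the bytes not set above. */
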
/* Builds a CHECK CONDITION response with the given sense key, asc and ascq
 * in the buffer pointed to by ptp->io_hdr.response (fixed or descriptor
 * format depending on ptp->scsi_dsense) */
static void
mk_sense_asc_ascq(struct sg_pt_linux_scsi * ptp, int sk, int asc, int ascq,
                  int vb)
{
    bool dsense = ptp->scsi_dsense;
    int n;
    uint8_t * sbp = (uint8_t *)ptp->io_hdr.response;

    ptp->io_hdr.device_status = SAM_STAT_CHECK_CONDITION;
    n = ptp->io_hdr.max_response_len;
    if ((n < 8) || ((! dsense) && (n < 14))) {
        if (vb)
            pr2ws("%s: max_response_len=%d too short, want 14 or more\n",
                  __func__, n);
        return;
    } else
        ptp->io_hdr.response_len = dsense ? 8 : ((n < 18) ? n : 18);
    memset(sbp, 0, n);
    build_sense_buffer(dsense, sbp, sk, asc, ascq);
    if (vb > 3)
        pr2ws("%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n", __func__, sk,
              asc, ascq);
}

static void
mk_sense_from_nvme_status(struct sg_pt_linux_scsi * ptp, int vb)
{
    bool ok;
    bool dsense = ptp->scsi_dsense;
    int n;
    uint8_t sstatus, sk, asc, ascq;
    uint8_t * sbp = (uint8_t *)ptp->io_hdr.response;

    ok = sg_nvme_status2scsi(ptp->nvme_status, &sstatus, &sk, &asc, &ascq);
    if (! ok) { /* can't find a mapping to a SCSI error, so ... */
        sstatus = SAM_STAT_CHECK_CONDITION;
        sk = SPC_SK_ILLEGAL_REQUEST;
        asc = 0xb;
        ascq = 0x0;     /* asc: "WARNING" purposely vague */
    }

    ptp->io_hdr.device_status = sstatus;
    n = ptp->io_hdr.max_response_len;
    if ((n < 8) || ((! dsense) && (n < 14))) {
        pr2ws("%s: sense_len=%d too short, want 14 or more\n", __func__, n);
        return;
    } else
        ptp->io_hdr.response_len = (dsense ? 8 : ((n < 18) ? n : 18));
    memset(sbp, 0, n);
    build_sense_buffer(dsense, sbp, sk, asc, ascq);
    if (vb > 3)
        pr2ws("%s: [status, sense_key,asc,ascq]: [0x%x, 0x%x,0x%x,0x%x]\n",
              __func__, sstatus, sk, asc, ascq);
}

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void
mk_sense_invalid_fld(struct sg_pt_linux_scsi * ptp, bool in_cdb, int in_byte,
                     int in_bit, int vb)
{
    bool dsense = ptp->scsi_dsense;
    int sl, asc, n;
    uint8_t * sbp = (uint8_t *)ptp->io_hdr.response;
    uint8_t sks[4];

    ptp->io_hdr.device_status = SAM_STAT_CHECK_CONDITION;
    asc = in_cdb ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
    n = ptp->io_hdr.max_response_len;
    if ((n < 8) || ((! dsense) && (n < 14))) {
        if (vb)
            pr2ws("%s: max_response_len=%d too short, want 14 or more\n",
                  __func__, n);
        return;
    } else
        ptp->io_hdr.response_len = dsense ? 8 : ((n < 18) ? n : 18);
    memset(sbp, 0, n);
    build_sense_buffer(dsense, sbp, SPC_SK_ILLEGAL_REQUEST, asc, 0);
    memset(sks, 0, sizeof(sks));
    sks[0] = 0x80;
    if (in_cdb)
        sks[0] |= 0x40;
    if (in_bit >= 0) {
        sks[0] |= 0x8;
        sks[0] |= (0x7 & in_bit);
    }
    sg_put_unaligned_be16(in_byte, sks + 1);
    if (dsense) {
        sl = sbp[7] + 8;
        sbp[7] = sl;
        sbp[sl] = 0x2;
        sbp[sl + 1] = 0x6;
        memcpy(sbp + sl + 4, sks, 3);
    } else
        memcpy(sbp + 15, sks, 3);
    if (vb > 3)
        pr2ws("%s:  [sense_key,asc,ascq]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
              __func__, asc, in_cdb ? 'C' : 'D', in_byte, in_bit);
}

/* Returns 0 for success. Returns SG_LIB_NVME_STATUS if there is a non-zero
 * NVMe status (from the completion queue) with the value placed in
 * ptp->nvme_status. If the ioctl yields a Unix error then the negated value
 * is returned (equivalent to -errno from basic Unix system functions like
 * open()). CDW0 from the completion queue is placed in ptp->nvme_result in
 * the absence of a Unix error. If time_secs is negative it is treated as
 * a timeout in milliseconds (of abs(time_secs) ). */
static int
do_nvme_admin_cmd(struct sg_pt_linux_scsi * ptp,
                  struct sg_nvme_passthru_cmd *cmdp, void * dp, bool is_read,
                  int time_secs, int vb)
{
    const uint32_t cmd_len = sizeof(struct sg_nvme_passthru_cmd);
    int res;
    uint32_t n;
    uint16_t sct_sc;
    const uint8_t * up = ((const uint8_t *)cmdp) + SG_NVME_PT_OPCODE;

    cmdp->timeout_ms = (time_secs < 0) ? (-time_secs) : (1000 * time_secs);
    ptp->os_err = 0;
    if (vb > 2) {
        pr2ws("NVMe command:\n");
        hex2stderr((const uint8_t *)cmdp, cmd_len, 1);
        if ((vb > 3) && (! is_read) && dp) {
            uint32_t len = sg_get_unaligned_le32(up + SG_NVME_PT_DATA_LEN);

            if (len > 0) {
                n = len;
                if ((len < 512) || (vb > 5))
                    pr2ws("\nData-out buffer (%u bytes):\n", n);
                else {
                    pr2ws("\nData-out buffer (first 512 of %u bytes):\n", n);
                    n = 512;
                }
                hex2stderr((const uint8_t *)dp, n, 0);
            }
        }
    }
    res = ioctl(ptp->dev_fd, NVME_IOCTL_ADMIN_CMD, cmdp);
    if (res < 0) {  /* OS error (errno negated) */
        ptp->os_err = -res;
        if (vb > 1) {
            pr2ws("%s: ioctl opcode=0x%x failed: %s "
                  "(errno=%d)\n", __func__, *up, strerror(-res), -res);
        }
        return res;
    }

    /* Now res contains NVMe completion queue CDW3 31:17 (15 bits) */
    ptp->nvme_result = cmdp->result;
    if (ptp->nvme_direct && ptp->io_hdr.response &&
        (ptp->io_hdr.max_response_len > 3)) {
        /* build 16 byte "sense" buffer */
        uint8_t * sbp = (uint8_t *)ptp->io_hdr.response;
        uint16_t st = (uint16_t)res;

        n = ptp->io_hdr.max_response_len;
        n = (n < 16) ? n : 16;
        memset(sbp, 0, n);
        ptp->io_hdr.response_len = n;
        sg_put_unaligned_le32(cmdp->result,
                              sbp + SG_NVME_PT_CQ_RESULT);
        if (n > 15) /* LSBit will be 0 (Phase bit) after (st << 1) */
            sg_put_unaligned_le16(st << 1, sbp + SG_NVME_PT_CQ_STATUS_P);
    }
    /* clear upper bits (DNR and More) leaving ((SCT << 8) | SC) */
    sct_sc = 0x3ff & res;
    ptp->nvme_status = sct_sc;
    if (sct_sc) {  /* when non-zero, treat as command error */
        if (vb > 1) {
            char b[80];

            pr2ws("%s: ioctl opcode=0x%x failed: NVMe status: %s [0x%x]\n",
                  __func__, *up,
                  sg_get_nvme_cmd_status_str(sct_sc, sizeof(b), b), sct_sc);
        }
        return SG_LIB_NVME_STATUS;      /* == SCSI_PT_DO_NVME_STATUS */
    }
    if ((vb > 3) && is_read && dp) {
        uint32_t len = sg_get_unaligned_le32(up + SG_NVME_PT_DATA_LEN);

        if (len > 0) {
            n = len;
            if ((len < 1024) || (vb > 5))
                pr2ws("\nData-in buffer (%u bytes):\n", n);
            else {
                pr2ws("\nData-in buffer (first 1024 of %u bytes):\n", n);
                n = 1024;
            }
            hex2stderr((const uint8_t *)dp, n, 0);
        }
    }
    return 0;
}

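/* Note on the NVME_IOCTL_ADMIN_CMD return value used above: a negative
 * value is a negated errno, while a non-negative value is the 15 bit
 * Status Field from completion queue doubleword 3 (bits 31:17) with the
 * phase bit already stripped; the Status Code occupies its lowest 8 bits
 * and the Status Code Type starts at bit 8, with the More and DNR bits
 * above those. */
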
/* Returns 0 on success; otherwise SG_LIB_NVME_STATUS or a negated errno
 * value (e.g. -ENOMEM) is returned */
static int
sntl_cache_identity(struct sg_pt_linux_scsi * ptp, int time_secs, int vb)
{
    struct sg_nvme_passthru_cmd cmd;
    uint32_t pg_sz = sg_get_page_size();
    uint8_t * up;

    up = sg_memalign(pg_sz, pg_sz, &ptp->free_nvme_id_ctlp, vb > 3);
    ptp->nvme_id_ctlp = up;
    if (NULL == up) {
        pr2ws("%s: sg_memalign() failed to get memory\n", __func__);
        return -ENOMEM;
    }
    memset(&cmd, 0, sizeof(cmd));
    cmd.opcode = 0x6;   /* Identify */
    cmd.cdw10 = 0x1;    /* CNS=0x1 Identify controller */
    cmd.addr = (uint64_t)(sg_uintptr_t)ptp->nvme_id_ctlp;
    cmd.data_len = pg_sz;
    return do_nvme_admin_cmd(ptp, &cmd, up, true, time_secs, vb);
}

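/* Byte offsets into the cached Identify controller response relied upon
 * below (per the NVMe Identify Controller data structure): Serial Number
 * at bytes 4..23, Model Number at bytes 24..63, Firmware Revision at
 * bytes 64..71 and Number of Namespaces (NN) at bytes 516..519. */
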
static const char * nvme_scsi_vendor_str = "NVMe    ";
static const uint16_t inq_resp_len = 36;

static int
sntl_inq(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp, int time_secs,
         int vb)
{
    bool evpd;
    bool cp_id_ctl = false;
    int res;
    uint16_t n, alloc_len, pg_cd;
    uint32_t pg_sz = sg_get_page_size();
    uint8_t * nvme_id_ns = NULL;
    uint8_t * free_nvme_id_ns = NULL;
    uint8_t inq_dout[256];

    if (vb > 3)
        pr2ws("%s: time_secs=%d\n", __func__, time_secs);

    if (0x2 & cdbp[1]) {        /* Reject CmdDt=1 */
        mk_sense_invalid_fld(ptp, true, 1, 1, vb);
        return 0;
    }
    if (NULL == ptp->nvme_id_ctlp) {
        res = sntl_cache_identity(ptp, time_secs, vb);
        if (SG_LIB_NVME_STATUS == res) {
            mk_sense_from_nvme_status(ptp, vb);
            return 0;
        } else if (res) /* should be negative errno */
            return res;
    }
    memset(inq_dout, 0, sizeof(inq_dout));
    alloc_len = sg_get_unaligned_be16(cdbp + 3);
    evpd = !!(0x1 & cdbp[1]);
    pg_cd = cdbp[2];
    if (evpd) {         /* VPD page responses */
        switch (pg_cd) {
        case 0:
            /* inq_dout[0] = (PQ=0)<<5 | (PDT=0); prefer pdt=0xd --> SES */
            inq_dout[1] = pg_cd;
            n = 8;
            sg_put_unaligned_be16(n - 4, inq_dout + 2);
            inq_dout[4] = 0x0;
            inq_dout[5] = 0x80;
            inq_dout[6] = 0x83;
            inq_dout[n - 1] = 0xde;     /* last VPD number */
            break;
        case 0x80:
            /* inq_dout[0] = (PQ=0)<<5 | (PDT=0); prefer pdt=0xd --> SES */
            inq_dout[1] = pg_cd;
            sg_put_unaligned_be16(20, inq_dout + 2);
            memcpy(inq_dout + 4, ptp->nvme_id_ctlp + 4, 20);    /* SN */
            n = 24;
            break;
        case 0x83:
            if ((ptp->nvme_nsid > 0) &&
                (ptp->nvme_nsid < SG_NVME_BROADCAST_NSID)) {
                nvme_id_ns = sg_memalign(pg_sz, pg_sz, &free_nvme_id_ns,
                                         vb > 3);
                if (nvme_id_ns) {
                    struct sg_nvme_passthru_cmd cmd;

                    memset(&cmd, 0, sizeof(cmd));
                    cmd.opcode = 0x6;   /* Identify */
                    cmd.nsid = ptp->nvme_nsid;
                    cmd.cdw10 = 0x0;    /* CNS=0x0 Identify namespace */
                    cmd.addr = (uint64_t)(sg_uintptr_t)nvme_id_ns;
                    cmd.data_len = pg_sz;
                    res = do_nvme_admin_cmd(ptp, &cmd, nvme_id_ns, true,
                                            time_secs, vb > 3);
                    if (res) {
                        free(free_nvme_id_ns);
                        free_nvme_id_ns = NULL;
                        nvme_id_ns = NULL;
                    }
                }
            }
            n = sg_make_vpd_devid_for_nvme(ptp->nvme_id_ctlp, nvme_id_ns,
                                           0 /* pdt */, -1 /*tproto */,
                                           inq_dout, sizeof(inq_dout));
            if (n > 3)
                sg_put_unaligned_be16(n - 4, inq_dout + 2);
            if (free_nvme_id_ns) {
                free(free_nvme_id_ns);
                free_nvme_id_ns = NULL;
                nvme_id_ns = NULL;
            }
            break;
        case 0xde:
            inq_dout[1] = pg_cd;
            sg_put_unaligned_be16((16 + 4096) - 4, inq_dout + 2);
            n = 16 + 4096;
            cp_id_ctl = true;
            break;
        default:        /* Point to page_code field in cdb */
            mk_sense_invalid_fld(ptp, true, 2, 7, vb);
            return 0;
        }
        if (alloc_len > 0) {
            n = (alloc_len < n) ? alloc_len : n;
            n = (n < ptp->io_hdr.din_xfer_len) ? n : ptp->io_hdr.din_xfer_len;
            ptp->io_hdr.din_resid = ptp->io_hdr.din_xfer_len - n;
            if (n > 0) {
                if (cp_id_ctl) {
                    memcpy((uint8_t *)ptp->io_hdr.din_xferp, inq_dout,
                           (n < 16 ? n : 16));
                    if (n > 16)
                        memcpy((uint8_t *)ptp->io_hdr.din_xferp + 16,
                               ptp->nvme_id_ctlp, n - 16);
                } else
                    memcpy((uint8_t *)ptp->io_hdr.din_xferp, inq_dout, n);
            }
        }
    } else {            /* Standard INQUIRY response */
        /* inq_dout[0] = (PQ=0)<<5 | (PDT=0); pdt=0 --> SBC; 0xd --> SES */
        inq_dout[2] = 6;   /* version: SPC-4 */
        inq_dout[3] = 2;   /* NORMACA=0, HISUP=0, response data format: 2 */
        inq_dout[4] = 31;  /* so response length is (or could be) 36 bytes */
        inq_dout[6] = 0x40;   /* ENCSERV=1 */
        inq_dout[7] = 0x2;    /* CMDQUE=1 */
        memcpy(inq_dout + 8, nvme_scsi_vendor_str, 8);  /* NVMe not Intel */
        memcpy(inq_dout + 16, ptp->nvme_id_ctlp + 24, 16); /* Prod <-- MN */
        memcpy(inq_dout + 32, ptp->nvme_id_ctlp + 64, 4);  /* Rev <-- FR */
        if (alloc_len > 0) {
            n = (alloc_len < inq_resp_len) ? alloc_len : inq_resp_len;
            n = (n < ptp->io_hdr.din_xfer_len) ? n : ptp->io_hdr.din_xfer_len;
            ptp->io_hdr.din_resid = ptp->io_hdr.din_xfer_len - n;
            if (n > 0)
                memcpy((uint8_t *)ptp->io_hdr.din_xferp, inq_dout, n);
        }
    }
    return 0;
}

static int
sntl_rluns(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp, int time_secs,
           int vb)
{
    int res;
    uint16_t sel_report;
    uint32_t alloc_len, k, n, num, max_nsid;
    uint8_t * rl_doutp;
    uint8_t * up;

    if (vb > 3)
        pr2ws("%s: time_secs=%d\n", __func__, time_secs);

    sel_report = cdbp[2];
    alloc_len = sg_get_unaligned_be32(cdbp + 6);
    if (NULL == ptp->nvme_id_ctlp) {
        res = sntl_cache_identity(ptp, time_secs, vb);
        if (SG_LIB_NVME_STATUS == res) {
            mk_sense_from_nvme_status(ptp, vb);
            return 0;
        } else if (res)
            return res;
    }
    max_nsid = sg_get_unaligned_le32(ptp->nvme_id_ctlp + 516);
    switch (sel_report) {
    case 0:
    case 2:
        num = max_nsid;
        break;
    case 1:
    case 0x10:
    case 0x12:
        num = 0;
        break;
    case 0x11:
        num = (1 == ptp->nvme_nsid) ? max_nsid : 0;
        break;
    default:
        if (vb > 1)
            pr2ws("%s: bad select_report value: 0x%x\n", __func__,
                  sel_report);
        mk_sense_invalid_fld(ptp, true, 2, 7, vb);
        return 0;
    }
    rl_doutp = (uint8_t *)calloc(num + 1, 8);
    if (NULL == rl_doutp) {
        pr2ws("%s: calloc() failed to get memory\n", __func__);
        return -ENOMEM;
    }
    for (k = 0, up = rl_doutp + 8; k < num; ++k, up += 8)
        sg_put_unaligned_be16(k, up);
    n = num * 8;
    sg_put_unaligned_be32(n, rl_doutp);
    n += 8;
    if (alloc_len > 0) {
        n = (alloc_len < n) ? alloc_len : n;
        n = (n < ptp->io_hdr.din_xfer_len) ? n : ptp->io_hdr.din_xfer_len;
        ptp->io_hdr.din_resid = ptp->io_hdr.din_xfer_len - n;
        if (n > 0)
            memcpy((uint8_t *)ptp->io_hdr.din_xferp, rl_doutp, n);
    }
    res = 0;
    free(rl_doutp);
    return res;
}

static int
sntl_tur(struct sg_pt_linux_scsi * ptp, int time_secs, int vb)
{
    int res;
    uint32_t pow_state;
    struct sg_nvme_passthru_cmd cmd;

    if (vb > 4)
        pr2ws("%s: time_secs=%d\n", __func__, time_secs);
    if (NULL == ptp->nvme_id_ctlp) {
        res = sntl_cache_identity(ptp, time_secs, vb);
        if (SG_LIB_NVME_STATUS == res) {
            mk_sense_from_nvme_status(ptp, vb);
            return 0;
        } else if (res)
            return res;
    }
    memset(&cmd, 0, sizeof(cmd));
    cmd.opcode = 0xa;   /* Get feature */
    cmd.nsid = SG_NVME_BROADCAST_NSID;
    cmd.cdw10 = 0x2;    /* SEL=0 (current), Feature=2 Power Management */
    cmd.timeout_ms = (time_secs < 0) ? 0 : (1000 * time_secs);
    res = do_nvme_admin_cmd(ptp, &cmd, NULL, false, time_secs, vb);
    if (0 != res) {
        if (SG_LIB_NVME_STATUS == res) {
            mk_sense_from_nvme_status(ptp, vb);
            return 0;
        } else
            return res;
    } else {
        ptp->os_err = 0;
        ptp->nvme_status = 0;
    }
    pow_state = (0x1f & ptp->nvme_result);
    if (vb > 3)
        pr2ws("%s: pow_state=%u\n", __func__, pow_state);
#if 0   /* pow_state bounces around too much on laptop */
    if (pow_state)
        mk_sense_asc_ascq(ptp, SPC_SK_NOT_READY, LOW_POWER_COND_ON_ASC, 0,
                          vb);
#endif
    return 0;
}

static int
sntl_req_sense(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
               int time_secs, int vb)
{
    bool desc;
    int res;
    uint32_t pow_state, alloc_len, n;
    struct sg_nvme_passthru_cmd cmd;
    uint8_t rs_dout[64];

    if (vb > 3)
        pr2ws("%s: time_secs=%d\n", __func__, time_secs);
    if (NULL == ptp->nvme_id_ctlp) {
        res = sntl_cache_identity(ptp, time_secs, vb);
        if (SG_LIB_NVME_STATUS == res) {
            mk_sense_from_nvme_status(ptp, vb);
            return 0;
        } else if (res)
            return res;
    }
    desc = !!(0x1 & cdbp[1]);
    alloc_len = cdbp[4];
    memset(&cmd, 0, sizeof(cmd));
    cmd.opcode = 0xa;   /* Get feature */
    cmd.nsid = SG_NVME_BROADCAST_NSID;
    cmd.cdw10 = 0x2;    /* SEL=0 (current), Feature=2 Power Management */
    cmd.timeout_ms = (time_secs < 0) ? 0 : (1000 * time_secs);
    res = do_nvme_admin_cmd(ptp, &cmd, NULL, false, time_secs, vb);
    if (0 != res) {
        if (SG_LIB_NVME_STATUS == res) {
            mk_sense_from_nvme_status(ptp, vb);
            return 0;
        } else
            return res;
    } else {
        ptp->os_err = 0;
        ptp->nvme_status = 0;
    }
    ptp->io_hdr.response_len = 0;
    pow_state = (0x1f & ptp->nvme_result);
    if (vb > 3)
        pr2ws("%s: pow_state=%u\n", __func__, pow_state);
    memset(rs_dout, 0, sizeof(rs_dout));
    if (pow_state)
        build_sense_buffer(desc, rs_dout, SPC_SK_NO_SENSE,
                           LOW_POWER_COND_ON_ASC, 0);
    else
        build_sense_buffer(desc, rs_dout, SPC_SK_NO_SENSE,
                           NO_ADDITIONAL_SENSE, 0);
    n = desc ? 8 : 18;
    n = (n < alloc_len) ? n : alloc_len;
    n = (n < ptp->io_hdr.din_xfer_len) ? n : ptp->io_hdr.din_xfer_len;
    ptp->io_hdr.din_resid = ptp->io_hdr.din_xfer_len - n;
    if (n > 0)
        memcpy((uint8_t *)ptp->io_hdr.din_xferp, rs_dout, n);
    return 0;
}

/* This is not really a SNTL. For SCSI SEND DIAGNOSTIC(PF=1) NVMe-MI
 * has a special command (SES Send) to tunnel pages through to an
 * enclosure. The NVMe enclosure is meant to understand the SES
 * (SCSI Enclosure Services) use of diagnostic pages that are
 * related to SES. */
static int
sntl_senddiag(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
              int time_secs, int vb)
{
    bool pf, self_test;
    int res;
    uint8_t st_cd, dpg_cd;
    uint32_t alloc_len, n, dout_len, dpg_len, nvme_dst;
    uint32_t pg_sz = sg_get_page_size();
    uint8_t * dop;
    struct sg_nvme_passthru_cmd cmd;
    uint8_t * cmd_up = (uint8_t *)&cmd;

    st_cd = 0x7 & (cdbp[1] >> 5);
    self_test = !! (0x4 & cdbp[1]);
    pf = !! (0x10 & cdbp[1]);
    if (vb > 3)
        pr2ws("%s: pf=%d, self_test=%d (st_code=%d)\n", __func__, (int)pf,
              (int)self_test, (int)st_cd);
    if (self_test || st_cd) {
        memset(cmd_up, 0, sizeof(cmd));
        cmd_up[SG_NVME_PT_OPCODE] = 0x14;   /* Device self-test */
        /* just this namespace (if there is one) and controller */
        sg_put_unaligned_le32(ptp->nvme_nsid, cmd_up + SG_NVME_PT_NSID);
        switch (st_cd) {
        case 0: /* Here if self_test is set, do short self-test */
        case 1: /* Background short */
        case 5: /* Foreground short */
            nvme_dst = 1;
            break;
        case 2: /* Background extended */
        case 6: /* Foreground extended */
            nvme_dst = 2;
            break;
        case 4: /* Abort self-test */
            nvme_dst = 0xf;
            break;
        default:
            pr2ws("%s: bad self-test code [0x%x]\n", __func__, st_cd);
            mk_sense_invalid_fld(ptp, true, 1, 7, vb);
            return 0;
        }
        sg_put_unaligned_le32(nvme_dst, cmd_up + SG_NVME_PT_CDW10);
        res = do_nvme_admin_cmd(ptp, &cmd, NULL, false, time_secs, vb);
        if (0 != res) {
            if (SG_LIB_NVME_STATUS == res) {
                mk_sense_from_nvme_status(ptp, vb);
                return 0;
            } else
                return res;
        }
    }
    alloc_len = sg_get_unaligned_be16(cdbp + 3); /* parameter list length */
    dout_len = ptp->io_hdr.dout_xfer_len;
    if (pf) {
        if (0 == alloc_len) {
            mk_sense_invalid_fld(ptp, true, 3, 7, vb);
            if (vb)
                pr2ws("%s: PF bit set but param_list_len=0\n", __func__);
            return 0;
        }
    } else {    /* PF bit clear */
        if (alloc_len) {
            mk_sense_invalid_fld(ptp, true, 3, 7, vb);
            if (vb)
                pr2ws("%s: param_list_len>0 but PF clear\n", __func__);
            return 0;
        }
        if (dout_len > 0) {
            if (vb)
                pr2ws("%s: dout given but PF clear\n", __func__);
            return SCSI_PT_DO_BAD_PARAMS;
        }
        return 0;     /* nothing to do */
    }
    if (dout_len < 4) {
        if (vb)
            pr2ws("%s: dout length (%u bytes) too short\n", __func__,
                  dout_len);
        return SCSI_PT_DO_BAD_PARAMS;
    }
    n = dout_len;
    n = (n < alloc_len) ? n : alloc_len;
    dop = (uint8_t *)ptp->io_hdr.dout_xferp;
    if (! is_aligned(dop, pg_sz)) {  /* caller best use sg_memalign(,pg_sz) */
        if (vb)
            pr2ws("%s: dout [0x%" PRIx64 "] not page aligned\n", __func__,
                  (uint64_t)ptp->io_hdr.dout_xferp);
        return SCSI_PT_DO_BAD_PARAMS;
    }
    dpg_cd = dop[0];
    dpg_len = sg_get_unaligned_be16(dop + 2) + 4;
    /* should we allow for more than one D_PG in dout ?? */
    n = (n < dpg_len) ? n : dpg_len;    /* not yet ... */

    if (vb)
        pr2ws("%s: passing through d_pg=0x%x, len=%u to NVME_MI SES send\n",
              __func__, dpg_cd, dpg_len);
    memset(&cmd, 0, sizeof(cmd));
    cmd.opcode = 0x1d;  /* MI send; hmmm same opcode as SEND DIAG */
    cmd.addr = (uint64_t)(sg_uintptr_t)dop;
    cmd.data_len = 0x1000;   /* NVMe 4k page size. Maybe determine this? */
                             /* dout_len > 0x1000, is this a problem?? */
    cmd.cdw10 = 0x0804;      /* NVMe Message Header */
    cmd.cdw11 = 0x9;         /* nvme_mi_ses_send; (0x8 -> mi_ses_recv) */
    cmd.cdw13 = n;
    res = do_nvme_admin_cmd(ptp, &cmd, dop, false, time_secs, vb);
    if (0 != res) {
        if (SG_LIB_NVME_STATUS == res) {
            mk_sense_from_nvme_status(ptp, vb);
            return 0;
        }
    }
    return res;
}

/* This is not really a SNTL. For SCSI RECEIVE DIAGNOSTIC RESULTS(PCV=1)
 * NVMe-MI has a special command (SES Receive) to read pages through a
 * tunnel from an enclosure. The NVMe enclosure is meant to understand the
 * SES (SCSI Enclosure Services) use of diagnostic pages that are
 * related to SES. */
static int
sntl_recvdiag(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
              int time_secs, int vb)
{
    bool pcv;
    int res;
    uint8_t dpg_cd;
    uint32_t alloc_len, n, din_len;
    uint32_t pg_sz = sg_get_page_size();
    uint8_t * dip;
    struct sg_nvme_passthru_cmd cmd;

    pcv = !! (0x1 & cdbp[1]);
    dpg_cd = cdbp[2];
    alloc_len = sg_get_unaligned_be16(cdbp + 3); /* parameter list length */
    if (vb > 3)
        pr2ws("%s: dpg_cd=0x%x, pcv=%d, alloc_len=0x%x\n", __func__,
              dpg_cd, (int)pcv, alloc_len);
    din_len = ptp->io_hdr.din_xfer_len;
    n = din_len;
    n = (n < alloc_len) ? n : alloc_len;
    dip = (uint8_t *)ptp->io_hdr.din_xferp;
    if (! is_aligned(dip, pg_sz)) {  /* caller best use sg_memalign(,pg_sz) */
        if (vb)
            pr2ws("%s: din [0x%" PRIx64 "] not page aligned\n", __func__,
                  (uint64_t)ptp->io_hdr.din_xferp);
        return SCSI_PT_DO_BAD_PARAMS;
    }

    if (vb)
        pr2ws("%s: expecting d_pg=0x%x from NVME_MI SES receive\n", __func__,
              dpg_cd);
    memset(&cmd, 0, sizeof(cmd));
    cmd.opcode = 0x1e;  /* MI receive */
    cmd.addr = (uint64_t)(sg_uintptr_t)dip;
    cmd.data_len = 0x1000;   /* NVMe 4k page size. Maybe determine this? */
                             /* din_len > 0x1000, is this a problem?? */
    cmd.cdw10 = 0x0804;      /* NVMe Message Header */
    cmd.cdw11 = 0x8;         /* nvme_mi_ses_receive */
    cmd.cdw12 = dpg_cd;
    cmd.cdw13 = n;
    res = do_nvme_admin_cmd(ptp, &cmd, dip, true, time_secs, vb);
    if (0 != res) {
        if (SG_LIB_NVME_STATUS == res) {
            mk_sense_from_nvme_status(ptp, vb);
            return 0;
        } else
            return res;
    }
    ptp->io_hdr.din_resid = din_len - n;
    return res;
}

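/* Both SES tunnelling helpers above require the caller's data buffer to be
 * page aligned (see the is_aligned() checks). Allocating the buffer with,
 * for example, sg_memalign(len, sg_get_page_size(), &free_p, false)
 * satisfies that requirement. */
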
#define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH               0x100   /* as used by variable length cdbs */
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_INV_OP                0x200

static struct opcode_info_t {
        uint8_t opcode;
        uint16_t sa;            /* service action, 0 for none */
        uint32_t flags;         /* OR-ed set of F_* flags */
        uint8_t len_mask[16];   /* len=len_mask[0], then mask for cdb[1]... */
                                /* ignore cdb bytes after position 15 */
    } opcode_info_arr[] = {
    {0x0, 0, 0, {6,              /* TEST UNIT READY */
      0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0x3, 0, 0, {6,             /* REQUEST SENSE */
      0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0x12, 0, 0, {6,            /* INQUIRY */
      0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0x1c, 0, 0, {6,            /* RECEIVE DIAGNOSTIC RESULTS */
      0x1, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0x1d, 0, 0, {6,            /* SEND DIAGNOSTIC */
      0xf7, 0x0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0xa0, 0, 0, {12,           /* REPORT LUNS */
      0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} },
    {0xa3, 0xc, F_SA_LOW, {12,  /* REPORT SUPPORTED OPERATION CODES */
      0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
      0} },
    {0xa3, 0xd, F_SA_LOW, {12,  /* REPORT SUPPORTED TASK MAN. FUNCTIONS */
      0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} },

    {0xff, 0xffff, 0xffff, {0,  /* Sentinel, keep as last element */
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

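/* The len_mask arrays above feed sntl_rep_opcodes() below: element 0 is
 * the CDB length and the remaining elements form the CDB usage bitmap
 * returned when a single command is requested. For example the INQUIRY
 * entry starts {6, 0xe3, 0xff, ...}: a 6 byte CDB whose cdb[1] usage mask
 * is 0xe3. */
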
static int
sntl_rep_opcodes(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
                 int time_secs, int vb)
{
    bool rctd;
    uint8_t reporting_opts, req_opcode, supp;
    uint16_t req_sa, u;
    uint32_t alloc_len, offset, a_len;
    uint32_t pg_sz = sg_get_page_size();
    int k, len, count, bump;
    const struct opcode_info_t *oip;
    uint8_t *arr;
    uint8_t *free_arr;

    if (vb > 3)
        pr2ws("%s: time_secs=%d\n", __func__, time_secs);
    rctd = !!(cdbp[2] & 0x80);      /* report command timeout desc. */
    reporting_opts = cdbp[2] & 0x7;
    req_opcode = cdbp[3];
    req_sa = sg_get_unaligned_be16(cdbp + 4);
    alloc_len = sg_get_unaligned_be32(cdbp + 6);
    if (alloc_len < 4 || alloc_len > 0xffff) {
        mk_sense_invalid_fld(ptp, true, 6, -1, vb);
        return 0;
    }
    a_len = pg_sz - 72;
    arr = sg_memalign(pg_sz, pg_sz, &free_arr, vb > 3);
    if (NULL == arr) {
        pr2ws("%s: sg_memalign() failed to get memory\n", __func__);
        return -ENOMEM;
    }
    switch (reporting_opts) {
    case 0: /* all commands */
        count = 0;
        bump = rctd ? 20 : 8;
        for (offset = 4, oip = opcode_info_arr;
             (oip->flags != 0xffff) && (offset < a_len); ++oip) {
            if (F_INV_OP & oip->flags)
                continue;
            ++count;
            arr[offset] = oip->opcode;
            sg_put_unaligned_be16(oip->sa, arr + offset + 2);
            if (rctd)
                arr[offset + 5] |= 0x2;
            if (FF_SA & oip->flags)
                arr[offset + 5] |= 0x1;
            sg_put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
            if (rctd)
                sg_put_unaligned_be16(0xa, arr + offset + 8);
            offset += bump;
        }
        sg_put_unaligned_be32(count * bump, arr + 0);
        break;
    case 1: /* one command: opcode only */
    case 2: /* one command: opcode plus service action */
    case 3: /* one command: if sa==0 then opcode only else opcode+sa */
        for (oip = opcode_info_arr; oip->flags != 0xffff; ++oip) {
            if ((req_opcode == oip->opcode) && (req_sa == oip->sa))
                break;
        }
        if ((0xffff == oip->flags) || (F_INV_OP & oip->flags)) {
            supp = 1;
            offset = 4;
        } else {
            if (1 == reporting_opts) {
                if (FF_SA & oip->flags) {
                    mk_sense_invalid_fld(ptp, true, 2, 2, vb);
                    free(free_arr);
                    return 0;
                }
                req_sa = 0;
            } else if ((2 == reporting_opts) && 0 == (FF_SA & oip->flags)) {
                mk_sense_invalid_fld(ptp, true, 4, -1, vb);
                free(free_arr);
                return 0;
            }
            if ((0 == (FF_SA & oip->flags)) && (req_opcode == oip->opcode))
                supp = 3;
            else if (0 == (FF_SA & oip->flags))
                supp = 1;
            else if (req_sa != oip->sa)
                supp = 1;
            else
                supp = 3;
            if (3 == supp) {
                u = oip->len_mask[0];
                sg_put_unaligned_be16(u, arr + 2);
                arr[4] = oip->opcode;
                for (k = 1; k < u; ++k)
                    arr[4 + k] = (k < 16) ? oip->len_mask[k] : 0xff;
                offset = 4 + u;
            } else
                offset = 4;
        }
        arr[1] = (rctd ? 0x80 : 0) | supp;
        if (rctd) {
            sg_put_unaligned_be16(0xa, arr + offset);
            offset += 12;
        }
        break;
    default:
        mk_sense_invalid_fld(ptp, true, 2, 2, vb);
        free(free_arr);
        return 0;
    }
    offset = (offset < a_len) ? offset : a_len;
    len = (offset < alloc_len) ? offset : alloc_len;
    ptp->io_hdr.din_resid = ptp->io_hdr.din_xfer_len - len;
    if (len > 0)
        memcpy((uint8_t *)ptp->io_hdr.din_xferp, arr, len);
    free(free_arr);
    return 0;
}

static int
sntl_rep_tmfs(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
              int time_secs, int vb)
{
    bool repd;
    uint32_t alloc_len, len;
    uint8_t arr[16];

    if (vb > 3)
        pr2ws("%s: time_secs=%d\n", __func__, time_secs);
    memset(arr, 0, sizeof(arr));
    repd = !!(cdbp[2] & 0x80);
    alloc_len = sg_get_unaligned_be32(cdbp + 6);
    if (alloc_len < 4) {
        mk_sense_invalid_fld(ptp, true, 6, -1, vb);
        return 0;
    }
    arr[0] = 0xc8;          /* ATS | ATSS | LURS */
    arr[1] = 0x1;           /* ITNRS */
    if (repd) {
        arr[3] = 0xc;
        len = 16;
    } else
        len = 4;

    len = (len < alloc_len) ? len : alloc_len;
    ptp->io_hdr.din_resid = ptp->io_hdr.din_xfer_len - len;
    if (len > 0)
        memcpy((uint8_t *)ptp->io_hdr.din_xferp, arr, len);
    return 0;
}

/* Executes NVMe Admin command (or at least forwards it to lower layers).
 * Returns 0 for success, negative numbers are negated 'errno' values from
 * OS system calls. Positive return values are errors from this package.
 * When time_secs is 0 the Linux NVMe Admin command default of 60 seconds
 * is used. */
int
sg_do_nvme_pt(struct sg_pt_base * vp, int fd, int time_secs, int vb)
{
    bool scsi_cdb;
    bool is_read = false;
    int n, len;
    uint16_t sa;
    struct sg_pt_linux_scsi * ptp = &vp->impl;
    struct sg_nvme_passthru_cmd cmd;
    const uint8_t * cdbp;
    void * dp = NULL;

    if (! ptp->io_hdr.request) {
        if (vb)
            pr2ws("No NVMe command given (set_scsi_pt_cdb())\n");
        return SCSI_PT_DO_BAD_PARAMS;
    }
    if (fd >= 0) {
        if ((ptp->dev_fd >= 0) && (fd != ptp->dev_fd)) {
            if (vb)
                pr2ws("%s: file descriptor given to create() and here "
                      "differ\n", __func__);
            return SCSI_PT_DO_BAD_PARAMS;
        }
        ptp->dev_fd = fd;
    } else if (ptp->dev_fd < 0) {
        if (vb)
            pr2ws("%s: invalid file descriptors\n", __func__);
        return SCSI_PT_DO_BAD_PARAMS;
    }
    n = ptp->io_hdr.request_len;
    cdbp = (const uint8_t *)ptp->io_hdr.request;
    if (vb > 3)
        pr2ws("%s: opcode=0x%x, fd=%d, time_secs=%d\n", __func__, cdbp[0],
              fd, time_secs);
    scsi_cdb = sg_is_scsi_cdb(cdbp, n);
    /* direct NVMe command (i.e. 64 bytes long) or SNTL */
    ptp->nvme_direct = ! scsi_cdb;
    if (scsi_cdb) {
        switch (cdbp[0]) {
        case SCSI_INQUIRY_OPC:
            return sntl_inq(ptp, cdbp, time_secs, vb);
        case SCSI_REPORT_LUNS_OPC:
            return sntl_rluns(ptp, cdbp, time_secs, vb);
        case SCSI_TEST_UNIT_READY_OPC:
            return sntl_tur(ptp, time_secs, vb);
        case SCSI_REQUEST_SENSE_OPC:
            return sntl_req_sense(ptp, cdbp, time_secs, vb);
        case SCSI_SEND_DIAGNOSTIC_OPC:
            return sntl_senddiag(ptp, cdbp, time_secs, vb);
        case SCSI_RECEIVE_DIAGNOSTIC_OPC:
            return sntl_recvdiag(ptp, cdbp, time_secs, vb);
        case SCSI_MAINT_IN_OPC:
            sa = 0x1f & cdbp[1];        /* service action */
            if (SCSI_REP_SUP_OPCS_OPC == sa)
                return sntl_rep_opcodes(ptp, cdbp, time_secs, vb);
            else if (SCSI_REP_SUP_TMFS_OPC == sa)
                return sntl_rep_tmfs(ptp, cdbp, time_secs, vb);
            /* fall through */
        default:
            if (vb > 2) {
                char b[64];

                sg_get_command_name(cdbp, -1, sizeof(b), b);
                pr2ws("%s: no translation to NVMe for SCSI %s command\n",
                      __func__, b);
            }
            mk_sense_asc_ascq(ptp, SPC_SK_ILLEGAL_REQUEST, INVALID_OPCODE,
                              0, vb);
            return 0;
        }
    }
    len = (int)sizeof(cmd);
    n = (n < len) ? n : len;
    if (n < 64) {
        if (vb)
            pr2ws("%s: command length of %d bytes is too short\n", __func__,
                  n);
        return SCSI_PT_DO_BAD_PARAMS;
    }
    memcpy(&cmd, (const uint8_t *)ptp->io_hdr.request, n);
    if (n < len)        /* zero out rest of 'cmd' */
        memset((unsigned char *)&cmd + n, 0, len - n);
    if (ptp->io_hdr.din_xfer_len > 0) {
        cmd.data_len = ptp->io_hdr.din_xfer_len;
        dp = (void *)ptp->io_hdr.din_xferp;
        cmd.addr = (uint64_t)(sg_uintptr_t)ptp->io_hdr.din_xferp;
        is_read = true;
    } else if (ptp->io_hdr.dout_xfer_len > 0) {
        cmd.data_len = ptp->io_hdr.dout_xfer_len;
        dp = (void *)ptp->io_hdr.dout_xferp;
        cmd.addr = (uint64_t)(sg_uintptr_t)ptp->io_hdr.dout_xferp;
        is_read = false;
    }
    return do_nvme_admin_cmd(ptp, &cmd, dp, is_read, time_secs, vb);
}

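/* Illustrative caller sketch (assumes the usual sg_pt.h API and the
 * hypothetical device name "/dev/nvme0"; not part of this file): a 6 byte
 * SCSI INQUIRY sent through the pass-through layer ends up in
 * sg_do_nvme_pt() above and is answered by sntl_inq().
 *
 *     uint8_t cdb[6] = {0x12, 0, 0, 0, 36, 0};
 *     uint8_t resp[36], sense[32];
 *     int fd = scsi_pt_open_device("/dev/nvme0", true, 0);
 *     struct sg_pt_base * ptvp = construct_scsi_pt_obj();
 *
 *     set_scsi_pt_cdb(ptvp, cdb, sizeof(cdb));
 *     set_scsi_pt_sense(ptvp, sense, sizeof(sense));
 *     set_scsi_pt_data_in(ptvp, resp, sizeof(resp));
 *     do_scsi_pt(ptvp, fd, 5, 0);     // eventually calls sg_do_nvme_pt()
 *     destruct_scsi_pt_obj(ptvp);
 *     scsi_pt_close_device(fd);
 */
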
#else           /* (HAVE_NVME && (! IGNORE_NVME)) */

int
sg_do_nvme_pt(struct sg_pt_base * vp, int fd, int time_secs, int vb)
{
    if (vb)
        pr2ws("%s: not supported\n", __func__);
    if (vp) { ; }               /* suppress warning */
    if (fd) { ; }               /* suppress warning */
    if (time_secs) { ; }        /* suppress warning */
    return -ENOTTY;             /* inappropriate ioctl error */
}

#endif          /* (HAVE_NVME && (! IGNORE_NVME)) */