Feature #11961
Closed — add DDI UFM support to the nvme driver
100%
Description
This ticket to covers the work to add DDI UFM support to the nvme driver. This change has already been integrated downstream in illumos-joyent via the following commit:
commit 5a02003f48844ffada03336634b28edaff8574ec Author: Rob Johnston <rob.johnston@joyent.com> Date: Mon Aug 19 20:54:40 2019 +0000 OS-7938 add DDI UFM support to the nvme driver Reviewed by: Robert Mustacchi <rm@joyent.com> Reviewed by: Jordan Hendricks <jordan.hendricks@joyent.com> Approved by: Joshua M. Clulow <josh@sysmgr.org>
Updated by Rob Johnston over 2 years ago
Testing
I built/booted a SmartOS PI with these changes on the following two systems, which covers a variety of NVMe drive models:
Supermicro SYS-2028U-E1CNRT+
2 x U.2 INTEL SSDPE2KE016T7
1 x U.2 INTEL SSDPE2MD020T4
1 x U.2 SAMSUNG MZQLW960HMJP-00003
home workstation
1 x M.2 Samsung SSD 970 EVO Plus
After porting the change from illumos-joyent, I retested the change on the following platform running OpenIndiana:
Intel NUC7i5BNH
1 x M.2 Samsung SSD 970 EVO
I verified via mdb that the nvme devices had correctly registered with the DDI UFM subsystem (see sample output from the Supermicro rig below)
> ufm_handles::walk avl | ::print -t ddi_ufm_handle_t ddi_ufm_handle_t { kmutex_t ufmh_lock = { void *[1] _opaque = [ 0 ] } char [1024] ufmh_devpath = [ "/pci@0,0/pci8086,6f06@2,2/pci15d9,808@0" ] ddi_ufm_ops_t *ufmh_ops = mptsas_ufm_ops void *ufmh_arg = 0xfffffe59362c1000 uint_t ufmh_state = 0x3 uint_t ufmh_version = 0x1 struct ddi_ufm_image *ufmh_images = 0xfffffe5939875298 uint_t ufmh_nimages = 0x1 ddi_ufm_cap_t ufmh_caps = 0x1 (DDI_UFM_CAP_REPORT) nvlist_t *ufmh_report = 0xfffffe59367c2c50 avl_node_t ufmh_link = { struct avl_node *[2] avl_child = [ 0, 0xfffffe59368c1d78 ] uintptr_t avl_pcb = 0xfffffe592757e43a } } ddi_ufm_handle_t { kmutex_t ufmh_lock = { void *[1] _opaque = [ 0 ] } char [1024] ufmh_devpath = [ "/pci@0,0/pci8086,6f08@3/pci8086,4712@0" ] ddi_ufm_ops_t *ufmh_ops = nvme_ufm_ops void *ufmh_arg = 0xfffffe5935308cc0 uint_t ufmh_state = 0x3 uint_t ufmh_version = 0x1 struct ddi_ufm_image *ufmh_images = 0xfffffe593a0c4cd0 uint_t ufmh_nimages = 0x1 ddi_ufm_cap_t ufmh_caps = 0x1 (DDI_UFM_CAP_REPORT) nvlist_t *ufmh_report = 0xfffffe59367c2c38 avl_node_t ufmh_link = { struct avl_node *[2] avl_child = [ 0, 0 ] uintptr_t avl_pcb = 0xfffffe59275804bd } } ddi_ufm_handle_t { kmutex_t ufmh_lock = { void *[1] _opaque = [ 0 ] } char [1024] ufmh_devpath = [ "/pci@0,0/pci8086,6f09@3,1/pci8086,4712@0" ] ddi_ufm_ops_t *ufmh_ops = nvme_ufm_ops void *ufmh_arg = 0xfffffe59355bea00 uint_t ufmh_state = 0x3 uint_t ufmh_version = 0x1 struct ddi_ufm_image *ufmh_images = 0xfffffe593a0c7f80 uint_t ufmh_nimages = 0x1 ddi_ufm_cap_t ufmh_caps = 0x1 (DDI_UFM_CAP_REPORT) nvlist_t *ufmh_report = 0xfffffe59367c2c20 avl_node_t ufmh_link = { struct avl_node *[2] avl_child = [ 0xfffffe59275804b8, 0xfffffe59283a1238 ] uintptr_t avl_pcb = 0x1 } } ddi_ufm_handle_t { kmutex_t ufmh_lock = { void *[1] _opaque = [ 0 ] } char [1024] ufmh_devpath = [ "/pci@0,0/pci8086,6f0a@3,2/pci8086,3703@0" ] ddi_ufm_ops_t *ufmh_ops = nvme_ufm_ops void *ufmh_arg = 0xfffffe59360e5940 uint_t ufmh_state = 0x3 
uint_t ufmh_version = 0x1 struct ddi_ufm_image *ufmh_images = 0xfffffe593a0c5e00 uint_t ufmh_nimages = 0x1 ddi_ufm_cap_t ufmh_caps = 0x1 (DDI_UFM_CAP_REPORT) nvlist_t *ufmh_report = 0xfffffe59367c2c08 avl_node_t ufmh_link = { struct avl_node *[2] avl_child = [ 0, 0xfffffe593534edb8 ] uintptr_t avl_pcb = 0xfffffe592757e43e } } ddi_ufm_handle_t { kmutex_t ufmh_lock = { void *[1] _opaque = [ 0 ] } char [1024] ufmh_devpath = [ "/pci@0,0/pci8086,6f0b@3,3/pci144d,a801@0" ] ddi_ufm_ops_t *ufmh_ops = nvme_ufm_ops void *ufmh_arg = 0xfffffe5934e14300 uint_t ufmh_state = 0x3 uint_t ufmh_version = 0x1 struct ddi_ufm_image *ufmh_images = 0xfffffe593563e2f8 uint_t ufmh_nimages = 0x1 ddi_ufm_cap_t ufmh_caps = 0x1 (DDI_UFM_CAP_REPORT) nvlist_t *ufmh_report = 0xfffffe59367c2bf0 avl_node_t ufmh_link = { struct avl_node *[2] avl_child = [ 0, 0 ] uintptr_t avl_pcb = 0xfffffe59283a123d } } > ufm_handles::walk avl | ::print -t ddi_ufm_handle_t ufmh_report | ::nvlist ufm-images[0] ufm-image-description='IOC Firmware' ufm-image-slots[0] ufm-slot-attributes=00000004 ufm-slot-version='6.0.0.0' ufm-images[0] ufm-image-description='Firmware' ufm-image-slots[0] ufm-slot-attributes=00000006 ufm-slot-version='QDV10130' ufm-images[0] ufm-image-description='Firmware' ufm-image-slots[0] ufm-slot-attributes=00000006 ufm-slot-version='QDV10130' ufm-images[0] ufm-image-description='Firmware' ufm-image-slots[0] ufm-slot-attributes=00000006 ufm-slot-version='8DV10171' ufm-images[0] ufm-image-description='Firmware' ufm-image-slots[0] ufm-slot-attributes=00000006 ufm-slot-version='CXV8301Q' ufm-image-slots[1] ufm-slot-attributes=0000000a ufm-image-slots[2] ufm-slot-attributes=0000000a
I also ran a CLI to test the ufm(7d) ioctls against nvme devices (see sample output below)
[root@nvme /var/tmp/rejohnst]# ./ufm-ioctl.32 -d /pci@0,0/pci8086,6f08@3/pci8086,4712@0 -i report Report Size: 312 bytes nvlist version: 0 ufm-images = (array of embedded nvlists) (start ufm-images[0]) nvlist version: 0 ufm-image-description = Firmware ufm-image-slots = (array of embedded nvlists) (start ufm-image-slots[0]) nvlist version: 0 ufm-slot-attributes = 0x6 ufm-slot-version = QDV10130 (end ufm-image-slots[0]) (end ufm-images[0]) [root@nvme /var/tmp/rejohnst]# ./ufm-ioctl.64 -d /pci@0,0/pci8086,6f08@3/pci8086,4712@0 -i report Report Size: 312 bytes nvlist version: 0 ufm-images = (array of embedded nvlists) (start ufm-images[0]) nvlist version: 0 ufm-image-description = Firmware ufm-image-slots = (array of embedded nvlists) (start ufm-image-slots[0]) nvlist version: 0 ufm-slot-attributes = 0x6 ufm-slot-version = QDV10130 (end ufm-image-slots[0]) (end ufm-images[0]) [root@nvme /var/tmp/rejohnst]# ./ufm-ioctl.32 -d /pci@0,0/pci8086,6f0b@3,3/pci144d,a801@0 -i report Report Size: 480 bytes nvlist version: 0 ufm-images = (array of embedded nvlists) (start ufm-images[0]) nvlist version: 0 ufm-image-description = Firmware ufm-image-slots = (array of embedded nvlists) (start ufm-image-slots[0]) nvlist version: 0 ufm-slot-attributes = 0x6 ufm-slot-version = CXV8301Q (end ufm-image-slots[0]) (start ufm-image-slots[1]) nvlist version: 0 ufm-slot-attributes = 0xa (end ufm-image-slots[1]) (start ufm-image-slots[2]) nvlist version: 0 ufm-slot-attributes = 0xa (end ufm-image-slots[2]) (end ufm-images[0]) [root@nvme /var/tmp/rejohnst]# ./ufm-ioctl.64 -d /pci@0,0/pci8086,6f0b@3,3/pci144d,a801@0 -i report Report Size: 480 bytes nvlist version: 0 ufm-images = (array of embedded nvlists) (start ufm-images[0]) nvlist version: 0 ufm-image-description = Firmware ufm-image-slots = (array of embedded nvlists) (start ufm-image-slots[0]) nvlist version: 0 ufm-slot-attributes = 0x6 ufm-slot-version = CXV8301Q (end ufm-image-slots[0]) (start ufm-image-slots[1]) nvlist version: 0 
ufm-slot-attributes = 0xa (end ufm-image-slots[1]) (start ufm-image-slots[2]) nvlist version: 0 ufm-slot-attributes = 0xa (end ufm-image-slots[2]) (end ufm-images[0])
Updated by Electric Monk over 2 years ago
- Status changed from New to Closed
- % Done changed from 0 to 100
git commit e89be50a407de17396dc2e87e7f9aa8160182fb6
commit e89be50a407de17396dc2e87e7f9aa8160182fb6 Author: Rob Johnston <rob.johnston@joyent.com> Date: 2019-11-14T22:06:58.000Z 11961 add DDI UFM support to the nvme driver Reviewed by: Robert Mustacchi <rm@joyent.com> Reviewed by: Jordan Hendricks <jordan.hendricks@joyent.com> Reviewed by: Paul Winder <paul@winders.demon.co.uk> Reviewed by: Andrew Stormont <astormont@racktopsystems.com> Approved by: Dan McDonald <danmcd@joyent.com>