Bug #5320
panic in smb_com_locking_andx
Status: Closed
% Done: 100
Description
The system panicked with a NULL pointer dereference:
> ::status
debugging crash dump vmcore.0 (64-bit) from sgc01
operating system: 5.11 NexentaOS_134f (i86pc)
panic message:
BAD TRAP: type=e (#pf Page fault) rp=ffffff01eaf78930 addr=158 occurred in module "unix" due to a NULL pointer dereference
dump content: kernel pages only (curproc requested, but a kernel thread panicked)

Full panic stack reported in messages file:

2014 Apr 24 14:17:06 ffffff4492f13070 ss: 38
2014 Apr 24 14:17:06 ffffff4492e0aa70
2014 Apr 24 14:17:06 ffffff43edc51570 ffffff01eaf78810 unix:die+dd ()
2014 Apr 24 14:17:06 ffffff4311e7c870 ffffff01eaf78920 unix:trap+177b ()
2014 Apr 24 14:17:06 ffffff43ea6d5430 ffffff01eaf78930 unix:cmntrap+e6 ()
2014 Apr 24 14:17:06 ffffff43edc60630 ffffff01eaf78a70 unix:mutex_enter+b ()
2014 Apr 24 14:17:06 ffffff4492f471b0 ffffff01eaf78ae0 smbsrv:smb_com_locking_andx+c3 ()
2014 Apr 24 14:17:06 ffffff43edc11230 ffffff01eaf78b80 smbsrv:smb_dispatch_request+4d0 ()
2014 Apr 24 14:17:06 ffffff4492fb2870 ffffff01eaf78bb0 smbsrv:smb_session_worker+7c ()
2014 Apr 24 14:17:06 ffffff43edc5c370 ffffff01eaf78c40 genunix:taskq_d_thread+b1 ()
2014 Apr 24 14:17:06 ffffff42e2a9a630 ffffff01eaf78c50 unix:thread_start+8 ()
2014 Apr 24 14:17:06 ffffff43ed0f6930
2014 Apr 24 14:17:06 ffffff4492e016b0 syncing file systems...
2014 Apr 24 14:17:08 ffffff4492f705b0 done
2014 Apr 24 14:17:09 ffffff4492ded4f0
Updated by Gordon Ross over 7 years ago
Gordon Ross added a comment - 25/Apr/14 2:33 PM
Here's a log of the crash dump analysis.
ssh support@l3cda.nexenta.com
cd /mnt/ftp-incoming/ingrooves
mdb -k 0

> utsname ::print
{
    sysname = [ "SunOS" ]
    nodename = [ "sgc01" ]
    release = [ "5.11" ]
    version = [ "NexentaOS_134f" ]
    machine = [ "i86pc" ]
}
> ::status
debugging crash dump vmcore.0 (64-bit) from sgc01
operating system: 5.11 NexentaOS_134f (i86pc)
panic message:
BAD TRAP: type=e (#pf Page fault) rp=ffffff01eaf78930 addr=158 occurred in module "unix" due to a NULL pointer dereference
dump content: kernel pages only (curproc requested, but a kernel thread panicked)
> $C
ffffff01eaf78a70 mutex_enter+0xb()
ffffff01eaf78ae0 smb_com_locking_andx+0xc3(ffffff43df9f8340)
ffffff01eaf78b80 smb_dispatch_request+0x4d0(ffffff43df9f8340)
ffffff01eaf78bb0 smb_session_worker+0x7c(ffffff43df9f8340)
ffffff01eaf78c40 taskq_d_thread+0xb1(ffffff4460da26c0)
ffffff01eaf78c50 thread_start+8()
Here's the SMB request this thread is working on:
> ffffff43df9f8340 ::print smb_request_t { sr_magic = 0x534d4252 sr_mutex = { _opaque = [ 0 ] } sr_session_lnd = { list_next = 0xffffff442aa5ce10 list_prev = 0xffffff442aa5ce10 } sr_state = 3 (SMB_REQ_STATE_ACTIVE) sr_cache = 0xffffff4354608008 sr_server = 0xffffff4313070800 sr_pid = 0 sr_gmtoff = 0x6270 session = 0xffffff442aa5c920 sr_cfg = 0xffffff442aa5c980 sr_ncr = { nc_lnd = { list_next = 0 list_prev = 0 } nc_cv = { _opaque = 0 } nc_flags = 0 nc_action = 0 nc_fname = 0 } sr_req_length = 0x33 sr_request_buf = 0xffffff4493ed15a0 sr_awaiting = 0 command = { mbc_magic = 0 mbc_lnd = { list_next = 0 list_prev = 0 } flags = { ch_link = { dl_next = 0 dl_prev = 0 } ch_name = 0xffffff43747408e8 "" ch_namelen = 0x3300000033 ch_mutex = { _opaque = [ 0 ] } ch_queue = 0 ch_subscr = { dh_head = { dl_next = 0 dl_prev = 0 } dh_count = 0 } ch_bindings = 0x7530d0a8 ch_maxbinds = 0xffffff43 ch_uid = 0x9400 ch_gid = 0x20 ch_pubmx = { _opaque = [ 0 ] } ch_pubcv = { _opaque = 0 } ch_nevents = 0 ch_maxev = 0 ch_maxsubscr = 0 ch_holdpend = 0 ch_ctime = 0 } shadow_of = 0 chain = 0xffffff43747408e8 max_bytes = 0x33 chain_offset = 0x33 } reply = { mbc_magic = 0 mbc_lnd = { list_next = 0 list_prev = 0 } flags = { ch_link = { dl_next = 0 dl_prev = 0 } ch_name = 0xffffff437530d0a8 "" ch_namelen = 0x2000009400 ch_mutex = { _opaque = [ 0 ] } ch_queue = 0 ch_subscr = { dh_head = { dl_next = 0 dl_prev = 0 } dh_count = 0 } ch_bindings = 0 ch_maxbinds = 0 ch_uid = 0 ch_gid = 0 ch_pubmx = { _opaque = [ 0x28 ] } ch_pubcv = { _opaque = 0x18 } ch_nevents = 0 ch_maxev = 0xdf9f8490 ch_maxsubscr = 0xffffff43 ch_holdpend = 0xdf9f8490 ch_ctime = 0 } shadow_of = 0 chain = 0xffffff437530d0a8 max_bytes = 0x9400 chain_offset = 0x20 } raw_data = { mbc_magic = 0 mbc_lnd = { list_next = 0 list_prev = 0 } flags = { ch_link = { dl_next = 0 dl_prev = 0 } ch_name = 0 ch_namelen = 0 ch_mutex = { _opaque = [ 0x28 ] } ch_queue = 0x18 ch_subscr = { dh_head = { dl_next = 0xffffff43df9f8490 dl_prev = 0xffffff43df9f8490 } dh_count = 0 } ch_bindings = 0 ch_maxbinds = 0x20 ch_uid = 0 ch_gid = 0 ch_pubmx = { _opaque = [ 0x2424 ] } ch_pubcv = { _opaque = 0 } ch_nevents = 0 ch_maxev = 0x88070018 ch_maxsubscr = 0 ch_holdpend = 0 ch_ctime = 8ffff0002feff } shadow_of = 0 chain = 0 max_bytes = 0 chain_offset = 0 } sr_storage = { list_size = 0x28 list_offset = 0x18 list_head = { list_next = 0xffffff43df9f8490 list_prev = 0xffffff43df9f8490 } } r_xa = 0 andx_prev_wct = 0 cur_reply_offset = 0x20 orig_request_hdr = 0 reply_seqnum = 0 first_smb_com = 0x24 smb_com = 0x24 smb_rcls = 0 smb_reh = 0 smb_err = 0 smb_error = { status = 0 errcls = 0 errcode = 0 } smb_flg = 0x18 smb_flg2 = 0x8807 smb_pid_high = 0 smb_sig = [ 0, 0, 0, 0, 0, 0, 0, 0 ] smb_tid = 0x1 smb_pid = 0xfeff smb_uid = 0x2 smb_mid = 0xffff smb_wct = 0x8 smb_bcc = 0 smb_vwv = { mbc_magic = 0 mbc_lnd = { list_next = 0 list_prev = 0 } flags = { ch_link = { dl_next = 0 dl_prev = 0xffffff43df9f83d8 } ch_name = 0xffffff43747408e8 "" ch_namelen = 0x3100000031 ch_mutex = { _opaque = [ 0 ] } ch_queue = 0 ch_subscr = { dh_head = { dl_next = 0 dl_prev = 0 } dh_count = 0xdf9f83d8 } ch_bindings = 0x747408e8 ch_maxbinds = 0xffffff43 ch_uid = 0x33 ch_gid = 0x33 ch_pubmx = { _opaque = [ 0xdede00ff0001 ] } ch_pubcv = { _opaque = 0x240 } ch_nevents = 0xffffff44 ch_maxev = 0xab7b6308 ch_maxsubscr = 0xffffff43 ch_holdpend = 0x7449ee00 ch_ctime = 0 } shadow_of = 0xffffff43df9f83d8 chain = 0xffffff43747408e8 max_bytes = 0x31 chain_offset = 0x31 } smb_data = { mbc_magic = 0 mbc_lnd = { list_next = 0 list_prev = 0 } flags 
= { ch_link = { dl_next = 0 dl_prev = 0xffffff43df9f83d8 } ch_name = 0xffffff43747408e8 "" ch_namelen = 0x3300000033 ch_mutex = { _opaque = [ 0xdede00ff0001 ] } ch_queue = 0xffffff4413010240 ch_subscr = { dh_head = { dl_next = 0xffffff43ab7b6308 dl_prev = 0xffffff447449ee00 } dh_count = 0 } ch_bindings = 0 ch_maxbinds = 0 ch_uid = 0 ch_gid = 0 ch_pubmx = { _opaque = [ 0 ] } ch_pubcv = { _opaque = 0 } ch_nevents = 0 ch_maxev = 0 ch_maxsubscr = 0 ch_holdpend = 0 ch_ctime = 0 } shadow_of = 0xffffff43df9f83d8 chain = 0xffffff43747408e8 max_bytes = 0x33 chain_offset = 0x33 } smb_fid = 0x1 andx_com = 0xff andx_off = 0xdede tid_tree = 0xffffff4413010240 fid_ofile = 0xffffff43ab7b6308 uid_user = 0xffffff447449ee00 arg = { negprot = 0 ssetup = 0 tcon = { path = 0 service = 0 pwdlen = 0 password = 0 flags = 0 optional_support = 0 si = 0 } dirop = { fqi = { fq_path = { pn_path = 0 pn_pname = 0 pn_fname = 0 pn_sname = 0 pn_stype = 0 } fq_sattr = 0 fq_dnode = 0 fq_fnode = 0 fq_fattr = { sa_mask = 0 sa_vattr = { va_mask = 0 va_type = 0 (VNON) va_mode = 0 va_uid = 0 va_gid = 0 va_fsid = 0 va_nodeid = 0 va_nlink = 0 va_size = 0 va_atime = { tv_sec = 0 tv_nsec = 0 } va_mtime = { tv_sec = 0 tv_nsec = 0 } va_ctime = { tv_sec = 0 tv_nsec = 0 } va_rdev = 0 va_blksize = 0 va_nblocks = 0 va_seq = 0 } sa_dosattr = 0 sa_crtime = { tv_sec = 0 tv_nsec = 0 } sa_allocsz = 0 } fq_last_comp = [ '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', ' \\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', ... ] } dst_fqi = { fq_path = { pn_path = 0 pn_pname = 0 pn_fname = 0 pn_sname = 0 pn_stype = 0 } fq_sattr = 0 fq_dnode = 0 fq_fnode = 0 fq_fattr = { sa_mask = 0 sa_vattr = { va_mask = 0 va_type = 0 (VNON) va_mode = 0 va_uid = 0 va_gid = 0 va_fsid = 0 va_nodeid = 0 va_nlink = 0 va_size = 0 va_atime = { tv_sec = 0 tv_nsec = 0 } va_mtime = { tv_sec = 0 tv_nsec = 0 } va_ctime = { tv_sec = 0 tv_nsec = 0 } va_rdev = 0 va_blksize = 0 va_nblocks = 0 va_seq = 0 } sa_dosattr = 0 sa_crtime = { tv_sec = 0 tv_nsec = 0 } sa_allocsz = 0 } fq_last_comp = [ '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', ' \\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', ... ] } info_level = 0 flags = 0 } open = { fqi = { fq_path = { pn_path = 0 pn_pname = 0 pn_fname = 0 pn_sname = 0 pn_stype = 0 } fq_sattr = 0 fq_dnode = 0 fq_fnode = 0 fq_fattr = { sa_mask = 0 sa_vattr = { va_mask = 0 va_type = 0 (VNON) va_mode = 0 va_uid = 0 va_gid = 0 va_fsid = 0 va_nodeid = 0 va_nlink = 0 va_size = 0 va_atime = { tv_sec = 0 tv_nsec = 0 } va_mtime = { tv_sec = 0 tv_nsec = 0 } va_ctime = { tv_sec = 0 tv_nsec = 0 } va_rdev = 0 va_blksize = 0 va_nblocks = 0 va_seq = 0 } sa_dosattr = 0 sa_crtime = { tv_sec = 0 tv_nsec = 0 } sa_allocsz = 0 } fq_last_comp = [ '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', ' \\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', ... 
] } omode = 0 ofun = 0 nt_flags = 0 timeo = 0 dattr = 0 crtime = { tv_sec = 0 tv_nsec = 0 } mtime = { tv_sec = 0 tv_nsec = 0 } dsize = 0 desired_access = 0 share_access = 0 create_options = 0 create_disposition = 0 created_readonly = 0 (0) ftype = 0 devstate = 0 action_taken = 0 fileid = 0 rootdirfid = 0 dir = 0 sd = 0 op_oplock_level = 0 op_oplock_levelII = 0 (0) } rw = 0 timestamp = 0 } user_cr = 0xffffff4354b7f2a8 sr_worker = 0xffffff01eaf78c60 sr_time_submitted = 0x61eb927fd28 sr_time_active = 0x61eb9281479 sr_time_start = 0x61eb92824de sr_txb = 0x20 sr_seqnum = 0 } >
Let's look at the code where it panicked:
> smb_com_locking_andx::dis
smb_com_locking_andx:           pushq  %rbp
smb_com_locking_andx+1:         movq   %rsp,%rbp
smb_com_locking_andx+4:         subq   $0x8,%rsp
smb_com_locking_andx+8:         movq   %rdi,-0x8(%rbp)
smb_com_locking_andx+0xc:       pushq  %rbx
smb_com_locking_andx+0xd:       pushq  %r12
smb_com_locking_andx+0xf:       pushq  %r13
smb_com_locking_andx+0x11:      pushq  %r14
smb_com_locking_andx+0x13:      pushq  %r15
smb_com_locking_andx+0x15:      subq   $0x30,%rsp
smb_com_locking_andx+0x19:      movq   %rdi,%r13
smb_com_locking_andx+0x1c:      leaq   +0x2de6d(%rip),%rsi      <0xfffffffff8408148>
smb_com_locking_andx+0x23:      leaq   0x218(%r13),%rdx
smb_com_locking_andx+0x2a:      leaq   -0x52(%rbp),%rcx
smb_com_locking_andx+0x2e:      leaq   -0x51(%rbp),%r8
smb_com_locking_andx+0x32:      leaq   -0x50(%rbp),%r9
smb_com_locking_andx+0x36:      leaq   -0x4a(%rbp),%r10
smb_com_locking_andx+0x3a:      pushq  %r10
smb_com_locking_andx+0x3c:      leaq   -0x4c(%rbp),%r10
smb_com_locking_andx+0x40:      pushq  %r10
smb_com_locking_andx+0x42:      movq   %r13,%rdi
smb_com_locking_andx+0x45:      xorl   %eax,%eax
smb_com_locking_andx+0x47:      call   -0x107ec <smbsr_decode_vwv>
smb_com_locking_andx+0x4c:      addq   $0x10,%rsp
smb_com_locking_andx+0x50:      testl  %eax,%eax
smb_com_locking_andx+0x52:      je     +0xa     <smb_com_locking_andx+0x5e>
smb_com_locking_andx+0x54:      movl   $0x1,%eax
smb_com_locking_andx+0x59:      jmp    +0x3ed   <smb_com_locking_andx+0x44b>
smb_com_locking_andx+0x5e:      movq   %r13,%rdi
smb_com_locking_andx+0x61:      call   -0x10116 <smbsr_lookup_file>
smb_com_locking_andx+0x66:      movq   0x228(%r13),%rsi
smb_com_locking_andx+0x6d:      testq  %rsi,%rsi
smb_com_locking_andx+0x70:      je     +0x3b9   <smb_com_locking_andx+0x42f>
smb_com_locking_andx+0x76:      movzbl -0x52(%rbp),%eax
smb_com_locking_andx+0x7a:      testl  $0x1,%eax
smb_com_locking_andx+0x7f:      movl   $0x66,%ebx
smb_com_locking_andx+0x84:      movl   $0x65,%ecx
smb_com_locking_andx+0x89:      movl   %ebx,%r12d
smb_com_locking_andx+0x8c:      cmovl.e %ecx,%r12d
smb_com_locking_andx+0x90:      leaq   0x198(%r13),%r14
smb_com_locking_andx+0x97:      movzwl 0x198(%r13),%ecx
smb_com_locking_andx+0x9f:      movl   %ecx,-0x58(%rbp)
smb_com_locking_andx+0xa2:      testl  $0x2,%eax
smb_com_locking_andx+0xa7:      je     +0x26    <smb_com_locking_andx+0xcf>
smb_com_locking_andx+0xa9:      cmpb   $0x0,-0x51(%rbp)
smb_com_locking_andx+0xad:      movl   $0x1,%edx
smb_com_locking_andx+0xb2:      movl   $0x2,%eax
smb_com_locking_andx+0xb7:      cmovl.ne %eax,%edx
smb_com_locking_andx+0xba:      movq   0x58(%rsi),%rdi
smb_com_locking_andx+0xbe:      call   +0x11b0d <smb_oplock_ack>
smb_com_locking_andx+0xc3:      movzwl -0x4c(%rbp),%eax
smb_com_locking_andx+0xc7:      testl  %eax,%eax
Interesting. The $C stack shows mutex_enter() returning to smb_com_locking_andx+0xc3, but +0xc3 is just the return address for the call to smb_oplock_ack() at +0xbe, so we're actually in smb_oplock_ack():
> smb_oplock_ack::dis
smb_oplock_ack:                 pushq  %rbp
smb_oplock_ack+1:               movq   %rsp,%rbp
smb_oplock_ack+4:               subq   $0x18,%rsp
smb_oplock_ack+8:               movq   %rdi,-0x8(%rbp)
smb_oplock_ack+0xc:             movq   %rsi,-0x10(%rbp)
smb_oplock_ack+0x10:            movq   %rdx,-0x18(%rbp)
smb_oplock_ack+0x14:            pushq  %rbx
smb_oplock_ack+0x15:            pushq  %r12
smb_oplock_ack+0x17:            pushq  %r13
smb_oplock_ack+0x19:            pushq  %r14
smb_oplock_ack+0x1b:            pushq  %r15
smb_oplock_ack+0x1d:            movq   %rdi,%r15
smb_oplock_ack+0x20:            movq   %rsi,%r14
smb_oplock_ack+0x23:            movl   %edx,%ebx
smb_oplock_ack+0x25:            leaq   0x158(%r15),%r13
smb_oplock_ack+0x2c:            movq   %r13,%rdi
smb_oplock_ack+0x2f:            call   +0x34773c4 <mutex_enter>
smb_oplock_ack+0x34:            movq   %r15,%rdi
So it looks like mutex_enter() was handed NULL plus a small structure offset (the 0x158 added at smb_oplock_ack+0x25).
Let's look at the registers:
> ::regs
%rax = 0x0000000000000000       %r9  = 0x0000000000000031
%rbx = 0x0000000000000002       %r10 = 0xffffff43747408e8
%rcx = 0x000000000000feff       %r11 = 0xffffff01eaf78c60
%rdx = 0xffffff01eaf78c60       %r12 = 0x0000000000000065
%rsi = 0xffffff43ab7b6308       %r13 = 0x0000000000000158
%rdi = 0x0000000000000158       %r14 = 0xffffff43ab7b6308
%r8  = 0xffffff44130102d8       %r15 = 0x0000000000000000

%rip = 0xfffffffffb86328b mutex_enter+0xb
%rbp = 0xffffff01eaf78a70
%rsp = 0xffffff01eaf78a28
%rflags = 0x00010246
  id=0 vip=0 vif=0 ac=0 vm=0 rf=1 nt=0 iopl=0x0
  status=<of,df,IF,tf,sf,ZF,af,PF,cf>

%cs = 0x0030    %ds = 0x004b    %es = 0x004b
%trapno = 0xe
%fs = 0x0000    %gs = 0x01c3
%err = 0x2
Looks like smb_oplock_ack+0x25 computed %r13 = %r15 + 0x158 and +0x2c copied that into %rdi,
so mutex_enter() was handed %rdi == 0x158. That means %r15 == 0, and %r15 holds the value
%rdi had on entry to smb_oplock_ack() (its first argument). Note that 0x158 also matches
the fault address (addr=158) in the panic message.
So smb_oplock_ack() was called with a NULL node pointer.
The call site appears to be smb_locking_andx.c:270.
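Roughly, that code path looks like the sketch below. This is reconstructed from the disassembly above, not quoted source; the local names (locktype, oplock_level, brk) and the n_oplock/ol_mutex fields are my read of the register usage and of the 0x58/0x158 offsets, so treat them as assumptions.

	/*
	 * Suspected call site in smb_com_locking_andx() -- a sketch, not
	 * verbatim source.  Per the disassembly:
	 *    movq 0x228(%r13),%rsi     rsi = sr->fid_ofile
	 *    movq 0x58(%rsi),%rdi      rdi = sr->fid_ofile->f_node (NULL here)
	 *    call smb_oplock_ack
	 */
	if (locktype & LOCKING_ANDX_OPLOCK_RELEASE) {	/* the testl $0x2,%eax */
		uint8_t brk = (oplock_level == 0) ? 1 : 2;
		smb_oplock_ack(sr->fid_ofile->f_node, sr->fid_ofile, brk);
	}

	/*
	 * And smb_oplock_ack() (again per the disassembly) starts by taking
	 * a mutex 0x158 bytes into the node it was handed:
	 */
	void
	smb_oplock_ack(smb_node_t *node, smb_ofile_t *of, uint8_t brk)
	{
		smb_oplock_t *ol = &node->n_oplock;

		mutex_enter(&ol->ol_mutex);	/* NULL node -> fault at 0x158 */
		/* ... */
	}

With f_node == NULL, that mutex_enter() dereferences address 0x158, which is exactly the addr=158 in the BAD TRAP message.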
Let's look at sr->fid_ofile (from above)
> 0xffffff43ab7b6308 ::smbofile -v
SMB ofile information (ffffff43ab7b6308):
	FID: 1
	State: 0 (OPEN)
	SMB Node: 0
	LLF Offset: 0x0 (Invalid)
	Flags: 0x00000000
	Credential: ffffff4354b7f2a8

> 0xffffff43ab7b6308 ::print smb_ofile_t
{
    f_magic = 0x4f464c45
    f_mutex = { _opaque = [ 0 ] }
    f_lnd = { list_next = 0xffffff44130102a0 list_prev = 0xffffff44130102a0 }
    f_nnd = { list_next = 0 list_prev = 0 }
    f_state = 0 (SMB_OFILE_STATE_OPEN)
    f_server = 0xffffff4313070800
    f_session = 0xffffff442aa5c920
    f_user = 0xffffff447449ee00
    f_tree = 0xffffff4413010240
    f_node = 0
    f_pipe = 0xffffff43d520d390
    f_uniqid = 0x35a62
    f_refcnt = 0x1
    f_seek_pos = 0x44
    f_flags = 0
    f_granted_access = 0x2019f
    f_share_access = 0x3
    f_create_options = 0
    f_fid = 0x1
    f_opened_by_pid = 0x5e8
    f_ftype = 0x2
    f_llf_pos = 0
    f_mode = 0
    f_cr = 0xffffff4354b7f2a8
    f_pid = 0
    f_pending_attr = { sa_mask = 0 sa_vattr = { va_mask = 0 va_type = 0 (VNON) va_mode = 0 va_uid = 0 va_gid = 0 va_fsid = 0 va_nodeid = 0 va_nlink = 0 va_size = 0 va_atime = { tv_sec = 0 tv_nsec = 0 } va_mtime = { tv_sec = 0 tv_nsec = 0 } va_ctime = { tv_sec = 0 tv_nsec = 0 } va_rdev = 0 va_blksize = 0 va_nblocks = 0 va_seq = 0 } sa_dosattr = 0 sa_crtime = { tv_sec = 0 tv_nsec = 0 } sa_allocsz = 0 }
    f_written = 0 (0)
    f_quota_resume = [ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', ... ]
    f_oplock_grant = { og_magic = 0 og_lnd = { list_next = 0 list_prev = 0 } og_level = 0 og_fid = 0 og_tid = 0 og_uid = 0 og_session = 0 og_ofile = 0 }
}
>
Yes, ofile->f_node is NULL. Why?
It's a PIPE!!! See ofile->f_ftype == 2.
That's: SMB_FTYPE_MESG_PIPE
We can confirm by looking at the tree type:
> 0xffffff4413010240 ::smbtree -v
SMB tree information (ffffff4413010240):
	TID: 0001
	State: 0 (CONNECTED)
	Share: IPC$
	Resource: IPC$
	Type:
	Volume:
	Umask: 0000
	Flags: 00000000
	SMB Node: 0
	Reference Count: 1
So this bastard client is sending us an unsolicited
oplock acknowledgement on a named pipe!
There are never oplocks on named pipes!!!
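The fix belongs in smb_com_locking_andx(): refuse the oplock/lock processing when the ofile has no backing file-system node. Just to illustrate the idea, something along these lines would do (a sketch only, not the committed diff; the exact status/error codes chosen here are assumptions):

	/*
	 * Hypothetical guard near the top of smb_com_locking_andx() --
	 * illustration only, not the actual change in commit 9c787c76.
	 */
	if (sr->fid_ofile->f_node == NULL ||
	    sr->fid_ofile->f_ftype != SMB_FTYPE_DISK) {
		/* Oplocks and byte-range locks only make sense on disk files. */
		smbsr_error(sr, NT_STATUS_INVALID_HANDLE, ERRDOS, ERRbadfid);
		return (SDRC_ERROR);
	}

That turns this malformed client request into an SMB error response instead of a kernel panic.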
Updated by Electric Monk over 7 years ago
- Status changed from New to Closed
- % Done changed from 0 to 100
git commit 9c787c76803e57736d13413909945366ce3a9448
commit 9c787c76803e57736d13413909945366ce3a9448
Author: Gordon Ross <gwr@nexenta.com>
Date:   2014-11-29T20:44:20.000Z

    5320 panic in smb_com_locking_andx
    Reviewed by: Alek Pinchuk <pinchuk.alek@gmail.com>
    Reviewed by: Bayard Bell <bayard.bell@nexenta.com>
    Approved by: Robert Mustacchi <rm@joyent.com>