// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "nacl_io/kernel_object.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sys/stat.h>

#include <algorithm>
#include <functional>
#include <map>
#include <string>
#include <vector>

#include "nacl_io/kernel_handle.h"
#include "nacl_io/mount.h"
#include "nacl_io/mount_node.h"

#include "sdk_util/auto_lock.h"
#include "sdk_util/ref_object.h"
#include "sdk_util/scoped_ref.h"

namespace nacl_io {

KernelObject::KernelObject() {
  cwd_ = "/";
}

KernelObject::~KernelObject() {}

Error KernelObject::AttachMountAtPath(const ScopedMount& mnt,
                                      const std::string& path) {
  std::string abs_path = GetAbsParts(path).Join();

  AUTO_LOCK(mount_lock_);
  if (mounts_.find(abs_path) != mounts_.end())
    return EBUSY;

  mounts_[abs_path] = mnt;
  return 0;
}

Error KernelObject::DetachMountAtPath(const std::string& path) {
  std::string abs_path = GetAbsParts(path).Join();

  AUTO_LOCK(mount_lock_);
  MountMap_t::iterator it = mounts_.find(abs_path);
  if (mounts_.end() == it)
    return EINVAL;

  // It is only legal to unmount if there are no open references.
  if (it->second->RefCount() != 1)
    return EBUSY;

  mounts_.erase(it);
  return 0;
}

// Uses the longest matching prefix to find the mount for the given path,
// then acquires the mount and returns it along with the path relative to
// the mount point.
Error KernelObject::AcquireMountAndRelPath(const std::string& path,
                                           ScopedMount* out_mount,
                                           Path* rel_parts) {
  Path abs_parts = GetAbsParts(path);

  out_mount->reset(NULL);
  *rel_parts = Path();

  AUTO_LOCK(mount_lock_);

  // Find the longest prefix.
  size_t max = abs_parts.Size();
  for (size_t len = 0; len < abs_parts.Size(); len++) {
    MountMap_t::iterator it = mounts_.find(abs_parts.Range(0, max - len));
    if (it != mounts_.end()) {
      rel_parts->Set("/");
      rel_parts->Append(abs_parts.Range(max - len, max));

      *out_mount = it->second;
      return 0;
    }
  }

  return ENOTDIR;
}
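// Illustrative example of the lookup above (comment only, not part of the
// build): assuming mounts are attached at "/" and "/http",
// AcquireMountAndRelPath("/http/a/b.txt") probes prefixes from longest to
// shortest:
//
//   "/http/a/b.txt"  -> no mount attached
//   "/http/a"        -> no mount attached
//   "/http"          -> match: *out_mount is the "/http" mount and
//                       *rel_parts becomes "/a/b.txt"
//
// Since "/" itself is one of the probed prefixes, the lookup only fails
// with ENOTDIR when no root mount has been attached.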
// Given a path, acquire the associated mount and node, creating the
// node if needed based on the provided flags.
Error KernelObject::AcquireMountAndNode(const std::string& path,
                                        int oflags,
                                        ScopedMount* out_mount,
                                        ScopedMountNode* out_node) {
  Path rel_parts;
  out_mount->reset(NULL);
  out_node->reset(NULL);
  Error error = AcquireMountAndRelPath(path, out_mount, &rel_parts);
  if (error)
    return error;

  error = (*out_mount)->Open(rel_parts, oflags, out_node);
  if (error)
    return error;

  return 0;
}

Path KernelObject::GetAbsParts(const std::string& path) {
  AUTO_LOCK(cwd_lock_);

  Path abs_parts;
  if (!path.empty() && path[0] == '/') {
    // An absolute path replaces the CWD entirely.
    abs_parts = path;
  } else {
    // A relative path is resolved against the CWD.
    abs_parts = cwd_;
    abs_parts.Append(path);
  }

  return abs_parts;
}

std::string KernelObject::GetCWD() {
  AUTO_LOCK(cwd_lock_);
  std::string out = cwd_;

  return out;
}

Error KernelObject::SetCWD(const std::string& path) {
  std::string abs_path = GetAbsParts(path).Join();

  ScopedMount mnt;
  ScopedMountNode node;

  Error error = AcquireMountAndNode(abs_path, O_RDONLY, &mnt, &node);
  if (error)
    return error;

  if ((node->GetType() & S_IFDIR) == 0)
    return ENOTDIR;

  AUTO_LOCK(cwd_lock_);
  cwd_ = abs_path;
  return 0;
}

Error KernelObject::GetFDFlags(int fd, int* out_flags) {
  AUTO_LOCK(handle_lock_);
  if (fd < 0 || fd >= static_cast<int>(handle_map_.size()))
    return EBADF;

  *out_flags = handle_map_[fd].flags;
  return 0;
}

Error KernelObject::SetFDFlags(int fd, int flags) {
  AUTO_LOCK(handle_lock_);
  if (fd < 0 || fd >= static_cast<int>(handle_map_.size()))
    return EBADF;

  // Only setting of FD_CLOEXEC is supported.
  if (flags & ~FD_CLOEXEC)
    return EINVAL;

  handle_map_[fd].flags = flags;
  return 0;
}

Error KernelObject::AcquireHandle(int fd, ScopedKernelHandle* out_handle) {
  out_handle->reset(NULL);

  AUTO_LOCK(handle_lock_);
  if (fd < 0 || fd >= static_cast<int>(handle_map_.size()))
    return EBADF;

  *out_handle = handle_map_[fd].handle;

  // The stored handle is NULL if this FD was previously freed; report
  // such a descriptor as closed rather than handing back a NULL handle.
  if (NULL == *out_handle)
    return EBADF;

  return 0;
}

int KernelObject::AllocateFD(const ScopedKernelHandle& handle) {
  AUTO_LOCK(handle_lock_);
  int id;

  Descriptor_t descriptor(handle);

  // If we can recycle an FD, use that first.
  if (!free_fds_.empty()) {
    id = free_fds_.front();
    // Force lower numbered FDs to be available first.
    std::pop_heap(free_fds_.begin(), free_fds_.end(), std::greater<int>());
    free_fds_.pop_back();
    handle_map_[id] = descriptor;
  } else {
    id = handle_map_.size();
    handle_map_.push_back(descriptor);
  }
  return id;
}

void KernelObject::FreeAndReassignFD(int fd, const ScopedKernelHandle& handle) {
  if (NULL == handle) {
    FreeFD(fd);
  } else {
    AUTO_LOCK(handle_lock_);

    // If the required FD is larger than the current set, grow the set.
    if (fd >= static_cast<int>(handle_map_.size()))
      handle_map_.resize(fd + 1);

    handle_map_[fd] = Descriptor_t(handle);
  }
}

void KernelObject::FreeFD(int fd) {
  AUTO_LOCK(handle_lock_);

  // Freeing an FD that was never allocated is a programming error; the
  // index must be in range before handle_map_ is dereferenced.
  assert(fd >= 0 && fd < static_cast<int>(handle_map_.size()));

  handle_map_[fd].handle.reset(NULL);
  free_fds_.push_back(fd);

  // Force lower numbered FDs to be available first.
  std::push_heap(free_fds_.begin(), free_fds_.end(), std::greater<int>());
}

}  // namespace nacl_io
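// Illustrative sketch of the FD recycling behavior above (comment only,
// not part of the build): free_fds_ is maintained as a min-heap via
// std::greater<int>, so AllocateFD() always recycles the lowest-numbered
// free descriptor first, mirroring the POSIX rule that new descriptors
// use the lowest available number. For example, assuming FDs 0-5 are
// already allocated:
//
//   FreeFD(4);                    // free_fds_ == {4}
//   FreeFD(2);                    // free_fds_ == {2, 4} (min-heap)
//   int a = AllocateFD(handle1);  // a == 2, recycled from the heap
//   int b = AllocateFD(handle2);  // b == 4, recycled from the heap
//   int c = AllocateFD(handle3);  // c == 6, appended to handle_map_
//
// handle1 through handle3 are hypothetical ScopedKernelHandle values.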