Message ID | 1602907457-13680-5-git-send-email-hemantk@codeaurora.org (mailing list archive)
---|---
State | Superseded
Series | userspace MHI client interface driver
Hi Hemant, I love your patch! Perhaps something to improve: [auto build test WARNING on char-misc/char-misc-testing] [also build test WARNING on staging/staging-testing linus/master next-20201016] [cannot apply to linux/master v5.9] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch] url: https://github.com/0day-ci/linux/commits/Hemant-Kumar/userspace-MHI-client-interface-driver/20201017-140145 base: https://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git f3277cbfba763cd2826396521b9296de67cf1bbc config: m68k-allmodconfig (attached as .config) compiler: m68k-linux-gcc (GCC) 9.3.0 reproduce (this is a W=1 build): wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross chmod +x ~/bin/make.cross # https://github.com/0day-ci/linux/commit/6f44d9c0efd29cbd60a4c26843462a573050a520 git remote add linux-review https://github.com/0day-ci/linux git fetch --no-tags linux-review Hemant-Kumar/userspace-MHI-client-interface-driver/20201017-140145 git checkout 6f44d9c0efd29cbd60a4c26843462a573050a520 # save the attached .config to linux build tree COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=m68k If you fix the issue, kindly add following tag as appropriate Reported-by: kernel test robot <lkp@intel.com> All warnings (new ones prefixed by >>): In file included from include/linux/kernel.h:11, from drivers/bus/mhi/uci.c:4: include/linux/scatterlist.h: In function 'sg_set_buf': arch/m68k/include/asm/page_mm.h:169:49: warning: ordered comparison of pointer with null pointer [-Wextra] 169 | #define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory) | ^~ include/linux/compiler.h:78:42: note: in definition of macro 'unlikely' 78 | # define unlikely(x) __builtin_expect(!!(x), 0) | ^ include/linux/scatterlist.h:143:2: note: in expansion of macro 'BUG_ON' 143 | BUG_ON(!virt_addr_valid(buf)); | ^~~~~~ include/linux/scatterlist.h:143:10: note: in expansion of macro 'virt_addr_valid' 143 | BUG_ON(!virt_addr_valid(buf)); | ^~~~~~~~~~~~~~~ In file included from include/linux/printk.h:405, from include/linux/kernel.h:15, from drivers/bus/mhi/uci.c:4: drivers/bus/mhi/uci.c: In function 'mhi_queue_inbound': >> drivers/bus/mhi/uci.c:147:16: warning: format '%ld' expects argument of type 'long int', but argument 6 has type 'size_t' {aka 'unsigned int'} [-Wformat=] 147 | dev_dbg(dev, "Allocated buf %d of %d size %ld\n", i, nr_trbs, | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/dynamic_debug.h:129:15: note: in definition of macro '__dynamic_func_call' 129 | func(&id, ##__VA_ARGS__); \ | ^~~~~~~~~~~ include/linux/dynamic_debug.h:161:2: note: in expansion of macro '_dynamic_func_call' 161 | _dynamic_func_call(fmt,__dynamic_dev_dbg, \ | ^~~~~~~~~~~~~~~~~~ include/linux/dev_printk.h:115:2: note: in expansion of macro 'dynamic_dev_dbg' 115 | dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) | ^~~~~~~~~~~~~~~ include/linux/dev_printk.h:115:23: note: in expansion of macro 'dev_fmt' 115 | dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) | ^~~~~~~ drivers/bus/mhi/uci.c:147:3: note: in expansion of macro 'dev_dbg' 147 | dev_dbg(dev, "Allocated buf %d of %d size %ld\n", i, nr_trbs, | ^~~~~~~ drivers/bus/mhi/uci.c:147:47: note: format string is defined here 147 | dev_dbg(dev, "Allocated buf %d of %d size %ld\n", i, nr_trbs, | ~~^ | | | long int | %d In file included from 
include/linux/printk.h:405, from include/linux/kernel.h:15, from drivers/bus/mhi/uci.c:4: drivers/bus/mhi/uci.c: In function 'mhi_uci_write': >> drivers/bus/mhi/uci.c:308:15: warning: format '%lu' expects argument of type 'long unsigned int', but argument 5 has type 'size_t' {aka 'unsigned int'} [-Wformat=] 308 | dev_dbg(dev, "%s: to xfer: %lu bytes\n", __func__, count); | ^~~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/dynamic_debug.h:129:15: note: in definition of macro '__dynamic_func_call' 129 | func(&id, ##__VA_ARGS__); \ | ^~~~~~~~~~~ include/linux/dynamic_debug.h:161:2: note: in expansion of macro '_dynamic_func_call' 161 | _dynamic_func_call(fmt,__dynamic_dev_dbg, \ | ^~~~~~~~~~~~~~~~~~ include/linux/dev_printk.h:115:2: note: in expansion of macro 'dynamic_dev_dbg' 115 | dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) | ^~~~~~~~~~~~~~~ include/linux/dev_printk.h:115:23: note: in expansion of macro 'dev_fmt' 115 | dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) | ^~~~~~~ drivers/bus/mhi/uci.c:308:2: note: in expansion of macro 'dev_dbg' 308 | dev_dbg(dev, "%s: to xfer: %lu bytes\n", __func__, count); | ^~~~~~~ drivers/bus/mhi/uci.c:308:31: note: format string is defined here 308 | dev_dbg(dev, "%s: to xfer: %lu bytes\n", __func__, count); | ~~^ | | | long unsigned int | %u In file included from include/linux/printk.h:405, from include/linux/kernel.h:15, from drivers/bus/mhi/uci.c:4: drivers/bus/mhi/uci.c:366:15: warning: format '%lu' expects argument of type 'long unsigned int', but argument 5 has type 'size_t' {aka 'unsigned int'} [-Wformat=] 366 | dev_dbg(dev, "%s: bytes xferred: %lu\n", __func__, bytes_xfered); | ^~~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/dynamic_debug.h:129:15: note: in definition of macro '__dynamic_func_call' 129 | func(&id, ##__VA_ARGS__); \ | ^~~~~~~~~~~ include/linux/dynamic_debug.h:161:2: note: in expansion of macro '_dynamic_func_call' 161 | _dynamic_func_call(fmt,__dynamic_dev_dbg, \ | ^~~~~~~~~~~~~~~~~~ include/linux/dev_printk.h:115:2: note: in expansion of macro 'dynamic_dev_dbg' 115 | dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) | ^~~~~~~~~~~~~~~ include/linux/dev_printk.h:115:23: note: in expansion of macro 'dev_fmt' 115 | dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) | ^~~~~~~ drivers/bus/mhi/uci.c:366:2: note: in expansion of macro 'dev_dbg' 366 | dev_dbg(dev, "%s: bytes xferred: %lu\n", __func__, bytes_xfered); | ^~~~~~~ drivers/bus/mhi/uci.c:366:37: note: format string is defined here 366 | dev_dbg(dev, "%s: bytes xferred: %lu\n", __func__, bytes_xfered); | ~~^ | | | long unsigned int | %u In file included from include/linux/printk.h:405, from include/linux/kernel.h:15, from drivers/bus/mhi/uci.c:4: drivers/bus/mhi/uci.c: In function 'mhi_uci_read': drivers/bus/mhi/uci.c:447:15: warning: format '%lu' expects argument of type 'long unsigned int', but argument 4 has type 'size_t' {aka 'unsigned int'} [-Wformat=] 447 | dev_dbg(dev, "Copied %lu of %lu bytes\n", to_copy, uchan->dl_size); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/dynamic_debug.h:129:15: note: in definition of macro '__dynamic_func_call' 129 | func(&id, ##__VA_ARGS__); \ | ^~~~~~~~~~~ include/linux/dynamic_debug.h:161:2: note: in expansion of macro '_dynamic_func_call' 161 | _dynamic_func_call(fmt,__dynamic_dev_dbg, \ | ^~~~~~~~~~~~~~~~~~ include/linux/dev_printk.h:115:2: note: in expansion of macro 'dynamic_dev_dbg' 115 | dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) | ^~~~~~~~~~~~~~~ include/linux/dev_printk.h:115:23: note: in expansion of macro 'dev_fmt' 115 | 
dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) | ^~~~~~~ drivers/bus/mhi/uci.c:447:2: note: in expansion of macro 'dev_dbg' 447 | dev_dbg(dev, "Copied %lu of %lu bytes\n", to_copy, uchan->dl_size); | ^~~~~~~ drivers/bus/mhi/uci.c:447:25: note: format string is defined here 447 | dev_dbg(dev, "Copied %lu of %lu bytes\n", to_copy, uchan->dl_size); | ~~^ | | | long unsigned int | %u In file included from include/linux/printk.h:405, from include/linux/kernel.h:15, from drivers/bus/mhi/uci.c:4: drivers/bus/mhi/uci.c:447:15: warning: format '%lu' expects argument of type 'long unsigned int', but argument 5 has type 'size_t' {aka 'unsigned int'} [-Wformat=] 447 | dev_dbg(dev, "Copied %lu of %lu bytes\n", to_copy, uchan->dl_size); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/dynamic_debug.h:129:15: note: in definition of macro '__dynamic_func_call' 129 | func(&id, ##__VA_ARGS__); \ | ^~~~~~~~~~~ include/linux/dynamic_debug.h:161:2: note: in expansion of macro '_dynamic_func_call' 161 | _dynamic_func_call(fmt,__dynamic_dev_dbg, \ | ^~~~~~~~~~~~~~~~~~ include/linux/dev_printk.h:115:2: note: in expansion of macro 'dynamic_dev_dbg' 115 | dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) | ^~~~~~~~~~~~~~~ include/linux/dev_printk.h:115:23: note: in expansion of macro 'dev_fmt' 115 | dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) | ^~~~~~~ drivers/bus/mhi/uci.c:447:2: note: in expansion of macro 'dev_dbg' 447 | dev_dbg(dev, "Copied %lu of %lu bytes\n", to_copy, uchan->dl_size); | ^~~~~~~ drivers/bus/mhi/uci.c:447:32: note: format string is defined here vim +147 drivers/bus/mhi/uci.c 120 121 static int mhi_queue_inbound(struct uci_dev *udev) 122 { 123 struct mhi_device *mhi_dev = udev->mhi_dev; 124 struct device *dev = &mhi_dev->dev; 125 int nr_trbs, i, ret = -EIO; 126 size_t dl_buf_size; 127 void *buf; 128 struct uci_buf *ubuf; 129 130 /* dont queue if dl channel is not supported */ 131 if (!udev->mhi_dev->dl_chan) 132 return 0; 133 134 nr_trbs = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); 135 136 for (i = 0; i < nr_trbs; i++) { 137 buf = kmalloc(udev->mtu, GFP_KERNEL); 138 if (!buf) 139 return -ENOMEM; 140 141 dl_buf_size = udev->mtu - sizeof(*ubuf); 142 143 /* save uci_buf info at the end of buf */ 144 ubuf = buf + dl_buf_size; 145 ubuf->data = buf; 146 > 147 dev_dbg(dev, "Allocated buf %d of %d size %ld\n", i, nr_trbs, 148 dl_buf_size); 149 150 ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, dl_buf_size, 151 MHI_EOT); 152 if (ret) { 153 kfree(buf); 154 dev_err(dev, "Failed to queue buffer %d\n", i); 155 return ret; 156 } 157 } 158 159 return ret; 160 } 161 162 static int mhi_uci_dev_start_chan(struct uci_dev *udev) 163 { 164 int ret = 0; 165 struct uci_chan *uchan; 166 167 mutex_lock(&udev->lock); 168 if (!udev->uchan || !kref_get_unless_zero(&udev->uchan->ref_count)) { 169 uchan = kzalloc(sizeof(*uchan), GFP_KERNEL); 170 if (!uchan) { 171 ret = -ENOMEM; 172 goto error_chan_start; 173 } 174 175 udev->uchan = uchan; 176 uchan->udev = udev; 177 init_waitqueue_head(&uchan->ul_wq); 178 init_waitqueue_head(&uchan->dl_wq); 179 mutex_init(&uchan->write_lock); 180 mutex_init(&uchan->read_lock); 181 spin_lock_init(&uchan->dl_lock); 182 INIT_LIST_HEAD(&uchan->pending); 183 184 ret = mhi_prepare_for_transfer(udev->mhi_dev); 185 if (ret) { 186 dev_err(&udev->mhi_dev->dev, "Error starting transfer channels\n"); 187 goto error_chan_cleanup; 188 } 189 190 ret = mhi_queue_inbound(udev); 191 if (ret) 192 goto error_chan_cleanup; 193 194 kref_init(&uchan->ref_count); 195 } 196 197 
mutex_unlock(&udev->lock); 198 return 0; 199 200 error_chan_cleanup: 201 mhi_uci_dev_chan_release(&uchan->ref_count); 202 error_chan_start: 203 mutex_unlock(&udev->lock); 204 return ret; 205 } 206 207 static void mhi_uci_dev_release(struct kref *ref) 208 { 209 struct uci_dev *udev = 210 container_of(ref, struct uci_dev, ref_count); 211 212 mutex_destroy(&udev->lock); 213 214 kfree(udev); 215 } 216 217 static int mhi_uci_open(struct inode *inode, struct file *filp) 218 { 219 unsigned int minor = iminor(inode); 220 struct uci_dev *udev = NULL; 221 int ret; 222 223 mutex_lock(&uci_drv_mutex); 224 udev = idr_find(&uci_idr, minor); 225 if (!udev) { 226 pr_debug("uci dev: minor %d not found\n", minor); 227 mutex_unlock(&uci_drv_mutex); 228 return -ENODEV; 229 } 230 231 kref_get(&udev->ref_count); 232 mutex_unlock(&uci_drv_mutex); 233 234 ret = mhi_uci_dev_start_chan(udev); 235 if (ret) { 236 kref_put(&udev->ref_count, mhi_uci_dev_release); 237 return ret; 238 } 239 240 filp->private_data = udev; 241 242 return 0; 243 } 244 245 static int mhi_uci_release(struct inode *inode, struct file *file) 246 { 247 struct uci_dev *udev = file->private_data; 248 249 mutex_lock(&udev->lock); 250 kref_put(&udev->uchan->ref_count, mhi_uci_dev_chan_release); 251 mutex_unlock(&udev->lock); 252 253 kref_put(&udev->ref_count, mhi_uci_dev_release); 254 255 return 0; 256 } 257 258 static __poll_t mhi_uci_poll(struct file *file, poll_table *wait) 259 { 260 struct uci_dev *udev = file->private_data; 261 struct mhi_device *mhi_dev = udev->mhi_dev; 262 struct device *dev = &mhi_dev->dev; 263 struct uci_chan *uchan = udev->uchan; 264 __poll_t mask = 0; 265 266 poll_wait(file, &udev->uchan->ul_wq, wait); 267 poll_wait(file, &udev->uchan->dl_wq, wait); 268 269 if (!udev->enabled) { 270 mask = EPOLLERR; 271 goto done; 272 } 273 274 spin_lock_bh(&uchan->dl_lock); 275 if (!list_empty(&uchan->pending) || uchan->cur_buf) { 276 dev_dbg(dev, "Client can read from node\n"); 277 mask |= EPOLLIN | EPOLLRDNORM; 278 } 279 spin_unlock_bh(&uchan->dl_lock); 280 281 if (mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) > 0) { 282 dev_dbg(dev, "Client can write to node\n"); 283 mask |= EPOLLOUT | EPOLLWRNORM; 284 } 285 286 dev_dbg(dev, "Client attempted to poll, returning mask 0x%x\n", mask); 287 288 done: 289 return mask; 290 } 291 292 static ssize_t mhi_uci_write(struct file *file, 293 const char __user *buf, 294 size_t count, 295 loff_t *offp) 296 { 297 struct uci_dev *udev = file->private_data; 298 struct mhi_device *mhi_dev = udev->mhi_dev; 299 struct device *dev = &mhi_dev->dev; 300 struct uci_chan *uchan = udev->uchan; 301 size_t bytes_xfered = 0; 302 int ret, nr_avail = 0; 303 304 /* if ul channel is not supported return error */ 305 if (!buf || !count || !mhi_dev->ul_chan) 306 return -EINVAL; 307 > 308 dev_dbg(dev, "%s: to xfer: %lu bytes\n", __func__, count); 309 310 mutex_lock(&uchan->write_lock); 311 while (count) { 312 size_t xfer_size; 313 void *kbuf; 314 enum mhi_flags flags; 315 316 /* wait for free descriptors */ 317 ret = wait_event_interruptible(uchan->ul_wq, 318 (!udev->enabled) || 319 (nr_avail = mhi_get_free_desc_count(mhi_dev, 320 DMA_TO_DEVICE)) > 0); 321 322 if (ret == -ERESTARTSYS) { 323 dev_dbg(dev, "Interrupted by a signal in %s, exiting\n", 324 __func__); 325 goto err_mtx_unlock; 326 } 327 328 if (!udev->enabled) { 329 ret = -ENODEV; 330 goto err_mtx_unlock; 331 } 332 333 xfer_size = min_t(size_t, count, udev->mtu); 334 kbuf = kmalloc(xfer_size, GFP_KERNEL); 335 if (!kbuf) { 336 ret = -ENOMEM; 337 goto 
err_mtx_unlock; 338 } 339 340 ret = copy_from_user(kbuf, buf, xfer_size); 341 if (ret) { 342 kfree(kbuf); 343 ret = -EFAULT; 344 goto err_mtx_unlock; 345 } 346 347 /* if ring is full after this force EOT */ 348 if (nr_avail > 1 && (count - xfer_size)) 349 flags = MHI_CHAIN; 350 else 351 flags = MHI_EOT; 352 353 ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, kbuf, xfer_size, 354 flags); 355 if (ret) { 356 kfree(kbuf); 357 goto err_mtx_unlock; 358 } 359 360 bytes_xfered += xfer_size; 361 count -= xfer_size; 362 buf += xfer_size; 363 } 364 365 mutex_unlock(&uchan->write_lock); 366 dev_dbg(dev, "%s: bytes xferred: %lu\n", __func__, bytes_xfered); 367 368 return bytes_xfered; 369 370 err_mtx_unlock: 371 mutex_unlock(&uchan->write_lock); 372 373 return ret; 374 } 375 --- 0-DAY CI Kernel Test Service, Intel Corporation https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
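All of the warnings above come from printing size_t values with %ld or %lu; on a 32-bit build such as this m68k config, size_t is unsigned int, so the portable fix is the dedicated %zu specifier rather than casting. A minimal sketch of the corrected dev_dbg() calls flagged by the robot (context otherwise unchanged; the uci.c line numbers refer to the excerpt above):

	/* mhi_queue_inbound(), uci.c:147 -- print size_t with %zu */
	dev_dbg(dev, "Allocated buf %d of %d size %zu\n", i, nr_trbs,
		dl_buf_size);

	/* mhi_uci_write(), uci.c:308 and uci.c:366 */
	dev_dbg(dev, "%s: to xfer: %zu bytes\n", __func__, count);
	dev_dbg(dev, "%s: bytes xferred: %zu\n", __func__, bytes_xfered);

	/* mhi_uci_read(), uci.c:447 */
	dev_dbg(dev, "Copied %zu of %zu bytes\n", to_copy, uchan->dl_size);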
On Fri, Oct 16, 2020 at 09:04:17PM -0700, Hemant Kumar wrote: > This MHI client driver allows userspace clients to transfer > raw data between MHI device and host using standard file operations. > Driver instantiates uci device object which is associated to device > file node. uci device object instantiates uci channel object when device > file node is opened. uci channel object is used to manage MHI channels > by calling MHI core APIs for read and write operations. MHI channels > are started as part of device open(). MHI channels remain in start > state until last release() is called on uci device file node. Device > file node is created with format > > /dev/mhi_<controller_name>_<mhi_device_name> > > Currently it supports LOOPBACK channel. > > Signed-off-by: Hemant Kumar <hemantk@codeaurora.org> > --- > drivers/bus/mhi/Kconfig | 13 + > drivers/bus/mhi/Makefile | 4 + > drivers/bus/mhi/uci.c | 656 +++++++++++++++++++++++++++++++++++++++++++++++ > 3 files changed, 673 insertions(+) > create mode 100644 drivers/bus/mhi/uci.c > > diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig > index e841c10..3891b31 100644 > --- a/drivers/bus/mhi/Kconfig > +++ b/drivers/bus/mhi/Kconfig > @@ -20,3 +20,16 @@ config MHI_BUS_DEBUG > Enable debugfs support for use with the MHI transport. Allows > reading and/or modifying some values within the MHI controller > for debug and test purposes. > + > +config MHI_UCI > + tristate "MHI UCI" > + depends on MHI_BUS > + help > + MHI based userspace client interface driver is used for transferring Userspace Client Interface (UCI) And please use the caps form UCI in comments throughout the driver. > + raw data between host and device using standard file operations from > + userspace. Open, read, write, and close operations are supported > + by this driver. Please check mhi_uci_match_table for all supported > + channels that are exposed to userspace. > + > + To compile this driver as a module, choose M here: the module will be > + called mhi_uci. > diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile > index 19e6443..80feefb 100644 > --- a/drivers/bus/mhi/Makefile > +++ b/drivers/bus/mhi/Makefile > @@ -1,2 +1,6 @@ > # core layer > obj-y += core/ > + > +# MHI client > +mhi_uci-y := uci.o > +obj-$(CONFIG_MHI_UCI) += mhi_uci.o > diff --git a/drivers/bus/mhi/uci.c b/drivers/bus/mhi/uci.c > new file mode 100644 > index 0000000..8334836 > --- /dev/null > +++ b/drivers/bus/mhi/uci.c > @@ -0,0 +1,656 @@ > +// SPDX-License-Identifier: GPL-2.0-only > +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.*/ > + > +#include <linux/kernel.h> > +#include <linux/mhi.h> > +#include <linux/mod_devicetable.h> > +#include <linux/module.h> > +#include <linux/poll.h> > + > +#define DEVICE_NAME "mhi" > +#define MHI_UCI_DRIVER_NAME "mhi_uci" > +#define MAX_UCI_MINORS (128) No need of (). 
> + > +static DEFINE_IDR(uci_idr); > +static DEFINE_MUTEX(uci_drv_mutex); > +static struct class *uci_dev_class; > +static int uci_dev_major; > + > +/** > + * struct uci_chan - MHI channel for a uci device > + * @udev: associated uci device object > + * @ul_wq: wait queue for writer > + * @write_lock: mutex write lock for ul channel > + * @dl_wq: wait queue for reader > + * @read_lock: mutex read lock for dl channel > + * @dl_lock: spin lock > + * @pending: list of dl buffers userspace is waiting to read > + * @cur_buf: current buffer userspace is reading > + * @dl_size: size of the current dl buffer userspace is reading > + * @ref_count: uci_chan reference count > + */ > +struct uci_chan { > + struct uci_dev *udev; > + wait_queue_head_t ul_wq; > + > + /* ul channel lock to synchronize multiple writes */ Please move these inline comments to Kdoc. > + struct mutex write_lock; > + > + wait_queue_head_t dl_wq; > + > + /* dl channel lock to synchronize multiple reads */ > + struct mutex read_lock; > + > + /* > + * protects pending and cur_buf members in bh context, channel release, > + * read and poll > + */ > + spinlock_t dl_lock; > + > + struct list_head pending; > + struct uci_buf *cur_buf; > + size_t dl_size; > + struct kref ref_count; > +}; > + > +/** > + * struct uci_buf - uci buffer > + * @data: data buffer > + * @len: length of data buffer > + * @node: list node of the uci buffer > + */ > +struct uci_buf { > + void *data; > + size_t len; > + struct list_head node; > +}; > + > +/** > + * struct uci_dev - MHI uci device > + * @minor: uci device node minor number > + * @mhi_dev: associated mhi device object > + * @uchan: uci uplink and downlink channel object > + * @mtu: max TRE buffer length > + * @enabled: uci device probed Use something like, "Flag to track the state of the UCI device". > + * @lock: mutex lock to manage uchan object > + * @ref_count: uci_dev reference count > + */ > +struct uci_dev { > + unsigned int minor; > + struct mhi_device *mhi_dev; > + struct uci_chan *uchan; > + size_t mtu; > + bool enabled; > + > + /* synchronize open, release and driver remove */ > + struct mutex lock; > + struct kref ref_count; > +}; > + [...] > + > +static int mhi_uci_dev_start_chan(struct uci_dev *udev) > +{ > + int ret = 0; > + struct uci_chan *uchan; > + > + mutex_lock(&udev->lock); > + if (!udev->uchan || !kref_get_unless_zero(&udev->uchan->ref_count)) { > + uchan = kzalloc(sizeof(*uchan), GFP_KERNEL); > + if (!uchan) { > + ret = -ENOMEM; > + goto error_chan_start; > + } > + > + udev->uchan = uchan; > + uchan->udev = udev; > + init_waitqueue_head(&uchan->ul_wq); > + init_waitqueue_head(&uchan->dl_wq); > + mutex_init(&uchan->write_lock); > + mutex_init(&uchan->read_lock); > + spin_lock_init(&uchan->dl_lock); > + INIT_LIST_HEAD(&uchan->pending); > + > + ret = mhi_prepare_for_transfer(udev->mhi_dev); > + if (ret) { > + dev_err(&udev->mhi_dev->dev, "Error starting transfer channels\n"); > + goto error_chan_cleanup; > + } > + > + ret = mhi_queue_inbound(udev); > + if (ret) > + goto error_chan_cleanup; > + > + kref_init(&uchan->ref_count); > + } > + > + mutex_unlock(&udev->lock); Please leave a new line before return. > + return 0; > + > +error_chan_cleanup: > + mhi_uci_dev_chan_release(&uchan->ref_count); > +error_chan_start: > + mutex_unlock(&udev->lock); > + return ret; > +} > + [...] 
> + > +static int mhi_uci_probe(struct mhi_device *mhi_dev, > + const struct mhi_device_id *id) > +{ > + struct uci_dev *udev; > + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; > + struct device *dev; > + int index; > + > + udev = kzalloc(sizeof(*udev), GFP_KERNEL); > + if (!udev) > + return -ENOMEM; > + > + kref_init(&udev->ref_count); > + mutex_init(&udev->lock); > + udev->mhi_dev = mhi_dev; > + > + mutex_lock(&uci_drv_mutex); Do we really need the lock here? > + index = idr_alloc(&uci_idr, udev, 0, MAX_UCI_MINORS, GFP_KERNEL); > + mutex_unlock(&uci_drv_mutex); > + if (index < 0) { > + kfree(udev); > + return index; > + } > + > + udev->minor = index; > + > + udev->mtu = min_t(size_t, id->driver_data, MHI_MAX_MTU); > + dev_set_drvdata(&mhi_dev->dev, udev); > + udev->enabled = true; > + > + /* create device file node /dev/mhi_<cntrl_dev_name>_<mhi_dev_name> */ > + dev = device_create(uci_dev_class, &mhi_dev->dev, > + MKDEV(uci_dev_major, index), udev, > + DEVICE_NAME "_%s_%s", > + dev_name(mhi_cntrl->cntrl_dev), mhi_dev->name); > + if (IS_ERR(dev)) { > + mutex_lock(&uci_drv_mutex); > + idr_remove(&uci_idr, udev->minor); > + mutex_unlock(&uci_drv_mutex); > + dev_set_drvdata(&mhi_dev->dev, NULL); > + kfree(udev); > + return PTR_ERR(dev); > + } > + > + dev_dbg(&mhi_dev->dev, "probed uci dev: minor %d\n", index); > + > + return 0; > +}; > + > +static void mhi_uci_remove(struct mhi_device *mhi_dev) > +{ > + struct uci_dev *udev = dev_get_drvdata(&mhi_dev->dev); > + > + /* disable the node */ > + mutex_lock(&udev->lock); > + udev->enabled = false; > + > + /* delete the node to prevent new opens */ > + device_destroy(uci_dev_class, MKDEV(uci_dev_major, udev->minor)); > + > + /* return error for any blocked read or write */ > + if (udev->uchan) { > + wake_up(&udev->uchan->ul_wq); > + wake_up(&udev->uchan->dl_wq); > + } > + mutex_unlock(&udev->lock); > + > + mutex_lock(&uci_drv_mutex); > + idr_remove(&uci_idr, udev->minor); > + kref_put(&udev->ref_count, mhi_uci_dev_release); > + mutex_unlock(&uci_drv_mutex); > +} > + > +/* .driver_data stores max mtu */ > +static const struct mhi_device_id mhi_uci_match_table[] = { > + { .chan = "LOOPBACK", .driver_data = 0x1000}, > + {}, > +}; > +MODULE_DEVICE_TABLE(mhi, mhi_uci_match_table); > + > +static struct mhi_driver mhi_uci_driver = { > + .id_table = mhi_uci_match_table, > + .remove = mhi_uci_remove, > + .probe = mhi_uci_probe, > + .ul_xfer_cb = mhi_ul_xfer_cb, > + .dl_xfer_cb = mhi_dl_xfer_cb, > + .driver = { > + .name = MHI_UCI_DRIVER_NAME, > + }, > +}; > + > +static int mhi_uci_init(void) > +{ > + int ret; > + > + ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops); > + if (ret < 0) > + return ret; > + > + uci_dev_major = ret; > + uci_dev_class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME); > + if (IS_ERR(uci_dev_class)) { > + unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME); Use an error path for cleaning this. Thanks, Mani
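Mani's last point asks for a single unwind path in mhi_uci_init() instead of open-coding unregister_chrdev() inside the class_create() error branch. One possible shape, sketched from the code in this patch rather than taken from the author's next revision:

static int mhi_uci_init(void)
{
	int ret;

	ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops);
	if (ret < 0)
		return ret;

	uci_dev_major = ret;
	uci_dev_class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME);
	if (IS_ERR(uci_dev_class)) {
		ret = PTR_ERR(uci_dev_class);
		goto err_unregister_chrdev;
	}

	ret = mhi_driver_register(&mhi_uci_driver);
	if (ret)
		goto err_destroy_class;

	return 0;

err_destroy_class:
	class_destroy(uci_dev_class);
err_unregister_chrdev:
	unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME);

	return ret;
}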
Hi Mani, On 10/21/20 9:11 AM, Manivannan Sadhasivam wrote: > On Fri, Oct 16, 2020 at 09:04:17PM -0700, Hemant Kumar wrote: >> This MHI client driver allows userspace clients to transfer >> raw data between MHI device and host using standard file operations. >> Driver instantiates uci device object which is associated to device >> file node. uci device object instantiates uci channel object when device >> file node is opened. uci channel object is used to manage MHI channels >> by calling MHI core APIs for read and write operations. MHI channels >> are started as part of device open(). MHI channels remain in start >> state until last release() is called on uci device file node. Device >> file node is created with format >> >> /dev/mhi_<controller_name>_<mhi_device_name> >> >> Currently it supports LOOPBACK channel. >> >> Signed-off-by: Hemant Kumar <hemantk@codeaurora.org> >> --- >> drivers/bus/mhi/Kconfig | 13 + >> drivers/bus/mhi/Makefile | 4 + >> drivers/bus/mhi/uci.c | 656 +++++++++++++++++++++++++++++++++++++++++++++++ >> 3 files changed, 673 insertions(+) >> create mode 100644 drivers/bus/mhi/uci.c >> >> diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig >> index e841c10..3891b31 100644 >> --- a/drivers/bus/mhi/Kconfig >> +++ b/drivers/bus/mhi/Kconfig >> @@ -20,3 +20,16 @@ config MHI_BUS_DEBUG >> Enable debugfs support for use with the MHI transport. Allows >> reading and/or modifying some values within the MHI controller >> for debug and test purposes. >> + >> +config MHI_UCI >> + tristate "MHI UCI" >> + depends on MHI_BUS >> + help >> + MHI based userspace client interface driver is used for transferring > > Userspace Client Interface (UCI) Done. > > And please use the caps form UCI in comments throughout the driver. Done. In commit text : "uci device object", "uci channel object" and "uci device file node" shall we change these as well ? > >> + raw data between host and device using standard file operations from >> + userspace. Open, read, write, and close operations are supported >> + by this driver. Please check mhi_uci_match_table for all supported >> + channels that are exposed to userspace. >> + >> + To compile this driver as a module, choose M here: the module will be >> + called mhi_uci. >> diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile >> index 19e6443..80feefb 100644 >> --- a/drivers/bus/mhi/Makefile >> +++ b/drivers/bus/mhi/Makefile >> @@ -1,2 +1,6 @@ >> # core layer >> obj-y += core/ >> + >> +# MHI client >> +mhi_uci-y := uci.o >> +obj-$(CONFIG_MHI_UCI) += mhi_uci.o >> diff --git a/drivers/bus/mhi/uci.c b/drivers/bus/mhi/uci.c >> new file mode 100644 >> index 0000000..8334836 >> --- /dev/null >> +++ b/drivers/bus/mhi/uci.c >> @@ -0,0 +1,656 @@ >> +// SPDX-License-Identifier: GPL-2.0-only >> +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.*/ >> + >> +#include <linux/kernel.h> >> +#include <linux/mhi.h> >> +#include <linux/mod_devicetable.h> >> +#include <linux/module.h> >> +#include <linux/poll.h> >> + >> +#define DEVICE_NAME "mhi" >> +#define MHI_UCI_DRIVER_NAME "mhi_uci" >> +#define MAX_UCI_MINORS (128) > > No need of (). Done. 
> >> + >> +static DEFINE_IDR(uci_idr); >> +static DEFINE_MUTEX(uci_drv_mutex); >> +static struct class *uci_dev_class; >> +static int uci_dev_major; >> + >> +/** >> + * struct uci_chan - MHI channel for a uci device >> + * @udev: associated uci device object >> + * @ul_wq: wait queue for writer >> + * @write_lock: mutex write lock for ul channel >> + * @dl_wq: wait queue for reader >> + * @read_lock: mutex read lock for dl channel >> + * @dl_lock: spin lock >> + * @pending: list of dl buffers userspace is waiting to read >> + * @cur_buf: current buffer userspace is reading >> + * @dl_size: size of the current dl buffer userspace is reading >> + * @ref_count: uci_chan reference count >> + */ >> +struct uci_chan { >> + struct uci_dev *udev; >> + wait_queue_head_t ul_wq; >> + >> + /* ul channel lock to synchronize multiple writes */ > > Please move these inline comments to Kdoc. This was added because checkpatch --strict required to add a comment when lock is added to struct, after adding inline comment, checkpatch error was gone. > >> + struct mutex write_lock; >> + >> + wait_queue_head_t dl_wq; >> + >> + /* dl channel lock to synchronize multiple reads */ >> + struct mutex read_lock; >> + >> + /* >> + * protects pending and cur_buf members in bh context, channel release, >> + * read and poll >> + */ >> + spinlock_t dl_lock; >> + >> + struct list_head pending; >> + struct uci_buf *cur_buf; >> + size_t dl_size; >> + struct kref ref_count; >> +}; >> + >> +/** >> + * struct uci_buf - uci buffer >> + * @data: data buffer >> + * @len: length of data buffer >> + * @node: list node of the uci buffer >> + */ >> +struct uci_buf { >> + void *data; >> + size_t len; >> + struct list_head node; >> +}; >> + >> +/** >> + * struct uci_dev - MHI uci device >> + * @minor: uci device node minor number >> + * @mhi_dev: associated mhi device object >> + * @uchan: uci uplink and downlink channel object >> + * @mtu: max TRE buffer length >> + * @enabled: uci device probed > > Use something like, "Flag to track the state of the UCI device". Done > >> + * @lock: mutex lock to manage uchan object >> + * @ref_count: uci_dev reference count >> + */ >> +struct uci_dev { >> + unsigned int minor; >> + struct mhi_device *mhi_dev; >> + struct uci_chan *uchan; >> + size_t mtu; >> + bool enabled; >> + >> + /* synchronize open, release and driver remove */ >> + struct mutex lock; >> + struct kref ref_count; >> +}; >> + > > [...] > >> + >> +static int mhi_uci_dev_start_chan(struct uci_dev *udev) >> +{ >> + int ret = 0; >> + struct uci_chan *uchan; >> + >> + mutex_lock(&udev->lock); >> + if (!udev->uchan || !kref_get_unless_zero(&udev->uchan->ref_count)) { >> + uchan = kzalloc(sizeof(*uchan), GFP_KERNEL); >> + if (!uchan) { >> + ret = -ENOMEM; >> + goto error_chan_start; >> + } >> + >> + udev->uchan = uchan; >> + uchan->udev = udev; >> + init_waitqueue_head(&uchan->ul_wq); >> + init_waitqueue_head(&uchan->dl_wq); >> + mutex_init(&uchan->write_lock); >> + mutex_init(&uchan->read_lock); >> + spin_lock_init(&uchan->dl_lock); >> + INIT_LIST_HEAD(&uchan->pending); >> + >> + ret = mhi_prepare_for_transfer(udev->mhi_dev); >> + if (ret) { >> + dev_err(&udev->mhi_dev->dev, "Error starting transfer channels\n"); >> + goto error_chan_cleanup; >> + } >> + >> + ret = mhi_queue_inbound(udev); >> + if (ret) >> + goto error_chan_cleanup; >> + >> + kref_init(&uchan->ref_count); >> + } >> + >> + mutex_unlock(&udev->lock); > > Please leave a new line before return. Done. 
> >> + return 0; >> + >> +error_chan_cleanup: >> + mhi_uci_dev_chan_release(&uchan->ref_count); >> +error_chan_start: >> + mutex_unlock(&udev->lock); >> + return ret; >> +} >> + > > [...] > >> + >> +static int mhi_uci_probe(struct mhi_device *mhi_dev, >> + const struct mhi_device_id *id) >> +{ >> + struct uci_dev *udev; >> + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; >> + struct device *dev; >> + int index; >> + >> + udev = kzalloc(sizeof(*udev), GFP_KERNEL); >> + if (!udev) >> + return -ENOMEM; >> + >> + kref_init(&udev->ref_count); >> + mutex_init(&udev->lock); >> + udev->mhi_dev = mhi_dev; >> + >> + mutex_lock(&uci_drv_mutex); > > Do we really need the lock here? Added this based on the comment from idr_alloc API * The caller should provide their own locking to ensure that two * concurrent modifications to the IDR are not possible. > >> + index = idr_alloc(&uci_idr, udev, 0, MAX_UCI_MINORS, GFP_KERNEL); >> + mutex_unlock(&uci_drv_mutex); >> + if (index < 0) { >> + kfree(udev); >> + return index; >> + } >> + >> + udev->minor = index; >> + >> + udev->mtu = min_t(size_t, id->driver_data, MHI_MAX_MTU); >> + dev_set_drvdata(&mhi_dev->dev, udev); >> + udev->enabled = true; >> + >> + /* create device file node /dev/mhi_<cntrl_dev_name>_<mhi_dev_name> */ >> + dev = device_create(uci_dev_class, &mhi_dev->dev, >> + MKDEV(uci_dev_major, index), udev, >> + DEVICE_NAME "_%s_%s", >> + dev_name(mhi_cntrl->cntrl_dev), mhi_dev->name); >> + if (IS_ERR(dev)) { >> + mutex_lock(&uci_drv_mutex); >> + idr_remove(&uci_idr, udev->minor); >> + mutex_unlock(&uci_drv_mutex); >> + dev_set_drvdata(&mhi_dev->dev, NULL); >> + kfree(udev); >> + return PTR_ERR(dev); >> + } >> + >> + dev_dbg(&mhi_dev->dev, "probed uci dev: minor %d\n", index); >> + >> + return 0; >> +}; >> + >> +static void mhi_uci_remove(struct mhi_device *mhi_dev) >> +{ >> + struct uci_dev *udev = dev_get_drvdata(&mhi_dev->dev); >> + >> + /* disable the node */ >> + mutex_lock(&udev->lock); >> + udev->enabled = false; >> + >> + /* delete the node to prevent new opens */ >> + device_destroy(uci_dev_class, MKDEV(uci_dev_major, udev->minor)); >> + >> + /* return error for any blocked read or write */ >> + if (udev->uchan) { >> + wake_up(&udev->uchan->ul_wq); >> + wake_up(&udev->uchan->dl_wq); >> + } >> + mutex_unlock(&udev->lock); >> + >> + mutex_lock(&uci_drv_mutex); >> + idr_remove(&uci_idr, udev->minor); >> + kref_put(&udev->ref_count, mhi_uci_dev_release); >> + mutex_unlock(&uci_drv_mutex); >> +} >> + >> +/* .driver_data stores max mtu */ >> +static const struct mhi_device_id mhi_uci_match_table[] = { >> + { .chan = "LOOPBACK", .driver_data = 0x1000}, >> + {}, >> +}; >> +MODULE_DEVICE_TABLE(mhi, mhi_uci_match_table); >> + >> +static struct mhi_driver mhi_uci_driver = { >> + .id_table = mhi_uci_match_table, >> + .remove = mhi_uci_remove, >> + .probe = mhi_uci_probe, >> + .ul_xfer_cb = mhi_ul_xfer_cb, >> + .dl_xfer_cb = mhi_dl_xfer_cb, >> + .driver = { >> + .name = MHI_UCI_DRIVER_NAME, >> + }, >> +}; >> + >> +static int mhi_uci_init(void) >> +{ >> + int ret; >> + >> + ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops); >> + if (ret < 0) >> + return ret; >> + >> + uci_dev_major = ret; >> + uci_dev_class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME); >> + if (IS_ERR(uci_dev_class)) { >> + unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME); > > Use an error path for cleaning this. Done. > > Thanks, > Mani > Thanks, Hemant
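On the inline-comment question, the locking details can be folded into the kernel-doc header as Mani suggests; a sketch of what that block could look like is below, built only from the comments already in the patch. Whether checkpatch's "definition without comment" check stays quiet once the inline copies next to the mutex/spinlock members are dropped is something that would need to be re-verified.

/**
 * struct uci_chan - MHI channel for a UCI device
 * @udev: associated UCI device object
 * @ul_wq: wait queue for writer
 * @write_lock: mutex to serialize multiple writes on the UL channel
 * @dl_wq: wait queue for reader
 * @read_lock: mutex to serialize multiple reads on the DL channel
 * @dl_lock: spinlock protecting @pending and @cur_buf in bh context,
 *           channel release, read and poll
 * @pending: list of DL buffers userspace is waiting to read
 * @cur_buf: current buffer userspace is reading
 * @dl_size: size of the current DL buffer userspace is reading
 * @ref_count: uci_chan reference count
 */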
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig index e841c10..3891b31 100644 --- a/drivers/bus/mhi/Kconfig +++ b/drivers/bus/mhi/Kconfig @@ -20,3 +20,16 @@ config MHI_BUS_DEBUG Enable debugfs support for use with the MHI transport. Allows reading and/or modifying some values within the MHI controller for debug and test purposes. + +config MHI_UCI + tristate "MHI UCI" + depends on MHI_BUS + help + MHI based userspace client interface driver is used for transferring + raw data between host and device using standard file operations from + userspace. Open, read, write, and close operations are supported + by this driver. Please check mhi_uci_match_table for all supported + channels that are exposed to userspace. + + To compile this driver as a module, choose M here: the module will be + called mhi_uci. diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile index 19e6443..80feefb 100644 --- a/drivers/bus/mhi/Makefile +++ b/drivers/bus/mhi/Makefile @@ -1,2 +1,6 @@ # core layer obj-y += core/ + +# MHI client +mhi_uci-y := uci.o +obj-$(CONFIG_MHI_UCI) += mhi_uci.o diff --git a/drivers/bus/mhi/uci.c b/drivers/bus/mhi/uci.c new file mode 100644 index 0000000..8334836 --- /dev/null +++ b/drivers/bus/mhi/uci.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.*/ + +#include <linux/kernel.h> +#include <linux/mhi.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/poll.h> + +#define DEVICE_NAME "mhi" +#define MHI_UCI_DRIVER_NAME "mhi_uci" +#define MAX_UCI_MINORS (128) + +static DEFINE_IDR(uci_idr); +static DEFINE_MUTEX(uci_drv_mutex); +static struct class *uci_dev_class; +static int uci_dev_major; + +/** + * struct uci_chan - MHI channel for a uci device + * @udev: associated uci device object + * @ul_wq: wait queue for writer + * @write_lock: mutex write lock for ul channel + * @dl_wq: wait queue for reader + * @read_lock: mutex read lock for dl channel + * @dl_lock: spin lock + * @pending: list of dl buffers userspace is waiting to read + * @cur_buf: current buffer userspace is reading + * @dl_size: size of the current dl buffer userspace is reading + * @ref_count: uci_chan reference count + */ +struct uci_chan { + struct uci_dev *udev; + wait_queue_head_t ul_wq; + + /* ul channel lock to synchronize multiple writes */ + struct mutex write_lock; + + wait_queue_head_t dl_wq; + + /* dl channel lock to synchronize multiple reads */ + struct mutex read_lock; + + /* + * protects pending and cur_buf members in bh context, channel release, + * read and poll + */ + spinlock_t dl_lock; + + struct list_head pending; + struct uci_buf *cur_buf; + size_t dl_size; + struct kref ref_count; +}; + +/** + * struct uci_buf - uci buffer + * @data: data buffer + * @len: length of data buffer + * @node: list node of the uci buffer + */ +struct uci_buf { + void *data; + size_t len; + struct list_head node; +}; + +/** + * struct uci_dev - MHI uci device + * @minor: uci device node minor number + * @mhi_dev: associated mhi device object + * @uchan: uci uplink and downlink channel object + * @mtu: max TRE buffer length + * @enabled: uci device probed + * @lock: mutex lock to manage uchan object + * @ref_count: uci_dev reference count + */ +struct uci_dev { + unsigned int minor; + struct mhi_device *mhi_dev; + struct uci_chan *uchan; + size_t mtu; + bool enabled; + + /* synchronize open, release and driver remove */ + struct mutex lock; + struct kref ref_count; +}; + +static void 
mhi_uci_dev_chan_release(struct kref *ref) +{ + struct uci_buf *buf_itr, *tmp; + struct uci_chan *uchan = + container_of(ref, struct uci_chan, ref_count); + + if (uchan->udev->enabled) + mhi_unprepare_from_transfer(uchan->udev->mhi_dev); + + spin_lock_bh(&uchan->dl_lock); + list_for_each_entry_safe(buf_itr, tmp, &uchan->pending, node) { + list_del(&buf_itr->node); + kfree(buf_itr->data); + } + + if (uchan->cur_buf) + kfree(uchan->cur_buf->data); + spin_unlock_bh(&uchan->dl_lock); + + uchan->cur_buf = NULL; + + wake_up(&uchan->ul_wq); + wake_up(&uchan->dl_wq); + + mutex_destroy(&uchan->write_lock); + mutex_destroy(&uchan->read_lock); + + uchan->udev->uchan = NULL; + kfree(uchan); +} + +static int mhi_queue_inbound(struct uci_dev *udev) +{ + struct mhi_device *mhi_dev = udev->mhi_dev; + struct device *dev = &mhi_dev->dev; + int nr_trbs, i, ret = -EIO; + size_t dl_buf_size; + void *buf; + struct uci_buf *ubuf; + + /* dont queue if dl channel is not supported */ + if (!udev->mhi_dev->dl_chan) + return 0; + + nr_trbs = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); + + for (i = 0; i < nr_trbs; i++) { + buf = kmalloc(udev->mtu, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + dl_buf_size = udev->mtu - sizeof(*ubuf); + + /* save uci_buf info at the end of buf */ + ubuf = buf + dl_buf_size; + ubuf->data = buf; + + dev_dbg(dev, "Allocated buf %d of %d size %ld\n", i, nr_trbs, + dl_buf_size); + + ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, dl_buf_size, + MHI_EOT); + if (ret) { + kfree(buf); + dev_err(dev, "Failed to queue buffer %d\n", i); + return ret; + } + } + + return ret; +} + +static int mhi_uci_dev_start_chan(struct uci_dev *udev) +{ + int ret = 0; + struct uci_chan *uchan; + + mutex_lock(&udev->lock); + if (!udev->uchan || !kref_get_unless_zero(&udev->uchan->ref_count)) { + uchan = kzalloc(sizeof(*uchan), GFP_KERNEL); + if (!uchan) { + ret = -ENOMEM; + goto error_chan_start; + } + + udev->uchan = uchan; + uchan->udev = udev; + init_waitqueue_head(&uchan->ul_wq); + init_waitqueue_head(&uchan->dl_wq); + mutex_init(&uchan->write_lock); + mutex_init(&uchan->read_lock); + spin_lock_init(&uchan->dl_lock); + INIT_LIST_HEAD(&uchan->pending); + + ret = mhi_prepare_for_transfer(udev->mhi_dev); + if (ret) { + dev_err(&udev->mhi_dev->dev, "Error starting transfer channels\n"); + goto error_chan_cleanup; + } + + ret = mhi_queue_inbound(udev); + if (ret) + goto error_chan_cleanup; + + kref_init(&uchan->ref_count); + } + + mutex_unlock(&udev->lock); + return 0; + +error_chan_cleanup: + mhi_uci_dev_chan_release(&uchan->ref_count); +error_chan_start: + mutex_unlock(&udev->lock); + return ret; +} + +static void mhi_uci_dev_release(struct kref *ref) +{ + struct uci_dev *udev = + container_of(ref, struct uci_dev, ref_count); + + mutex_destroy(&udev->lock); + + kfree(udev); +} + +static int mhi_uci_open(struct inode *inode, struct file *filp) +{ + unsigned int minor = iminor(inode); + struct uci_dev *udev = NULL; + int ret; + + mutex_lock(&uci_drv_mutex); + udev = idr_find(&uci_idr, minor); + if (!udev) { + pr_debug("uci dev: minor %d not found\n", minor); + mutex_unlock(&uci_drv_mutex); + return -ENODEV; + } + + kref_get(&udev->ref_count); + mutex_unlock(&uci_drv_mutex); + + ret = mhi_uci_dev_start_chan(udev); + if (ret) { + kref_put(&udev->ref_count, mhi_uci_dev_release); + return ret; + } + + filp->private_data = udev; + + return 0; +} + +static int mhi_uci_release(struct inode *inode, struct file *file) +{ + struct uci_dev *udev = file->private_data; + + mutex_lock(&udev->lock); + 
kref_put(&udev->uchan->ref_count, mhi_uci_dev_chan_release); + mutex_unlock(&udev->lock); + + kref_put(&udev->ref_count, mhi_uci_dev_release); + + return 0; +} + +static __poll_t mhi_uci_poll(struct file *file, poll_table *wait) +{ + struct uci_dev *udev = file->private_data; + struct mhi_device *mhi_dev = udev->mhi_dev; + struct device *dev = &mhi_dev->dev; + struct uci_chan *uchan = udev->uchan; + __poll_t mask = 0; + + poll_wait(file, &udev->uchan->ul_wq, wait); + poll_wait(file, &udev->uchan->dl_wq, wait); + + if (!udev->enabled) { + mask = EPOLLERR; + goto done; + } + + spin_lock_bh(&uchan->dl_lock); + if (!list_empty(&uchan->pending) || uchan->cur_buf) { + dev_dbg(dev, "Client can read from node\n"); + mask |= EPOLLIN | EPOLLRDNORM; + } + spin_unlock_bh(&uchan->dl_lock); + + if (mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) > 0) { + dev_dbg(dev, "Client can write to node\n"); + mask |= EPOLLOUT | EPOLLWRNORM; + } + + dev_dbg(dev, "Client attempted to poll, returning mask 0x%x\n", mask); + +done: + return mask; +} + +static ssize_t mhi_uci_write(struct file *file, + const char __user *buf, + size_t count, + loff_t *offp) +{ + struct uci_dev *udev = file->private_data; + struct mhi_device *mhi_dev = udev->mhi_dev; + struct device *dev = &mhi_dev->dev; + struct uci_chan *uchan = udev->uchan; + size_t bytes_xfered = 0; + int ret, nr_avail = 0; + + /* if ul channel is not supported return error */ + if (!buf || !count || !mhi_dev->ul_chan) + return -EINVAL; + + dev_dbg(dev, "%s: to xfer: %lu bytes\n", __func__, count); + + mutex_lock(&uchan->write_lock); + while (count) { + size_t xfer_size; + void *kbuf; + enum mhi_flags flags; + + /* wait for free descriptors */ + ret = wait_event_interruptible(uchan->ul_wq, + (!udev->enabled) || + (nr_avail = mhi_get_free_desc_count(mhi_dev, + DMA_TO_DEVICE)) > 0); + + if (ret == -ERESTARTSYS) { + dev_dbg(dev, "Interrupted by a signal in %s, exiting\n", + __func__); + goto err_mtx_unlock; + } + + if (!udev->enabled) { + ret = -ENODEV; + goto err_mtx_unlock; + } + + xfer_size = min_t(size_t, count, udev->mtu); + kbuf = kmalloc(xfer_size, GFP_KERNEL); + if (!kbuf) { + ret = -ENOMEM; + goto err_mtx_unlock; + } + + ret = copy_from_user(kbuf, buf, xfer_size); + if (ret) { + kfree(kbuf); + ret = -EFAULT; + goto err_mtx_unlock; + } + + /* if ring is full after this force EOT */ + if (nr_avail > 1 && (count - xfer_size)) + flags = MHI_CHAIN; + else + flags = MHI_EOT; + + ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, kbuf, xfer_size, + flags); + if (ret) { + kfree(kbuf); + goto err_mtx_unlock; + } + + bytes_xfered += xfer_size; + count -= xfer_size; + buf += xfer_size; + } + + mutex_unlock(&uchan->write_lock); + dev_dbg(dev, "%s: bytes xferred: %lu\n", __func__, bytes_xfered); + + return bytes_xfered; + +err_mtx_unlock: + mutex_unlock(&uchan->write_lock); + + return ret; +} + +static ssize_t mhi_uci_read(struct file *file, + char __user *buf, + size_t count, + loff_t *ppos) +{ + struct uci_dev *udev = file->private_data; + struct mhi_device *mhi_dev = udev->mhi_dev; + struct uci_chan *uchan = udev->uchan; + struct device *dev = &mhi_dev->dev; + struct uci_buf *ubuf; + size_t rx_buf_size; + char *ptr; + size_t to_copy; + int ret = 0; + + /* if dl channel is not supported return error */ + if (!buf || !mhi_dev->dl_chan) + return -EINVAL; + + mutex_lock(&uchan->read_lock); + spin_lock_bh(&uchan->dl_lock); + /* No data available to read, wait */ + if (!uchan->cur_buf && list_empty(&uchan->pending)) { + dev_dbg(dev, "No data available to read, waiting\n"); + + 
spin_unlock_bh(&uchan->dl_lock); + ret = wait_event_interruptible(uchan->dl_wq, + (!udev->enabled || + !list_empty(&uchan->pending))); + + if (ret == -ERESTARTSYS) { + dev_dbg(dev, "Interrupted by a signal in %s, exiting\n", + __func__); + goto err_mtx_unlock; + } + + if (!udev->enabled) { + ret = -ENODEV; + goto err_mtx_unlock; + } + spin_lock_bh(&uchan->dl_lock); + } + + /* new read, get the next descriptor from the list */ + if (!uchan->cur_buf) { + ubuf = list_first_entry_or_null(&uchan->pending, + struct uci_buf, node); + if (!ubuf) { + ret = -EIO; + goto err_spin_unlock; + } + + list_del(&ubuf->node); + uchan->cur_buf = ubuf; + uchan->dl_size = ubuf->len; + dev_dbg(dev, "Got pkt of size: %zu\n", uchan->dl_size); + } + + ubuf = uchan->cur_buf; + spin_unlock_bh(&uchan->dl_lock); + + /* Copy the buffer to user space */ + to_copy = min_t(size_t, count, uchan->dl_size); + ptr = ubuf->data + (ubuf->len - uchan->dl_size); + + ret = copy_to_user(buf, ptr, to_copy); + if (ret) { + ret = -EFAULT; + goto err_mtx_unlock; + } + + dev_dbg(dev, "Copied %lu of %lu bytes\n", to_copy, uchan->dl_size); + uchan->dl_size -= to_copy; + + /* we finished with this buffer, queue it back to hardware */ + if (!uchan->dl_size) { + spin_lock_bh(&uchan->dl_lock); + uchan->cur_buf = NULL; + spin_unlock_bh(&uchan->dl_lock); + + rx_buf_size = udev->mtu - sizeof(*ubuf); + ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, ubuf->data, + rx_buf_size, MHI_EOT); + if (ret) { + dev_err(dev, "Failed to recycle element: %d\n", ret); + kfree(ubuf->data); + goto err_mtx_unlock; + } + } + mutex_unlock(&uchan->read_lock); + + dev_dbg(dev, "%s: Returning %lu bytes\n", __func__, to_copy); + + return to_copy; + +err_spin_unlock: + spin_unlock_bh(&uchan->dl_lock); +err_mtx_unlock: + mutex_unlock(&uchan->read_lock); + return ret; +} + +static const struct file_operations mhidev_fops = { + .owner = THIS_MODULE, + .open = mhi_uci_open, + .release = mhi_uci_release, + .read = mhi_uci_read, + .write = mhi_uci_write, + .poll = mhi_uci_poll, +}; + +static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct uci_dev *udev = dev_get_drvdata(&mhi_dev->dev); + struct uci_chan *uchan = udev->uchan; + struct device *dev = &mhi_dev->dev; + + dev_dbg(dev, "status: %d xfer_len: %zu\n", + mhi_result->transaction_status, mhi_result->bytes_xferd); + + kfree(mhi_result->buf_addr); + + if (!mhi_result->transaction_status) + wake_up(&uchan->ul_wq); +} + +static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct uci_dev *udev = dev_get_drvdata(&mhi_dev->dev); + struct uci_chan *uchan = udev->uchan; + struct device *dev = &mhi_dev->dev; + struct uci_buf *ubuf; + size_t dl_buf_size = udev->mtu - sizeof(*ubuf); + + dev_dbg(dev, "status: %d receive_len: %zu\n", + mhi_result->transaction_status, mhi_result->bytes_xferd); + + if (mhi_result->transaction_status == -ENOTCONN) { + kfree(mhi_result->buf_addr); + return; + } + + ubuf = mhi_result->buf_addr + dl_buf_size; + ubuf->data = mhi_result->buf_addr; + ubuf->len = mhi_result->bytes_xferd; + spin_lock_bh(&uchan->dl_lock); + list_add_tail(&ubuf->node, &uchan->pending); + spin_unlock_bh(&uchan->dl_lock); + + wake_up(&uchan->dl_wq); +} + +static int mhi_uci_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct uci_dev *udev; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device *dev; + int index; + + udev = kzalloc(sizeof(*udev), GFP_KERNEL); + if (!udev) + return -ENOMEM; + + 
kref_init(&udev->ref_count); + mutex_init(&udev->lock); + udev->mhi_dev = mhi_dev; + + mutex_lock(&uci_drv_mutex); + index = idr_alloc(&uci_idr, udev, 0, MAX_UCI_MINORS, GFP_KERNEL); + mutex_unlock(&uci_drv_mutex); + if (index < 0) { + kfree(udev); + return index; + } + + udev->minor = index; + + udev->mtu = min_t(size_t, id->driver_data, MHI_MAX_MTU); + dev_set_drvdata(&mhi_dev->dev, udev); + udev->enabled = true; + + /* create device file node /dev/mhi_<cntrl_dev_name>_<mhi_dev_name> */ + dev = device_create(uci_dev_class, &mhi_dev->dev, + MKDEV(uci_dev_major, index), udev, + DEVICE_NAME "_%s_%s", + dev_name(mhi_cntrl->cntrl_dev), mhi_dev->name); + if (IS_ERR(dev)) { + mutex_lock(&uci_drv_mutex); + idr_remove(&uci_idr, udev->minor); + mutex_unlock(&uci_drv_mutex); + dev_set_drvdata(&mhi_dev->dev, NULL); + kfree(udev); + return PTR_ERR(dev); + } + + dev_dbg(&mhi_dev->dev, "probed uci dev: minor %d\n", index); + + return 0; +}; + +static void mhi_uci_remove(struct mhi_device *mhi_dev) +{ + struct uci_dev *udev = dev_get_drvdata(&mhi_dev->dev); + + /* disable the node */ + mutex_lock(&udev->lock); + udev->enabled = false; + + /* delete the node to prevent new opens */ + device_destroy(uci_dev_class, MKDEV(uci_dev_major, udev->minor)); + + /* return error for any blocked read or write */ + if (udev->uchan) { + wake_up(&udev->uchan->ul_wq); + wake_up(&udev->uchan->dl_wq); + } + mutex_unlock(&udev->lock); + + mutex_lock(&uci_drv_mutex); + idr_remove(&uci_idr, udev->minor); + kref_put(&udev->ref_count, mhi_uci_dev_release); + mutex_unlock(&uci_drv_mutex); +} + +/* .driver_data stores max mtu */ +static const struct mhi_device_id mhi_uci_match_table[] = { + { .chan = "LOOPBACK", .driver_data = 0x1000}, + {}, +}; +MODULE_DEVICE_TABLE(mhi, mhi_uci_match_table); + +static struct mhi_driver mhi_uci_driver = { + .id_table = mhi_uci_match_table, + .remove = mhi_uci_remove, + .probe = mhi_uci_probe, + .ul_xfer_cb = mhi_ul_xfer_cb, + .dl_xfer_cb = mhi_dl_xfer_cb, + .driver = { + .name = MHI_UCI_DRIVER_NAME, + }, +}; + +static int mhi_uci_init(void) +{ + int ret; + + ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops); + if (ret < 0) + return ret; + + uci_dev_major = ret; + uci_dev_class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME); + if (IS_ERR(uci_dev_class)) { + unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME); + return PTR_ERR(uci_dev_class); + } + + ret = mhi_driver_register(&mhi_uci_driver); + if (ret) { + class_destroy(uci_dev_class); + unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME); + } + + return ret; +} + +static void __exit mhi_uci_exit(void) +{ + mhi_driver_unregister(&mhi_uci_driver); + class_destroy(uci_dev_class); + unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME); + idr_destroy(&uci_idr); +} + +module_init(mhi_uci_init); +module_exit(mhi_uci_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI UCI Driver");
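One design detail of the patch that is easy to miss in the diff: each downlink buffer is a single kmalloc(udev->mtu) allocation that carries its own struct uci_buf bookkeeping at the tail, so only the first mtu - sizeof(struct uci_buf) bytes are handed to the MHI core and no separate metadata allocation is needed. Restated compactly from mhi_queue_inbound() and mhi_dl_xfer_cb() above, purely as illustration:

	void *buf = kmalloc(udev->mtu, GFP_KERNEL);
	size_t dl_buf_size = udev->mtu - sizeof(struct uci_buf);
	/* bookkeeping lives at the tail of the same allocation */
	struct uci_buf *ubuf = buf + dl_buf_size;

	ubuf->data = buf;	/* points back at the payload start */
	/* only the payload area is queued to the device for DMA */
	mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, dl_buf_size, MHI_EOT);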
This MHI client driver allows userspace clients to transfer raw data between
MHI device and host using standard file operations. Driver instantiates uci
device object which is associated to device file node. uci device object
instantiates uci channel object when device file node is opened. uci channel
object is used to manage MHI channels by calling MHI core APIs for read and
write operations. MHI channels are started as part of device open(). MHI
channels remain in start state until last release() is called on uci device
file node. Device file node is created with format

/dev/mhi_<controller_name>_<mhi_device_name>

Currently it supports LOOPBACK channel.

Signed-off-by: Hemant Kumar <hemantk@codeaurora.org>
---
 drivers/bus/mhi/Kconfig  |  13 +
 drivers/bus/mhi/Makefile |   4 +
 drivers/bus/mhi/uci.c    | 656 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 673 insertions(+)
 create mode 100644 drivers/bus/mhi/uci.c
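Since the whole point of the driver is that plain file operations are enough, a tiny userspace loopback test looks like the following. This is illustrative only and not part of the patch; the device node name depends on the MHI controller, so the path below is a made-up example that has to be adjusted for the target system.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* example path only: /dev/mhi_<controller_name>_LOOPBACK */
	const char *node = "/dev/mhi_0000:02:00.0_LOOPBACK";
	char tx[] = "hello mhi";
	char rx[sizeof(tx)] = { 0 };
	int fd = open(node, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* the LOOPBACK channel echoes written data back to the reader */
	if (write(fd, tx, sizeof(tx)) < 0) {
		perror("write");
	} else if (read(fd, rx, sizeof(rx)) < 0) {
		perror("read");
	} else {
		printf("sent \"%s\", got back \"%s\"\n", tx, rx);
	}

	close(fd);
	return 0;
}

Note that read() blocks until the device returns data, matching the wait_event_interruptible() logic in mhi_uci_read(); poll() can be used first if non-blocking behaviour is wanted.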