@@ -1435,14 +1435,15 @@ static const struct vm_operations_struct sg_mmap_vm_ops = {
 	.fault = sg_vma_fault,
 };
 
+/* Entry point for mmap(2) system call */
 static int
 sg_mmap(struct file *filp, struct vm_area_struct *vma)
 {
-	struct sg_fd *sfp;
-	unsigned long req_sz, len, sa;
-	struct sg_scatter_hold *rsv_schp;
 	int k, length;
 	int ret = 0;
+	unsigned long req_sz, len, sa;
+	struct sg_scatter_hold *rsv_schp;
+	struct sg_fd *sfp;
 
 	if (!filp || !vma)
 		return -ENXIO;
@@ -1455,19 +1456,23 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	SG_LOG(3, sfp, "%s: vm_start=%p, len=%d\n", __func__,
 	       (void *)vma->vm_start, (int)req_sz);
 	if (vma->vm_pgoff)
-		return -EINVAL;	/* want no offset */
-	rsv_schp = &sfp->reserve;
+		return -EINVAL;	/* only an offset of 0 accepted */
+	/* Check reserve request is inactive and has large enough buffer */
 	mutex_lock(&sfp->f_mutex);
-	if (req_sz > rsv_schp->buflen) {
-		ret = -ENOMEM;	/* cannot map more than reserved buffer */
+	if (sfp->res_in_use) {
+		ret = -EBUSY;
+		goto out;
+	}
+	rsv_schp = &sfp->reserve;
+	if (req_sz > (unsigned long)rsv_schp->buflen) {
+		ret = -ENOMEM;
 		goto out;
 	}
-
 	sa = vma->vm_start;
 	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
-	for (k = 0; k < rsv_schp->num_sgat && sa < vma->vm_end; k++) {
+	for (k = 0; k < rsv_schp->num_sgat && sa < vma->vm_end; ++k) {
 		len = vma->vm_end - sa;
-		len = (len < length) ? len : length;
+		len = min_t(unsigned long, len, (unsigned long)length);
 		sa += len;
 	}
 