| Message ID | 20180804124526.46206-15-hverkuil@xs4all.nl (mailing list archive) |
|---|---|
| State | New, archived |
| Series | Request API |
On Sat, 4 Aug 2018 14:45:06 +0200 Hans Verkuil <hverkuil@xs4all.nl> wrote: > From: Hans Verkuil <hans.verkuil@cisco.com> > > Integrate the request support. This adds the v4l2_ctrl_request_complete > and v4l2_ctrl_request_setup functions to complete a request and (as a > helper function) to apply a request to the hardware. > > It takes care of queuing requests and correctly chaining control values > in the request queue. > > Note that when a request is marked completed it will copy control values > to the internal request state. This can be optimized in the future since > this is sub-optimal when dealing with large compound and/or array controls. > > For the initial 'stateless codec' use-case the current implementation is > sufficient. > > Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com> > --- > drivers/media/v4l2-core/v4l2-ctrls.c | 326 ++++++++++++++++++++++++++- > include/media/v4l2-ctrls.h | 51 +++++ > 2 files changed, 371 insertions(+), 6 deletions(-) > > diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c > index 570b6f8ae46a..b8ff6d6b14cd 100644 > --- a/drivers/media/v4l2-core/v4l2-ctrls.c > +++ b/drivers/media/v4l2-core/v4l2-ctrls.c > @@ -1668,6 +1668,13 @@ static int new_to_user(struct v4l2_ext_control *c, > return ptr_to_user(c, ctrl, ctrl->p_new); > } > > +/* Helper function: copy the request value back to the caller */ > +static int req_to_user(struct v4l2_ext_control *c, > + struct v4l2_ctrl_ref *ref) > +{ > + return ptr_to_user(c, ref->ctrl, ref->p_req); > +} > + > /* Helper function: copy the initial control value back to the caller */ > static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) > { > @@ -1787,6 +1794,26 @@ static void cur_to_new(struct v4l2_ctrl *ctrl) > ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new); > } > > +/* Copy the new value to the request value */ > +static void new_to_req(struct v4l2_ctrl_ref *ref) > +{ > + if (!ref) > + return; > + ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req); > + ref->req = ref; > +} > + > +/* Copy the request value to the new value */ > +static void req_to_new(struct v4l2_ctrl_ref *ref) > +{ > + if (!ref) > + return; > + if (ref->req) > + ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new); > + else > + ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new); > +} > + > /* Return non-zero if one or more of the controls in the cluster has a new > value that differs from the current value. */ > static int cluster_changed(struct v4l2_ctrl *master) > @@ -1896,6 +1923,9 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, > lockdep_set_class_and_name(hdl->lock, key, name); > INIT_LIST_HEAD(&hdl->ctrls); > INIT_LIST_HEAD(&hdl->ctrl_refs); > + INIT_LIST_HEAD(&hdl->requests); > + INIT_LIST_HEAD(&hdl->requests_queued); > + hdl->request_is_queued = false; > hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8; > hdl->buckets = kvmalloc_array(hdl->nr_of_buckets, > sizeof(hdl->buckets[0]), > @@ -1916,6 +1946,14 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl) > if (hdl == NULL || hdl->buckets == NULL) > return; > > + if (!hdl->req_obj.req && !list_empty(&hdl->requests)) { > + struct v4l2_ctrl_handler *req, *next_req; > + > + list_for_each_entry_safe(req, next_req, &hdl->requests, requests) { > + media_request_object_unbind(&req->req_obj); > + media_request_object_put(&req->req_obj); Hmm...
while this would work for the trivial case where object_put() would just drop the object from the list if nobody else is using it, nothing prevents that, if v4l2_ctrl_handler_free() is called twice, it would do the wrong thing... as the only test here is if req_obj.reg is not NULL, and not if the control framework is already done with the object. > + } > + } > mutex_lock(hdl->lock); > /* Free all nodes */ > list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) { > @@ -2837,6 +2875,123 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm) > } > EXPORT_SYMBOL(v4l2_querymenu); > > +static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl, > + const struct v4l2_ctrl_handler *from) > +{ > + struct v4l2_ctrl_ref *ref; > + int err; > + > + if (WARN_ON(!hdl || hdl == from)) > + return -EINVAL; > + > + if (hdl->error) > + return hdl->error; > + > + WARN_ON(hdl->lock != &hdl->_lock); > + > + mutex_lock(from->lock); > + list_for_each_entry(ref, &from->ctrl_refs, node) { > + struct v4l2_ctrl *ctrl = ref->ctrl; > + struct v4l2_ctrl_ref *new_ref; > + > + /* Skip refs inherited from other devices */ > + if (ref->from_other_dev) > + continue; > + /* And buttons */ > + if (ctrl->type == V4L2_CTRL_TYPE_BUTTON) > + continue; > + err = handler_new_ref(hdl, ctrl, &new_ref, false, true); > + if (err) > + break; > + } > + mutex_unlock(from->lock); > + return err; > +} > + > +static void v4l2_ctrl_request_queue(struct media_request_object *obj) > +{ > + struct v4l2_ctrl_handler *hdl = > + container_of(obj, struct v4l2_ctrl_handler, req_obj); > + struct v4l2_ctrl_handler *main_hdl = obj->priv; > + struct v4l2_ctrl_handler *prev_hdl = NULL; > + struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL; > + > + if (list_empty(&main_hdl->requests_queued)) > + goto queue; > + > + prev_hdl = list_last_entry(&main_hdl->requests_queued, > + struct v4l2_ctrl_handler, requests_queued); > + /* > + * Note: prev_hdl and hdl must contain the same list of control > + * references, so if any differences are detected then that is a > + * driver bug and the WARN_ON is triggered. > + */ > + mutex_lock(prev_hdl->lock); > + ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs, > + struct v4l2_ctrl_ref, node); > + list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) { > + if (ref_ctrl->req) > + continue; > + while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) { > + /* Should never happen, but just in case... */ > + if (list_is_last(&ref_ctrl_prev->node, > + &prev_hdl->ctrl_refs)) If should never happen, please use unlikely(). That makes clearer while doing some speed optimization). > + break; > + ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node); > + } > + if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id)) > + break; > + ref_ctrl->req = ref_ctrl_prev->req; > + } > + mutex_unlock(prev_hdl->lock); > +queue: > + list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued); Shouldn't list changes be serialized? Ok, the ioctls are serialized, but device removal/unbind can happen any time for hot-plugged devices[1]. [1] yeah, I know that, right now, the framework is meant to be used only by codecs that are on SoC, but I'm pretty sure we'll end by using it on other use cases in the future. As this is core code, we should be sure that it will not cause troubles due to the lack of a proper locking, as I doubt we'll review the locks when adding other use cases. 
> + hdl->request_is_queued = true; > +} > + > +static void v4l2_ctrl_request_unbind(struct media_request_object *obj) > +{ > + struct v4l2_ctrl_handler *hdl = > + container_of(obj, struct v4l2_ctrl_handler, req_obj); > + > + list_del_init(&hdl->requests); > + if (hdl->request_is_queued) { > + list_del_init(&hdl->requests_queued); > + hdl->request_is_queued = false; > + } > +} > + > +static void v4l2_ctrl_request_release(struct media_request_object *obj) > +{ > + struct v4l2_ctrl_handler *hdl = > + container_of(obj, struct v4l2_ctrl_handler, req_obj); > + > + v4l2_ctrl_handler_free(hdl); > + kfree(hdl); > +} > + > +static const struct media_request_object_ops req_ops = { > + .queue = v4l2_ctrl_request_queue, > + .unbind = v4l2_ctrl_request_unbind, > + .release = v4l2_ctrl_request_release, > +}; > + > +static int v4l2_ctrl_request_bind(struct media_request *req, > + struct v4l2_ctrl_handler *hdl, > + struct v4l2_ctrl_handler *from) > +{ > + int ret; > + > + ret = v4l2_ctrl_request_clone(hdl, from); > + > + if (!ret) { > + ret = media_request_object_bind(req, &req_ops, > + from, &hdl->req_obj); > + if (!ret) > + list_add_tail(&hdl->requests, &from->requests); > + } > + return ret; > +} > > /* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS: > > @@ -2898,6 +3053,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, > > if (cs->which && > cs->which != V4L2_CTRL_WHICH_DEF_VAL && > + cs->which != V4L2_CTRL_WHICH_REQUEST_VAL && > V4L2_CTRL_ID2WHICH(id) != cs->which) > return -EINVAL; > > @@ -2977,13 +3133,12 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, > whether there are any controls at all. */ > static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) > { > - if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL) > + if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL || > + which == V4L2_CTRL_WHICH_REQUEST_VAL) > return 0; > return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; > } > > - > - > /* Get extended controls. Allocates the helpers array if needed. */ > int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs) > { > @@ -3049,8 +3204,12 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs > u32 idx = i; > > do { > - ret = ctrl_to_user(cs->controls + idx, > - helpers[idx].ref->ctrl); > + if (helpers[idx].ref->req) > + ret = req_to_user(cs->controls + idx, > + helpers[idx].ref->req); > + else > + ret = ctrl_to_user(cs->controls + idx, > + helpers[idx].ref->ctrl); > idx = helpers[idx].next; > } while (!ret && idx); > } > @@ -3336,7 +3495,16 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, > } while (!ret && idx); > > if (!ret) > - ret = try_or_set_cluster(fh, master, set, 0); > + ret = try_or_set_cluster(fh, master, > + !hdl->req_obj.req && set, 0); > + if (!ret && hdl->req_obj.req && set) { > + for (j = 0; j < master->ncontrols; j++) { > + struct v4l2_ctrl_ref *ref = > + find_ref(hdl, master->cluster[j]->id); > + > + new_to_req(ref); > + } > + } > > /* Copy the new values back to userspace. 
*/ > if (!ret) { > @@ -3463,6 +3631,152 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) > } > EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); > > +void v4l2_ctrl_request_complete(struct media_request *req, > + struct v4l2_ctrl_handler *main_hdl) > +{ > + struct media_request_object *obj; > + struct v4l2_ctrl_handler *hdl; > + struct v4l2_ctrl_ref *ref; > + > + if (!req || !main_hdl) > + return; > + > + obj = media_request_object_find(req, &req_ops, main_hdl); > + if (!obj) > + return; > + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); > + > + list_for_each_entry(ref, &hdl->ctrl_refs, node) { > + struct v4l2_ctrl *ctrl = ref->ctrl; > + struct v4l2_ctrl *master = ctrl->cluster[0]; > + unsigned int i; > + > + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { > + ref->req = ref; > + > + v4l2_ctrl_lock(master); > + /* g_volatile_ctrl will update the current control values */ > + for (i = 0; i < master->ncontrols; i++) > + cur_to_new(master->cluster[i]); > + call_op(master, g_volatile_ctrl); > + new_to_req(ref); > + v4l2_ctrl_unlock(master); > + continue; > + } > + if (ref->req == ref) > + continue; > + > + v4l2_ctrl_lock(ctrl); > + if (ref->req) > + ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req); > + else > + ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req); > + v4l2_ctrl_unlock(ctrl); > + } > + > + WARN_ON(!hdl->request_is_queued); > + list_del_init(&hdl->requests_queued); > + hdl->request_is_queued = false; > + media_request_object_complete(obj); > + media_request_object_put(obj); Hmm... nothing prevents that this would race with v4l2_ctrl_handler_free() and cause double-free (actually double object_put). > +} > +EXPORT_SYMBOL(v4l2_ctrl_request_complete); > + > +void v4l2_ctrl_request_setup(struct media_request *req, > + struct v4l2_ctrl_handler *main_hdl) > +{ > + struct media_request_object *obj; > + struct v4l2_ctrl_handler *hdl; > + struct v4l2_ctrl_ref *ref; > + > + if (!req || !main_hdl) > + return; > + > + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) > + return; > + > + obj = media_request_object_find(req, &req_ops, main_hdl); > + if (!obj) > + return; Shouldn't the above checks produce an error or print something at the logs? > + if (obj->completed) { > + media_request_object_put(obj); > + return; > + } > + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); > + > + list_for_each_entry(ref, &hdl->ctrl_refs, node) > + ref->req_done = false; > + > + list_for_each_entry(ref, &hdl->ctrl_refs, node) { > + struct v4l2_ctrl *ctrl = ref->ctrl; > + struct v4l2_ctrl *master = ctrl->cluster[0]; > + bool have_new_data = false; > + int i; > + > + /* > + * Skip if this control was already handled by a cluster. > + * Skip button controls and read-only controls. 
> + */ > + if (ref->req_done || ctrl->type == V4L2_CTRL_TYPE_BUTTON || > + (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) > + continue; > + > + v4l2_ctrl_lock(master); > + for (i = 0; i < master->ncontrols; i++) { > + if (master->cluster[i]) { > + struct v4l2_ctrl_ref *r = > + find_ref(hdl, master->cluster[i]->id); > + > + if (r->req && r == r->req) { > + have_new_data = true; > + break; > + } > + } > + } > + if (!have_new_data) { > + v4l2_ctrl_unlock(master); > + continue; > + } > + > + for (i = 0; i < master->ncontrols; i++) { > + if (master->cluster[i]) { > + struct v4l2_ctrl_ref *r = > + find_ref(hdl, master->cluster[i]->id); > + > + req_to_new(r); > + master->cluster[i]->is_new = 1; > + r->req_done = true; > + } > + } > + /* > + * For volatile autoclusters that are currently in auto mode > + * we need to discover if it will be set to manual mode. > + * If so, then we have to copy the current volatile values > + * first since those will become the new manual values (which > + * may be overwritten by explicit new values from this set > + * of controls). > + */ > + if (master->is_auto && master->has_volatiles && > + !is_cur_manual(master)) { > + s32 new_auto_val = *master->p_new.p_s32; > + > + /* > + * If the new value == the manual value, then copy > + * the current volatile values. > + */ > + if (new_auto_val == master->manual_mode_value) > + update_from_auto_cluster(master); > + } > + > + try_or_set_cluster(NULL, master, true, 0); > + > + v4l2_ctrl_unlock(master); > + } > + > + media_request_object_put(obj); > +} > +EXPORT_SYMBOL(v4l2_ctrl_request_setup); > + > void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv) > { > if (ctrl == NULL) > diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h > index 3f4e062d4e3d..3617d5077b19 100644 > --- a/include/media/v4l2-ctrls.h > +++ b/include/media/v4l2-ctrls.h > @@ -250,6 +250,12 @@ struct v4l2_ctrl { > * ``prepare_ext_ctrls`` function at ``v4l2-ctrl.c``. > * @from_other_dev: If true, then @ctrl was defined in another > * device than the &struct v4l2_ctrl_handler. > + * @req_done: Internal flag: if the control handler containing this control > + * reference is bound to a media request, then this is set when > + * the control has been applied. This prevents applying controls > + * from a cluster with multiple controls twice (when the first > + * control of a cluster is applied, they all are). > + * @req: If set, this refers to another request that sets this control. > * @p_req: If the control handler containing this control reference > * is bound to a media request, then this points to the > * value of the control that should be applied when the request > @@ -266,6 +272,8 @@ struct v4l2_ctrl_ref { > struct v4l2_ctrl *ctrl; > struct v4l2_ctrl_helper *helper; > bool from_other_dev; > + bool req_done; > + struct v4l2_ctrl_ref *req; > union v4l2_ctrl_ptr p_req; > }; > > @@ -290,6 +298,15 @@ struct v4l2_ctrl_ref { > * @notify_priv: Passed as argument to the v4l2_ctrl notify callback. > * @nr_of_buckets: Total number of buckets in the array. > * @error: The error code of the first failed control addition. > + * @request_is_queued: True if the request was queued. > + * @requests: List to keep track of open control handler request objects. > + * For the parent control handler (@req_obj.req == NULL) this > + * is the list header. When the parent control handler is > + * removed, it has to unbind and put all these requests since > + * they refer to the parent. 
> + * @requests_queued: List of the queued requests. This determines the order > + * in which these controls are applied. Once the request is > + * completed it is removed from this list. > * @req_obj: The &struct media_request_object, used to link into a > * &struct media_request. This request object has a refcount. > */ > @@ -304,6 +321,9 @@ struct v4l2_ctrl_handler { > void *notify_priv; > u16 nr_of_buckets; > int error; > + bool request_is_queued; > + struct list_head requests; > + struct list_head requests_queued; > struct media_request_object req_obj; > }; > > @@ -1062,6 +1082,37 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, > */ > __poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait); > > +/** > + * v4l2_ctrl_request_setup - helper function to apply control values in a request > + * > + * @req: The request > + * @hdl: The control handler > + * > + * This is a helper function to call the control handler's s_ctrl callback with > + * the control values contained in the request. Do note that this approach of > + * applying control values in a request is only applicable to memory-to-memory > + * devices. > + */ > +void v4l2_ctrl_request_setup(struct media_request *req, > + struct v4l2_ctrl_handler *hdl); > + > +/** > + * v4l2_ctrl_request_complete - Complete a control handler request object > + * > + * @req: The request > + * @hdl: The control handler > + * > + * This function is to be called on each control handler that may have had a > + * request object associated with it, i.e. control handlers of a driver that > + * supports requests. > + * > + * The function first obtains the values of any volatile controls in the control > + * handler and attach them to the request. Then, the function completes the > + * request object. > + */ > +void v4l2_ctrl_request_complete(struct media_request *req, > + struct v4l2_ctrl_handler *hdl); > + > /* Helpers for ioctl_ops */ > > /** Thanks, Mauro
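For reference, the unlikely() change suggested above would only amount to annotating the "should never happen" exit in v4l2_ctrl_request_queue(). A minimal sketch of the reviewer's suggestion (not code from the patch, and Hans declines it in the reply below):

	while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
		/* Should never happen, but just in case... */
		if (unlikely(list_is_last(&ref_ctrl_prev->node,
					  &prev_hdl->ctrl_refs)))
			break;
		ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
	}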
On 13/08/18 12:55, Mauro Carvalho Chehab wrote: > Em Sat, 4 Aug 2018 14:45:06 +0200 > Hans Verkuil <hverkuil@xs4all.nl> escreveu: > >> From: Hans Verkuil <hans.verkuil@cisco.com> >> >> Integrate the request support. This adds the v4l2_ctrl_request_complete >> and v4l2_ctrl_request_setup functions to complete a request and (as a >> helper function) to apply a request to the hardware. >> >> It takes care of queuing requests and correctly chaining control values >> in the request queue. >> >> Note that when a request is marked completed it will copy control values >> to the internal request state. This can be optimized in the future since >> this is sub-optimal when dealing with large compound and/or array controls. >> >> For the initial 'stateless codec' use-case the current implementation is >> sufficient. >> >> Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com> >> --- >> drivers/media/v4l2-core/v4l2-ctrls.c | 326 ++++++++++++++++++++++++++- >> include/media/v4l2-ctrls.h | 51 +++++ >> 2 files changed, 371 insertions(+), 6 deletions(-) >> >> diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c >> index 570b6f8ae46a..b8ff6d6b14cd 100644 >> --- a/drivers/media/v4l2-core/v4l2-ctrls.c >> +++ b/drivers/media/v4l2-core/v4l2-ctrls.c >> @@ -1668,6 +1668,13 @@ static int new_to_user(struct v4l2_ext_control *c, >> return ptr_to_user(c, ctrl, ctrl->p_new); >> } >> >> +/* Helper function: copy the request value back to the caller */ >> +static int req_to_user(struct v4l2_ext_control *c, >> + struct v4l2_ctrl_ref *ref) >> +{ >> + return ptr_to_user(c, ref->ctrl, ref->p_req); >> +} >> + >> /* Helper function: copy the initial control value back to the caller */ >> static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) >> { >> @@ -1787,6 +1794,26 @@ static void cur_to_new(struct v4l2_ctrl *ctrl) >> ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new); >> } >> >> +/* Copy the new value to the request value */ >> +static void new_to_req(struct v4l2_ctrl_ref *ref) >> +{ >> + if (!ref) >> + return; >> + ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req); >> + ref->req = ref; >> +} >> + >> +/* Copy the request value to the new value */ >> +static void req_to_new(struct v4l2_ctrl_ref *ref) >> +{ >> + if (!ref) >> + return; >> + if (ref->req) >> + ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new); >> + else >> + ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new); >> +} >> + >> /* Return non-zero if one or more of the controls in the cluster has a new >> value that differs from the current value. */ >> static int cluster_changed(struct v4l2_ctrl *master) >> @@ -1896,6 +1923,9 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, >> lockdep_set_class_and_name(hdl->lock, key, name); >> INIT_LIST_HEAD(&hdl->ctrls); >> INIT_LIST_HEAD(&hdl->ctrl_refs); >> + INIT_LIST_HEAD(&hdl->requests); >> + INIT_LIST_HEAD(&hdl->requests_queued); >> + hdl->request_is_queued = false; >> hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8; >> hdl->buckets = kvmalloc_array(hdl->nr_of_buckets, >> sizeof(hdl->buckets[0]), >> @@ -1916,6 +1946,14 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl) >> if (hdl == NULL || hdl->buckets == NULL) >> return; >> >> + if (!hdl->req_obj.req && !list_empty(&hdl->requests)) { >> + struct v4l2_ctrl_handler *req, *next_req; >> + >> + list_for_each_entry_safe(req, next_req, &hdl->requests, requests) { >> + media_request_object_unbind(&req->req_obj); >> + media_request_object_put(&req->req_obj); > > Hmm... 
while this would work for the trivial case where object_put() > would just drop the object from the list if nobody else is using it, > nothing prevents that, if v4l2_ctrl_handler_free() is called twice, > it would do the wrong thing... as the only test here is if req_obj.reg > is not NULL, and not if the control framework is already done with the > object. v4l2_ctrl_handler_free sets hdl->buckets to NULL when done. And if it is called twice it will detect that hdl->buckets == NULL and return. So this isn't an issue. > >> + } >> + } >> mutex_lock(hdl->lock); >> /* Free all nodes */ >> list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) { >> @@ -2837,6 +2875,123 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm) >> } >> EXPORT_SYMBOL(v4l2_querymenu); >> >> +static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl, >> + const struct v4l2_ctrl_handler *from) >> +{ >> + struct v4l2_ctrl_ref *ref; >> + int err; >> + >> + if (WARN_ON(!hdl || hdl == from)) >> + return -EINVAL; >> + >> + if (hdl->error) >> + return hdl->error; >> + >> + WARN_ON(hdl->lock != &hdl->_lock); >> + >> + mutex_lock(from->lock); >> + list_for_each_entry(ref, &from->ctrl_refs, node) { >> + struct v4l2_ctrl *ctrl = ref->ctrl; >> + struct v4l2_ctrl_ref *new_ref; >> + >> + /* Skip refs inherited from other devices */ >> + if (ref->from_other_dev) >> + continue; >> + /* And buttons */ >> + if (ctrl->type == V4L2_CTRL_TYPE_BUTTON) >> + continue; >> + err = handler_new_ref(hdl, ctrl, &new_ref, false, true); >> + if (err) >> + break; >> + } >> + mutex_unlock(from->lock); >> + return err; >> +} >> + >> +static void v4l2_ctrl_request_queue(struct media_request_object *obj) >> +{ >> + struct v4l2_ctrl_handler *hdl = >> + container_of(obj, struct v4l2_ctrl_handler, req_obj); >> + struct v4l2_ctrl_handler *main_hdl = obj->priv; >> + struct v4l2_ctrl_handler *prev_hdl = NULL; >> + struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL; >> + >> + if (list_empty(&main_hdl->requests_queued)) >> + goto queue; >> + >> + prev_hdl = list_last_entry(&main_hdl->requests_queued, >> + struct v4l2_ctrl_handler, requests_queued); >> + /* >> + * Note: prev_hdl and hdl must contain the same list of control >> + * references, so if any differences are detected then that is a >> + * driver bug and the WARN_ON is triggered. >> + */ >> + mutex_lock(prev_hdl->lock); >> + ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs, >> + struct v4l2_ctrl_ref, node); >> + list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) { >> + if (ref_ctrl->req) >> + continue; >> + while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) { >> + /* Should never happen, but just in case... */ >> + if (list_is_last(&ref_ctrl_prev->node, >> + &prev_hdl->ctrl_refs)) > > If should never happen, please use unlikely(). That makes clearer > while doing some speed optimization). Sorry, I won't make this change. There is no optimization benefit from using unlikely (there almost never is) and it just pollutes the code. > >> + break; >> + ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node); >> + } >> + if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id)) >> + break; >> + ref_ctrl->req = ref_ctrl_prev->req; >> + } >> + mutex_unlock(prev_hdl->lock); >> +queue: >> + list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued); > > Shouldn't list changes be serialized? Ok, the ioctls are serialized, but > device removal/unbind can happen any time for hot-plugged devices[1]. They are serialized. 
This function is (indirectly) called from media_request_ioctl_queue() which has req_queue_mutex locked. This should all be safe. > > [1] yeah, I know that, right now, the framework is meant to be used only > by codecs that are on SoC, but I'm pretty sure we'll end by using it on > other use cases in the future. As this is core code, we should be sure > that it will not cause troubles due to the lack of a proper locking, as > I doubt we'll review the locks when adding other use cases. > >> + hdl->request_is_queued = true; >> +} >> + >> +static void v4l2_ctrl_request_unbind(struct media_request_object *obj) >> +{ >> + struct v4l2_ctrl_handler *hdl = >> + container_of(obj, struct v4l2_ctrl_handler, req_obj); >> + >> + list_del_init(&hdl->requests); >> + if (hdl->request_is_queued) { >> + list_del_init(&hdl->requests_queued); >> + hdl->request_is_queued = false; >> + } >> +} >> + >> +static void v4l2_ctrl_request_release(struct media_request_object *obj) >> +{ >> + struct v4l2_ctrl_handler *hdl = >> + container_of(obj, struct v4l2_ctrl_handler, req_obj); >> + >> + v4l2_ctrl_handler_free(hdl); >> + kfree(hdl); >> +} >> + >> +static const struct media_request_object_ops req_ops = { >> + .queue = v4l2_ctrl_request_queue, >> + .unbind = v4l2_ctrl_request_unbind, >> + .release = v4l2_ctrl_request_release, >> +}; >> + >> +static int v4l2_ctrl_request_bind(struct media_request *req, >> + struct v4l2_ctrl_handler *hdl, >> + struct v4l2_ctrl_handler *from) >> +{ >> + int ret; >> + >> + ret = v4l2_ctrl_request_clone(hdl, from); >> + >> + if (!ret) { >> + ret = media_request_object_bind(req, &req_ops, >> + from, &hdl->req_obj); >> + if (!ret) >> + list_add_tail(&hdl->requests, &from->requests); >> + } >> + return ret; >> +} >> >> /* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS: >> >> @@ -2898,6 +3053,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, >> >> if (cs->which && >> cs->which != V4L2_CTRL_WHICH_DEF_VAL && >> + cs->which != V4L2_CTRL_WHICH_REQUEST_VAL && >> V4L2_CTRL_ID2WHICH(id) != cs->which) >> return -EINVAL; >> >> @@ -2977,13 +3133,12 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, >> whether there are any controls at all. */ >> static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) >> { >> - if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL) >> + if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL || >> + which == V4L2_CTRL_WHICH_REQUEST_VAL) >> return 0; >> return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; >> } >> >> - >> - >> /* Get extended controls. Allocates the helpers array if needed. 
*/ >> int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs) >> { >> @@ -3049,8 +3204,12 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs >> u32 idx = i; >> >> do { >> - ret = ctrl_to_user(cs->controls + idx, >> - helpers[idx].ref->ctrl); >> + if (helpers[idx].ref->req) >> + ret = req_to_user(cs->controls + idx, >> + helpers[idx].ref->req); >> + else >> + ret = ctrl_to_user(cs->controls + idx, >> + helpers[idx].ref->ctrl); >> idx = helpers[idx].next; >> } while (!ret && idx); >> } >> @@ -3336,7 +3495,16 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, >> } while (!ret && idx); >> >> if (!ret) >> - ret = try_or_set_cluster(fh, master, set, 0); >> + ret = try_or_set_cluster(fh, master, >> + !hdl->req_obj.req && set, 0); >> + if (!ret && hdl->req_obj.req && set) { >> + for (j = 0; j < master->ncontrols; j++) { >> + struct v4l2_ctrl_ref *ref = >> + find_ref(hdl, master->cluster[j]->id); >> + >> + new_to_req(ref); >> + } >> + } >> >> /* Copy the new values back to userspace. */ >> if (!ret) { >> @@ -3463,6 +3631,152 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) >> } >> EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); >> >> +void v4l2_ctrl_request_complete(struct media_request *req, >> + struct v4l2_ctrl_handler *main_hdl) >> +{ >> + struct media_request_object *obj; >> + struct v4l2_ctrl_handler *hdl; >> + struct v4l2_ctrl_ref *ref; >> + >> + if (!req || !main_hdl) >> + return; >> + >> + obj = media_request_object_find(req, &req_ops, main_hdl); >> + if (!obj) >> + return; >> + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); >> + >> + list_for_each_entry(ref, &hdl->ctrl_refs, node) { >> + struct v4l2_ctrl *ctrl = ref->ctrl; >> + struct v4l2_ctrl *master = ctrl->cluster[0]; >> + unsigned int i; >> + >> + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { >> + ref->req = ref; >> + >> + v4l2_ctrl_lock(master); >> + /* g_volatile_ctrl will update the current control values */ >> + for (i = 0; i < master->ncontrols; i++) >> + cur_to_new(master->cluster[i]); >> + call_op(master, g_volatile_ctrl); >> + new_to_req(ref); >> + v4l2_ctrl_unlock(master); >> + continue; >> + } >> + if (ref->req == ref) >> + continue; >> + >> + v4l2_ctrl_lock(ctrl); >> + if (ref->req) >> + ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req); >> + else >> + ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req); >> + v4l2_ctrl_unlock(ctrl); >> + } >> + >> + WARN_ON(!hdl->request_is_queued); >> + list_del_init(&hdl->requests_queued); >> + hdl->request_is_queued = false; >> + media_request_object_complete(obj); >> + media_request_object_put(obj); > > Hmm... nothing prevents that this would race with v4l2_ctrl_handler_free() > and cause double-free (actually double object_put). That would be a driver bug: complete is called when completing a request, and if this happens after v4l2_ctrl_handler_free is called, then the driver got the cleanup sequence wrong (and this won't be the only problem that driver has!). 
> >> +} >> +EXPORT_SYMBOL(v4l2_ctrl_request_complete); >> + >> +void v4l2_ctrl_request_setup(struct media_request *req, >> + struct v4l2_ctrl_handler *main_hdl) >> +{ >> + struct media_request_object *obj; >> + struct v4l2_ctrl_handler *hdl; >> + struct v4l2_ctrl_ref *ref; >> + >> + if (!req || !main_hdl) >> + return; >> + >> + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) >> + return; >> + >> + obj = media_request_object_find(req, &req_ops, main_hdl); >> + if (!obj) >> + return; > > Shouldn't the above checks produce an error or print something at > the logs? Good question. I think not. This situation would occur if the applications makes a request with only a buffer but no controls, thus making no changes to the controls in this request. This is perfectly legal, so nothing needs to be logged here. Regards, Hans
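The double-free argument above rests on v4l2_ctrl_handler_free() being idempotent: the guard at the top of the function (visible in the hunk quoted above) combined with the handler clearing its bucket array before returning makes a second call a no-op. Roughly, paraphrasing the existing v4l2-ctrls.c code with the body elided:

	void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
	{
		if (hdl == NULL || hdl->buckets == NULL)
			return;

		/* ... unbind/put any request objects, free all refs and controls ... */

		kvfree(hdl->buckets);
		hdl->buckets = NULL;	/* a second call now returns immediately */
	}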
On Tue, 14 Aug 2018 10:34:47 +0200 Hans Verkuil <hverkuil@xs4all.nl> wrote: > >> +void v4l2_ctrl_request_setup(struct media_request *req, > >> + struct v4l2_ctrl_handler *main_hdl) > >> +{ > >> + struct media_request_object *obj; > >> + struct v4l2_ctrl_handler *hdl; > >> + struct v4l2_ctrl_ref *ref; > >> + > >> + if (!req || !main_hdl) > >> + return; > >> + > >> + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) > >> + return; > >> + > >> + obj = media_request_object_find(req, &req_ops, main_hdl); > >> + if (!obj) > >> + return; > > > > Shouldn't the above checks produce an error or print something at > > the logs? > > Good question. > > I think not. This situation would occur if the applications makes a request > with only a buffer but no controls, thus making no changes to the controls in > this request. > > This is perfectly legal, so nothing needs to be logged here. Ok, makes sense. Please add a note in the source code explaining that, as this is not obvious to a casual code reviewer. Thanks, Mauro
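The note Mauro asks for would presumably go right next to the early return in v4l2_ctrl_request_setup(); hypothetical wording, not the actual follow-up patch:

	obj = media_request_object_find(req, &req_ops, main_hdl);
	/*
	 * A request may legitimately contain only buffers and no control
	 * values for this handler, in which case no control handler object
	 * is bound to it. This is not an error, so return silently.
	 */
	if (!obj)
		return;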
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 570b6f8ae46a..b8ff6d6b14cd 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1668,6 +1668,13 @@ static int new_to_user(struct v4l2_ext_control *c, return ptr_to_user(c, ctrl, ctrl->p_new); } +/* Helper function: copy the request value back to the caller */ +static int req_to_user(struct v4l2_ext_control *c, + struct v4l2_ctrl_ref *ref) +{ + return ptr_to_user(c, ref->ctrl, ref->p_req); +} + /* Helper function: copy the initial control value back to the caller */ static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) { @@ -1787,6 +1794,26 @@ static void cur_to_new(struct v4l2_ctrl *ctrl) ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new); } +/* Copy the new value to the request value */ +static void new_to_req(struct v4l2_ctrl_ref *ref) +{ + if (!ref) + return; + ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req); + ref->req = ref; +} + +/* Copy the request value to the new value */ +static void req_to_new(struct v4l2_ctrl_ref *ref) +{ + if (!ref) + return; + if (ref->req) + ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new); + else + ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new); +} + /* Return non-zero if one or more of the controls in the cluster has a new value that differs from the current value. */ static int cluster_changed(struct v4l2_ctrl *master) @@ -1896,6 +1923,9 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, lockdep_set_class_and_name(hdl->lock, key, name); INIT_LIST_HEAD(&hdl->ctrls); INIT_LIST_HEAD(&hdl->ctrl_refs); + INIT_LIST_HEAD(&hdl->requests); + INIT_LIST_HEAD(&hdl->requests_queued); + hdl->request_is_queued = false; hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8; hdl->buckets = kvmalloc_array(hdl->nr_of_buckets, sizeof(hdl->buckets[0]), @@ -1916,6 +1946,14 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl) if (hdl == NULL || hdl->buckets == NULL) return; + if (!hdl->req_obj.req && !list_empty(&hdl->requests)) { + struct v4l2_ctrl_handler *req, *next_req; + + list_for_each_entry_safe(req, next_req, &hdl->requests, requests) { + media_request_object_unbind(&req->req_obj); + media_request_object_put(&req->req_obj); + } + } mutex_lock(hdl->lock); /* Free all nodes */ list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) { @@ -2837,6 +2875,123 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm) } EXPORT_SYMBOL(v4l2_querymenu); +static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_handler *from) +{ + struct v4l2_ctrl_ref *ref; + int err; + + if (WARN_ON(!hdl || hdl == from)) + return -EINVAL; + + if (hdl->error) + return hdl->error; + + WARN_ON(hdl->lock != &hdl->_lock); + + mutex_lock(from->lock); + list_for_each_entry(ref, &from->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl_ref *new_ref; + + /* Skip refs inherited from other devices */ + if (ref->from_other_dev) + continue; + /* And buttons */ + if (ctrl->type == V4L2_CTRL_TYPE_BUTTON) + continue; + err = handler_new_ref(hdl, ctrl, &new_ref, false, true); + if (err) + break; + } + mutex_unlock(from->lock); + return err; +} + +static void v4l2_ctrl_request_queue(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + struct v4l2_ctrl_handler *main_hdl = obj->priv; + struct v4l2_ctrl_handler *prev_hdl = NULL; + struct v4l2_ctrl_ref *ref_ctrl, 
*ref_ctrl_prev = NULL; + + if (list_empty(&main_hdl->requests_queued)) + goto queue; + + prev_hdl = list_last_entry(&main_hdl->requests_queued, + struct v4l2_ctrl_handler, requests_queued); + /* + * Note: prev_hdl and hdl must contain the same list of control + * references, so if any differences are detected then that is a + * driver bug and the WARN_ON is triggered. + */ + mutex_lock(prev_hdl->lock); + ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs, + struct v4l2_ctrl_ref, node); + list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) { + if (ref_ctrl->req) + continue; + while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) { + /* Should never happen, but just in case... */ + if (list_is_last(&ref_ctrl_prev->node, + &prev_hdl->ctrl_refs)) + break; + ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node); + } + if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id)) + break; + ref_ctrl->req = ref_ctrl_prev->req; + } + mutex_unlock(prev_hdl->lock); +queue: + list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued); + hdl->request_is_queued = true; +} + +static void v4l2_ctrl_request_unbind(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_del_init(&hdl->requests); + if (hdl->request_is_queued) { + list_del_init(&hdl->requests_queued); + hdl->request_is_queued = false; + } +} + +static void v4l2_ctrl_request_release(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + + v4l2_ctrl_handler_free(hdl); + kfree(hdl); +} + +static const struct media_request_object_ops req_ops = { + .queue = v4l2_ctrl_request_queue, + .unbind = v4l2_ctrl_request_unbind, + .release = v4l2_ctrl_request_release, +}; + +static int v4l2_ctrl_request_bind(struct media_request *req, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl_handler *from) +{ + int ret; + + ret = v4l2_ctrl_request_clone(hdl, from); + + if (!ret) { + ret = media_request_object_bind(req, &req_ops, + from, &hdl->req_obj); + if (!ret) + list_add_tail(&hdl->requests, &from->requests); + } + return ret; +} /* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS: @@ -2898,6 +3053,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, if (cs->which && cs->which != V4L2_CTRL_WHICH_DEF_VAL && + cs->which != V4L2_CTRL_WHICH_REQUEST_VAL && V4L2_CTRL_ID2WHICH(id) != cs->which) return -EINVAL; @@ -2977,13 +3133,12 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, whether there are any controls at all. */ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) { - if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL) + if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL || + which == V4L2_CTRL_WHICH_REQUEST_VAL) return 0; return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; } - - /* Get extended controls. Allocates the helpers array if needed. 
*/ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs) { @@ -3049,8 +3204,12 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs u32 idx = i; do { - ret = ctrl_to_user(cs->controls + idx, - helpers[idx].ref->ctrl); + if (helpers[idx].ref->req) + ret = req_to_user(cs->controls + idx, + helpers[idx].ref->req); + else + ret = ctrl_to_user(cs->controls + idx, + helpers[idx].ref->ctrl); idx = helpers[idx].next; } while (!ret && idx); } @@ -3336,7 +3495,16 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, } while (!ret && idx); if (!ret) - ret = try_or_set_cluster(fh, master, set, 0); + ret = try_or_set_cluster(fh, master, + !hdl->req_obj.req && set, 0); + if (!ret && hdl->req_obj.req && set) { + for (j = 0; j < master->ncontrols; j++) { + struct v4l2_ctrl_ref *ref = + find_ref(hdl, master->cluster[j]->id); + + new_to_req(ref); + } + } /* Copy the new values back to userspace. */ if (!ret) { @@ -3463,6 +3631,152 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); +void v4l2_ctrl_request_complete(struct media_request *req, + struct v4l2_ctrl_handler *main_hdl) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl_ref *ref; + + if (!req || !main_hdl) + return; + + obj = media_request_object_find(req, &req_ops, main_hdl); + if (!obj) + return; + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl *master = ctrl->cluster[0]; + unsigned int i; + + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { + ref->req = ref; + + v4l2_ctrl_lock(master); + /* g_volatile_ctrl will update the current control values */ + for (i = 0; i < master->ncontrols; i++) + cur_to_new(master->cluster[i]); + call_op(master, g_volatile_ctrl); + new_to_req(ref); + v4l2_ctrl_unlock(master); + continue; + } + if (ref->req == ref) + continue; + + v4l2_ctrl_lock(ctrl); + if (ref->req) + ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req); + else + ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req); + v4l2_ctrl_unlock(ctrl); + } + + WARN_ON(!hdl->request_is_queued); + list_del_init(&hdl->requests_queued); + hdl->request_is_queued = false; + media_request_object_complete(obj); + media_request_object_put(obj); +} +EXPORT_SYMBOL(v4l2_ctrl_request_complete); + +void v4l2_ctrl_request_setup(struct media_request *req, + struct v4l2_ctrl_handler *main_hdl) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl_ref *ref; + + if (!req || !main_hdl) + return; + + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) + return; + + obj = media_request_object_find(req, &req_ops, main_hdl); + if (!obj) + return; + if (obj->completed) { + media_request_object_put(obj); + return; + } + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_for_each_entry(ref, &hdl->ctrl_refs, node) + ref->req_done = false; + + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl *master = ctrl->cluster[0]; + bool have_new_data = false; + int i; + + /* + * Skip if this control was already handled by a cluster. + * Skip button controls and read-only controls. 
+ */ + if (ref->req_done || ctrl->type == V4L2_CTRL_TYPE_BUTTON || + (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) + continue; + + v4l2_ctrl_lock(master); + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + struct v4l2_ctrl_ref *r = + find_ref(hdl, master->cluster[i]->id); + + if (r->req && r == r->req) { + have_new_data = true; + break; + } + } + } + if (!have_new_data) { + v4l2_ctrl_unlock(master); + continue; + } + + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + struct v4l2_ctrl_ref *r = + find_ref(hdl, master->cluster[i]->id); + + req_to_new(r); + master->cluster[i]->is_new = 1; + r->req_done = true; + } + } + /* + * For volatile autoclusters that are currently in auto mode + * we need to discover if it will be set to manual mode. + * If so, then we have to copy the current volatile values + * first since those will become the new manual values (which + * may be overwritten by explicit new values from this set + * of controls). + */ + if (master->is_auto && master->has_volatiles && + !is_cur_manual(master)) { + s32 new_auto_val = *master->p_new.p_s32; + + /* + * If the new value == the manual value, then copy + * the current volatile values. + */ + if (new_auto_val == master->manual_mode_value) + update_from_auto_cluster(master); + } + + try_or_set_cluster(NULL, master, true, 0); + + v4l2_ctrl_unlock(master); + } + + media_request_object_put(obj); +} +EXPORT_SYMBOL(v4l2_ctrl_request_setup); + void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv) { if (ctrl == NULL) diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index 3f4e062d4e3d..3617d5077b19 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -250,6 +250,12 @@ struct v4l2_ctrl { * ``prepare_ext_ctrls`` function at ``v4l2-ctrl.c``. * @from_other_dev: If true, then @ctrl was defined in another * device than the &struct v4l2_ctrl_handler. + * @req_done: Internal flag: if the control handler containing this control + * reference is bound to a media request, then this is set when + * the control has been applied. This prevents applying controls + * from a cluster with multiple controls twice (when the first + * control of a cluster is applied, they all are). + * @req: If set, this refers to another request that sets this control. * @p_req: If the control handler containing this control reference * is bound to a media request, then this points to the * value of the control that should be applied when the request @@ -266,6 +272,8 @@ struct v4l2_ctrl_ref { struct v4l2_ctrl *ctrl; struct v4l2_ctrl_helper *helper; bool from_other_dev; + bool req_done; + struct v4l2_ctrl_ref *req; union v4l2_ctrl_ptr p_req; }; @@ -290,6 +298,15 @@ struct v4l2_ctrl_ref { * @notify_priv: Passed as argument to the v4l2_ctrl notify callback. * @nr_of_buckets: Total number of buckets in the array. * @error: The error code of the first failed control addition. + * @request_is_queued: True if the request was queued. + * @requests: List to keep track of open control handler request objects. + * For the parent control handler (@req_obj.req == NULL) this + * is the list header. When the parent control handler is + * removed, it has to unbind and put all these requests since + * they refer to the parent. + * @requests_queued: List of the queued requests. This determines the order + * in which these controls are applied. Once the request is + * completed it is removed from this list. 
* @req_obj: The &struct media_request_object, used to link into a * &struct media_request. This request object has a refcount. */ @@ -304,6 +321,9 @@ struct v4l2_ctrl_handler { void *notify_priv; u16 nr_of_buckets; int error; + bool request_is_queued; + struct list_head requests; + struct list_head requests_queued; struct media_request_object req_obj; }; @@ -1062,6 +1082,37 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, */ __poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait); +/** + * v4l2_ctrl_request_setup - helper function to apply control values in a request + * + * @req: The request + * @hdl: The control handler + * + * This is a helper function to call the control handler's s_ctrl callback with + * the control values contained in the request. Do note that this approach of + * applying control values in a request is only applicable to memory-to-memory + * devices. + */ +void v4l2_ctrl_request_setup(struct media_request *req, + struct v4l2_ctrl_handler *hdl); + +/** + * v4l2_ctrl_request_complete - Complete a control handler request object + * + * @req: The request + * @hdl: The control handler + * + * This function is to be called on each control handler that may have had a + * request object associated with it, i.e. control handlers of a driver that + * supports requests. + * + * The function first obtains the values of any volatile controls in the control + * handler and attach them to the request. Then, the function completes the + * request object. + */ +void v4l2_ctrl_request_complete(struct media_request *req, + struct v4l2_ctrl_handler *hdl); + /* Helpers for ioctl_ops */ /**
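The two helpers documented above are meant to be called from a memory-to-memory driver's job handling. A rough usage sketch for a stateless decoder follows; the driver structure and names (foo_ctx, ctx->hdl, ctx->dev->m2m_dev, foo_start_decode) are assumptions made for illustration only and are not part of this patch:

	/* Called by the m2m framework to start one job. */
	static void foo_device_run(void *priv)
	{
		struct foo_ctx *ctx = priv;
		struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
		struct media_request *req = src->vb2_buf.req_obj.req;

		/* Apply the control values carried by this request. */
		v4l2_ctrl_request_setup(req, &ctx->hdl);

		foo_start_decode(ctx, src);
	}

	/* Called once the hardware signals that the job is done. */
	static void foo_job_done(struct foo_ctx *ctx, struct media_request *req)
	{
		/* Store volatile control values and mark the request object complete. */
		v4l2_ctrl_request_complete(req, &ctx->hdl);
		v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
	}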