--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -18,6 +18,7 @@ struct nv50_head_atom {
         struct {
                 u32 mask;
+                u32 owned;
                 u32 olut;
         } wndw;
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2254,12 +2254,28 @@ static int
 nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 {
         struct nv50_atom *atom = nv50_atom(state);
+        struct nv50_core *core = nv50_disp(dev)->core;
         struct drm_connector_state *old_connector_state, *new_connector_state;
         struct drm_connector *connector;
         struct drm_crtc_state *new_crtc_state;
         struct drm_crtc *crtc;
+        struct nv50_head *head;
+        struct nv50_head_atom *asyh;
         int ret, i;
 
+        if (core->assign_windows && core->func->head->static_wndw_map) {
+                drm_for_each_crtc(crtc, dev) {
+                        new_crtc_state = drm_atomic_get_crtc_state(state,
+                                                                   crtc);
+                        if (IS_ERR(new_crtc_state))
+                                return PTR_ERR(new_crtc_state);
+
+                        head = nv50_head(crtc);
+                        asyh = nv50_head_atom(new_crtc_state);
+                        core->func->head->static_wndw_map(head, asyh);
+                }
+        }
+
         /* We need to handle colour management on a per-plane basis. */
         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                 if (new_crtc_state->color_mgmt_changed) {
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -40,6 +40,7 @@ struct nv50_head_func {
         void (*dither)(struct nv50_head *, struct nv50_head_atom *);
         void (*procamp)(struct nv50_head *, struct nv50_head_atom *);
         void (*or)(struct nv50_head *, struct nv50_head_atom *);
+        void (*static_wndw_map)(struct nv50_head *, struct nv50_head_atom *);
 };
 
 extern const struct nv50_head_func head507d;
@@ -86,6 +87,7 @@ int headc37d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
 void headc37d_curs_set(struct nv50_head *, struct nv50_head_atom *);
 void headc37d_curs_clr(struct nv50_head *);
 void headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_static_wndw_map(struct nv50_head *, struct nv50_head_atom *);
 
 extern const struct nv50_head_func headc57d;
 #endif
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -203,6 +203,15 @@ headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
         }
 }
 
+void
+headc37d_static_wndw_map(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+        int i, end;
+
+        for (i = head->base.index * 2, end = i + 2; i < end; i++)
+                asyh->wndw.owned |= BIT(i);
+}
+
 const struct nv50_head_func
 headc37d = {
         .view = headc37d_view,
@@ -218,4 +227,5 @@ headc37d = {
         .dither = headc37d_dither,
         .procamp = headc37d_procamp,
         .or = headc37d_or,
+        .static_wndw_map = headc37d_static_wndw_map,
 };
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -210,4 +210,6 @@ headc57d = {
         .dither = headc37d_dither,
         .procamp = headc57d_procamp,
         .or = headc57d_or,
+        /* TODO: flexible window mappings */
+        .static_wndw_map = headc37d_static_wndw_map,
 };
While we're not quite ready yet to add support for flexible wndw mappings, we are going to need to at least keep track of the static wndw mappings we're currently using in each head's atomic state. We'll likely use this in the future to implement real flexible window mapping, but the primary reason we'll need this is for CRC support.

See: on nvidia hardware, each CRC entry in the CRC notifier dma context has a "tag". This tag corresponds to the nth update on a specific EVO/NvDisplay channel, which itself is referred to as the "controlling channel". For gf119+ this can be the core channel, ovly channel, or base channel. Since we don't expose CRC entry tags to userspace, we simply ignore this feature and always use the core channel as the controlling channel. Simple.

Things get a little bit more complicated on gv100+, though. GV100+ only lets us set the controlling channel to a specific wndw channel, and that wndw must be owned by the head that we're grabbing CRCs from when we enable CRC generation. Thus, we always need to make sure that each atomic head state has at least one wndw that is mapped to the head, which will be used as the controlling channel.

Note that since we don't have flexible wndw mappings yet, we don't expect to run into any scenarios where a head has no mapped wndws. When we do add support for flexible wndw mappings, however, we'll need to make sure that we handle reprogramming CRC capture if the controlling wndw is moved to another head (and potentially reject the new head state entirely if we can't find another available wndw to replace it).

With that being said, nouveau currently tracks wndw visibility on heads. It does not keep track of the actual ownership mappings, which are (currently) statically programmed. To fix this, we introduce another bitmask into nv50_head_atom.wndw to keep track of ownership separately from visibility. We then introduce an nv50_head callback to handle populating the wndw ownership map, and call it during the atomic check phase when core->assign_windows is set to true.

Signed-off-by: Lyude Paul <lyude@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/atom.h     |  1 +
 drivers/gpu/drm/nouveau/dispnv50/disp.c     | 16 ++++++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/head.h     |  2 ++
 drivers/gpu/drm/nouveau/dispnv50/headc37d.c | 10 ++++++++++
 drivers/gpu/drm/nouveau/dispnv50/headc57d.c |  2 ++
 5 files changed, 31 insertions(+)
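
[Editor's note] As an aside for anyone who wants to play with the ownership-mask arithmetic outside the driver, below is a small standalone C sketch. It mirrors the mapping programmed by headc37d_static_wndw_map() (head N owns wndws 2N and 2N+1) and shows one way a future CRC implementation might pick a controlling wndw out of asyh->wndw.owned. The struct and the pick_ctrl_wndw() helper are illustrative assumptions only; they are not part of nouveau.

/*
 * Standalone illustration (not driver code): the static head -> wndw
 * mapping used on gv100+, where head N owns wndw channels 2N and 2N+1,
 * plus a hypothetical helper that picks a controlling wndw for CRC
 * capture out of the ownership mask.  Builds with any C compiler.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Mirrors the two bitmasks in nv50_head_atom.wndw after this patch. */
struct head_wndw_state {
        uint32_t mask;  /* wndws currently visible on this head */
        uint32_t owned; /* wndws statically assigned to this head */
};

/* Same arithmetic as headc37d_static_wndw_map(). */
static void static_wndw_map(int head_index, struct head_wndw_state *wndw)
{
        int i, end;

        for (i = head_index * 2, end = i + 2; i < end; i++)
                wndw->owned |= BIT(i);
}

/*
 * Hypothetical: choose the lowest owned wndw as the CRC "controlling
 * channel", since gv100+ requires that channel to be owned by the head.
 * Returns -1 if the head owns no wndws (cannot happen with static maps).
 */
static int pick_ctrl_wndw(const struct head_wndw_state *wndw)
{
        int i;

        for (i = 0; i < 32; i++)
                if (wndw->owned & BIT(i))
                        return i;
        return -1;
}

int main(void)
{
        struct head_wndw_state wndw = { 0, 0 };

        static_wndw_map(2, &wndw); /* head 2 owns wndws 4 and 5 */
        printf("owned = 0x%02x, controlling wndw = %d\n",
               (unsigned)wndw.owned, pick_ctrl_wndw(&wndw));
        return 0;
}

With the static map this always resolves to wndw 2N; the value of keeping it in the atomic state is that the same lookup keeps working once the mappings become flexible, at which point the CRC code would also have to handle the controlling wndw moving to another head, as described above.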