[4/4] AMD/IOMMU: Treat head/tail pointers as byte offsets
diff mbox series

Message ID 20200203144340.4614-5-andrew.cooper3@citrix.com
State New
Headers show
Series
  • AMD/IOMMU: Cleanup
Related show

Commit Message

Andrew Cooper Feb. 3, 2020, 2:43 p.m. UTC
The MMIO registers are already byte offsets.  Using them in this form removes
the need to shift their values for use.

It is also inefficient to store both entries and alloc_size (which differ only
by the fixed entry_size factor).  Rename alloc_size to size, and drop entries entirely,
which simplifies the allocation/deallocation helpers slightly.

Mark send_iommu_command() and invalidate_iommu_all() as static, as they have
no external declaration or callers.

Overall, this simplification allows GCC to optimise sufficiently to construct
commands directly in the command ring, rather than on the stack and copying
them into place.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wl@xen.org>
CC: Roger Pau Monné <roger.pau@citrix.com>

Tested on a Rome system.
---
 xen/drivers/passthrough/amd/iommu-defs.h |  1 -
 xen/drivers/passthrough/amd/iommu.h      | 18 ++----------------
 xen/drivers/passthrough/amd/iommu_cmd.c  | 20 ++++++++------------
 xen/drivers/passthrough/amd/iommu_init.c | 30 +++++++++++++-----------------
 4 files changed, 23 insertions(+), 46 deletions(-)

Comments

Jan Beulich Feb. 5, 2020, 10:36 a.m. UTC | #1
On 03.02.2020 15:43, Andrew Cooper wrote:
> --- a/xen/drivers/passthrough/amd/iommu_cmd.c
> +++ b/xen/drivers/passthrough/amd/iommu_cmd.c
> @@ -24,16 +24,14 @@ static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
>  {
>      uint32_t tail, head;
>  
> -    tail = iommu->cmd_buffer.tail;
> -    if ( ++tail == iommu->cmd_buffer.entries )
> +    tail = iommu->cmd_buffer.tail + IOMMU_CMD_BUFFER_ENTRY_SIZE;
> +    if ( tail == iommu->cmd_buffer.size )
>          tail = 0;
>  
> -    head = iommu_get_rb_pointer(readl(iommu->mmio_base +
> -                                      IOMMU_CMD_BUFFER_HEAD_OFFSET));
> +    head = readl(iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
>      if ( head != tail )

Surely you want to mask off reserved (or more generally
unrelated) bits, before consuming the value for the purpose
here (and elsewhere below)?

> @@ -45,13 +43,11 @@ static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
>  
>  static void commit_iommu_command_buffer(struct amd_iommu *iommu)
>  {
> -    u32 tail = 0;
> -
> -    iommu_set_rb_pointer(&tail, iommu->cmd_buffer.tail);
> -    writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
> +    writel(iommu->cmd_buffer.tail,
> +           iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);

I guess not preserving the reserved bits isn't a problem
right now, but is doing so a good idea in general? (I notice
the head point updating when processing the logs did so
before, so perhaps it's indeed acceptable.)

> @@ -316,22 +316,20 @@ static int iommu_read_log(struct amd_iommu *iommu,
>          IOMMU_PPR_LOG_HEAD_OFFSET;
>  
>      tail = readl(iommu->mmio_base + tail_offest);
> -    tail = iommu_get_rb_pointer(tail);
>  
>      while ( tail != log->head )
>      {
>          /* read event log entry */
> -        entry = (u32 *)(log->buffer + log->head * entry_size);
> +        entry = (u32 *)(log->buffer + log->head);

Would you mind dropping the pointless cast here at the same time?

Jan
Andrew Cooper Feb. 10, 2020, 2:55 p.m. UTC | #2
On 05/02/2020 10:36, Jan Beulich wrote:
> On 03.02.2020 15:43, Andrew Cooper wrote:
>> --- a/xen/drivers/passthrough/amd/iommu_cmd.c
>> +++ b/xen/drivers/passthrough/amd/iommu_cmd.c
>> @@ -24,16 +24,14 @@ static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
>>  {
>>      uint32_t tail, head;
>>  
>> -    tail = iommu->cmd_buffer.tail;
>> -    if ( ++tail == iommu->cmd_buffer.entries )
>> +    tail = iommu->cmd_buffer.tail + IOMMU_CMD_BUFFER_ENTRY_SIZE;
>> +    if ( tail == iommu->cmd_buffer.size )
>>          tail = 0;
>>  
>> -    head = iommu_get_rb_pointer(readl(iommu->mmio_base +
>> -                                      IOMMU_CMD_BUFFER_HEAD_OFFSET));
>> +    head = readl(iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
>>      if ( head != tail )
> Surely you want to mask off reserved (or more generally
> unrelated) bits, before consuming the value for the purpose
> here (and elsewhere below)?

Reserved bits are defined in the IOMMU spec to be read-only zero.

It is also undefined behaviour for this value to ever be outside of the
size configured for command buffer, so using the value like this is spec
compliant.

As for actually masking the values, that breaks the optimisers ability
to construct commands in the command ring.  This aspect can be worked
around with other code changes, but I also think it is implausible that
the remaining reserved bits here are going to sprout incompatible future
uses.

>
>> @@ -45,13 +43,11 @@ static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
>>  
>>  static void commit_iommu_command_buffer(struct amd_iommu *iommu)
>>  {
>> -    u32 tail = 0;
>> -
>> -    iommu_set_rb_pointer(&tail, iommu->cmd_buffer.tail);
>> -    writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
>> +    writel(iommu->cmd_buffer.tail,
>> +           iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
> I guess not preserving the reserved bits isn't a problem
> right now, but is doing so a good idea in general?

As above - there are by definition no bits to preserve.

>> @@ -316,22 +316,20 @@ static int iommu_read_log(struct amd_iommu *iommu,
>>          IOMMU_PPR_LOG_HEAD_OFFSET;
>>  
>>      tail = readl(iommu->mmio_base + tail_offest);
>> -    tail = iommu_get_rb_pointer(tail);
>>  
>>      while ( tail != log->head )
>>      {
>>          /* read event log entry */
>> -        entry = (u32 *)(log->buffer + log->head * entry_size);
>> +        entry = (u32 *)(log->buffer + log->head);
> Would you mind dropping the pointless cast here at the same time?

Can do.

~Andrew
Jan Beulich Feb. 10, 2020, 3:02 p.m. UTC | #3
On 10.02.2020 15:55, Andrew Cooper wrote:
> On 05/02/2020 10:36, Jan Beulich wrote:
>> On 03.02.2020 15:43, Andrew Cooper wrote:
>>> --- a/xen/drivers/passthrough/amd/iommu_cmd.c
>>> +++ b/xen/drivers/passthrough/amd/iommu_cmd.c
>>> @@ -24,16 +24,14 @@ static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
>>>  {
>>>      uint32_t tail, head;
>>>  
>>> -    tail = iommu->cmd_buffer.tail;
>>> -    if ( ++tail == iommu->cmd_buffer.entries )
>>> +    tail = iommu->cmd_buffer.tail + IOMMU_CMD_BUFFER_ENTRY_SIZE;
>>> +    if ( tail == iommu->cmd_buffer.size )
>>>          tail = 0;
>>>  
>>> -    head = iommu_get_rb_pointer(readl(iommu->mmio_base +
>>> -                                      IOMMU_CMD_BUFFER_HEAD_OFFSET));
>>> +    head = readl(iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
>>>      if ( head != tail )
>> Surely you want to mask off reserved (or more generally
>> unrelated) bits, before consuming the value for the purpose
>> here (and elsewhere below)?
> 
> Reserved bits are defined in the IOMMU spec to be read-only zero.
> 
> It is also undefined behaviour for this value to ever be outside of the
> size configured for command buffer, so using the value like this is spec
> compliant.
> 
> As for actually masking the values, that breaks the optimisers ability
> to construct commands in the command ring.  This aspect can be worked
> around with other code changes, but I also think it is implausible that
> the remaining reserved bits here are going to sprout incompatible future
> uses.

Implausible - perhaps. But impossible - no. There could be a simple
flag bit appearing somewhere in these registers. I simply don't think it
is a good idea to set a precedent of not honoring reserved bits as
being reserved. The spec saying "read-only zero" can only be viewed
as correct for the current version of the spec, or else why would
the bits be called "reserved" rather than e.g. "read-as-zero"?

Jan
Andrew Cooper Feb. 10, 2020, 3:19 p.m. UTC | #4
On 10/02/2020 15:02, Jan Beulich wrote:
> On 10.02.2020 15:55, Andrew Cooper wrote:
>> On 05/02/2020 10:36, Jan Beulich wrote:
>>> On 03.02.2020 15:43, Andrew Cooper wrote:
>>>> --- a/xen/drivers/passthrough/amd/iommu_cmd.c
>>>> +++ b/xen/drivers/passthrough/amd/iommu_cmd.c
>>>> @@ -24,16 +24,14 @@ static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
>>>>  {
>>>>      uint32_t tail, head;
>>>>  
>>>> -    tail = iommu->cmd_buffer.tail;
>>>> -    if ( ++tail == iommu->cmd_buffer.entries )
>>>> +    tail = iommu->cmd_buffer.tail + IOMMU_CMD_BUFFER_ENTRY_SIZE;
>>>> +    if ( tail == iommu->cmd_buffer.size )
>>>>          tail = 0;
>>>>  
>>>> -    head = iommu_get_rb_pointer(readl(iommu->mmio_base +
>>>> -                                      IOMMU_CMD_BUFFER_HEAD_OFFSET));
>>>> +    head = readl(iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
>>>>      if ( head != tail )
>>> Surely you want to mask off reserved (or more generally
>>> unrelated) bits, before consuming the value for the purpose
>>> here (and elsewhere below)?
>> Reserved bits are defined in the IOMMU spec to be read-only zero.
>>
>> It is also undefined behaviour for this value to ever be outside of the
>> size configured for command buffer, so using the value like this is spec
>> compliant.
>>
>> As for actually masking the values, that breaks the optimisers ability
>> to construct commands in the command ring.  This aspect can be worked
>> around with other code changes, but I also think it is implausible that
>> the remaining reserved bits here are going to sprout incompatible future
>> uses.
> Implausible - perhaps. But impossible - no. There could be a simple
> flag bit appearing somewhere in these registers. I simply don't think it
> is a good idea to set a precedent of not honoring reserved bits as
> being reserved. The spec saying "read-only zero" can only be viewed
> as correct for the current version of the spec,

It's perfectly easy to do forward-compatible changes with a spec written
in this way.

It means that new behaviours have to be opted in to, and this is how the
AMD IOMMU spec has evolved.  Notice how every new feature declares "this
bit is reserved unless $OTHER_THING is enabled."

It is also a very sane way of doing forward compatibility, from
software's point of view.

> or else why would
> the bits be called "reserved" rather than e.g. "read-as-zero"?

Read Table 1, but it also ought to be obvious.  "Reserved", "Resv" and
"Res" are all shorter to write than "read-as-zero", especially in the
diagrams of a few individual bits in a register.

~Andrew
Jan Beulich Feb. 10, 2020, 4:38 p.m. UTC | #5
On 10.02.2020 16:19, Andrew Cooper wrote:
> On 10/02/2020 15:02, Jan Beulich wrote:
>> On 10.02.2020 15:55, Andrew Cooper wrote:
>>> On 05/02/2020 10:36, Jan Beulich wrote:
>>>> On 03.02.2020 15:43, Andrew Cooper wrote:
>>>>> --- a/xen/drivers/passthrough/amd/iommu_cmd.c
>>>>> +++ b/xen/drivers/passthrough/amd/iommu_cmd.c
>>>>> @@ -24,16 +24,14 @@ static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
>>>>>  {
>>>>>      uint32_t tail, head;
>>>>>  
>>>>> -    tail = iommu->cmd_buffer.tail;
>>>>> -    if ( ++tail == iommu->cmd_buffer.entries )
>>>>> +    tail = iommu->cmd_buffer.tail + IOMMU_CMD_BUFFER_ENTRY_SIZE;
>>>>> +    if ( tail == iommu->cmd_buffer.size )
>>>>>          tail = 0;
>>>>>  
>>>>> -    head = iommu_get_rb_pointer(readl(iommu->mmio_base +
>>>>> -                                      IOMMU_CMD_BUFFER_HEAD_OFFSET));
>>>>> +    head = readl(iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
>>>>>      if ( head != tail )
>>>> Surely you want to mask off reserved (or more generally
>>>> unrelated) bits, before consuming the value for the purpose
>>>> here (and elsewhere below)?
>>> Reserved bits are defined in the IOMMU spec to be read-only zero.
>>>
>>> It is also undefined behaviour for this value to ever be outside of the
>>> size configured for command buffer, so using the value like this is spec
>>> compliant.
>>>
>>> As for actually masking the values, that breaks the optimisers ability
>>> to construct commands in the command ring.  This aspect can be worked
>>> around with other code changes, but I also think it is implausible that
>>> the remaining reserved bits here are going to sprout incompatible future
>>> uses.
>> Implausible - perhaps. But impossible - no. There could be a simple
>> flag bit appearing somewhere in these registers. I simply don't think it
>> is a good idea to set a precedent of not honoring reserved bits as
>> being reserved. The spec saying "read-only zero" can only be viewed
>> as correct for the current version of the spec,
> 
> It's perfectly easy to do forward-compatible changes with a spec written
> in this way.
> 
> It means that new behaviours have to be opted in to, and this is how the
> AMD IOMMU spec has evolved.  Notice how every new feature declares "this
> bit is reserved unless $OTHER_THING is enabled."
> 
> It is also a very sane way of doing forward compatibility, from
> software's point of view.

Yes. But does the IOMMU spec spell out that it'll follow this
in the future?

>> or else why would
>> the bits be called "reserved" rather than e.g. "read-as-zero"?
> 
> Read Table 1, but it also ought to be obvious.  "Reserved", "Resv" and
> "Res" are all shorter to write than "read-as-zero", especially in the
> diagrams of a few individual bits in a register.

There's also the common acronym "raz", which is as short. That table
in particular says nothing about future uses of currently reserved
bits. Just take the Extended Feature Register as a reference: How
would new features be advertised (in currently reserved bits) if use
of those bits was to be qualified by yet some other feature bit(s).
Past growth of the set of used bits also hasn't followed the pattern
you seem to suggest.

Don't get me wrong - I agree it's unlikely for these bits to gain
a meaning that would conflict with a more relaxed use like you do
suggest. But I don't think better code generation should be an
argument against having code written as compatibly as possible.

Jan

Patch
diff mbox series

diff --git a/xen/drivers/passthrough/amd/iommu-defs.h b/xen/drivers/passthrough/amd/iommu-defs.h
index 963009de6a..50613ca150 100644
--- a/xen/drivers/passthrough/amd/iommu-defs.h
+++ b/xen/drivers/passthrough/amd/iommu-defs.h
@@ -481,7 +481,6 @@  struct amd_iommu_pte {
 #define INV_IOMMU_ALL_PAGES_ADDRESS      ((1ULL << 63) - 1)
 
 #define IOMMU_RING_BUFFER_PTR_MASK                  0x0007FFF0
-#define IOMMU_RING_BUFFER_PTR_SHIFT                 4
 
 #define IOMMU_CMD_DEVICE_ID_MASK                    0x0000FFFF
 #define IOMMU_CMD_DEVICE_ID_SHIFT                   0
diff --git a/xen/drivers/passthrough/amd/iommu.h b/xen/drivers/passthrough/amd/iommu.h
index 0b598d06f8..1abfdc685a 100644
--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -58,12 +58,11 @@  struct table_struct {
 };
 
 struct ring_buffer {
+    spinlock_t lock;    /* protect buffer pointers */
     void *buffer;
-    unsigned long entries;
-    unsigned long alloc_size;
     uint32_t tail;
     uint32_t head;
-    spinlock_t lock;    /* protect buffer pointers */
+    uint32_t size;
 };
 
 typedef struct iommu_cap {
@@ -379,19 +378,6 @@  static inline int iommu_has_cap(struct amd_iommu *iommu, uint32_t bit)
     return !!(iommu->cap.header & (1u << bit));
 }
 
-/* access tail or head pointer of ring buffer */
-static inline uint32_t iommu_get_rb_pointer(uint32_t reg)
-{
-    return get_field_from_reg_u32(reg, IOMMU_RING_BUFFER_PTR_MASK,
-                                  IOMMU_RING_BUFFER_PTR_SHIFT);
-}
-
-static inline void iommu_set_rb_pointer(uint32_t *reg, uint32_t val)
-{
-    set_field_in_reg_u32(val, *reg, IOMMU_RING_BUFFER_PTR_MASK,
-                         IOMMU_RING_BUFFER_PTR_SHIFT, reg);
-}
-
 /* access device id field from iommu cmd */
 static inline uint16_t iommu_get_devid_from_cmd(uint32_t cmd)
 {
diff --git a/xen/drivers/passthrough/amd/iommu_cmd.c b/xen/drivers/passthrough/amd/iommu_cmd.c
index 166f0e7263..24ef59d436 100644
--- a/xen/drivers/passthrough/amd/iommu_cmd.c
+++ b/xen/drivers/passthrough/amd/iommu_cmd.c
@@ -24,16 +24,14 @@  static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
 {
     uint32_t tail, head;
 
-    tail = iommu->cmd_buffer.tail;
-    if ( ++tail == iommu->cmd_buffer.entries )
+    tail = iommu->cmd_buffer.tail + IOMMU_CMD_BUFFER_ENTRY_SIZE;
+    if ( tail == iommu->cmd_buffer.size )
         tail = 0;
 
-    head = iommu_get_rb_pointer(readl(iommu->mmio_base +
-                                      IOMMU_CMD_BUFFER_HEAD_OFFSET));
+    head = readl(iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
     if ( head != tail )
     {
-        memcpy(iommu->cmd_buffer.buffer +
-               (iommu->cmd_buffer.tail * IOMMU_CMD_BUFFER_ENTRY_SIZE),
+        memcpy(iommu->cmd_buffer.buffer + iommu->cmd_buffer.tail,
                cmd, IOMMU_CMD_BUFFER_ENTRY_SIZE);
 
         iommu->cmd_buffer.tail = tail;
@@ -45,13 +43,11 @@  static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
 
 static void commit_iommu_command_buffer(struct amd_iommu *iommu)
 {
-    u32 tail = 0;
-
-    iommu_set_rb_pointer(&tail, iommu->cmd_buffer.tail);
-    writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET);
+    writel(iommu->cmd_buffer.tail,
+           iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
 }
 
-int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
+static int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
 {
     if ( queue_iommu_command(iommu, cmd) )
     {
@@ -261,7 +257,7 @@  static void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id)
     send_iommu_command(iommu, cmd);
 }
 
-void invalidate_iommu_all(struct amd_iommu *iommu)
+static void invalidate_iommu_all(struct amd_iommu *iommu)
 {
     u32 cmd[4], entry;
 
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index 7bf6fef3ee..cfdeeefc2d 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -117,7 +117,7 @@  static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
     iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
     writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);
 
-    power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
+    power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.size) +
         IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;
 
     entry = 0;
@@ -145,7 +145,7 @@  static void register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu)
     iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
     writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);
 
-    power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
+    power_of2_entries = get_order_from_bytes(iommu->event_log.size) +
                         IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;
 
     entry = 0;
@@ -173,7 +173,7 @@  static void register_iommu_ppr_log_in_mmio_space(struct amd_iommu *iommu)
     iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
     writel(entry, iommu->mmio_base + IOMMU_PPR_LOG_BASE_LOW_OFFSET);
 
-    power_of2_entries = get_order_from_bytes(iommu->ppr_log.alloc_size) +
+    power_of2_entries = get_order_from_bytes(iommu->ppr_log.size) +
                         IOMMU_PPR_LOG_POWER_OF2_ENTRIES_PER_PAGE;
 
     entry = 0;
@@ -300,7 +300,7 @@  static int iommu_read_log(struct amd_iommu *iommu,
                           unsigned int entry_size,
                           void (*parse_func)(struct amd_iommu *, u32 *))
 {
-    u32 tail, head, *entry, tail_offest, head_offset;
+    u32 tail, *entry, tail_offest, head_offset;
 
     BUG_ON(!iommu || ((log != &iommu->event_log) && (log != &iommu->ppr_log)));
     
@@ -316,22 +316,20 @@  static int iommu_read_log(struct amd_iommu *iommu,
         IOMMU_PPR_LOG_HEAD_OFFSET;
 
     tail = readl(iommu->mmio_base + tail_offest);
-    tail = iommu_get_rb_pointer(tail);
 
     while ( tail != log->head )
     {
         /* read event log entry */
-        entry = (u32 *)(log->buffer + log->head * entry_size);
+        entry = (u32 *)(log->buffer + log->head);
 
         parse_func(iommu, entry);
-        if ( ++log->head == log->entries )
+
+        log->head += entry_size;
+        if ( log->head == log->size )
             log->head = 0;
 
         /* update head pointer */
-        head = 0;
-        iommu_set_rb_pointer(&head, log->head);
-
-        writel(head, iommu->mmio_base + head_offset);
+        writel(log->head, iommu->mmio_base + head_offset);
     }
 
     spin_unlock(&log->lock);
@@ -1001,7 +999,7 @@  static void __init deallocate_buffer(void *buf, unsigned long sz)
 
 static void __init deallocate_ring_buffer(struct ring_buffer *ring_buf)
 {
-    deallocate_buffer(ring_buf->buffer, ring_buf->alloc_size);
+    deallocate_buffer(ring_buf->buffer, ring_buf->size);
     ring_buf->buffer = NULL;
     ring_buf->head = 0;
     ring_buf->tail = 0;
@@ -1036,11 +1034,9 @@  static void *__init allocate_ring_buffer(struct ring_buffer *ring_buf,
     ring_buf->tail = 0;
 
     spin_lock_init(&ring_buf->lock);
-    
-    ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes(entries *
-                                                             entry_size);
-    ring_buf->entries = ring_buf->alloc_size / entry_size;
-    ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name, clear);
+
+    ring_buf->size = PAGE_SIZE << get_order_from_bytes(entries * entry_size);
+    ring_buf->buffer = allocate_buffer(ring_buf->size, name, clear);
 
     return ring_buf->buffer;
 }