@@ -109,7 +109,9 @@ static int virtsnd_pcm_build_hw(struct virtio_pcm_substream *vss,
SNDRV_PCM_INFO_BATCH |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_PAUSE;
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_REWINDS |
+ SNDRV_PCM_INFO_SYNC_APPLPTR;
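+	/*
+	 * With NO_REWINDS and SYNC_APPLPTR set above, the application pointer
+	 * never moves backwards and every update of it reaches the driver,
+	 * which allows data to be forwarded to the device from the .ack
+	 * callback.
+	 */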
if (!info->channels_min || info->channels_min > info->channels_max) {
dev_err(&vdev->dev,
@@ -471,7 +473,7 @@ int virtsnd_pcm_build_devs(struct virtio_snd *snd)
for (kss = ks->substream; kss; kss = kss->next)
vs->substreams[kss->number]->substream = kss;
- snd_pcm_set_ops(vpcm->pcm, i, &virtsnd_pcm_ops);
+ snd_pcm_set_ops(vpcm->pcm, i, &virtsnd_pcm_ops[i]);
}
snd_pcm_set_managed_buffer_all(vpcm->pcm,
@@ -9,6 +9,7 @@
#include <linux/atomic.h>
#include <linux/virtio_config.h>
#include <sound/pcm.h>
+#include <sound/pcm-indirect.h>
struct virtio_pcm;
struct virtio_pcm_msg;
@@ -21,6 +22,7 @@ struct virtio_pcm_msg;
* @direction: Stream data flow direction (SNDRV_PCM_STREAM_XXX).
* @features: Stream VirtIO feature bit map (1 << VIRTIO_SND_PCM_F_XXX).
* @substream: Kernel ALSA substream.
+ * @pcm_indirect: Kernel indirect PCM structure.
* @hw: Kernel ALSA substream hardware descriptor.
* @elapsed_period: Kernel work to handle the elapsed period state.
* @lock: Spinlock that protects fields shared by interrupt handlers and
@@ -46,6 +48,7 @@ struct virtio_pcm_substream {
u32 direction;
u32 features;
struct snd_pcm_substream *substream;
+ struct snd_pcm_indirect pcm_indirect;
struct snd_pcm_hardware hw;
struct work_struct elapsed_period;
spinlock_t lock;
@@ -57,7 +60,6 @@ struct virtio_pcm_substream {
bool suspended;
struct virtio_pcm_msg **msgs;
unsigned int nmsgs;
- int msg_last_enqueued;
unsigned int msg_count;
wait_queue_head_t msg_empty;
};
@@ -90,7 +92,7 @@ struct virtio_pcm {
struct virtio_pcm_stream streams[SNDRV_PCM_STREAM_LAST + 1];
};
-extern const struct snd_pcm_ops virtsnd_pcm_ops;
+extern const struct snd_pcm_ops virtsnd_pcm_ops[];
int virtsnd_pcm_validate(struct virtio_device *vdev);
@@ -117,7 +119,8 @@ int virtsnd_pcm_msg_alloc(struct virtio_pcm_substream *vss,
void virtsnd_pcm_msg_free(struct virtio_pcm_substream *vss);
-int virtsnd_pcm_msg_send(struct virtio_pcm_substream *vss);
+int virtsnd_pcm_msg_send(struct virtio_pcm_substream *vss, unsigned long offset,
+ unsigned long bytes);
unsigned int virtsnd_pcm_msg_pending_num(struct virtio_pcm_substream *vss);
@@ -155,7 +155,6 @@ int virtsnd_pcm_msg_alloc(struct virtio_pcm_substream *vss,
sizeof(msg->xfer));
sg_init_one(&msg->sgs[PCM_MSG_SG_STATUS], &msg->status,
sizeof(msg->status));
- msg->length = period_bytes;
virtsnd_pcm_sg_from(&msg->sgs[PCM_MSG_SG_DATA], sg_num, data,
period_bytes);
@@ -186,61 +185,75 @@ void virtsnd_pcm_msg_free(struct virtio_pcm_substream *vss)
/**
* virtsnd_pcm_msg_send() - Send asynchronous I/O messages.
* @vss: VirtIO PCM substream.
+ * @offset: starting offset of the updated area in the PCM buffer.
+ * @bytes: number of bytes that have been updated.
*
* All messages are organized in an ordered circular list. Each time the
* function is called, all currently non-enqueued messages are added to the
- * virtqueue. For this, the function keeps track of two values:
- *
- * msg_last_enqueued = index of the last enqueued message,
- * msg_count = # of pending messages in the virtqueue.
+ * virtqueue. For this, the function uses @offset and @bytes to calculate
+ * which messages need to be added.
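+ *
+ * For example, with period_bytes = 4096, a call with offset = 6144 and
+ * bytes = 4096 spans messages 1 and 2: message 1 receives the last 2048
+ * bytes of its period and is enqueued once its accumulated length reaches
+ * period_bytes, while message 2 receives its first 2048 bytes and stays
+ * pending until a later call completes it.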
*
* Context: Any context. Expects the tx/rx queue and the VirtIO substream
* spinlocks to be held by caller.
* Return: 0 on success, -errno on failure.
*/
-int virtsnd_pcm_msg_send(struct virtio_pcm_substream *vss)
+int virtsnd_pcm_msg_send(struct virtio_pcm_substream *vss, unsigned long offset,
+ unsigned long bytes)
{
- struct snd_pcm_runtime *runtime = vss->substream->runtime;
struct virtio_snd *snd = vss->snd;
struct virtio_device *vdev = snd->vdev;
struct virtqueue *vqueue = virtsnd_pcm_queue(vss)->vqueue;
- int i;
- int n;
+ unsigned long period_bytes = snd_pcm_lib_period_bytes(vss->substream);
+ unsigned long start, end, i;
+ unsigned int msg_count = vss->msg_count;
bool notify = false;
+ int rc;
- i = (vss->msg_last_enqueued + 1) % runtime->periods;
- n = runtime->periods - vss->msg_count;
+ start = offset / period_bytes;
+ end = (offset + bytes - 1) / period_bytes;
- for (; n; --n, i = (i + 1) % runtime->periods) {
+ for (i = start; i <= end; i++) {
struct virtio_pcm_msg *msg = vss->msgs[i];
struct scatterlist *psgs[] = {
&msg->sgs[PCM_MSG_SG_XFER],
&msg->sgs[PCM_MSG_SG_DATA],
&msg->sgs[PCM_MSG_SG_STATUS]
};
- int rc;
-
- msg->xfer.stream_id = cpu_to_le32(vss->sid);
- memset(&msg->status, 0, sizeof(msg->status));
-
- if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK)
- rc = virtqueue_add_sgs(vqueue, psgs, 2, 1, msg,
- GFP_ATOMIC);
- else
- rc = virtqueue_add_sgs(vqueue, psgs, 1, 2, msg,
- GFP_ATOMIC);
-
- if (rc) {
- dev_err(&vdev->dev,
- "SID %u: failed to send I/O message\n",
- vss->sid);
- return rc;
+ unsigned long n;
+
+ n = period_bytes - (offset % period_bytes);
+ if (n > bytes)
+ n = bytes;
+
+ msg->length += n;
+ if (msg->length == period_bytes) {
+ msg->xfer.stream_id = cpu_to_le32(vss->sid);
+ memset(&msg->status, 0, sizeof(msg->status));
+
+ if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK)
+ rc = virtqueue_add_sgs(vqueue, psgs, 2, 1, msg,
+ GFP_ATOMIC);
+ else
+ rc = virtqueue_add_sgs(vqueue, psgs, 1, 2, msg,
+ GFP_ATOMIC);
+
+ if (rc) {
+ dev_err(&vdev->dev,
+ "SID %u: failed to send I/O message\n",
+ vss->sid);
+ return rc;
+ }
+
+ vss->msg_count++;
}
- vss->msg_last_enqueued = i;
- vss->msg_count++;
+ offset = 0;
+ bytes -= n;
}
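+	/* If nothing new was enqueued, the device does not need a kick. */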
+ if (msg_count == vss->msg_count)
+ return 0;
+
if (!(vss->features & (1U << VIRTIO_SND_PCM_F_MSG_POLLING)))
notify = virtqueue_kick_prepare(vqueue);
@@ -309,6 +322,8 @@ static void virtsnd_pcm_msg_complete(struct virtio_pcm_msg *msg,
if (vss->hw_ptr >= vss->buffer_bytes)
vss->hw_ptr -= vss->buffer_bytes;
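+	/* Reset the accumulated length so the message can be refilled. */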
+ msg->length = 0;
+
vss->xfer_xrun = false;
vss->msg_count--;
@@ -320,8 +335,6 @@ static void virtsnd_pcm_msg_complete(struct virtio_pcm_msg *msg,
le32_to_cpu(msg->status.latency_bytes));
schedule_work(&vss->elapsed_period);
-
- virtsnd_pcm_msg_send(vss);
} else if (!vss->msg_count) {
wake_up_all(&vss->msg_empty);
}
@@ -282,7 +282,6 @@ static int virtsnd_pcm_prepare(struct snd_pcm_substream *substream)
vss->buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
vss->hw_ptr = 0;
- vss->msg_last_enqueued = -1;
} else {
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
@@ -300,6 +299,11 @@ static int virtsnd_pcm_prepare(struct snd_pcm_substream *substream)
vss->suspended = false;
vss->msg_count = 0;
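+	/*
+	 * Both sides of the indirect layer span the whole ALSA buffer: it is
+	 * only used to track application pointer updates, not to copy data
+	 * into a separate hardware buffer.
+	 */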
+ memset(&vss->pcm_indirect, 0, sizeof(vss->pcm_indirect));
+ vss->pcm_indirect.sw_buffer_size =
+ vss->pcm_indirect.hw_buffer_size =
+ snd_pcm_lib_buffer_bytes(substream);
+
msg = virtsnd_pcm_ctl_msg_alloc(vss, VIRTIO_SND_R_PCM_PREPARE,
GFP_KERNEL);
if (!msg)
@@ -324,7 +328,7 @@ static int virtsnd_pcm_trigger(struct snd_pcm_substream *substream, int command)
struct virtio_snd_queue *queue;
struct virtio_snd_msg *msg;
unsigned long flags;
- int rc;
+ int rc = 0;
switch (command) {
case SNDRV_PCM_TRIGGER_START:
@@ -333,7 +337,8 @@ static int virtsnd_pcm_trigger(struct snd_pcm_substream *substream, int command)
spin_lock_irqsave(&queue->lock, flags);
spin_lock(&vss->lock);
- rc = virtsnd_pcm_msg_send(vss);
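+		/*
+		 * For capture, enqueue messages covering the whole buffer up
+		 * front; playback messages are queued from the .ack callback
+		 * as the application provides data.
+		 */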
+ if (vss->direction == SNDRV_PCM_STREAM_CAPTURE)
+ rc = virtsnd_pcm_msg_send(vss, 0, vss->buffer_bytes);
if (!rc)
vss->xfer_enabled = true;
spin_unlock(&vss->lock);
@@ -428,37 +433,111 @@ static int virtsnd_pcm_sync_stop(struct snd_pcm_substream *substream)
}
/**
- * virtsnd_pcm_pointer() - Get the current hardware position for the PCM
- * substream.
+ * virtsnd_pcm_pb_pointer() - Get the current hardware position for the
+ * playback PCM substream.
* @substream: Kernel ALSA substream.
*
- * Context: Any context. Takes and releases the VirtIO substream spinlock.
+ * Context: Any context.
* Return: Hardware position in frames inside [0 ... buffer_size) range.
*/
static snd_pcm_uframes_t
-virtsnd_pcm_pointer(struct snd_pcm_substream *substream)
+virtsnd_pcm_pb_pointer(struct snd_pcm_substream *substream)
+{
+ struct virtio_pcm_substream *vss = snd_pcm_substream_chip(substream);
+
+ return snd_pcm_indirect_playback_pointer(substream,
+ &vss->pcm_indirect,
+ vss->hw_ptr);
+}
+
+/**
+ * virtsnd_pcm_cp_pointer() - Get the current hardware position for the
+ * capture PCM substream.
+ * @substream: Kernel ALSA substream.
+ *
+ * Context: Any context.
+ * Return: Hardware position in frames inside [0 ... buffer_size) range.
+ */
+static snd_pcm_uframes_t
+virtsnd_pcm_cp_pointer(struct snd_pcm_substream *substream)
+{
+ struct virtio_pcm_substream *vss = snd_pcm_substream_chip(substream);
+
+ return snd_pcm_indirect_capture_pointer(substream,
+ &vss->pcm_indirect,
+ vss->hw_ptr);
+}
+
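+/*
+ * Transfer callback for the indirect layer: no data is copied here, the
+ * updated region is simply forwarded to the device as I/O messages.
+ */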
+static void virtsnd_pcm_trans_copy(struct snd_pcm_substream *substream,
+ struct snd_pcm_indirect *rec, size_t bytes)
{
struct virtio_pcm_substream *vss = snd_pcm_substream_chip(substream);
- snd_pcm_uframes_t hw_ptr = SNDRV_PCM_POS_XRUN;
+
+ virtsnd_pcm_msg_send(vss, rec->sw_data, bytes);
+}
+
+static int virtsnd_pcm_pb_ack(struct snd_pcm_substream *substream)
+{
+ struct virtio_pcm_substream *vss = snd_pcm_substream_chip(substream);
+ struct virtio_snd_queue *queue = virtsnd_pcm_queue(vss);
+ unsigned long flags;
+ int rc;
+
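+	/* virtsnd_pcm_msg_send() needs both the queue and substream locks. */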
+ spin_lock_irqsave(&queue->lock, flags);
+ spin_lock(&vss->lock);
+
+ rc = snd_pcm_indirect_playback_transfer(substream, &vss->pcm_indirect,
+ virtsnd_pcm_trans_copy);
+
+ spin_unlock(&vss->lock);
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+ return rc;
+}
+
+static int virtsnd_pcm_cp_ack(struct snd_pcm_substream *substream)
+{
+ struct virtio_pcm_substream *vss = snd_pcm_substream_chip(substream);
+ struct virtio_snd_queue *queue = virtsnd_pcm_queue(vss);
unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ spin_lock(&vss->lock);
+
+ rc = snd_pcm_indirect_capture_transfer(substream, &vss->pcm_indirect,
+ virtsnd_pcm_trans_copy);
- spin_lock_irqsave(&vss->lock, flags);
- if (!vss->xfer_xrun)
- hw_ptr = bytes_to_frames(substream->runtime, vss->hw_ptr);
- spin_unlock_irqrestore(&vss->lock, flags);
+ spin_unlock(&vss->lock);
+ spin_unlock_irqrestore(&queue->lock, flags);
- return hw_ptr;
+ return rc;
}
/* PCM substream operators map. */
-const struct snd_pcm_ops virtsnd_pcm_ops = {
- .open = virtsnd_pcm_open,
- .close = virtsnd_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = virtsnd_pcm_hw_params,
- .hw_free = virtsnd_pcm_hw_free,
- .prepare = virtsnd_pcm_prepare,
- .trigger = virtsnd_pcm_trigger,
- .sync_stop = virtsnd_pcm_sync_stop,
- .pointer = virtsnd_pcm_pointer,
+const struct snd_pcm_ops virtsnd_pcm_ops[] = {
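+	/* Playback substream operators. */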
+ {
+ .open = virtsnd_pcm_open,
+ .close = virtsnd_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = virtsnd_pcm_hw_params,
+ .hw_free = virtsnd_pcm_hw_free,
+ .prepare = virtsnd_pcm_prepare,
+ .trigger = virtsnd_pcm_trigger,
+ .sync_stop = virtsnd_pcm_sync_stop,
+ .pointer = virtsnd_pcm_pb_pointer,
+ .ack = virtsnd_pcm_pb_ack,
+ },
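+	/* Capture substream operators. */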
+ {
+ .open = virtsnd_pcm_open,
+ .close = virtsnd_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = virtsnd_pcm_hw_params,
+ .hw_free = virtsnd_pcm_hw_free,
+ .prepare = virtsnd_pcm_prepare,
+ .trigger = virtsnd_pcm_trigger,
+ .sync_stop = virtsnd_pcm_sync_stop,
+ .pointer = virtsnd_pcm_cp_pointer,
+ .ack = virtsnd_pcm_cp_ack,
+ },
};