[v6,3/3] drivers: iio: adc: add support for ad777x family

Message ID 20240926135418.8342-4-ramona.nechita@analog.com (mailing list archive)
State Changes Requested
Series add support for ad777x family

Commit Message

Ramona Alexandra Nechita Sept. 26, 2024, 1:53 p.m. UTC
Add support for AD7770, AD7771, AD7779 ADCs. The device is capable of
sending out data both on the DOUT lines interface and on the SDO line.
The driver currently implements only the SDO data streaming mode. SPI
communication is used alternately for accessing registers and streaming
data. Register accesses are protected by crc8.

Signed-off-by: Ramona Alexandra Nechita <ramona.nechita@analog.com>
---
 drivers/iio/adc/Kconfig  |  11 +
 drivers/iio/adc/Makefile |   1 +
 drivers/iio/adc/ad7779.c | 911 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 923 insertions(+)
 create mode 100644 drivers/iio/adc/ad7779.c

Comments

Andy Shevchenko Sept. 26, 2024, 2:58 p.m. UTC | #1
On Thu, Sep 26, 2024 at 04:53:57PM +0300, Ramona Alexandra Nechita wrote:
> Add support for AD7770, AD7771, AD7779 ADCs. The device is capable of
> sending out data both on the DOUT lines interface and on the SDO line.
> The driver currently implements only the SDO data streaming mode. SPI
> communication is used alternately for accessing registers and streaming
> data. Register accesses are protected by crc8.

...

> +struct ad7779_state {
> +	struct spi_device *spi;
> +	const struct ad7779_chip_info *chip_info;
> +	struct clk *mclk;
> +	struct iio_trigger *trig;
> +	struct completion completion;
> +	unsigned int sampling_freq;
> +	enum ad7779_filter filter_enabled;
> +	/*
> +	 * DMA (thus cache coherency maintenance) requires the
> +	 * transfer buffers to live in their own cache lines.
> +	 */
> +	struct {
> +		u32 chans[8];
> +		s64 timestamp;

	aligned_s64 timestamp;

while it makes no difference in this case, this makes code aligned inside
the IIO subsystem.

> +	} data __aligned(IIO_DMA_MINALIGN);

Note, this is a different alignment from the above. And shouldn't the buffer
below have it instead?

> +	u32			spidata_tx[8];
> +	u8			reg_rx_buf[3];
> +	u8			reg_tx_buf[3];
> +	u8			reset_buf[8];
> +};

...

> +static const char * const ad7779_power_supplies[] = {
> +	"avdd1", "avdd2", "avdd4"

Leave trailing comma.

> +};

...

> +static int ad7779_spi_read(struct ad7779_state *st, u8 reg, u8 *rbuf)
> +{
> +	int ret;

> +	int length = 3;

Why signed?

> +	u8 crc_buf[2];

> +	u8 exp_crc = 0;

Redundant assignment.

> +	struct spi_transfer reg_read_tr[] = {
> +		{
> +			.tx_buf = st->reg_tx_buf,
> +			.rx_buf = st->reg_rx_buf,
> +		},
> +	};
> +
> +	st->reg_tx_buf[0] = AD7779_SPI_READ_CMD | FIELD_GET(AD7779_REG_MSK, reg);
> +	st->reg_tx_buf[1] = 0;
> +
> +	if (reg == AD7779_REG_GEN_ERR_REG_1_EN)
> +		length = 2;
> +	else
> +		st->reg_tx_buf[2] = crc8(ad7779_crc8_table, st->reg_tx_buf, 2, 0);

I would write this as

	if (reg == AD7779_REG_GEN_ERR_REG_1_EN) {
		length = 2;
	} else {
		length = 3;
		st->reg_tx_buf[2] = crc8(ad7779_crc8_table, st->reg_tx_buf, length - 1, 0);
	}

> +	reg_read_tr[0].len = length;
> +
> +	ret = spi_sync_transfer(st->spi, reg_read_tr, ARRAY_SIZE(reg_read_tr));
> +	if (ret)
> +		return ret;
> +
> +	crc_buf[0] = AD7779_SPI_READ_CMD | FIELD_GET(AD7779_REG_MSK, reg);
> +	crc_buf[1] = st->reg_rx_buf[1];
> +	exp_crc = crc8(ad7779_crc8_table, crc_buf, 2, 0);

ARRAY_SIZE(crc_buf) / sizeof(crc_buf)
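
i.e. something like:

	exp_crc = crc8(ad7779_crc8_table, crc_buf, sizeof(crc_buf), 0);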


> +	if (reg != AD7779_REG_GEN_ERR_REG_1_EN && exp_crc != st->reg_rx_buf[2]) {
> +		dev_err(&st->spi->dev, "Bad CRC %x, expected %x",
> +			st->reg_rx_buf[2], exp_crc);
> +		return -EINVAL;
> +	}
> +	*rbuf = st->reg_rx_buf[1];
> +
> +	return 0;
> +}

Alternatively you can have a helper

static void ad7779_prepare_buf_with_crc(u8 reg, u8 val, u8 *buf)
{
	buf[0] = AD7779_SPI_READ_CMD | FIELD_GET(AD7779_REG_MSK, reg);
	buf[1] = val;
	buf[2] = crc8(ad7779_crc8_table, buf, 2, 0);
}

static int ad7779_spi_read(struct ad7779_state *st, u8 reg, u8 *rbuf)
{
	struct spi_transfer t[] = {
		{
			.tx_buf = st->reg_tx_buf,
			.rx_buf = st->reg_rx_buf,
		},
	};
	u8 *buf = t[0].tx_buf;
	int ret;

	ad7779_prepare_buf_with_crc(reg, 0, buf);

	if (reg == AD7779_REG_GEN_ERR_REG_1_EN)
		t[0].len = 2;
	else
		t[0].len = 3;

	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
	if (ret)
		return ret;

	ad7779_prepare_buf_with_crc(reg, t[0].rx_buf[1], buf);

	if (reg != AD7779_REG_GEN_ERR_REG_1_EN && buf[2] != t[0].rx_buf[2]) {
		dev_err(&st->spi->dev, "Bad CRC %x, expected %x", t[0].rx_buf[2], buf[2]);
		return -EINVAL;
	}
	*rbuf = t[0].rx_buf[1];

	return 0;
}

...

> +static int ad7779_spi_write(struct ad7779_state *st, u8 reg, u8 val)

Same comments as per _read() above.

...

> +static int ad7779_reg_access(struct iio_dev *indio_dev,
> +			     unsigned int reg,
> +			     unsigned int writeval,
> +			     unsigned int *readval)
> +{
> +	struct ad7779_state *st = iio_priv(indio_dev);
> +
> +	if (readval)

> +		return ad7779_spi_read(st, reg, (u8 *)readval);

This casting is incorrect as it breaks on big-endian systems.

> +	return ad7779_spi_write(st, reg, writeval);
> +}
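
A pattern that avoids the cast is to read into a local u8 and widen it
afterwards, e.g. (untested sketch):

static int ad7779_reg_access(struct iio_dev *indio_dev, unsigned int reg,
			     unsigned int writeval, unsigned int *readval)
{
	struct ad7779_state *st = iio_priv(indio_dev);
	u8 tmp;
	int ret;

	if (readval) {
		ret = ad7779_spi_read(st, reg, &tmp);
		if (ret)
			return ret;
		*readval = tmp;
		return 0;
	}

	return ad7779_spi_write(st, reg, writeval);
}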

...

> +	freq_khz = sampling_freq / KILO;

This is clearly a case for HZ_PER_KHZ rather than KILO.
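
i.e. (HZ_PER_KHZ comes from linux/units.h, which the driver already includes):

	freq_khz = sampling_freq / HZ_PER_KHZ;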

...

> +	if (frac) {
> +		/*
> +		* In order to obtain the first three decimals of the decimation
> +		* the initial number is multiplied with 10^3 prior to the
> +		* division, then the original division result is subtracted and
> +		* the number is divided by 10^3.
> +		*/
> +		temp = (div * KILO) / freq_khz;
> +		decimal = ((temp -  dec * KILO) << 16) / KILO;

Unneeded extra space. Can be one line, btw.

		decimal = (((div * KILO) / freq_khz - dec * KILO) << 16) / KILO;

Is it something that mult_frac() from math.h does?
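
An untested sketch of that (note it skips the intermediate truncation to three
decimals, so the rounding differs slightly from your version):

	decimal = mult_frac(frac, 1 << 16, freq_khz);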

> +		ret = ad7779_spi_write(st, AD7779_REG_SRC_N_MSB,
> +				       FIELD_GET(AD7779_FREQ_MSB_MSK, decimal));
> +		if (ret)
> +			return ret;
> +		ret = ad7779_spi_write(st, AD7779_REG_SRC_N_LSB,
> +				       FIELD_GET(AD7779_FREQ_LSB_MSK, decimal));
> +		if (ret)
> +			return ret;
> +	} else {
> +		ret = ad7779_spi_write(st, AD7779_REG_SRC_N_MSB,
> +				       FIELD_GET(AD7779_FREQ_MSB_MSK, 0x0));
> +		if (ret)
> +			return ret;
> +		ret = ad7779_spi_write(st, AD7779_REG_SRC_N_LSB,
> +				       FIELD_GET(AD7779_FREQ_LSB_MSK, 0x0));
> +		if (ret)
> +			return ret;
> +	}

...

> +	ret = ad7779_spi_write(st, AD7779_REG_SRC_UPDATE, 0x1);

BIT(0) ?
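
i.e.:

	ret = ad7779_spi_write(st, AD7779_REG_SRC_UPDATE, BIT(0));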

> +	if (ret)
> +		return ret;
> +
> +	/* SRC update settling time */
> +	fsleep(15);
> +
> +	ret = ad7779_spi_write(st, AD7779_REG_SRC_UPDATE, 0x0);
> +	if (ret)
> +		return ret;
> +
> +	/* SRC update settling time */
> +	fsleep(15);
> +
> +	st->sampling_freq = sampling_freq;
> +
> +	return 0;
> +}

...

> +	ret = ad7779_spi_write_mask(st,
> +				    AD7779_REG_GENERAL_USER_CONFIG_2,
> +				    AD7779_FILTER_MSK,
> +				    FIELD_PREP(AD7779_FILTER_MSK, mode));
> +	if (ret < 0)

In other cases there is no < 0 part, why here?

> +		return ret;
> +
> +	ret = ad7779_set_sampling_frequency(st, st->sampling_freq);
> +	if (ret < 0)

Ditto.

> +		return ret;

...

> +static int ad7779_set_calibscale(struct ad7779_state *st, int channel, int val)
> +{
> +	int ret;
> +	unsigned int gain;
> +	unsigned long long tmp;
> +	u8 gain_bytes[3];
> +
> +	/*
> +	 * The gain value is relative to 0x555555, which represents a gain of 1
> +	 */
> +	tmp = val * 5592405LL;
> +	gain = DIV_ROUND_CLOSEST_ULL(tmp, MEGA);

	gain = DIV_ROUND_CLOSEST_ULL((u64)val * 5592405LL, MEGA);

> +	put_unaligned_be24(gain, gain_bytes);
> +	ret = ad7779_spi_write(st, AD7779_REG_CH_GAIN_UPPER_BYTE(channel),
> +			       gain_bytes[0]);
> +	if (ret)
> +		return ret;
> +
> +	ret = ad7779_spi_write(st, AD7779_REG_CH_GAIN_MID_BYTE(channel),
> +			       gain_bytes[1]);
> +	if (ret)
> +		return ret;
> +
> +	return ad7779_spi_write(st, AD7779_REG_CH_GAIN_LOWER_BYTE(channel),
> +				gain_bytes[2]);
> +}

...

> +static int ad7779_read_raw(struct iio_dev *indio_dev,
> +			   struct iio_chan_spec const *chan, int *val,
> +			   int *val2, long mask)
> +{
> +	struct ad7779_state *st = iio_priv(indio_dev);
> +
> +	iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
> +		switch (mask) {
> +		case IIO_CHAN_INFO_CALIBSCALE:
> +			*val = ad7779_get_calibscale(st, chan->channel);
> +			if (*val < 0)

The rule of thumb is to avoid polluting the output in case of error.

> +				return -EINVAL;

Shadowed error code, why?

> +			*val2 = GAIN_REL;
> +			return IIO_VAL_FRACTIONAL;
> +		case IIO_CHAN_INFO_CALIBBIAS:
> +			*val = ad7779_get_calibbias(st, chan->channel);
> +			if (*val < 0)
> +				return -EINVAL;

Ditto.

> +			return IIO_VAL_INT;
> +		case IIO_CHAN_INFO_SAMP_FREQ:
> +			*val = st->sampling_freq;
> +			if (*val < 0)
> +				return -EINVAL;

Ditto.

> +			return IIO_VAL_INT;
> +		}

> +		return -EINVAL;

Make this the default case in the switch.

> +	}
> +	unreachable();
> +}
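
Putting the comments above together, the switch could look roughly like this
(untested sketch, assuming a local 'int ret'):

		switch (mask) {
		case IIO_CHAN_INFO_CALIBSCALE:
			ret = ad7779_get_calibscale(st, chan->channel);
			if (ret < 0)
				return ret;
			*val = ret;
			*val2 = GAIN_REL;
			return IIO_VAL_FRACTIONAL;
		case IIO_CHAN_INFO_CALIBBIAS:
			ret = ad7779_get_calibbias(st, chan->channel);
			if (ret < 0)
				return ret;
			*val = ret;
			return IIO_VAL_INT;
		case IIO_CHAN_INFO_SAMP_FREQ:
			*val = st->sampling_freq;
			return IIO_VAL_INT;
		default:
			return -EINVAL;
		}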

...

> +static int ad7779_write_raw(struct iio_dev *indio_dev,
> +			    struct iio_chan_spec const *chan, int val, int val2,
> +			    long mask)

long? Not unsigned long?

> +{
> +	struct ad7779_state *st = iio_priv(indio_dev);
> +
> +	iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
> +		switch (mask) {
> +		case IIO_CHAN_INFO_CALIBSCALE:
> +			return ad7779_set_calibscale(st, chan->channel, val2);
> +		case IIO_CHAN_INFO_CALIBBIAS:
> +			return ad7779_set_calibbias(st, chan->channel, val);
> +		case IIO_CHAN_INFO_SAMP_FREQ:
> +			return ad7779_set_sampling_frequency(st, val);
> +		default:
> +			return -EINVAL;
> +		}
> +	}
> +	unreachable();
> +}

...

> +static irqreturn_t ad7779_trigger_handler(int irq, void *p)
> +{
> +	struct iio_poll_func *pf = p;
> +	struct iio_dev *indio_dev = pf->indio_dev;
> +	struct ad7779_state *st = iio_priv(indio_dev);
> +	int ret;

> +

This blank line has to be removed. Doesn't checkpatch complain?

> +	struct spi_transfer sd_readback_tr[] = {

It's the only transfer in the function, name it just 't'.

> +		{
> +			.rx_buf = st->data.chans,
> +			.tx_buf = st->spidata_tx,
> +			.len = AD7779_NUM_CHANNELS * AD7779_CHAN_DATA_SIZE,
> +		}
> +	};

> +	st->spidata_tx[0] = AD7779_SPI_READ_CMD;

It's better to use t.tx_buf[0] instead. See above as well, and apply this
approach to all your functions, so the idea is to make code more robust against
changes. With this in mind, it will become visible which buffer you operate on
in the entire function, no need to access it differently.

> +	ret = spi_sync_transfer(st->spi, sd_readback_tr,
> +				ARRAY_SIZE(sd_readback_tr));

With the above, this is one line.

> +	if (ret) {
> +		dev_err(&st->spi->dev,
> +			"spi transfer error in irq handler");

SPI
IRQ

One line.

> +		goto exit_handler;
> +	}
> +
> +	iio_push_to_buffers_with_timestamp(indio_dev, &st->data, pf->timestamp);
> +
> +exit_handler:
> +	iio_trigger_notify_done(indio_dev->trig);
> +	return IRQ_HANDLED;
> +}
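
With all of the above, the top of the handler might read something like this
(untested sketch; a local typed pointer is used since spi_transfer::tx_buf is
const void *):

	u32 *tx = st->spidata_tx;
	struct spi_transfer t = {
		.rx_buf = st->data.chans,
		.tx_buf = tx,
		.len = AD7779_NUM_CHANNELS * AD7779_CHAN_DATA_SIZE,
	};

	tx[0] = AD7779_SPI_READ_CMD;
	ret = spi_sync_transfer(st->spi, &t, 1);
	if (ret) {
		dev_err(&st->spi->dev, "SPI transfer error in IRQ handler");
		goto exit_handler;
	}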

...

> +static int ad7779_reset(struct iio_dev *indio_dev, struct gpio_desc *reset_gpio)
> +{

Same comments as per above function.

> +	struct spi_transfer reg_read_tr[] = {
> +		{
> +			.tx_buf = st->reset_buf,
> +			.len = 8,
> +		},
> +	};
> +
> +	if (reset_gpio) {

> +		/* Delay for reset to occur is 225 microseconds*/

Missing white space, and the comment is misplaced: it should sit immediately
before the fsleep() and not here.
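
i.e. something like:

	if (reset_gpio) {
		gpiod_set_value(reset_gpio, 1);
		/* Delay for reset to occur is 225 microseconds */
		fsleep(230);
		return 0;
	}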

> +		gpiod_set_value(reset_gpio, 1);
> +		fsleep(230);
> +		return 0;
> +	} else {
> +		memset(st->reset_buf, 0xff, sizeof(st->reset_buf));
> +		ret = spi_sync_transfer(st->spi, reg_read_tr,
> +					ARRAY_SIZE(reg_read_tr));
> +		if (ret)
> +			return ret;
> +	}
> +
> +	/* Delay for reset to occur is 225 microseconds*/

Missing white space.

> +	fsleep(230);
> +
> +	return 0;
> +}

...

> +	gpiod_set_value(start_gpio, 0);
> +	/* Start setup time */
> +	fsleep(15);

This one is commented...

> +	gpiod_set_value(start_gpio, 1);
> +	fsleep(15);
> +	gpiod_set_value(start_gpio, 0);
> +	fsleep(15);

...but not the other two.

...

> +static int ad7779_probe(struct spi_device *spi)
> +{

	struct device *dev = &spi->dev;

> +	struct iio_dev *indio_dev;
> +	struct ad7779_state *st;
> +	struct gpio_desc *reset_gpio, *start_gpio;
> +	int ret = -EINVAL;

> +	if (!spi->irq)
> +		return dev_err_probe(&spi->dev, ret,
> +				     "DRDY irq not present\n");

		return dev_err_probe(dev, ret, "DRDY irq not present\n");

> +	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));

	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));

> +	if (!indio_dev)
> +		return -ENOMEM;
> +
> +	st = iio_priv(indio_dev);
> +
> +	ret = devm_regulator_bulk_get_enable(&spi->dev,

	ret = devm_regulator_bulk_get_enable(dev,

> +					     ARRAY_SIZE(ad7779_power_supplies),
> +					     ad7779_power_supplies);
> +	if (ret)
> +		return dev_err_probe(&spi->dev, ret,
> +				     "failed to get and enable supplies\n");

Use 'dev' here and everywhere below, and put this on one line.

> +	st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
> +	if (IS_ERR(st->mclk))
> +		return PTR_ERR(st->mclk);
> +
> +	reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
> +	if (IS_ERR(reset_gpio))
> +		return PTR_ERR(reset_gpio);
> +
> +	start_gpio = devm_gpiod_get(&spi->dev, "start", GPIOD_OUT_HIGH);
> +	if (IS_ERR(start_gpio))
> +		return PTR_ERR(start_gpio);
> +
> +	crc8_populate_msb(ad7779_crc8_table, AD7779_CRC8_POLY);
> +	st->spi = spi;
> +
> +	st->chip_info = spi_get_device_match_data(spi);
> +	if (!st->chip_info)
> +		return -ENODEV;
> +
> +	ret = ad7779_reset(indio_dev, reset_gpio);
> +	if (ret)
> +		return ret;
> +
> +	ret = ad7779_powerup(st, start_gpio);
> +	if (ret)
> +		return ret;
> +
> +	indio_dev->name = st->chip_info->name;
> +	indio_dev->info = &ad7779_info;
> +	indio_dev->modes = INDIO_DIRECT_MODE;
> +	indio_dev->channels = st->chip_info->channels;
> +	indio_dev->num_channels = ARRAY_SIZE(ad7779_channels);

> +	st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
> +					  indio_dev->name, iio_device_id(indio_dev));

At least there will be room for one more parameter on the first line.

> +	if (!st->trig)
> +		return -ENOMEM;
> +
> +	st->trig->ops = &ad7779_trigger_ops;
> +
> +	iio_trigger_set_drvdata(st->trig, st);
> +
> +	ret = devm_request_irq(&spi->dev, spi->irq,
> +			      iio_trigger_generic_data_rdy_poll,
> +			      IRQF_ONESHOT | IRQF_NO_AUTOEN,
> +			      indio_dev->name, st->trig);

It will be one line less with 'dev'.

> +	if (ret)
> +		return dev_err_probe(&spi->dev, ret, "request irq %d failed\n",

IRQ

> +				     st->spi->irq);

One line. Why all of a sudden st-> ?


> +	ret = devm_iio_trigger_register(&spi->dev, st->trig);
> +	if (ret)
> +		return ret;
> +
> +	indio_dev->trig = iio_trigger_get(st->trig);
> +
> +	init_completion(&st->completion);
> +
> +	ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
> +					      &iio_pollfunc_store_time,
> +					      &ad7779_trigger_handler,
> +					      &ad7779_buffer_setup_ops);
> +	if (ret)
> +		return ret;
> +
> +	ret = ad7779_spi_write_mask(st, AD7779_REG_DOUT_FORMAT,
> +				    AD7779_DCLK_CLK_DIV_MSK,
> +				    FIELD_PREP(AD7779_DCLK_CLK_DIV_MSK, 7));
> +	if (ret)
> +		return ret;
> +
> +	return devm_iio_device_register(&spi->dev, indio_dev);
> +}

...

> +static int ad7779_suspend(struct device *dev)
> +{
> +	struct iio_dev *indio_dev = dev_get_drvdata(dev);
> +	struct ad7779_state *st = iio_priv(indio_dev);

	... val = FIELD_PREP(AD7779_MOD_POWERMODE_MSK, AD7779_LOW_POWER);

> +	return ad7779_spi_write_mask(st, AD7779_REG_GENERAL_USER_CONFIG_1,
> +				    AD7779_MOD_POWERMODE_MSK,
> +				    FIELD_PREP(AD7779_MOD_POWERMODE_MSK,
> +					       AD7779_LOW_POWER));

	return ad7779_spi_write_mask(st, AD7779_REG_GENERAL_USER_CONFIG_1,
				     AD7779_MOD_POWERMODE_MSK, val);

Also note broken indentation in your version.

> +}

> +static int ad7779_resume(struct device *dev)

As per above.

> +{
> +	struct iio_dev *indio_dev = dev_get_drvdata(dev);
> +	struct ad7779_state *st = iio_priv(indio_dev);
> +
> +	return ad7779_spi_write_mask(st, AD7779_REG_GENERAL_USER_CONFIG_1,
> +				    AD7779_MOD_POWERMODE_MSK,
> +				    FIELD_PREP(AD7779_MOD_POWERMODE_MSK,
> +					       AD7779_HIGH_POWER));
> +}
Ramona Alexandra Nechita Oct. 10, 2024, 2:32 p.m. UTC | #2
Hello,

I have some questions inline before sending out a new patch.


....
>> +struct ad7779_state {
>> +	struct spi_device *spi;
>> +	const struct ad7779_chip_info *chip_info;
>> +	struct clk *mclk;
>> +	struct iio_trigger *trig;
>> +	struct completion completion;
>> +	unsigned int sampling_freq;
>> +	enum ad7779_filter filter_enabled;
>> +	/*
>> +	 * DMA (thus cache coherency maintenance) requires the
>> +	 * transfer buffers to live in their own cache lines.
>> +	 */
>> +	struct {
>> +		u32 chans[8];
>> +		s64 timestamp;
>
>	aligned_s64 timestamp;
>
>while it makes no difference in this case, this makes code aligned inside the IIO subsystem.

I might be missing something but I can't find the aligned_s64 data type, should I define it myself
in the driver?

>
>> +	} data __aligned(IIO_DMA_MINALIGN);
>
>Note, this is different alignment to the above. And isn't the buffer below should have it instead?
>
>> +	u32			spidata_tx[8];
>> +	u8			reg_rx_buf[3];
>> +	u8			reg_tx_buf[3];
>> +	u8			reset_buf[8];
>> +};
>
>....
>
>> +static int ad7779_write_raw(struct iio_dev *indio_dev,
>> +			    struct iio_chan_spec const *chan, int val, int val2,
>> +			    long mask)
>
>long? Not unsigned long?

I copied the function header directly from iio.h, shouldn't it be left as such?

>
>> +{
>> +	struct ad7779_state *st = iio_priv(indio_dev);
>> +
>> +	iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
>> +		switch (mask) {
>> +		case IIO_CHAN_INFO_CALIBSCALE:
>> +			return ad7779_set_calibscale(st, chan->channel, val2);
>> +		case IIO_CHAN_INFO_CALIBBIAS:
>> +			return ad7779_set_calibbias(st, chan->channel, val);
>> +		case IIO_CHAN_INFO_SAMP_FREQ:
>> +			return ad7779_set_sampling_frequency(st, val);
>> +		default:
>> +			return -EINVAL;
>> +		}
>> +	}
>> +	unreachable();
>> +}
>
>...
>
>> +static irqreturn_t ad7779_trigger_handler(int irq, void *p) {
>> +	struct iio_poll_func *pf = p;
>> +	struct iio_dev *indio_dev = pf->indio_dev;
>> +	struct ad7779_state *st = iio_priv(indio_dev);
>> +	int ret;
>
>...
>

Thank you!

Best Regards,
Ramona
Jonathan Cameron Oct. 10, 2024, 5:45 p.m. UTC | #3
On Thu, 10 Oct 2024 14:32:49 +0000
"Nechita, Ramona" <Ramona.Nechita@analog.com> wrote:

> Hello,
> 
> I have some questions inline before sending out a new patch.
> 
> 
> ....
> >> +struct ad7779_state {
> >> +	struct spi_device *spi;
> >> +	const struct ad7779_chip_info *chip_info;
> >> +	struct clk *mclk;
> >> +	struct iio_trigger *trig;
> >> +	struct completion completion;
> >> +	unsigned int sampling_freq;
> >> +	enum ad7779_filter filter_enabled;
> >> +	/*
> >> +	 * DMA (thus cache coherency maintenance) requires the
> >> +	 * transfer buffers to live in their own cache lines.
> >> +	 */
> >> +	struct {
> >> +		u32 chans[8];
> >> +		s64 timestamp;  
> >
> >	aligned_s64 timestamp;
> >
> >while it makes no difference in this case, this makes code aligned inside the IIO subsystem.  
> 
> I might be missing something but I can't find the aligned_s64 data type, should I define it myself
> in the driver?

Recent addition to the iio tree so it is in linux-next but not in mainline yet.
https://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio.git/commit/?h=togreg&id=e4ca0e59c39442546866f3dd514a3a5956577daf
It just missed last cycle.

> 
> >  
> >> +	} data __aligned(IIO_DMA_MINALIGN);  
> >
> >Note, this is different alignment to the above. And isn't the buffer below should have it instead?

While I'm here:  No to this one.  The s64 alignment is about
performance of CPU access + consistency across CPU architectures.
This one (which happens to always be 8 or more) is about DMA safety.



> >  
> >> +	u32			spidata_tx[8];
> >> +	u8			reg_rx_buf[3];
> >> +	u8			reg_tx_buf[3];
> >> +	u8			reset_buf[8];
> >> +};  
> >
> >....
> >  
> >> +static int ad7779_write_raw(struct iio_dev *indio_dev,
> >> +			    struct iio_chan_spec const *chan, int val, int val2,
> >> +			    long mask)  
> >
> >long? Not unsigned long?  
> 
> I copied the function header directly from iio.h, shouldn't it be left as such?

Hmm. That's an ancient legacy that we should really clean up at some point.
Leave this as long.


> 
> >  
> >> +{
> >> +	struct ad7779_state *st = iio_priv(indio_dev);
> >> +
> >> +	iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
> >> +		switch (mask) {
> >> +		case IIO_CHAN_INFO_CALIBSCALE:
> >> +			return ad7779_set_calibscale(st, chan->channel, val2);
> >> +		case IIO_CHAN_INFO_CALIBBIAS:
> >> +			return ad7779_set_calibbias(st, chan->channel, val);
> >> +		case IIO_CHAN_INFO_SAMP_FREQ:
> >> +			return ad7779_set_sampling_frequency(st, val);
> >> +		default:
> >> +			return -EINVAL;
> >> +		}
> >> +	}
> >> +	unreachable();
> >> +}  
> >
> >...
> >  
> >> +static irqreturn_t ad7779_trigger_handler(int irq, void *p) {
> >> +	struct iio_poll_func *pf = p;
> >> +	struct iio_dev *indio_dev = pf->indio_dev;
> >> +	struct ad7779_state *st = iio_priv(indio_dev);
> >> +	int ret;  
> >
> >...
> >  
> 
> Thank you!
> 
> Best Regards,
> Ramona
>
Andy Shevchenko Oct. 10, 2024, 5:59 p.m. UTC | #4
On Thu, Oct 10, 2024 at 02:32:49PM +0000, Nechita, Ramona wrote:

...

> >> +struct ad7779_state {
> >> +	struct spi_device *spi;
> >> +	const struct ad7779_chip_info *chip_info;
> >> +	struct clk *mclk;
> >> +	struct iio_trigger *trig;
> >> +	struct completion completion;
> >> +	unsigned int sampling_freq;
> >> +	enum ad7779_filter filter_enabled;
> >> +	/*
> >> +	 * DMA (thus cache coherency maintenance) requires the
> >> +	 * transfer buffers to live in their own cache lines.
> >> +	 */
> >> +	struct {
> >> +		u32 chans[8];
> >> +		s64 timestamp;
> >
> >	aligned_s64 timestamp;
> >
> >while it makes no difference in this case, this makes code aligned inside
> >the IIO subsystem.
> 
> I might be missing something but I can't find the aligned_s64 data type,
> should I define it myself in the driver?

Definitely. The rule of thumb is to create your patches based on the respective
subsystem tree; in this case that is the IIO tree's togreg branch (in some cases
the testing branch should be used).

That is where the mentioned type has been introduced.

> >> +	} data __aligned(IIO_DMA_MINALIGN);
> >
> >Note, this is different alignment to the above. And isn't the buffer below should have it instead?
> >
> >> +	u32			spidata_tx[8];
> >> +	u8			reg_rx_buf[3];
> >> +	u8			reg_tx_buf[3];
> >> +	u8			reset_buf[8];
> >> +};

...

> >> +static int ad7779_write_raw(struct iio_dev *indio_dev,
> >> +			    struct iio_chan_spec const *chan, int val, int val2,
> >> +			    long mask)
> >
> >long? Not unsigned long?
> 
> I copied the function header directly from iio.h, shouldn't it be left as such?

Oh, this is unfortunate. It should be fixed there, indeed.
Andy Shevchenko Oct. 10, 2024, 6:25 p.m. UTC | #5
On Thu, Oct 10, 2024 at 06:45:16PM +0100, Jonathan Cameron wrote:
> On Thu, 10 Oct 2024 14:32:49 +0000
> "Nechita, Ramona" <Ramona.Nechita@analog.com> wrote:

...

> > >> +	/*
> > >> +	 * DMA (thus cache coherency maintenance) requires the
> > >> +	 * transfer buffers to live in their own cache lines.
> > >> +	 */
> > >> +	struct {
> > >> +		u32 chans[8];
> > >> +		s64 timestamp;  
> > >
> > >	aligned_s64 timestamp;
> > >
> > >while it makes no difference in this case, this makes code aligned inside the IIO subsystem.  
> > 
> > I might be missing something but I can't find the aligned_s64 data type, should I define it myself
> > in the driver?
> 
> Recent addition to the iio tree so it is in linux-next but not in mainline yet.
> https://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio.git/commit/?h=togreg&id=e4ca0e59c39442546866f3dd514a3a5956577daf
> It just missed last cycle.
> 
> > >> +	} data __aligned(IIO_DMA_MINALIGN);  
> > >
> > >Note, this is different alignment to the above. And isn't the buffer below should have it instead?
> 
> While I'm here:  No to this one.  The s64 alignment is about
> performance of CPU access + consistency across CPU architectures.
> This one (which happens to always be 8 or more) is about DMA safety.

Right, but shouldn't...

> > >> +	u32			spidata_tx[8];

> > >> +	u8			reg_rx_buf[3];
> > >> +	u8			reg_tx_buf[3];

...one of these also be cache aligned for DMA?

> > >> +	u8			reset_buf[8];
Jonathan Cameron Oct. 12, 2024, 10:42 a.m. UTC | #6
On Thu, 10 Oct 2024 21:25:00 +0300
Andy Shevchenko <andy@kernel.org> wrote:

> On Thu, Oct 10, 2024 at 06:45:16PM +0100, Jonathan Cameron wrote:
> > On Thu, 10 Oct 2024 14:32:49 +0000
> > "Nechita, Ramona" <Ramona.Nechita@analog.com> wrote:  
> 
> ...
> 
> > > >> +	/*
> > > >> +	 * DMA (thus cache coherency maintenance) requires the
> > > >> +	 * transfer buffers to live in their own cache lines.
> > > >> +	 */
> > > >> +	struct {
> > > >> +		u32 chans[8];
> > > >> +		s64 timestamp;    
> > > >
> > > >	aligned_s64 timestamp;
> > > >
> > > >while it makes no difference in this case, this makes code aligned inside the IIO subsystem.    
> > > 
> > > I might be missing something but I can't find the aligned_s64 data type, should I define it myself
> > > in the driver?  
> > 
> > Recent addition to the iio tree so it is in linux-next but not in mainline yet.
> > https://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio.git/commit/?h=togreg&id=e4ca0e59c39442546866f3dd514a3a5956577daf
> > It just missed last cycle.
> >   
> > > >> +	} data __aligned(IIO_DMA_MINALIGN);    
> > > >
> > > >Note, this is different alignment to the above. And isn't the buffer below should have it instead?  
> > 
> > While I'm here:  No to this one.  The s64 alignment is about
> > performance of CPU access + consistency across CPU architectures.
> > This one (which happens to always be 8 or more) is about DMA safety.  
> 
> Right, but shouldn't...
> 
> > > >> +	u32			spidata_tx[8];  
> 
> > > >> +	u8			reg_rx_buf[3];
> > > >> +	u8			reg_tx_buf[3];  
> 
> ...one of these also be cache aligned for DMA?
No need as long as driver doesn't do anything bad like
write to these whilst another dma transaction is in flight.
(I haven't checked though, but typical drivers don't)
All you have to do is ensure that any DMA buffer doesn't share
a cacheline with an unrelated bit of data (i.e. a separate allocation or some
state tracking etc). It is fine for multiple DMA buffers  (say rx and tx)
to be in the same cacheline.  Any DMA device that is stupid enough
to stomp it itself is broken (and would be fairly hard to build!). Such
a device would need to be worked around in the controller driver.

They are allowed to write back stale data, but not garbage data to unrelated
parts of the cacheline.  I.e. a tx buffer that was changed whilst
the SPI transaction was going on, might be overwritten with the old value
when the SPI controller DMAs back an updated version of the cacheline
containing the rx data + a cached copy of the early tx data.
The risk we are defending against with this alignment isn't this, it's
unrelated (and typically not protected by lock) fields in the structure
being overwritten with stale data.  The really nasty one being when
the allocator gives the next bit of the cacheline to another allocation.
(avoided here because the structure is sized as a multiple of the maximum
 alignment).

Now, the code that bounces unaligned dma buffers on arm64 will probably
bounce these because it can't tell they are safe :( That's not incorrect
it's just less than optimal.

Jonathan
> 
> > > >> +	u8			reset_buf[8];  
>
Andy Shevchenko Oct. 15, 2024, 11:08 a.m. UTC | #7
On Sat, Oct 12, 2024 at 11:42:02AM +0100, Jonathan Cameron wrote:
> On Thu, 10 Oct 2024 21:25:00 +0300
> Andy Shevchenko <andy@kernel.org> wrote:
> > On Thu, Oct 10, 2024 at 06:45:16PM +0100, Jonathan Cameron wrote:
> > > On Thu, 10 Oct 2024 14:32:49 +0000
> > > "Nechita, Ramona" <Ramona.Nechita@analog.com> wrote:  

...

> > > > >> +	/*
> > > > >> +	 * DMA (thus cache coherency maintenance) requires the
> > > > >> +	 * transfer buffers to live in their own cache lines.
> > > > >> +	 */
> > > > >> +	struct {
> > > > >> +		u32 chans[8];
> > > > >> +		s64 timestamp;    
> > > > >
> > > > >	aligned_s64 timestamp;
> > > > >
> > > > >while it makes no difference in this case, this makes code aligned inside the IIO subsystem.    
> > > > 
> > > > I might be missing something but I can't find the aligned_s64 data type, should I define it myself
> > > > in the driver?  
> > > 
> > > Recent addition to the iio tree so it is in linux-next but not in mainline yet.
> > > https://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio.git/commit/?h=togreg&id=e4ca0e59c39442546866f3dd514a3a5956577daf
> > > It just missed last cycle.
> > >   
> > > > >> +	} data __aligned(IIO_DMA_MINALIGN);    
> > > > >
> > > > >Note, this is different alignment to the above. And isn't the buffer below should have it instead?  
> > > 
> > > While I'm here:  No to this one.  The s64 alignment is about
> > > performance of CPU access + consistency across CPU architectures.
> > > This one (which happens to always be 8 or more) is about DMA safety.  
> > 
> > Right, but shouldn't...
> > 
> > > > >> +	u32			spidata_tx[8];  
> > 
> > > > >> +	u8			reg_rx_buf[3];
> > > > >> +	u8			reg_tx_buf[3];  
> > 
> > ...one of these also be cache aligned for DMA?
> No need as long as driver doesn't do anything bad like
> write to these whilst another dma transaction is in flight.
> (I haven't checked though, but typical drivers don't)
> All you have to do is ensure that any DMA buffer doesn't share
> a cacheline with an unrelated bit of data (i.e. a separate allocation or some
> state tracking etc). It is fine for multiple DMA buffers  (say rx and tx)
> to be in the same cacheline.  Any DMA device that is stupid enough
> to stomp it itself is broken (and would be fairly hard to build!). Such
> a device would need to be worked around in the controller driver.
> 
> They are allowed to write back stale data, but not garbage data to unrelated
> parts of the cacheline.  I.e. a tx buffer that was changed whilst
> the SPI transaction was going on, might be overwritten with the old value
> when the SPI controller DMAs back an updated version of the cacheline
> containing the rx data + a cached copy of the early tx data.
> The risk we are defending against with this alignment isn't this, it's
> unrelated (and typically not protected by lock) fields in the structure
> being overwritten with stale data.  The really nasty one being when
> the allocator gives the next bit of the cacheline to another allocation.
> (avoided here because the structure is sized as a multiple of the maximum
>  alignment).
> 
> Now, the code that bounces unaligned dma buffers on arm64 will probably
> bounce these because it can't tell they are safe :( That's not incorrect
> it's just less than optimal.

Thanks for the really good elaboration! I will try hard to not forget it.

> > > > >> +	u8			reset_buf[8];

Patch

diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 88e8ce2e78b3..fe6d56225fa1 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -267,6 +267,17 @@  config AD7768_1
 	  To compile this driver as a module, choose M here: the module will be
 	  called ad7768-1.
 
+config AD7779
+	tristate "Analog Devices AD7779 ADC driver"
+	depends on SPI
+	select IIO_BUFFER
+	help
+	  Say yes here to build support for Analog Devices AD777X family
+	  (AD7770, AD7771, AD7779) analog to digital converter (ADC).
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called ad7779.
+
 config AD7780
 	tristate "Analog Devices AD7780 and similar ADCs driver"
 	depends on SPI
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 8b80664c6d6b..3d28ea490e01 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -27,6 +27,7 @@  obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
 obj-$(CONFIG_AD7606) += ad7606.o
 obj-$(CONFIG_AD7766) += ad7766.o
 obj-$(CONFIG_AD7768_1) += ad7768-1.o
+obj-$(CONFIG_AD7779) += ad7779.o
 obj-$(CONFIG_AD7780) += ad7780.o
 obj-$(CONFIG_AD7791) += ad7791.o
 obj-$(CONFIG_AD7793) += ad7793.o
diff --git a/drivers/iio/adc/ad7779.c b/drivers/iio/adc/ad7779.c
new file mode 100644
index 000000000000..d63b61d2efcd
--- /dev/null
+++ b/drivers/iio/adc/ad7779.c
@@ -0,0 +1,911 @@ 
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * AD7770, AD7771, AD7779 ADC
+ *
+ * Copyright 2023-2024 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/string.h>
+#include <linux/units.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define AD7779_SPI_READ_CMD			BIT(7)
+
+#define AD7779_DISABLE_SD			BIT(7)
+
+#define AD7779_REG_CH_DISABLE			0x08
+#define AD7779_REG_CH_SYNC_OFFSET(ch)		(0x09 + (ch))
+#define AD7779_REG_CH_CONFIG(ch)		(0x00 + (ch))
+#define AD7779_REG_GENERAL_USER_CONFIG_1	0x11
+#define AD7779_REG_GENERAL_USER_CONFIG_2	0x12
+#define AD7779_REG_GENERAL_USER_CONFIG_3	0x13
+#define AD7779_REG_DOUT_FORMAT			0x14
+#define AD7779_REG_ADC_MUX_CONFIG		0x15
+#define AD7779_REG_GPIO_CONFIG			0x17
+#define AD7779_REG_BUFFER_CONFIG_1		0x19
+#define AD7779_REG_GLOBAL_MUX_CONFIG		0x16
+#define AD7779_REG_BUFFER_CONFIG_2		0x1A
+#define AD7779_REG_GPIO_DATA			0x18
+#define AD7779_REG_CH_OFFSET_UPPER_BYTE(ch)	(0x1C + (ch) * 6)
+#define AD7779_REG_CH_OFFSET_LOWER_BYTE(ch)	(0x1E + (ch) * 6)
+#define AD7779_REG_CH_GAIN_UPPER_BYTE(ch)	(0x1F + (ch) * 6)
+#define AD7779_REG_CH_OFFSET_MID_BYTE(ch)	(0x1D + (ch) * 6)
+#define AD7779_REG_CH_GAIN_MID_BYTE(ch)		(0x20 + (ch) * 6)
+#define AD7779_REG_CH_ERR_REG(ch)		(0x4C + (ch))
+#define AD7779_REG_CH0_1_SAT_ERR		0x54
+#define AD7779_REG_CH_GAIN_LOWER_BYTE(ch)	(0x21 + (ch) * 6)
+#define AD7779_REG_CH2_3_SAT_ERR		0x55
+#define AD7779_REG_CH4_5_SAT_ERR		0x56
+#define AD7779_REG_CH6_7_SAT_ERR		0x57
+#define AD7779_REG_CHX_ERR_REG_EN		0x58
+#define AD7779_REG_GEN_ERR_REG_1		0x59
+#define AD7779_REG_GEN_ERR_REG_1_EN		0x5A
+#define AD7779_REG_GEN_ERR_REG_2		0x5B
+#define AD7779_REG_GEN_ERR_REG_2_EN		0x5C
+#define AD7779_REG_STATUS_REG_1			0x5D
+#define AD7779_REG_STATUS_REG_2			0x5E
+#define AD7779_REG_STATUS_REG_3			0x5F
+#define AD7779_REG_SRC_N_MSB			0x60
+#define AD7779_REG_SRC_N_LSB			0x61
+#define AD7779_REG_SRC_IF_MSB			0x62
+#define AD7779_REG_SRC_IF_LSB			0x63
+#define AD7779_REG_SRC_UPDATE			0x64
+
+#define AD7779_FILTER_MSK			BIT(6)
+#define AD7779_MOD_POWERMODE_MSK		BIT(6)
+#define AD7779_MOD_PDB_REFOUT_MSK		BIT(4)
+#define AD7779_MOD_SPI_EN_MSK			BIT(4)
+#define AD7779_USRMOD_INIT_MSK			GENMASK(6, 4)
+
+/* AD7779_REG_DOUT_FORMAT */
+#define AD7779_DOUT_FORMAT_MSK			GENMASK(7, 6)
+#define AD7779_DOUT_HEADER_FORMAT		BIT(5)
+#define AD7779_DCLK_CLK_DIV_MSK			GENMASK(3, 1)
+
+#define AD7779_REFMUX_CTRL_MSK			GENMASK(7, 6)
+#define AD7779_SPI_CRC_EN_MSK			BIT(0)
+
+#define AD7779_MAXCLK_LOWPOWER			(4096 * HZ_PER_KHZ)
+#define AD7779_NUM_CHANNELS			8
+#define AD7779_RESET_BUF_SIZE			8
+#define AD7779_CHAN_DATA_SIZE			4
+
+#define AD7779_LOWPOWER_DIV			512
+#define AD7779_HIGHPOWER_DIV			2048
+
+#define AD7779_SINC3_MAXFREQ			(16 * HZ_PER_KHZ)
+#define AD7779_SINC5_MAXFREQ			(128 * HZ_PER_KHZ)
+
+#define AD7779_DEFAULT_SAMPLING_FREQ		(8 * HZ_PER_KHZ)
+#define AD7779_DEFAULT_SAMPLING_2LINE		(4 * HZ_PER_KHZ)
+#define AD7779_DEFAULT_SAMPLING_1LINE		(2 * HZ_PER_KHZ)
+
+#define AD7779_SPIMODE_MAX_SAMP_FREQ		(16 * HZ_PER_KHZ)
+
+#define GAIN_REL				0x555555
+#define AD7779_FREQ_MSB_MSK			GENMASK(15, 8)
+#define AD7779_FREQ_LSB_MSK			GENMASK(7, 0)
+#define AD7779_UPPER				GENMASK(23, 16)
+#define AD7779_MID				GENMASK(15, 8)
+#define AD7779_LOWER				GENMASK(7, 0)
+
+#define AD7779_REG_MSK		GENMASK(6, 0)
+
+#define AD7779_CRC8_POLY			0x07
+DECLARE_CRC8_TABLE(ad7779_crc8_table);
+
+enum ad7779_filter {
+	AD7779_SINC3,
+	AD7779_SINC5,
+};
+
+enum ad7779_variant {
+	ad7770,
+	ad7771,
+	ad7779,
+};
+
+enum ad7779_power_mode {
+	AD7779_LOW_POWER,
+	AD7779_HIGH_POWER,
+};
+
+struct ad7779_chip_info {
+	const char *name;
+	struct iio_chan_spec const *channels;
+};
+
+struct ad7779_state {
+	struct spi_device *spi;
+	const struct ad7779_chip_info *chip_info;
+	struct clk *mclk;
+	struct iio_trigger *trig;
+	struct completion completion;
+	unsigned int sampling_freq;
+	enum ad7779_filter filter_enabled;
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 */
+	struct {
+		u32 chans[8];
+		s64 timestamp;
+	} data __aligned(IIO_DMA_MINALIGN);
+	u32			spidata_tx[8];
+	u8			reg_rx_buf[3];
+	u8			reg_tx_buf[3];
+	u8			reset_buf[8];
+};
+
+static const char * const ad7779_filter_type[] = {
+	[AD7779_SINC3] = "sinc3",
+	[AD7779_SINC5] = "sinc5",
+};
+
+static const char * const ad7779_power_supplies[] = {
+	"avdd1", "avdd2", "avdd4"
+};
+
+static int ad7779_spi_read(struct ad7779_state *st, u8 reg, u8 *rbuf)
+{
+	int ret;
+	int length = 3;
+	u8 crc_buf[2];
+	u8 exp_crc = 0;
+	struct spi_transfer reg_read_tr[] = {
+		{
+			.tx_buf = st->reg_tx_buf,
+			.rx_buf = st->reg_rx_buf,
+		},
+	};
+
+	st->reg_tx_buf[0] = AD7779_SPI_READ_CMD | FIELD_GET(AD7779_REG_MSK, reg);
+	st->reg_tx_buf[1] = 0;
+
+	if (reg == AD7779_REG_GEN_ERR_REG_1_EN)
+		length = 2;
+	else
+		st->reg_tx_buf[2] = crc8(ad7779_crc8_table, st->reg_tx_buf, 2, 0);
+	reg_read_tr[0].len = length;
+
+	ret = spi_sync_transfer(st->spi, reg_read_tr, ARRAY_SIZE(reg_read_tr));
+	if (ret)
+		return ret;
+
+	crc_buf[0] = AD7779_SPI_READ_CMD | FIELD_GET(AD7779_REG_MSK, reg);
+	crc_buf[1] = st->reg_rx_buf[1];
+	exp_crc = crc8(ad7779_crc8_table, crc_buf, 2, 0);
+	if (reg != AD7779_REG_GEN_ERR_REG_1_EN && exp_crc != st->reg_rx_buf[2]) {
+		dev_err(&st->spi->dev, "Bad CRC %x, expected %x",
+			st->reg_rx_buf[2], exp_crc);
+		return -EINVAL;
+	}
+	*rbuf = st->reg_rx_buf[1];
+
+	return 0;
+}
+
+static int ad7779_spi_write(struct ad7779_state *st, u8 reg, u8 val)
+{
+	int length = 3;
+
+	st->reg_tx_buf[0] = FIELD_GET(AD7779_REG_MSK, reg);
+	st->reg_tx_buf[1] = val;
+	if (reg == AD7779_REG_GEN_ERR_REG_1_EN)
+		length = 2;
+	else
+		st->reg_tx_buf[2] = crc8(ad7779_crc8_table, st->reg_tx_buf, 2, 0);
+
+	return spi_write(st->spi, st->reg_tx_buf, length);
+}
+
+static int ad7779_spi_write_mask(struct ad7779_state *st, u8 reg, u8 mask,
+				 u8 val)
+{
+	int ret;
+	u8 regval, data;
+
+	ret = ad7779_spi_read(st, reg, &data);
+	if (ret)
+		return ret;
+
+	regval = (data & ~mask) | (val & mask);
+
+	if (regval == data)
+		return 0;
+
+	return ad7779_spi_write(st, reg, regval);
+}
+
+static int ad7779_reg_access(struct iio_dev *indio_dev,
+			     unsigned int reg,
+			     unsigned int writeval,
+			     unsigned int *readval)
+{
+	struct ad7779_state *st = iio_priv(indio_dev);
+
+	if (readval)
+		return ad7779_spi_read(st, reg, (u8 *)readval);
+
+	return ad7779_spi_write(st, reg, writeval);
+}
+
+static int ad7779_set_sampling_frequency(struct ad7779_state *st,
+					 unsigned int sampling_freq)
+{
+	int ret;
+	unsigned int dec;
+	unsigned int frac;
+	unsigned int div;
+	unsigned int decimal;
+	int temp;
+	unsigned int freq_khz;
+
+	if (st->filter_enabled == AD7779_SINC3 &&
+	    sampling_freq > AD7779_SINC3_MAXFREQ)
+		return -EINVAL;
+
+	if (st->filter_enabled == AD7779_SINC5 &&
+	    sampling_freq > AD7779_SINC5_MAXFREQ)
+		return -EINVAL;
+
+	if (sampling_freq > AD7779_SPIMODE_MAX_SAMP_FREQ)
+		return -EINVAL;
+
+	div = AD7779_HIGHPOWER_DIV;
+
+	freq_khz = sampling_freq / KILO;
+	dec = div / freq_khz;
+	frac = div % freq_khz;
+
+	ret = ad7779_spi_write(st, AD7779_REG_SRC_N_MSB,
+			       FIELD_GET(AD7779_FREQ_MSB_MSK, dec));
+	if (ret)
+		return ret;
+	ret = ad7779_spi_write(st, AD7779_REG_SRC_N_LSB,
+			       FIELD_GET(AD7779_FREQ_LSB_MSK, dec));
+	if (ret)
+		return ret;
+
+	if (frac) {
+		/*
+		* In order to obtain the first three decimals of the decimation
+		* the initial number is multiplied with 10^3 prior to the
+		* division, then the original division result is subtracted and
+		* the number is divided by 10^3.
+		*/
+		temp = (div * KILO) / freq_khz;
+		decimal = ((temp -  dec * KILO) << 16) / KILO;
+		ret = ad7779_spi_write(st, AD7779_REG_SRC_N_MSB,
+				       FIELD_GET(AD7779_FREQ_MSB_MSK, decimal));
+		if (ret)
+			return ret;
+		ret = ad7779_spi_write(st, AD7779_REG_SRC_N_LSB,
+				       FIELD_GET(AD7779_FREQ_LSB_MSK, decimal));
+		if (ret)
+			return ret;
+	} else {
+		ret = ad7779_spi_write(st, AD7779_REG_SRC_N_MSB,
+				       FIELD_GET(AD7779_FREQ_MSB_MSK, 0x0));
+		if (ret)
+			return ret;
+		ret = ad7779_spi_write(st, AD7779_REG_SRC_N_LSB,
+				       FIELD_GET(AD7779_FREQ_LSB_MSK, 0x0));
+		if (ret)
+			return ret;
+	}
+	ret = ad7779_spi_write(st, AD7779_REG_SRC_UPDATE, 0x1);
+	if (ret)
+		return ret;
+
+	/* SRC update settling time */
+	fsleep(15);
+
+	ret = ad7779_spi_write(st, AD7779_REG_SRC_UPDATE, 0x0);
+	if (ret)
+		return ret;
+
+	/* SRC update settling time */
+	fsleep(15);
+
+	st->sampling_freq = sampling_freq;
+
+	return 0;
+}
+
+static int ad7779_get_filter(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan)
+{
+	struct ad7779_state *st = iio_priv(indio_dev);
+	u8 temp;
+	int ret;
+
+	ret = ad7779_spi_read(st, AD7779_REG_GENERAL_USER_CONFIG_2, &temp);
+	if (ret)
+		return ret;
+
+	return FIELD_GET(AD7779_FILTER_MSK, temp);
+}
+
+static int ad7779_set_filter(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     unsigned int mode)
+{
+	struct ad7779_state *st = iio_priv(indio_dev);
+	int ret;
+
+	ret = ad7779_spi_write_mask(st,
+				    AD7779_REG_GENERAL_USER_CONFIG_2,
+				    AD7779_FILTER_MSK,
+				    FIELD_PREP(AD7779_FILTER_MSK, mode));
+	if (ret < 0)
+		return ret;
+
+	ret = ad7779_set_sampling_frequency(st, st->sampling_freq);
+	if (ret < 0)
+		return ret;
+
+	st->filter_enabled = mode;
+
+	return 0;
+}
+
+static int ad7779_get_calibscale(struct ad7779_state *st, int channel)
+{
+	int ret;
+	u8 calibscale[3];
+
+	ret = ad7779_spi_read(st, AD7779_REG_CH_GAIN_LOWER_BYTE(channel), &calibscale[0]);
+	if (ret)
+		return ret;
+
+	ret = ad7779_spi_read(st, AD7779_REG_CH_GAIN_MID_BYTE(channel), &calibscale[1]);
+	if (ret)
+		return ret;
+
+	ret = ad7779_spi_read(st, AD7779_REG_CH_GAIN_UPPER_BYTE(channel), &calibscale[2]);
+	if (ret)
+		return ret;
+
+	return get_unaligned_be24(calibscale);
+}
+
+static int ad7779_set_calibscale(struct ad7779_state *st, int channel, int val)
+{
+	int ret;
+	unsigned int gain;
+	unsigned long long tmp;
+	u8 gain_bytes[3];
+
+	/*
+	 * The gain value is relative to 0x555555, which represents a gain of 1
+	 */
+	tmp = val * 5592405LL;
+	gain = DIV_ROUND_CLOSEST_ULL(tmp, MEGA);
+	put_unaligned_be24(gain, gain_bytes);
+	ret = ad7779_spi_write(st, AD7779_REG_CH_GAIN_UPPER_BYTE(channel),
+			       gain_bytes[0]);
+	if (ret)
+		return ret;
+
+	ret = ad7779_spi_write(st, AD7779_REG_CH_GAIN_MID_BYTE(channel),
+			       gain_bytes[1]);
+	if (ret)
+		return ret;
+
+	return ad7779_spi_write(st, AD7779_REG_CH_GAIN_LOWER_BYTE(channel),
+				gain_bytes[2]);
+}
+
+static int ad7779_get_calibbias(struct ad7779_state *st, int channel)
+{
+	int ret;
+	u8 calibbias[3];
+
+	ret = ad7779_spi_read(st, AD7779_REG_CH_OFFSET_LOWER_BYTE(channel),
+			      &calibbias[0]);
+	if (ret)
+		return ret;
+
+	ret = ad7779_spi_read(st, AD7779_REG_CH_OFFSET_MID_BYTE(channel),
+			      &calibbias[1]);
+	if (ret)
+		return ret;
+
+	ret = ad7779_spi_read(st, AD7779_REG_CH_OFFSET_UPPER_BYTE(channel),
+			      &calibbias[2]);
+	if (ret)
+		return ret;
+
+	return get_unaligned_be24(calibbias);
+}
+
+static int ad7779_set_calibbias(struct ad7779_state *st, int channel, int val)
+{
+	int ret;
+	u8 calibbias[3];
+
+	put_unaligned_be24(val, calibbias);
+	ret = ad7779_spi_write(st, AD7779_REG_CH_OFFSET_UPPER_BYTE(channel),
+			       calibbias[0]);
+	if (ret)
+		return ret;
+
+	ret = ad7779_spi_write(st, AD7779_REG_CH_OFFSET_MID_BYTE(channel),
+			       calibbias[1]);
+	if (ret)
+		return ret;
+
+	return ad7779_spi_write(st, AD7779_REG_CH_OFFSET_LOWER_BYTE(channel),
+				calibbias[2]);
+}
+
+static int ad7779_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan, int *val,
+			   int *val2, long mask)
+{
+	struct ad7779_state *st = iio_priv(indio_dev);
+
+	iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+		switch (mask) {
+		case IIO_CHAN_INFO_CALIBSCALE:
+			*val = ad7779_get_calibscale(st, chan->channel);
+			if (*val < 0)
+				return -EINVAL;
+			*val2 = GAIN_REL;
+			return IIO_VAL_FRACTIONAL;
+		case IIO_CHAN_INFO_CALIBBIAS:
+			*val = ad7779_get_calibbias(st, chan->channel);
+			if (*val < 0)
+				return -EINVAL;
+			return IIO_VAL_INT;
+		case IIO_CHAN_INFO_SAMP_FREQ:
+			*val = st->sampling_freq;
+			if (*val < 0)
+				return -EINVAL;
+			return IIO_VAL_INT;
+		}
+		return -EINVAL;
+	}
+	unreachable();
+}
+
+static int ad7779_write_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan, int val, int val2,
+			    long mask)
+{
+	struct ad7779_state *st = iio_priv(indio_dev);
+
+	iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+		switch (mask) {
+		case IIO_CHAN_INFO_CALIBSCALE:
+			return ad7779_set_calibscale(st, chan->channel, val2);
+		case IIO_CHAN_INFO_CALIBBIAS:
+			return ad7779_set_calibbias(st, chan->channel, val);
+		case IIO_CHAN_INFO_SAMP_FREQ:
+			return ad7779_set_sampling_frequency(st, val);
+		default:
+			return -EINVAL;
+		}
+	}
+	unreachable();
+}
+
+static int ad7779_buffer_preenable(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct ad7779_state *st = iio_priv(indio_dev);
+
+	ret = ad7779_spi_write_mask(st,
+				    AD7779_REG_GENERAL_USER_CONFIG_3,
+				    AD7779_MOD_SPI_EN_MSK,
+				    FIELD_PREP(AD7779_MOD_SPI_EN_MSK, 1));
+	if (ret)
+		return ret;
+
+	/*
+	 * DRDY output cannot be disabled at device level therefore we mask
+	 * the irq at host end.
+	 */
+	enable_irq(st->spi->irq);
+
+	return 0;
+}
+
+static int ad7779_buffer_postdisable(struct iio_dev *indio_dev)
+{
+	struct ad7779_state *st = iio_priv(indio_dev);
+
+	disable_irq(st->spi->irq);
+
+	return ad7779_spi_write(st, AD7779_REG_GENERAL_USER_CONFIG_3,
+			       AD7779_DISABLE_SD);
+}
+
+static irqreturn_t ad7779_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct ad7779_state *st = iio_priv(indio_dev);
+	int ret;
+
+	struct spi_transfer sd_readback_tr[] = {
+		{
+			.rx_buf = st->data.chans,
+			.tx_buf = st->spidata_tx,
+			.len = AD7779_NUM_CHANNELS * AD7779_CHAN_DATA_SIZE,
+		}
+	};
+
+	st->spidata_tx[0] = AD7779_SPI_READ_CMD;
+	ret = spi_sync_transfer(st->spi, sd_readback_tr,
+				ARRAY_SIZE(sd_readback_tr));
+	if (ret) {
+		dev_err(&st->spi->dev,
+			"spi transfer error in irq handler");
+		goto exit_handler;
+	}
+
+	iio_push_to_buffers_with_timestamp(indio_dev, &st->data, pf->timestamp);
+
+exit_handler:
+	iio_trigger_notify_done(indio_dev->trig);
+	return IRQ_HANDLED;
+}
+
+static int ad7779_reset(struct iio_dev *indio_dev, struct gpio_desc *reset_gpio)
+{
+	struct ad7779_state *st = iio_priv(indio_dev);
+	int ret;
+	struct spi_transfer reg_read_tr[] = {
+		{
+			.tx_buf = st->reset_buf,
+			.len = 8,
+		},
+	};
+
+	if (reset_gpio) {
+		/* Delay for reset to occur is 225 microseconds*/
+		gpiod_set_value(reset_gpio, 1);
+		fsleep(230);
+		return 0;
+	} else {
+		memset(st->reset_buf, 0xff, sizeof(st->reset_buf));
+		ret = spi_sync_transfer(st->spi, reg_read_tr,
+					ARRAY_SIZE(reg_read_tr));
+		if (ret)
+			return ret;
+	}
+
+	/* Delay for reset to occur is 225 microseconds*/
+	fsleep(230);
+
+	return 0;
+}
+
+static const struct iio_info ad7779_info = {
+	.read_raw = ad7779_read_raw,
+	.write_raw = ad7779_write_raw,
+	.debugfs_reg_access = &ad7779_reg_access,
+};
+
+static const struct iio_enum ad7779_filter_enum = {
+	.items = ad7779_filter_type,
+	.num_items = ARRAY_SIZE(ad7779_filter_type),
+	.get = ad7779_get_filter,
+	.set = ad7779_set_filter,
+};
+
+static const struct iio_chan_spec_ext_info ad7779_ext_filter[] = {
+	IIO_ENUM("filter_type", IIO_SHARED_BY_ALL, &ad7779_filter_enum),
+	IIO_ENUM_AVAILABLE("filter_type", IIO_SHARED_BY_ALL,
+				  &ad7779_filter_enum),
+	{ }
+};
+
+#define AD777x_CHAN_S(index, _ext_info)					       \
+	{								       \
+		.type = IIO_VOLTAGE,					       \
+		.info_mask_separate = BIT(IIO_CHAN_INFO_CALIBSCALE)  |	       \
+				      BIT(IIO_CHAN_INFO_CALIBBIAS),	       \
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),       \
+		.address = (index),					       \
+		.indexed = 1,						       \
+		.channel = (index),					       \
+		.scan_index = (index),					       \
+		.ext_info = (_ext_info),				       \
+		.scan_type = {						       \
+			.sign = 's',					       \
+			.realbits = 24,					       \
+			.storagebits = 32,				       \
+			.endianness = IIO_BE,				   \
+		},								\
+	}
+
+#define AD777x_CHAN_NO_FILTER_S(index)					       \
+	AD777x_CHAN_S(index, NULL)
+
+#define AD777x_CHAN_FILTER_S(index)					       \
+	AD777x_CHAN_S(index, ad7779_ext_filter)
+static const struct iio_chan_spec ad7779_channels[] = {
+	AD777x_CHAN_NO_FILTER_S(0),
+	AD777x_CHAN_NO_FILTER_S(1),
+	AD777x_CHAN_NO_FILTER_S(2),
+	AD777x_CHAN_NO_FILTER_S(3),
+	AD777x_CHAN_NO_FILTER_S(4),
+	AD777x_CHAN_NO_FILTER_S(5),
+	AD777x_CHAN_NO_FILTER_S(6),
+	AD777x_CHAN_NO_FILTER_S(7),
+	IIO_CHAN_SOFT_TIMESTAMP(8),
+};
+
+static const struct iio_chan_spec ad7779_channels_filter[] = {
+	AD777x_CHAN_FILTER_S(0),
+	AD777x_CHAN_FILTER_S(1),
+	AD777x_CHAN_FILTER_S(2),
+	AD777x_CHAN_FILTER_S(3),
+	AD777x_CHAN_FILTER_S(4),
+	AD777x_CHAN_FILTER_S(5),
+	AD777x_CHAN_FILTER_S(6),
+	AD777x_CHAN_FILTER_S(7),
+	IIO_CHAN_SOFT_TIMESTAMP(8),
+};
+
+static const struct iio_buffer_setup_ops ad7779_buffer_setup_ops = {
+	.preenable = ad7779_buffer_preenable,
+	.postdisable = ad7779_buffer_postdisable,
+};
+
+static const struct iio_trigger_ops ad7779_trigger_ops = {
+	.validate_device = iio_trigger_validate_own_device,
+};
+
+static int ad7779_powerup(struct ad7779_state *st, struct gpio_desc *start_gpio)
+{
+	int ret;
+
+	ret = ad7779_spi_write_mask(st, AD7779_REG_GEN_ERR_REG_1_EN,
+				    AD7779_SPI_CRC_EN_MSK,
+				    FIELD_PREP(AD7779_SPI_CRC_EN_MSK, 1));
+	if (ret)
+		return ret;
+
+	ret = ad7779_spi_write_mask(st, AD7779_REG_GENERAL_USER_CONFIG_1,
+				    AD7779_USRMOD_INIT_MSK,
+				    FIELD_PREP(AD7779_USRMOD_INIT_MSK, 5));
+	if (ret)
+		return ret;
+
+	ret = ad7779_spi_write_mask(st, AD7779_REG_DOUT_FORMAT,
+				    AD7779_DCLK_CLK_DIV_MSK,
+				    FIELD_PREP(AD7779_DCLK_CLK_DIV_MSK, 1));
+	if (ret)
+		return ret;
+
+	ret = ad7779_spi_write_mask(st, AD7779_REG_ADC_MUX_CONFIG,
+				    AD7779_REFMUX_CTRL_MSK,
+				    FIELD_PREP(AD7779_REFMUX_CTRL_MSK, 1));
+	if (ret)
+		return ret;
+
+	ret = ad7779_set_sampling_frequency(st, AD7779_DEFAULT_SAMPLING_FREQ);
+	if (ret)
+		return ret;
+
+	gpiod_set_value(start_gpio, 0);
+	/* Start setup time */
+	fsleep(15);
+	gpiod_set_value(start_gpio, 1);
+	fsleep(15);
+	gpiod_set_value(start_gpio, 0);
+	fsleep(15);
+
+	return 0;
+}
+
+static int ad7779_probe(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev;
+	struct ad7779_state *st;
+	struct gpio_desc *reset_gpio, *start_gpio;
+	int ret = -EINVAL;
+
+	if (!spi->irq)
+		return dev_err_probe(&spi->dev, ret,
+				     "DRDY irq not present\n");
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+
+	ret = devm_regulator_bulk_get_enable(&spi->dev,
+					     ARRAY_SIZE(ad7779_power_supplies),
+					     ad7779_power_supplies);
+	if (ret)
+		return dev_err_probe(&spi->dev, ret,
+				     "failed to get and enable supplies\n");
+
+	st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
+	if (IS_ERR(st->mclk))
+		return PTR_ERR(st->mclk);
+
+	reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(reset_gpio))
+		return PTR_ERR(reset_gpio);
+
+	start_gpio = devm_gpiod_get(&spi->dev, "start", GPIOD_OUT_HIGH);
+	if (IS_ERR(start_gpio))
+		return PTR_ERR(start_gpio);
+
+	crc8_populate_msb(ad7779_crc8_table, AD7779_CRC8_POLY);
+	st->spi = spi;
+
+	st->chip_info = spi_get_device_match_data(spi);
+	if (!st->chip_info)
+		return -ENODEV;
+
+	ret = ad7779_reset(indio_dev, reset_gpio);
+	if (ret)
+		return ret;
+
+	ret = ad7779_powerup(st, start_gpio);
+	if (ret)
+		return ret;
+
+	indio_dev->name = st->chip_info->name;
+	indio_dev->info = &ad7779_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = st->chip_info->channels;
+	indio_dev->num_channels = ARRAY_SIZE(ad7779_channels);
+
+	st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
+					  indio_dev->name, iio_device_id(indio_dev));
+	if (!st->trig)
+		return -ENOMEM;
+
+	st->trig->ops = &ad7779_trigger_ops;
+
+	iio_trigger_set_drvdata(st->trig, st);
+
+	ret = devm_request_irq(&spi->dev, spi->irq,
+			      iio_trigger_generic_data_rdy_poll,
+			      IRQF_ONESHOT | IRQF_NO_AUTOEN,
+			      indio_dev->name, st->trig);
+	if (ret)
+		return dev_err_probe(&spi->dev, ret, "request irq %d failed\n",
+				     st->spi->irq);
+
+	ret = devm_iio_trigger_register(&spi->dev, st->trig);
+	if (ret)
+		return ret;
+
+	indio_dev->trig = iio_trigger_get(st->trig);
+
+	init_completion(&st->completion);
+
+	ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+					      &iio_pollfunc_store_time,
+					      &ad7779_trigger_handler,
+					      &ad7779_buffer_setup_ops);
+	if (ret)
+		return ret;
+
+	ret = ad7779_spi_write_mask(st, AD7779_REG_DOUT_FORMAT,
+				    AD7779_DCLK_CLK_DIV_MSK,
+				    FIELD_PREP(AD7779_DCLK_CLK_DIV_MSK, 7));
+	if (ret)
+		return ret;
+
+	return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static int ad7779_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad7779_state *st = iio_priv(indio_dev);
+
+	return ad7779_spi_write_mask(st, AD7779_REG_GENERAL_USER_CONFIG_1,
+				    AD7779_MOD_POWERMODE_MSK,
+				    FIELD_PREP(AD7779_MOD_POWERMODE_MSK,
+					       AD7779_LOW_POWER));
+}
+
+static int ad7779_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad7779_state *st = iio_priv(indio_dev);
+
+	return ad7779_spi_write_mask(st, AD7779_REG_GENERAL_USER_CONFIG_1,
+				    AD7779_MOD_POWERMODE_MSK,
+				    FIELD_PREP(AD7779_MOD_POWERMODE_MSK,
+					       AD7779_HIGH_POWER));
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ad7779_pm_ops, ad7779_suspend, ad7779_resume);
+
+static const struct ad7779_chip_info ad7770_chip_info = {
+	.name = "ad7770",
+	.channels = ad7779_channels,
+};
+
+static const struct ad7779_chip_info ad7771_chip_info = {
+	.name = "ad7771",
+	.channels = ad7779_channels_filter,
+};
+
+static const struct ad7779_chip_info ad7779_chip_info = {
+	.name = "ad7779",
+	.channels = ad7779_channels,
+};
+
+static const struct spi_device_id ad7779_id[] = {
+	{
+		.name = "ad7770",
+		.driver_data = (kernel_ulong_t)&ad7770_chip_info,
+	},
+	{
+		.name = "ad7771",
+		.driver_data = (kernel_ulong_t)&ad7771_chip_info,
+	},
+	{
+		.name = "ad7779",
+		.driver_data = (kernel_ulong_t)&ad7779_chip_info,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, ad7779_id);
+
+static const struct of_device_id ad7779_of_table[] = {
+	{
+		.compatible = "adi,ad7770",
+		.data = &ad7770_chip_info,
+	},
+	{
+		.compatible = "adi,ad7771",
+		.data = &ad7771_chip_info,
+	},
+	{
+		.compatible = "adi,ad7779",
+		.data = &ad7779_chip_info,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ad7779_of_table);
+
+static struct spi_driver ad7779_driver = {
+	.driver = {
+		.name = "ad7779",
+		.pm = pm_sleep_ptr(&ad7779_pm_ops),
+		.of_match_table = ad7779_of_table,
+	},
+	.probe = ad7779_probe,
+	.id_table = ad7779_id,
+};
+module_spi_driver(ad7779_driver);
+
+MODULE_AUTHOR("Ramona Alexandra Nechita <ramona.nechita@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7779 ADC");
+MODULE_LICENSE("GPL");