path: root/kernel/drivers/gpu/drm/drm_context.c
blob: 9b23525c0ed043f0220760010c2584e86ed163f6 (plain)
/*
 * Legacy: Generic DRM Contexts
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "drm_legacy.h"

struct drm_ctx_list {
	struct list_head head;
	drm_context_t handle;
	struct drm_file *tag;
};

/******************************************************************/
/** \name Context bitmap support */
/*@{*/

/**
 * Free a handle from the context bitmap.
 *
 * \param dev DRM device.
 * \param ctx_handle context handle.
 *
 * Removes the entry for \p ctx_handle from drm_device::ctx_idr while holding
 * the drm_device::struct_mutex lock.
 */
void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
	mutex_lock(&dev->struct_mutex);
	idr_remove(&dev->ctx_idr, ctx_handle);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * Context bitmap allocation.
 *
 * \param dev DRM device.
 * \return (non-negative) context handle on success or a negative number on failure.
 *
 * Allocates a new context handle from drm_device::ctx_idr while holding the
 * drm_device::struct_mutex lock.
 */
static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
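	/*
	 * Store a NULL placeholder for now; drm_legacy_setsareactx() can later
	 * bind the per-context SAREA map to this slot.  Handles below
	 * DRM_RESERVED_CONTEXTS are skipped, and end == 0 means no upper limit.
	 */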
	ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
			GFP_KERNEL);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Context bitmap initialization.
 *
 * \param dev DRM device.
 *
 * Initialises drm_device::ctx_idr.
 */
int drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
	idr_init(&dev->ctx_idr);
	return 0;
}

/**
 * Context bitmap cleanup.
 *
 * \param dev DRM device.
 *
 * Destroys drm_device::ctx_idr, freeing any remaining entries, while holding
 * the drm_device::struct_mutex lock.
 */
void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
	mutex_lock(&dev->struct_mutex);
	idr_destroy(&dev->ctx_idr);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file
 * @dev: DRM device to operate on
 * @file: Open file to flush contexts for
 *
 * This iterates over all contexts on @dev and drops them if they're owned by
 * @file. Note that after this call returns, new contexts might be added if
 * the file is still alive.
 */
void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
{
	struct drm_ctx_list *pos, *tmp;

	mutex_lock(&dev->ctxlist_mutex);

	list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
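		/* Tear down every context this file created, except the kernel context. */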
		if (pos->tag == file &&
		    pos->handle != DRM_KERNEL_CONTEXT) {
			if (dev->driver->context_dtor)
				dev->driver->context_dtor(dev, pos->handle);

			drm_legacy_ctxbitmap_free(dev, pos->handle);
			list_del(&pos->head);
			kfree(pos);
		}
	}

	mutex_unlock(&dev->ctxlist_mutex);
}

/*@}*/

/******************************************************************/
/** \name Per Context SAREA Support */
/*@{*/

/**
 * Get per-context SAREA.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx_priv_map structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Looks up the map bound to the given context handle in drm_device::ctx_idr
 * and returns its user token in the request.
 */
int drm_legacy_getsareactx(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_ctx_priv_map *request = data;
	struct drm_local_map *map;
	struct drm_map_list *_entry;

	mutex_lock(&dev->struct_mutex);

	map = idr_find(&dev->ctx_idr, request->ctx_id);
	if (!map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	request->handle = NULL;
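	/* Translate the kernel map pointer back into its user-visible token. */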
	list_for_each_entry(_entry, &dev->maplist, head) {
		if (_entry->map == map) {
			request->handle =
			    (void *)(unsigned long)_entry->user_token;
			break;
		}
	}

	mutex_unlock(&dev->struct_mutex);

	if (request->handle == NULL)
		return -EINVAL;

	return 0;
}

/**
 * Set per-context SAREA.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx_priv_map structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Searches the mapping specified in the request and updates the entry in
 * drm_device::ctx_idr with it.
 */
int drm_legacy_setsareactx(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_ctx_priv_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list = NULL;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map
		    && r_list->user_token == (unsigned long) request->handle)
			goto found;
	}
      bad:
	mutex_unlock(&dev->struct_mutex);
	return -EINVAL;

      found:
	map = r_list->map;
	if (!map)
		goto bad;

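	/* Bind the map to the context handle, replacing whatever the slot held. */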
	if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
		goto bad;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/*@}*/

/******************************************************************/
/** \name The actual DRM context handling routines */
/*@{*/

/**
 * Switch context.
 *
 * \param dev DRM device.
 * \param old old context handle.
 * \param new new context handle.
 * \return zero on success or a negative number on failure.
 *
 * Attempt to set drm_device::context_flag.
 */
static int drm_context_switch(struct drm_device * dev, int old, int new)
{
	if (test_and_set_bit(0, &dev->context_flag)) {
		DRM_ERROR("Reentering -- FIXME\n");
		return -EBUSY;
	}

	DRM_DEBUG("Context switch from %d to %d\n", old, new);

	if (new == dev->last_context) {
		clear_bit(0, &dev->context_flag);
		return 0;
	}

	return 0;
}

/**
 * Complete context switch.
 *
 * \param dev DRM device.
 * \param new new context handle.
 * \return zero on success or a negative number on failure.
 *
 * Updates drm_device::last_context and drm_device::last_switch. Verifies the
 * hardware lock is held, clears the drm_device::context_flag and wakes up
 * drm_device::context_wait.
 */
static int drm_context_switch_complete(struct drm_device *dev,
				       struct drm_file *file_priv, int new)
{
	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */

	if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	/* If a context switch is ever initiated
	   when the kernel holds the lock, release
	   that lock here. */
	clear_bit(0, &dev->context_flag);

	return 0;
}

/**
 * Reserve contexts.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx_res structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 */
int drm_legacy_resctx(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_ctx_res *res = data;
	struct drm_ctx ctx;
	int i;

	if (res->count >= DRM_RESERVED_CONTEXTS) {
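		/*
		 * Only handles for the statically reserved contexts are copied
		 * out; res->count is updated below either way, so a caller with
		 * too small an array can learn the required size.
		 */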
		memset(&ctx, 0, sizeof(ctx));
		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
			ctx.handle = i;
			if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
				return -EFAULT;
		}
	}
	res->count = DRM_RESERVED_CONTEXTS;

	return 0;
}

/**
 * Add context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Get a new handle for the context and copy to userspace.
 */
int drm_legacy_addctx(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_ctx_list *ctx_entry;
	struct drm_ctx *ctx = data;

	ctx->handle = drm_legacy_ctxbitmap_next(dev);
	if (ctx->handle == DRM_KERNEL_CONTEXT) {
		/* Skip kernel's context and get a new one. */
		ctx->handle = drm_legacy_ctxbitmap_next(dev);
	}
	DRM_DEBUG("%d\n", ctx->handle);
	if (ctx->handle == -1) {
		DRM_DEBUG("Not enough free contexts.\n");
		/* Should this return -EBUSY instead? */
		return -ENOMEM;
	}

	ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
	if (!ctx_entry) {
		DRM_DEBUG("out of memory\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&ctx_entry->head);
	ctx_entry->handle = ctx->handle;
	ctx_entry->tag = file_priv;

	mutex_lock(&dev->ctxlist_mutex);
	list_add(&ctx_entry->head, &dev->ctxlist);
	mutex_unlock(&dev->ctxlist_mutex);

	return 0;
}

/**
 * Get context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 */
int drm_legacy_getctx(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	/* This is 0, because we don't handle any context flags */
	ctx->flags = 0;

	return 0;
}

/**
 * Switch context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Calls drm_context_switch().
 */
int drm_legacy_switchctx(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	return drm_context_switch(dev, dev->last_context, ctx->handle);
}

/**
 * New context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Calls drm_context_switch_complete().
 */
int drm_legacy_newctx(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	drm_context_switch_complete(dev, file_priv, ctx->handle);

	return 0;
}

/**
 * Remove context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * If not the special kernel context, calls drm_legacy_ctxbitmap_free() to free
 * the specified context.
 */
int drm_legacy_rmctx(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	if (ctx->handle != DRM_KERNEL_CONTEXT) {
		if (dev->driver->context_dtor)
			dev->driver->context_dtor(dev, ctx->handle);
		drm_legacy_ctxbitmap_free(dev, ctx->handle);
	}

	mutex_lock(&dev->ctxlist_mutex);
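	/* Drop any ctxlist entry that still refers to this handle. */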
	if (!list_empty(&dev->ctxlist)) {
		struct drm_ctx_list *pos, *n;

		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
			if (pos->handle == ctx->handle) {
				list_del(&pos->head);
				kfree(pos);
			}
		}
	}
	mutex_unlock(&dev->ctxlist_mutex);

	return 0;
}

/*@}*/
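
/*
 * Illustrative flow only: a DRI1-era userspace client would typically reach
 * the handlers above through the legacy context ioctls, roughly like this
 * (fd being an open DRM device file descriptor):
 *
 *	struct drm_ctx ctx = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx);	// drm_legacy_addctx()
 *	ioctl(fd, DRM_IOCTL_NEW_CTX, &ctx);	// drm_legacy_newctx()
 *	// ... render while holding the hardware lock ...
 *	ioctl(fd, DRM_IOCTL_RM_CTX, &ctx);	// drm_legacy_rmctx()
 */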