/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"
#include "kfd_svm.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

/*
 * Structure for SDMA activity tracking
 */
struct kfd_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;
	uint64_t sdma_activity_counter;
};

struct temp_sdma_queue_list {
	uint64_t __user *rptr;
	uint64_t sdma_val;
	unsigned int queue_id;
	struct list_head list;
};

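/*
 * Worker that totals SDMA activity for one process device. It snapshots
 * the SDMA queue list under the DQM lock, reads each queue's usage
 * counter from user space without the lock held, then re-checks the
 * list to discount queues that were destroyed while reading.
 */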
static void kfd_sdma_activity_worker(struct work_struct *work)
{
	struct kfd_sdma_activity_handler_workarea *workarea;
	struct kfd_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
				sdma_activity_work);

	pdd = workarea->pdd;
	if (!pdd)
		return;

	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;

	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity
	 * Past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and get their counts from user-space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency situation. To read the SDMA stats,
	 * we need to do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
	 *    with dqm_lock/dqm_unlock().
	 * 2. Call get_user() for each node in temporary list without dqm_lock.
	 *    Save the SDMA count for each node and also add the count to the total
	 *    SDMA count counter.
	 *    It's possible that, during this step, a few SDMA queue nodes got deleted
	 *    from the qpd->queues_list.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
	 *    If any node got deleted, its SDMA count would be captured in the sdma
	 *    past activity counter. So subtract the SDMA counter stored in step 2
	 *    for this node from the total SDMA count.
	 */
	INIT_LIST_HEAD(&sdma_q_list.list);

	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}

	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total SDMA
	 * count.
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);

	/*
	 * Get the usage count for each SDMA queue in temp_list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);

	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counters.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
			     (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);

	/*
	 * If temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during SDMA usage read. Subtract the SDMA
	 * count for each node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}

/**
 * kfd_get_cu_occupancy - Collect number of waves in flight on this device
 * by current process. Translates acquired wave count into number of compute units
 * that are occupied.
 *
 * @attr: Handle of attribute that allows reporting of wave count. The attribute
 * handle encapsulates GPU device it is associated with, thereby allowing collection
 * of waves in flight, etc.
 *
 * @buffer: Handle of user provided buffer updated with wave count
 *
 * Return: Number of bytes written to user buffer or an error value
 */
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
{
	int cu_cnt;
	int wave_cnt;
	int max_waves_per_cu;
	struct kfd_dev *dev = NULL;
	struct kfd_process *proc = NULL;
	struct kfd_process_device *pdd = NULL;

	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
	dev = pdd->dev;
	if (dev->kfd2kgd->get_cu_occupancy == NULL)
		return -EINVAL;

	cu_cnt = 0;
	proc = pdd->process;
	if (pdd->qpd.queue_count == 0) {
		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
			 dev->id, proc->pasid);
		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
	}

	/* Collect wave count from device if it supports */
	wave_cnt = 0;
	max_waves_per_cu = 0;
	dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt,
			&max_waves_per_cu);

	/* Translate wave count to number of compute units */
	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}

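/*
 * Show handler for the per-process sysfs nodes: "pasid" reports the
 * process address space ID, "vram_<gpuid>" the VRAM usage on a device,
 * and "sdma_<gpuid>" the SDMA activity collected by the worker above.
 */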
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_sdma);
		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;

		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
					kfd_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter)/
				 SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return 0;
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};

void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}

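/* Show handler for the per-queue sysfs attributes: size, type and gpuid. */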
static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute");

	return 0;
}

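/*
 * Show handler for the per-device stats nodes: "evicted_ms" reports the
 * accumulated eviction time in milliseconds, "cu_occupancy" the number
 * of compute units currently occupied by this process.
 */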
static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	if (strcmp(attr->name, "evicted_ms") == 0) {
		struct kfd_process_device *pdd = container_of(attr,
				struct kfd_process_device,
				attr_evict);
		uint64_t evict_jiffies;

		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);

		return snprintf(buffer,
				PAGE_SIZE,
				"%llu\n",
				jiffies64_to_msecs(evict_jiffies));

	/* Sysfs handle that gets CU occupancy is per device */
	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
		return kfd_get_cu_occupancy(attr, buffer);
	} else {
		pr_err("Invalid attribute");
	}

	return 0;
}

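/* Show handler for the per-device SVM counters: faults, page_in, page_out. */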
static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct kfd_process_device *pdd;

	if (!strcmp(attr->name, "faults")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_faults);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
	}
	if (!strcmp(attr->name, "page_in")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_in);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
	}
	if (!strcmp(attr->name, "page_out")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_out);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
	}
	return 0;
}

static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_attrs = procfs_queue_attrs,
};

static const struct sysfs_ops procfs_stats_ops = {
	.show = kfd_procfs_stats_show,
};

static struct kobj_type procfs_stats_type = {
	.sysfs_ops = &procfs_stats_ops,
	.release = kfd_procfs_kobj_release,
};

static const struct sysfs_ops sysfs_counters_ops = {
	.show = kfd_sysfs_counters_show,
};

static struct kobj_type sysfs_counters_type = {
	.sysfs_ops = &sysfs_counters_ops,
	.release = kfd_procfs_kobj_release,
};

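/* Create the proc/<pid>/queues/<queue id> entry for a new queue. */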
int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
			proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}

static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
				 char *name)
{
	int ret;

	if (!kobj || !attr || !name)
		return;

	attr->name = name;
	attr->mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);

	ret = sysfs_create_file(kobj, attr);
	if (ret)
		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}

static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
	int ret;
	int i;
	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/stats_<gpuid>/
	 * - proc/<pid>/stats_<gpuid>/evicted_ms
	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
				"stats_%u", pdd->dev->id);
		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
		if (!pdd->kobj_stats)
			return;

		ret = kobject_init_and_add(pdd->kobj_stats,
					   &procfs_stats_type,
					   p->kobj,
					   stats_dir_filename);

		if (ret) {
			pr_warn("Creating KFD proc/stats_%s folder failed",
				stats_dir_filename);
			kobject_put(pdd->kobj_stats);
			pdd->kobj_stats = NULL;
			return;
		}

		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
				      "evicted_ms");
		/* Add sysfs file to report compute unit occupancy */
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			kfd_sysfs_create_file(pdd->kobj_stats,
					      &pdd->attr_cu_occupancy,
					      "cu_occupancy");
	}
}

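/* Create the SVM event counter nodes for every GPU that supports SVM. */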
static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
	int ret = 0;
	int i;
	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU which supports SVM
	 * - proc/<pid>/counters_<gpuid>/
	 * - proc/<pid>/counters_<gpuid>/faults
	 * - proc/<pid>/counters_<gpuid>/page_in
	 * - proc/<pid>/counters_<gpuid>/page_out
	 */
	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct kobject *kobj_counters;

		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
			"counters_%u", pdd->dev->id);
		kobj_counters = kfd_alloc_struct(kobj_counters);
		if (!kobj_counters)
			return;

		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
					   p->kobj, counters_dir_filename);
		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed",
				counters_dir_filename);
			kobject_put(kobj_counters);
			return;
		}

		pdd->kobj_counters = kobj_counters;
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
				      "faults");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
				      "page_in");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
				      "page_out");
	}
}

static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
	int i;

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
				      pdd->vram_filename);

		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
				      pdd->sdma_filename);
	}
}

void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}

int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

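/* Unmap a GPU VM allocation from the device and free the backing memory. */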
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
			struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv,
					       NULL);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *	This function should be only called right after the process
 *	is created and when kfd_processes_mutex is still being held
 *	to avoid concurrency. Because of that exclusiveness, we do
 *	not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	struct kgd_mem *mem = NULL;
	int handle;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						 pdd->drm_priv, &mem, NULL, flags);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Create an obj handle so kfd_process_device_remove_obj_handle
	 * will take care of the bo removal when the process finishes.
	 * We do not need to take p->mutex, because the process is just
	 * created and the ioctls have not had the chance to run.
	 */
	handle = kfd_process_device_create_obj_handle(pdd, mem);

	if (handle < 0) {
		err = handle;
		goto free_gpuvm;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
				(struct kgd_mem *)mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto free_obj_handle;
		}
	}

	return err;

free_obj_handle:
	kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
	kfd_process_free_gpuvm(mem, pdd);
	return err;

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, pdd->drm_priv,
					       NULL);
err_alloc_mem:
	*kptr = NULL;
	return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *	process for IB usage. The memory reserved is for KFD to submit
 *	IB to AMDGPU from kernel.  If the memory is reserved
 *	successfully, ib_kaddr will have the CPU/kernel
 *	address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &kaddr);
	if (ret)
		return ret;

	qpd->ib_kaddr = kaddr;

	return 0;
}

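/*
 * Create (or look up) the kfd_process for the current task on open of
 * /dev/kfd, set up CWSR and the procfs nodes, and return it with an
 * extra reference held.
 */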
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd processes mutex before starting process creation, so
	 * two threads of the same process cannot race to create two
	 * kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret)
			goto out_destroy;

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			kobject_put(process->kobj);
			goto out;
		}

		kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
				      "pasid");

		process->kobj_queues = kobject_create_and_add("queues",
							process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed");

		kfd_procfs_add_sysfs_stats(process);
		kfd_procfs_add_sysfs_files(process);
		kfd_procfs_add_sysfs_counters(process);
	}
out:
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);

	return process;

out_destroy:
	hash_del_rcu(&process->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);
	/* kfd_process_free_notifier will trigger the cleanup */
	mmu_notifier_put(&process->mmu_notifier);
	return ERR_PTR(ret);
}

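/* Look up the kfd_process for @thread without taking a reference. */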
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

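/* Drop a kfd_process reference; the last put frees it via kfd_process_wq. */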
void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

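/*
 * Free all buffer objects a process allocated on one device: unmap each
 * BO from every device it is mapped on, then release the memory and the
 * associated obj handle.
 */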
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;
	int i;

	/*
	 * Remove all handles from idr and release appropriate
	 * local memory object
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {

		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *peer_pdd = p->pdds[i];

			if (!peer_pdd->drm_priv)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->kgd, mem, peer_pdd->drm_priv);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem,
						       pdd->drm_priv, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_device_free_bos(p->pdds[i]);
}

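/*
 * Tear down all per-device data of a process: release the GPU VMs,
 * CWSR pages, doorbells and IDRs, and allow the devices to runtime
 * suspend again.
 */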
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
				pdd->dev->id, p->pasid);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
					pdd->dev->kgd, pdd->drm_priv);
			fput(pdd->drm_file);
		}

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);

		/*
		 * before destroying pdd, make sure to report availability
		 * for auto suspend
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
			pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
		p->pdds[i] = NULL;
	}
	p->n_pdds = 0;
}

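/* Remove all procfs/sysfs nodes created for a process and its devices. */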
static void kfd_process_remove_sysfs(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int i;

	if (!p->kobj)
		return;

	sysfs_remove_file(p->kobj, &p->attr_pasid);
	kobject_del(p->kobj_queues);
	kobject_put(p->kobj_queues);
	p->kobj_queues = NULL;

	for (i = 0; i < p->n_pdds; i++) {
		pdd = p->pdds[i];

		sysfs_remove_file(p->kobj, &pdd->attr_vram);
		sysfs_remove_file(p->kobj, &pdd->attr_sdma);

		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			sysfs_remove_file(pdd->kobj_stats,
					  &pdd->attr_cu_occupancy);
		kobject_del(pdd->kobj_stats);
		kobject_put(pdd->kobj_stats);
		pdd->kobj_stats = NULL;
	}

	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];

		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
		kobject_del(pdd->kobj_counters);
		kobject_put(pdd->kobj_counters);
		pdd->kobj_counters = NULL;
	}

	kobject_del(p->kobj);
	kobject_put(p->kobj);
	p->kobj = NULL;
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);
	kfd_process_remove_sysfs(p);
	kfd_iommu_unbind_process(p);

	kfd_process_free_outstanding_kfd_bos(p);
	svm_range_list_fini(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

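/* kref release callback: defer the actual teardown to kfd_process_wq. */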
static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

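/*
 * mmu_notifier hook: return the notifier embedded in the kfd_process
 * already associated with this mm, so mmu_notifier_get() reuses it
 * instead of allocating a new one.
 */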
static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
	int idx = srcu_read_lock(&kfd_processes_srcu);
	struct kfd_process *p = find_process_by_mm(mm);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

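/*
 * Called when the process address space goes away. Unhashes the process,
 * destroys its queues and debug state, signals the eviction fence and
 * drops the mmu_notifier reference.
 */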
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	int i;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);
	cancel_delayed_work_sync(&p->svms.restore_work);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures and if the
	 * pdd is in debug mode, we should first force unregistration,
	 * then we will be able to destroy the queues
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	mutex_unlock(&p->mutex);

	mmu_notifier_put(&p->mmu_notifier);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.alloc_notifier = kfd_process_alloc_notifier,
	.free_notifier = kfd_process_free_notifier,
};

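/*
 * On APUs (no cwsr_base), map the CWSR trap handler region into the
 * process address space with vm_mmap() and copy in the trap handler
 * ISA for every CWSR-enabled device.
 */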
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;
		struct qcm_process_device *qpd = &p->pdds[i]->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

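/*
 * On dGPUs (cwsr_base set), allocate the CWSR trap handler region in
 * the GPU VM, map it to the kernel and copy in the trap handler ISA.
 */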
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

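/*
 * Bind a user trap handler. With CWSR active, the user TBA/TMA are
 * recorded as the second level in the first-level TMA; otherwise they
 * are installed directly as the first level.
 */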
void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr)
{
	if (qpd->cwsr_kaddr) {
		/* KFD trap handler is bound, record as second-level TBA/TMA
		 * in first-level TMA. First-level trap will jump to second.
		 */
		uint64_t *tma =
			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		/* No trap handler bound, bind as first-level TBA/TMA. */
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}
}

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
{
	int i;

	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
	 * boot time retry setting. Mixing processes with different
	 * XNACK/retry settings can hang the GPU.
	 *
	 * Different GPUs can have different noretry settings depending
	 * on HW bugs or limitations. We need to find at least one
	 * XNACK mode for this process that's compatible with all GPUs.
	 * Fortunately GPUs with retry enabled (noretry=0) can run code
	 * built for XNACK-off. On GFXv9 it may perform slower.
	 *
	 * Therefore applications built for XNACK-off can always be
	 * supported and will be our fallback if any GPU does not
	 * support retry.
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;

		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
		 * support the SVM APIs and don't need to be considered
		 * for the XNACK mode selection.
		 */
		if (dev->device_info->asic_family < CHIP_VEGA10)
			continue;
		/* Aldebaran can always support XNACK because it can support
		 * per-process XNACK mode selection. But let the dev->noretry
		 * setting still influence the default XNACK mode.
		 */
		if (supported &&
		    dev->device_info->asic_family == CHIP_ALDEBARAN)
			continue;

		/* GFXv10 and later GPUs do not support shader preemption
		 * during page faults. This can lead to poor QoS for queue
		 * management and memory-manager-related preemptions or
		 * even deadlocks.
		 */
		if (dev->device_info->asic_family >= CHIP_NAVI10)
			return false;

		if (dev->noretry)
			return false;
	}

	return true;
}

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	struct mmu_notifier *mn;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	process->n_pdds = 0;
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	kfd_event_init_process(process);
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Check XNACK support after PDDs are created in kfd_init_apertures */
	process->xnack_enabled = kfd_process_xnack_mode(process, false);

	err = svm_range_list_init(process);
	if (err)
		goto err_init_svm_range_list;

	/* alloc_notifier needs to find the process in the hash table */
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	/* MMU notifier registration must be the last call that can fail
	 * because after this point we cannot unwind the process creation.
	 * After this point, mmu_notifier_put will trigger the cleanup by
	 * dropping the last process reference in the free_notifier.
	 */
	mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
	if (IS_ERR(mn)) {
		err = PTR_ERR(mn);
		goto err_register_notifier;
	}
	BUG_ON(mn != &process->mmu_notifier);

	get_task_struct(process->lead_thread);

	return process;

err_register_notifier:
	hash_del_rcu(&process->kfd_processes);
	svm_range_list_fini(process);
err_init_svm_range_list:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

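/*
 * Mark the doorbells reserved for non-CP (e.g. SDMA) use as allocated
 * in the process doorbell bitmap so user queues won't use them.
 */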
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
			struct kfd_dev *dev)
{
	unsigned int i;
	int range_start = dev->shared_resources.non_cp_doorbells_start;
	int range_end = dev->shared_resources.non_cp_doorbells_end;