Commit 786cb0a2 authored by Linus Torvalds

Merge tag 'drm-fixes-2021-07-16' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular rc2 fixes though a bit more than usual at rc2 stage, people
  must have been testing early or else some fixes from last week got a
  bit laggy.

  There is one larger change in the amd fixes that amalgamates some
  power management code on the newer chips with code from the older
  chips. It only affects chips whose support was introduced in rc1,
  and it should make future fixes easier to maintain, so merging it
  now seemed a good idea.

  Otherwise it's mostly fixes across the board.

  dma-buf:
   - Fix fence leak in sync_file_merge() error code

  drm/panel:
   - nt35510: Don't fail on DSI reads

  fbdev:
   - Avoid use-after-free by not deleting current video mode

  ttm:
   - Avoid NULL-ptr deref in ttm_range_man_fini()

  vmwgfx:
   - Fix a merge commit

  qxl:
   - fix a TTM regression

  amdgpu:
   - SR-IOV fixes
   - RAS fixes
   - eDP fixes
   - SMU13 code unification to facilitate fixes in the future
   - Add new renoir DID
   - Yellow Carp fixes
   - Beige Goby fixes
   - Revert a bunch of TLB fixes that caused regressions
   - Revert an LTTPR display regression

  amdkfd:
   - Fix VRAM access regression
   - SVM fixes

  i915:
   - Fix -EDEADLK handling regression
   - Drop the page table optimisation"

* tag 'drm-fixes-2021-07-16' of git://anongit.freedesktop.org/drm/drm: (29 commits)
  drm/amdgpu: add another Renoir DID
  drm/ttm: add a check against null pointer dereference
  drm/i915/gtt: drop the page table optimisation
  drm/i915/gt: Fix -EDEADLK handling regression
  drm/amd/pm: Add waiting for response of mode-reset message for yellow carp
  Revert "drm/amdkfd: Add heavy-weight TLB flush after unmapping"
  Revert "drm/amdgpu: Add table_freed parameter to amdgpu_vm_bo_update"
  Revert "drm/amdkfd: Make TLB flush conditional on mapping"
  Revert "drm/amdgpu: Fix warning of Function parameter or member not described"
  Revert "drm/amdkfd: Add memory sync before TLB flush on unmap"
  drm/amd/pm: Fix BACO state setting for Beige_Goby
  drm/amdgpu: Restore msix after FLR
  drm/amdkfd: Allow CPU access for all VRAM BOs
  drm/amdgpu/display - only update eDP's backlight level when necessary
  drm/amdkfd: handle fault counters on invalid address
  drm/amdgpu: Correct the irq numbers for virtual crtc
  drm/amd/display: update header file name
  drm/amd/pm: drop smu_v13_0_1.c|h files for yellow carp
  drm/amd/display: remove faulty assert
  Revert "drm/amd/display: Always write repeater mode regardless of LTTPR"
  ...
parents 6e442d06 876d98e5
@@ -211,8 +211,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 			       struct sync_file *b)
 {
 	struct sync_file *sync_file;
-	struct dma_fence **fences, **nfences, **a_fences, **b_fences;
-	int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
+	struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;
+	int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences;
 
 	sync_file = sync_file_alloc();
 	if (!sync_file)
@@ -236,7 +236,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 	 * If a sync_file can only be created with sync_file_merge
 	 * and sync_file_create, this is a reasonable assumption.
 	 */
-	for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+	for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
 		struct dma_fence *pt_a = a_fences[i_a];
 		struct dma_fence *pt_b = b_fences[i_b];
 
@@ -277,15 +277,16 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 		fences = nfences;
 	}
 
-	if (sync_file_set_fence(sync_file, fences, i) < 0) {
-		kfree(fences);
+	if (sync_file_set_fence(sync_file, fences, i) < 0)
 		goto err;
-	}
 
 	strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
 	return sync_file;
 
 err:
+	while (i)
+		dma_fence_put(fences[--i]);
+	kfree(fences);
 	fput(sync_file->file);
 	return NULL;
 }
...
@@ -269,7 +269,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
 		uint64_t *size);
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
+		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
...
@@ -1057,8 +1057,7 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 
 static int update_gpuvm_pte(struct kgd_mem *mem,
 			    struct kfd_mem_attachment *entry,
-			    struct amdgpu_sync *sync,
-			    bool *table_freed)
+			    struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
 	struct amdgpu_device *adev = entry->adev;
@@ -1069,7 +1068,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 		return ret;
 
 	/* Update the page tables */
-	ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
+	ret = amdgpu_vm_bo_update(adev, bo_va, false);
 	if (ret) {
 		pr_err("amdgpu_vm_bo_update failed\n");
 		return ret;
@@ -1081,8 +1080,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 static int map_bo_to_gpuvm(struct kgd_mem *mem,
 			   struct kfd_mem_attachment *entry,
 			   struct amdgpu_sync *sync,
-			   bool no_update_pte,
-			   bool *table_freed)
+			   bool no_update_pte)
 {
 	int ret;
 
@@ -1099,7 +1097,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
 	if (no_update_pte)
 		return 0;
 
-	ret = update_gpuvm_pte(mem, entry, sync, table_freed);
+	ret = update_gpuvm_pte(mem, entry, sync);
 	if (ret) {
 		pr_err("update_gpuvm_pte() failed\n");
 		goto update_gpuvm_pte_failed;
@@ -1393,8 +1391,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
 		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
-			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
-			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_flags = 0;
@@ -1597,8 +1594,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 }
 
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-		struct kgd_dev *kgd, struct kgd_mem *mem,
-		void *drm_priv, bool *table_freed)
+		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1686,7 +1682,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			 entry->va, entry->va + bo_size, entry);
 
 		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
-				      is_invalid_userptr, table_freed);
+				      is_invalid_userptr);
 		if (ret) {
 			pr_err("Failed to map bo to gpuvm\n");
 			goto out_unreserve;
@@ -2136,7 +2132,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 				continue;
 
 			kfd_mem_dmaunmap_attachment(mem, attachment);
-			ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
+			ret = update_gpuvm_pte(mem, attachment, &sync);
 			if (ret) {
 				pr_err("%s: update PTE failed\n", __func__);
 				/* make sure this gets validated again */
@@ -2342,7 +2338,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 				continue;
 
 			kfd_mem_dmaunmap_attachment(mem, attachment);
-			ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
+			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
 			if (ret) {
 				pr_debug("Memory eviction: update PTE failed. Try again\n");
 				goto validate_map_fail;
...
@@ -781,7 +781,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
+	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
 	if (r)
 		return r;
 
@@ -792,7 +792,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 		bo_va = fpriv->csa_va;
 		BUG_ON(!bo_va);
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
 
@@ -811,7 +811,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		if (bo_va == NULL)
 			continue;
 
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
 
...
@@ -1168,6 +1168,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
 
 	/* Renoir */
+	{0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 	{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 	{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
...
@@ -612,7 +612,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 
 	if (operation == AMDGPU_VA_OP_MAP ||
 	    operation == AMDGPU_VA_OP_REPLACE) {
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			goto error;
 	}
...
@@ -278,6 +278,21 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
 	return true;
 }
 
+static void amdgpu_restore_msix(struct amdgpu_device *adev)
+{
+	u16 ctrl;
+
+	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
+		return;
+
+	/* VF FLR */
+	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
+	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+	ctrl |= PCI_MSIX_FLAGS_ENABLE;
+	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+}
+
 /**
  * amdgpu_irq_init - initialize interrupt handling
  *
@@ -569,6 +584,9 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
 	int i, j, k;
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_restore_msix(adev);
+
 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
 		if (!adev->irq.client[i].sources)
 			continue;
...
@@ -1043,8 +1043,18 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 	return ret;
 }
 
-/* get the total error counts on all IPs */
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+/**
+ * amdgpu_ras_query_error_count -- Get error counts of all IPs
+ * adev: pointer to AMD GPU device
+ * ce_count: pointer to an integer to be set to the count of correctible errors.
+ * ue_count: pointer to an integer to be set to the count of uncorrectible
+ * errors.
+ *
+ * If set, @ce_count or @ue_count, count and return the corresponding
+ * error counts in those integer pointers. Return 0 if the device
+ * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
+ */
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 				  unsigned long *ce_count,
 				  unsigned long *ue_count)
 {
@@ -1053,7 +1063,12 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 	unsigned long ce, ue;
 
 	if (!adev->ras_enabled || !con)
-		return;
+		return -EOPNOTSUPP;
+
+	/* Don't count since no reporting.
+	 */
+	if (!ce_count && !ue_count)
+		return 0;
 
 	ce = 0;
 	ue = 0;
@@ -1061,9 +1076,11 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 		struct ras_query_if info = {
 			.head = obj->head,
 		};
+		int res;
 
-		if (amdgpu_ras_query_error_status(adev, &info))
-			return;
+		res = amdgpu_ras_query_error_status(adev, &info);
+		if (res)
+			return res;
 
 		ce += info.ce_count;
 		ue += info.ue_count;
@@ -1074,6 +1091,8 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 
 	if (ue_count)
 		*ue_count = ue;
+
+	return 0;
 }
 
 /* query/inject/cure end */
@@ -2137,9 +2156,10 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
 
 	/* Cache new values.
 	 */
-	amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
-	atomic_set(&con->ras_ce_count, ce_count);
-	atomic_set(&con->ras_ue_count, ue_count);
+	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+		atomic_set(&con->ras_ce_count, ce_count);
+		atomic_set(&con->ras_ue_count, ue_count);
+	}
 
 	pm_runtime_mark_last_busy(dev->dev);
 Out:
@@ -2312,9 +2332,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
 
 	/* Those are the cached values at init.
 	 */
-	amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
-	atomic_set(&con->ras_ce_count, ce_count);
-	atomic_set(&con->ras_ue_count, ue_count);
+	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+		atomic_set(&con->ras_ce_count, ce_count);
+		atomic_set(&con->ras_ue_count, ue_count);
+	}
 
 	return 0;
 cleanup:
...
@@ -490,7 +490,7 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
 void amdgpu_ras_resume(struct amdgpu_device *adev);
 void amdgpu_ras_suspend(struct amdgpu_device *adev);
 
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 				  unsigned long *ce_count,
 				  unsigned long *ue_count);
 
...
@@ -1758,7 +1758,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	r = vm->update_funcs->commit(&params, fence);
 
 	if (table_freed)
-		*table_freed = *table_freed || params.table_freed;
+		*table_freed = params.table_freed;
 
 error_unlock:
 	amdgpu_vm_eviction_unlock(vm);
@@ -1816,7 +1816,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * @adev: amdgpu_device pointer
  * @bo_va: requested BO and VM object
  * @clear: if true clear the entries
- * @table_freed: return true if page table is freed
  *
  * Fill in the page table entries for @bo_va.
  *
@@ -1824,7 +1823,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * 0 for success, -EINVAL for failure.
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
-			bool clear, bool *table_freed)
+			bool clear)
 {
 	struct amdgpu_bo *bo = bo_va->base.bo;
 	struct amdgpu_vm *vm = bo_va->base.vm;
@@ -1903,7 +1902,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 						resv, mapping->start,
 						mapping->last, update_flags,
 						mapping->offset, mem,
-						pages_addr, last_update, table_freed);
+						pages_addr, last_update, NULL);
 		if (r)
 			return r;
 	}
@@ -2155,7 +2154,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 
 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
 		/* Per VM BOs never need to bo cleared in the page tables */
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
 	}
@@ -2174,7 +2173,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 		else
 			clear = true;
 
-		r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, clear);
 		if (r)
 			return r;
 
...
@@ -406,7 +406,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 			struct dma_fence **fence, bool *free_table);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
-			bool clear, bool *table_freed);
+			bool clear);
 bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo, bool evicted);
...
@@ -766,7 +766,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
 
 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1;
+	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
 	adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
 }
 
...
@@ -252,7 +252,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 	 * otherwise the mailbox msg will be ruined/reseted by
 	 * the VF FLR.
 	 */
-	if (!down_read_trylock(&adev->reset_sem))
+	if (!down_write_trylock(&adev->reset_sem))
 		return;
 
 	amdgpu_virt_fini_data_exchange(adev);
@@ -268,7 +268,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 
 flr_done:
 	atomic_set(&adev->in_gpu_reset, 0);
-	up_read(&adev->reset_sem);
+	up_write(&adev->reset_sem);
 
 	/* Trigger recovery for world switch failure if no TDR */
 	if (amdgpu_device_should_recover_gpu(adev)
...
@@ -273,7 +273,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 	 * otherwise the mailbox msg will be ruined/reseted by
 	 * the VF FLR.
 	 */
-	if (!down_read_trylock(&adev->reset_sem))
+	if (!down_write_trylock(&adev->reset_sem))
 		return;
 
 	amdgpu_virt_fini_data_exchange(adev);
@@ -289,7 +289,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 
 flr_done:
 	atomic_set(&adev->in_gpu_reset, 0);
-	up_read(&adev->reset_sem);
+	up_write(&adev->reset_sem);