Commit 786cb0a2 authored by Linus Torvalds

Merge tag 'drm-fixes-2021-07-16' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular rc2 fixes though a bit more than usual at rc2 stage, people
  must have been testing early or else some fixes from last week got a
  bit laggy.

  There is one larger change in the amd fixes to amalgamate some power
  management code on the newer chips with the code from the older
  chips. It should only affect chips whose support was introduced in
  rc1, and it should make future fixes easier to maintain, so it is
  probably a good idea to merge it now.

  Otherwise it's mostly fixes across the board.

  dma-buf:
   - Fix fence leak in the sync_file_merge() error path

  drm/panel:
   - nt35510: Don't fail on DSI reads

  fbdev:
   - Avoid use-after-free by not deleting current video mode

  ttm:
   - Avoid NULL-ptr deref in ttm_range_man_fini()

  vmwgfx:
   - Fix a merge commit

  qxl:
   - fix a TTM regression

  amdgpu:
   - SR-IOV fixes
   - RAS fixes
   - eDP fixes
   - SMU13 code unification to facilitate fixes in the future
   - Add new renoir DID
   - Yellow Carp fixes
   - Beige Goby fixes
   - Revert a bunch of TLB fixes that caused regressions
   - Revert an LTTPR display regression

  amdkfd:
   - Fix VRAM access regression
   - SVM fixes

  i915:
   - Fix -EDEADLK handling regression
   - Drop the page table optimisation"

* tag 'drm-fixes-2021-07-16' of git://anongit.freedesktop.org/drm/drm: (29 commits)
  drm/amdgpu: add another Renoir DID
  drm/ttm: add a check against null pointer dereference
  drm/i915/gtt: drop the page table optimisation
  drm/i915/gt: Fix -EDEADLK handling regression
  drm/amd/pm: Add waiting for response of mode-reset message for yellow carp
  Revert "drm/amdkfd: Add heavy-weight TLB flush after unmapping"
  Revert "drm/amdgpu: Add table_freed parameter to amdgpu_vm_bo_update"
  Revert "drm/amdkfd: Make TLB flush conditional on mapping"
  Revert "drm/amdgpu: Fix warning of Function parameter or member not described"
  Revert "drm/amdkfd: Add memory sync before TLB flush on unmap"
  drm/amd/pm: Fix BACO state setting for Beige_Goby
  drm/amdgpu: Restore msix after FLR
  drm/amdkfd: Allow CPU access for all VRAM BOs
  drm/amdgpu/display - only update eDP's backlight level when necessary
  drm/amdkfd: handle fault counters on invalid address
  drm/amdgpu: Correct the irq numbers for virtual crtc
  drm/amd/display: update header file name
  drm/amd/pm: drop smu_v13_0_1.c|h files for yellow carp
  drm/amd/display: remove faulty assert
  Revert "drm/amd/display: Always write repeater mode regardless of LTTPR"
  ...
parents 6e442d06 876d98e5
@@ -211,8 +211,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 					 struct sync_file *b)
 {
 	struct sync_file *sync_file;
-	struct dma_fence **fences, **nfences, **a_fences, **b_fences;
-	int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
+	struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;
+	int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences;
 
 	sync_file = sync_file_alloc();
 	if (!sync_file)
@@ -236,7 +236,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 	 * If a sync_file can only be created with sync_file_merge
 	 * and sync_file_create, this is a reasonable assumption.
 	 */
-	for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+	for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
 		struct dma_fence *pt_a = a_fences[i_a];
 		struct dma_fence *pt_b = b_fences[i_b];
 
@@ -277,15 +277,16 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 		fences = nfences;
 	}
 
-	if (sync_file_set_fence(sync_file, fences, i) < 0) {
-		kfree(fences);
+	if (sync_file_set_fence(sync_file, fences, i) < 0)
 		goto err;
-	}
 
 	strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
 	return sync_file;
 
 err:
+	while (i)
+		dma_fence_put(fences[--i]);
+	kfree(fences);
+
 	fput(sync_file->file);
 	return NULL;
......
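For context on the dma-buf fix above: the error path now unwinds in reverse, dropping every fence reference taken so far before freeing the array itself. A minimal user-space sketch of that cleanup pattern (illustrative names only, not the kernel API):

#include <stdlib.h>

struct fence { int refcount; };

static void fence_put(struct fence *f)
{
	if (--f->refcount == 0)
		free(f);
}

/* Collect references to n fences; on failure, put each reference
 * taken so far (in reverse order), then free the backing array. */
static struct fence **collect_fences(struct fence **src, int n)
{
	struct fence **fences = calloc(n, sizeof(*fences));
	int i;

	if (!fences)
		return NULL;

	for (i = 0; i < n; i++) {
		src[i]->refcount++;	/* take a reference */
		fences[i] = src[i];
	}

	if (0 /* pretend a later step failed */)
		goto err;

	return fences;

err:
	while (i)			/* unwind: put, then free */
		fence_put(fences[--i]);
	free(fences);
	return NULL;
}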
@@ -269,7 +269,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
 		uint64_t *size);
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
+		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
......
@@ -1057,8 +1057,7 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 
 static int update_gpuvm_pte(struct kgd_mem *mem,
 			    struct kfd_mem_attachment *entry,
-			    struct amdgpu_sync *sync,
-			    bool *table_freed)
+			    struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
 	struct amdgpu_device *adev = entry->adev;
@@ -1069,7 +1068,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 		return ret;
 
 	/* Update the page tables */
-	ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
+	ret = amdgpu_vm_bo_update(adev, bo_va, false);
 	if (ret) {
 		pr_err("amdgpu_vm_bo_update failed\n");
 		return ret;
@@ -1081,8 +1080,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 static int map_bo_to_gpuvm(struct kgd_mem *mem,
 			   struct kfd_mem_attachment *entry,
 			   struct amdgpu_sync *sync,
-			   bool no_update_pte,
-			   bool *table_freed)
+			   bool no_update_pte)
 {
 	int ret;
 
@@ -1099,7 +1097,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
 	if (no_update_pte)
 		return 0;
 
-	ret = update_gpuvm_pte(mem, entry, sync, table_freed);
+	ret = update_gpuvm_pte(mem, entry, sync);
 	if (ret) {
 		pr_err("update_gpuvm_pte() failed\n");
 		goto update_gpuvm_pte_failed;
@@ -1393,8 +1391,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
 		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
-			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
-			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_flags = 0;
@@ -1597,8 +1594,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 }
 
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-		struct kgd_dev *kgd, struct kgd_mem *mem,
-		void *drm_priv, bool *table_freed)
+		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1686,7 +1682,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			 entry->va, entry->va + bo_size, entry);
 
 		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
-				      is_invalid_userptr, table_freed);
+				      is_invalid_userptr);
 		if (ret) {
 			pr_err("Failed to map bo to gpuvm\n");
 			goto out_unreserve;
@@ -2136,7 +2132,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 				continue;
 
 			kfd_mem_dmaunmap_attachment(mem, attachment);
-			ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
+			ret = update_gpuvm_pte(mem, attachment, &sync);
 			if (ret) {
 				pr_err("%s: update PTE failed\n", __func__);
 				/* make sure this gets validated again */
@@ -2342,7 +2338,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			continue;
 
 		kfd_mem_dmaunmap_attachment(mem, attachment);
-		ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
+		ret = update_gpuvm_pte(mem, attachment, &sync_obj);
 		if (ret) {
 			pr_debug("Memory eviction: update PTE failed. Try again\n");
 			goto validate_map_fail;
......
@@ -781,7 +781,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
+	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
 	if (r)
 		return r;
 
@@ -792,7 +792,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 		bo_va = fpriv->csa_va;
 		BUG_ON(!bo_va);
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
 
@@ -811,7 +811,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		if (bo_va == NULL)
 			continue;
 
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
 
......
@@ -1168,6 +1168,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
 
 	/* Renoir */
+	{0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 	{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 	{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
......
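Nothing beyond the table entry is needed for the new Renoir DID: probe picks up the chip type and APU flag from driver_data. A hedged sketch of how such an entry is consumed (simplified; the real amdgpu probe does far more, and CHIP_RENOIR/AMD_IS_APU come from amdgpu's own headers):

#include <linux/pci.h>

static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long flags = ent->driver_data;	/* e.g. CHIP_RENOIR|AMD_IS_APU */

	if (flags & AMD_IS_APU) {
		/* take APU-specific init paths (no dedicated VRAM, etc.) */
	}
	return 0;
}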
@@ -612,7 +612,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 
 	if (operation == AMDGPU_VA_OP_MAP ||
 	    operation == AMDGPU_VA_OP_REPLACE) {
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			goto error;
 	}
......
@@ -278,6 +278,21 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
 	return true;
 }
 
+static void amdgpu_restore_msix(struct amdgpu_device *adev)
+{
+	u16 ctrl;
+
+	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
+		return;
+
+	/* VF FLR */
+	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
+	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+	ctrl |= PCI_MSIX_FLAGS_ENABLE;
+	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+}
+
 /**
  * amdgpu_irq_init - initialize interrupt handling
  *
@@ -569,6 +584,9 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
 	int i, j, k;
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_restore_msix(adev);
+
 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
 		if (!adev->irq.client[i].sources)
 			continue;
......
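The MSI-X restore above works entirely through PCI config space: if MSI-X was enabled before the VF function-level reset, bouncing the enable bit in the Message Control register forces the interrupt state to be re-armed. A condensed sketch of the same toggle, using only standard PCI accessors (pdev stands in for adev->pdev):

#include <linux/pci.h>

static void sketch_restore_msix(struct pci_dev *pdev)
{
	u16 ctrl;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;				/* MSI-X was not in use */

	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;		/* disable ... */
	pci_write_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;		/* ... then re-enable */
	pci_write_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}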
@@ -809,7 +809,7 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
 
 /* query/inject/cure begin */
 int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
-		struct ras_query_if *info)
+				  struct ras_query_if *info)
 {
 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
 	struct ras_err_data err_data = {0, 0, 0, NULL};
@@ -1043,17 +1043,32 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 	return ret;
 }
 
-/* get the total error counts on all IPs */
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
-				  unsigned long *ce_count,
-				  unsigned long *ue_count)
+/**
+ * amdgpu_ras_query_error_count -- Get error counts of all IPs
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable
+ * errors.
+ *
+ * If @ce_count or @ue_count is set, count and return the corresponding
+ * error counts in those integer pointers. Return 0 if the device
+ * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
+ */
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+				 unsigned long *ce_count,
+				 unsigned long *ue_count)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 	struct ras_manager *obj;
 	unsigned long ce, ue;
 
 	if (!adev->ras_enabled || !con)
-		return;
+		return -EOPNOTSUPP;
+
+	/* Don't count since no reporting.
+	 */
+	if (!ce_count && !ue_count)
+		return 0;
 
 	ce = 0;
 	ue = 0;
@@ -1061,9 +1076,11 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 		struct ras_query_if info = {
 			.head = obj->head,
 		};
+		int res;
 
-		if (amdgpu_ras_query_error_status(adev, &info))
-			return;
+		res = amdgpu_ras_query_error_status(adev, &info);
+		if (res)
+			return res;
 
 		ce += info.ce_count;
 		ue += info.ue_count;
@@ -1074,6 +1091,8 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 	if (ue_count)
 		*ue_count = ue;
+
+	return 0;
 }
 
 /* query/inject/cure end */
@@ -2137,9 +2156,10 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
 
 	/* Cache new values.
 	 */
-	amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
-	atomic_set(&con->ras_ce_count, ce_count);
-	atomic_set(&con->ras_ue_count, ue_count);
+	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+		atomic_set(&con->ras_ce_count, ce_count);
+		atomic_set(&con->ras_ue_count, ue_count);
+	}
 
 	pm_runtime_mark_last_busy(dev->dev);
 Out:
@@ -2312,9 +2332,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
 	/* Those are the cached values at init.
 	 */
-	amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
-	atomic_set(&con->ras_ce_count, ce_count);
-	atomic_set(&con->ras_ue_count, ue_count);
+	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+		atomic_set(&con->ras_ce_count, ce_count);
+		atomic_set(&con->ras_ue_count, ue_count);
+	}
 
 	return 0;
 
 cleanup:
......
@@ -490,9 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
 void amdgpu_ras_resume(struct amdgpu_device *adev);
 void amdgpu_ras_suspend(struct amdgpu_device *adev);
 
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
-				  unsigned long *ce_count,
-				  unsigned long *ue_count);
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+				 unsigned long *ce_count,
+				 unsigned long *ue_count);
 
 /* error handling functions */
 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
......
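With the int return type, callers of amdgpu_ras_query_error_count() can now distinguish "no RAS support" from "zero errors". A minimal sketch of the new calling convention (assumes an initialized adev):

unsigned long ce_count = 0, ue_count = 0;
int res = amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);

if (res == -EOPNOTSUPP)
	dev_dbg(adev->dev, "RAS not supported, skipping error counts\n");
else if (res == 0)
	dev_info(adev->dev, "RAS: %lu correctable, %lu uncorrectable\n",
		 ce_count, ue_count);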
@@ -1758,7 +1758,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	r = vm->update_funcs->commit(&params, fence);
 
 	if (table_freed)
-		*table_freed = *table_freed || params.table_freed;
+		*table_freed = params.table_freed;
 
 error_unlock:
 	amdgpu_vm_eviction_unlock(vm);
@@ -1816,7 +1816,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * @adev: amdgpu_device pointer
  * @bo_va: requested BO and VM object
  * @clear: if true clear the entries
- * @table_freed: return true if page table is freed
  *
  * Fill in the page table entries for @bo_va.
  *
@@ -1824,7 +1823,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * 0 for success, -EINVAL for failure.
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
-			bool clear, bool *table_freed)
+			bool clear)
 {
 	struct amdgpu_bo *bo = bo_va->base.bo;
 	struct amdgpu_vm *vm = bo_va->base.vm;
@@ -1903,7 +1902,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 						resv, mapping->start,
 						mapping->last, update_flags,
 						mapping->offset, mem,
-						pages_addr, last_update, table_freed);
+						pages_addr, last_update, NULL);
 		if (r)
 			return r;
 	}
@@ -2155,7 +2154,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 
 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
 		/* Per VM BOs never need to be cleared in the page tables */
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
 	}
@@ -2174,7 +2173,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 		else
 			clear = true;
 
-		r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, clear);
 		if (r)
 			return r;
 
......
@@ -406,7 +406,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 			struct dma_fence **fence, bool *free_table);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
-			bool clear, bool *table_freed);
+			bool clear);
 bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo, bool evicted);
......
@@ -766,7 +766,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
 
 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1;
+	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
 	adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
 }
 
......
@@ -252,7 +252,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 	 * otherwise the mailbox msg will be ruined/reset by
 	 * the VF FLR.
 	 */
-	if (!down_read_trylock(&adev->reset_sem))
+	if (!down_write_trylock(&adev->reset_sem))
 		return;
 
 	amdgpu_virt_fini_data_exchange(adev);
@@ -268,7 +268,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 
 flr_done:
 	atomic_set(&adev->in_gpu_reset, 0);
-	up_read(&adev->reset_sem);
+	up_write(&adev->reset_sem);
 
 	/* Trigger recovery for world switch failure if no TDR */
 	if (amdgpu_device_should_recover_gpu(adev)
......
@@ -273,7 +273,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 	 * otherwise the mailbox msg will be ruined/reset by
 	 * the VF FLR.
 	 */
-	if (!down_read_trylock(&adev->reset_sem))
+	if (!down_write_trylock(&adev->reset_sem))
 		return;
 
 	amdgpu_virt_fini_data_exchange(adev);
@@ -289,7 +289,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 
 flr_done:
 	atomic_set(&adev->in_gpu_reset, 0);
-	up_read(&adev->reset_sem);
+	up_write(&adev->reset_sem);
 
 	/* Trigger recovery for world switch failure if no TDR */
 	if (amdgpu_device_should_recover_gpu(adev)
......
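Both FLR handlers upgrade from a shared to an exclusive hold on reset_sem, so the trylock and the release move to the write-side API together; a mismatched pair (down_write_trylock with up_read) would corrupt the semaphore state. A minimal sketch of the paired pattern (standalone rwsem for illustration; amdgpu embeds its own in adev):

#include <linux/rwsem.h>

static DECLARE_RWSEM(sketch_reset_sem);

static void sketch_flr_work(void)
{
	/* Writer side: bail out immediately if a reset already holds it. */
	if (!down_write_trylock(&sketch_reset_sem))
		return;

	/* ... perform the VF FLR handshake ... */

	up_write(&sketch_reset_sem);	/* must pair with down_write_trylock() */
}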
@@ -1393,7 +1393,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 	long err = 0;
 	int i;
 	uint32_t *devices_arr = NULL;
-	bool table_freed = false;
 
 	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
 	if (!dev)
@@ -1451,8 +1450,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 			goto get_mem_obj_from_handle_failed;
 		}
 		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-			peer->kgd, (struct kgd_mem *)mem,
-			peer_pdd->drm_priv, &table_freed);
+			peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
 		if (err) {
 			pr_err("Failed to map to gpu %d/%d\n",
 			       i, args->n_devices);
@@ -1470,17 +1468,16 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 	}
 
 	/* Flush TLBs after waiting for the page table updates to complete */
-	if (table_freed) {
-		for (i = 0; i < args->n_devices; i++) {
-			peer = kfd_device_by_id(devices_arr[i]);
-			if (WARN_ON_ONCE(!peer))
-				continue;
-			peer_pdd = kfd_get_process_device_data(peer, p);
-			if (WARN_ON_ONCE(!peer_pdd))
-				continue;
-			kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
-		}
+	for (i = 0; i < args->n_devices; i++) {
+		peer = kfd_device_by_id(devices_arr[i]);
+		if (WARN_ON_ONCE(!peer))
+			continue;
+		peer_pdd = kfd_get_process_device_data(peer, p);
+		if (WARN_ON_ONCE(!peer_pdd))
+			continue;
+		kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
 	}
 
 	kfree(devices_arr);
 
 	return err;
@@ -1568,27 +1565,10 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 		}
 		args->n_success = i+1;
 	}
-	mutex_unlock(&p->mutex);
-
-	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
-	if (err) {
-		pr_debug("Sync memory failed, wait interrupted by user signal\n");
-		goto sync_memory_failed;
-	}
-
-	/* Flush TLBs after waiting for the page table updates to complete */
-	for (i = 0; i < args->n_devices; i++) {
-		peer = kfd_device_by_id(devices_arr[i]);
-		if (WARN_ON_ONCE(!peer))
-			continue;
-		peer_pdd = kfd_get_process_device_data(peer, p);
-		if (WARN_ON_ONCE(!peer_pdd))
-			continue;
-		kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
-	}
-
 	kfree(devices_arr);
 
+	mutex_unlock(&p->mutex);
+
 	return 0;
 
 bind_process_to_device_failed:
@@ -1596,7 +1576,6 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 unmap_memory_from_gpu_failed:
 	mutex_unlock(&p->mutex);
 copy_from_user_failed:
-sync_memory_failed:
 	kfree(devices_arr);
 	return err;
 }
......
@@ -714,8 +714,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 	if (err)
 		goto err_alloc_mem;
 
-	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem,
-			pdd->drm_priv, NULL);
+	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
 	if (err)
 		goto err_map_mem;
 
......
@@ -2375,21 +2375,27 @@ static bool svm_range_skip_recover(struct svm_range *prange)
 static void
 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
-		      struct svm_range *prange, int32_t gpuidx)
+		      int32_t gpuidx)
 {
 	struct kfd_process_device *pdd;
 
-	if (gpuidx == MAX_GPU_INSTANCE)
-		/* fault is on different page of same range
-		 * or fault is skipped to recover later
-		 */
-		pdd = svm_range_get_pdd_by_adev(prange, adev);
-	else
-		/* fault recovered
-		 * or fault cannot recover because GPU no access on the range
-		 */
-		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
+	/* fault is on different page of same range
+	 * or fault is skipped to recover later
+	 * or fault is on invalid virtual address
+	 */
+	if (gpuidx == MAX_GPU_INSTANCE) {
+		uint32_t gpuid;
+		int r;
+
+		r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
+		if (r < 0)
+			return;
+	}
+
+	/* fault is recovered
+	 * or fault cannot recover because GPU no access on the range
+	 */
+	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
 
 	if (pdd)
 		WRITE_ONCE(pdd->faults, pdd->faults + 1);
 }
@@ -2525,7 +2531,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 	mutex_unlock(&svms->lock);
 	mmap_read_unlock(mm);
 
-	svm_range_count_fault(adev, p, prange, gpuidx);
+	svm_range_count_fault(adev, p, gpuidx);
 
 	mmput(mm);
 out:
......
@@ -9191,7 +9191,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 	/* restore the backlight level */
-	if (dm->backlight_dev)
+	if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0]))
 		amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
 #endif
 	/*
......
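The eDP change above is a read-compare-write guard: the cached brightness is written back only when it differs from what the panel currently reports, avoiding redundant backlight programming on every commit. A generic sketch of the guard (hypothetical accessors, not the amdgpu_dm API):

struct dm_ctx;				/* opaque, hypothetical */
unsigned int get_level(struct dm_ctx *dm);	/* hypothetical accessor */
void set_level(struct dm_ctx *dm, unsigned int level);	/* hypothetical accessor */

static void restore_backlight(struct dm_ctx *dm, unsigned int want)
{
	if (get_level(dm) == want)	/* already at the desired level */
		return;
	set_level(dm, want);
}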
@@ -31,8 +31,8 @@
 #include "dcn31_smu.h"
 #include "yellow_carp_offset.h"
 
-#include "mp/mp_13_0_1_offset.h"
-#include "mp/mp_13_0_1_sh_mask.h"
+#include "mp/mp_13_0_2_offset.h"
+#include "mp/mp_13_0_2_sh_mask.h"
 
 #define REG(reg_name) \
 	(MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
......
@@ -1620,11 +1620,12 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train
 {
 	enum dc_status status = DC_OK;
 
-	if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
-		status = configure_lttpr_mode_non_transparent(link, lt_settings);
-	else
+	if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
 		status = configure_lttpr_mode_transparent(link);
+	else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)