diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 196b8b9dba1112b245e331a76b62e804604b191a..b0300529ab13ede1178cf51097a02da67cfcc315 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -6,11 +6,36 @@
Linux DRM Developer's Guide
+
+
+ Jesse
+ Barnes
+ Initial version
+
+ Intel Corporation
+
+ jesse.barnes@intel.com
+
+
+
+
+ Laurent
+ Pinchart
+ Driver internals
+
+ Ideas on board SPRL
+
+ laurent.pinchart@ideasonboard.com
+
+
+
+
+
2008-2009
-
- Intel Corporation (Jesse Barnes <jesse.barnes@intel.com>)
-
+ 2012
+ Intel Corporation
+ Laurent Pinchart
@@ -20,6 +45,17 @@
the kernel source COPYING file.
+
+
+
+
+ 1.0
+ 2012-07-13
+ LP
+ Added extensive documentation about driver internals.
+
+
+
@@ -72,342 +108,361 @@
submission & fencing, suspend/resume support, and DMA
services.
-
- The core of every DRM driver is struct drm_driver. Drivers
- typically statically initialize a drm_driver structure,
- then pass it to drm_init() at load time.
-
- Driver initialization
-
- Before calling the DRM initialization routines, the driver must
- first create and fill out a struct drm_driver structure.
-
-
- static struct drm_driver driver = {
- /* Don't use MTRRs here; the Xserver or userspace app should
- * deal with them for Intel hardware.
- */
- .driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET,
- .load = i915_driver_load,
- .unload = i915_driver_unload,
- .firstopen = i915_driver_firstopen,
- .lastclose = i915_driver_lastclose,
- .preclose = i915_driver_preclose,
- .save = i915_save,
- .restore = i915_restore,
- .device_is_agp = i915_driver_device_is_agp,
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i915_enable_vblank,
- .disable_vblank = i915_disable_vblank,
- .irq_preinstall = i915_driver_irq_preinstall,
- .irq_postinstall = i915_driver_irq_postinstall,
- .irq_uninstall = i915_driver_irq_uninstall,
- .irq_handler = i915_driver_irq_handler,
- .reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
- .fb_probe = intelfb_probe,
- .fb_remove = intelfb_remove,
- .fb_resize = intelfb_resize,
- .master_create = i915_master_create,
- .master_destroy = i915_master_destroy,
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = i915_debugfs_init,
- .debugfs_cleanup = i915_debugfs_cleanup,
-#endif
- .gem_init_object = i915_gem_init_object,
- .gem_free_object = i915_gem_free_object,
- .gem_vm_ops = &i915_gem_vm_ops,
- .ioctls = i915_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = i915_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = probe,
- .remove = __devexit_p(drm_cleanup_pci),
- },
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
- };
-
-
- In the example above, taken from the i915 DRM driver, the driver
- sets several flags indicating what core features it supports;
- we go over the individual callbacks in later sections. Since
- flags indicate which features your driver supports to the DRM
- core, you need to set most of them prior to calling drm_init(). Some,
- like DRIVER_MODESET can be set later based on user supplied parameters,
- but that's the exception rather than the rule.
-
-
- Driver flags
-
- DRIVER_USE_AGP
-
- Driver uses AGP interface
-
-
-
- DRIVER_REQUIRE_AGP
-
- Driver needs AGP interface to function.
-
-
-
- DRIVER_USE_MTRR
-
-
- Driver uses MTRR interface for mapping memory. Deprecated.
-
-
-
-
- DRIVER_PCI_DMA
-
- Driver is capable of PCI DMA. Deprecated.
-
-
-
- DRIVER_SG
-
- Driver can perform scatter/gather DMA. Deprecated.
-
-
-
- DRIVER_HAVE_DMA
- Driver supports DMA. Deprecated.
-
-
- DRIVER_HAVE_IRQDRIVER_IRQ_SHARED
-
-
- DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
- handler. DRIVER_IRQ_SHARED indicates whether the device &
- handler support shared IRQs (note that this is required of
- PCI drivers).
-
-
-
-
- DRIVER_DMA_QUEUE
-
-
- Should be set if the driver queues DMA requests and completes them
- asynchronously. Deprecated.
-
-
-
-
- DRIVER_FB_DMA
-
-
- Driver supports DMA to/from the framebuffer. Deprecated.
-
-
-
-
- DRIVER_MODESET
-
-
- Driver supports mode setting interfaces.
-
-
-
-
-
- In this specific case, the driver requires AGP and supports
- IRQs. DMA, as discussed later, is handled by device-specific ioctls
- in this case. It also supports the kernel mode setting APIs, though
- unlike in the actual i915 driver source, this example unconditionally
- exports KMS capability.
+ Driver Initialization
+
+ At the core of every DRM driver is a drm_driver
+ structure. Drivers typically statically initialize a drm_driver structure,
+ and then pass it to one of the drm_*_init() functions
+ to register it with the DRM subsystem.
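+
+ For a PCI device, registration typically happens in the driver's module
+ init function. The following is a minimal sketch under assumed names
+ (foo_driver, foo_pci_driver and pciidlist are illustrative, and the
+ probe and remove callbacks are omitted):
+
+ static struct pci_driver foo_pci_driver = {
+         .name = DRIVER_NAME,
+         .id_table = pciidlist,
+ };
+
+ static int __init foo_init(void)
+ {
+         return drm_pci_init(&foo_driver, &foo_pci_driver);
+ }
+ module_init(foo_init);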
-
-
-
-
-
- Driver load
-
- In the previous section, we saw what a typical drm_driver
- structure might look like. One of the more important fields in
- the structure is the hook for the load function.
-
-
- static struct drm_driver driver = {
- ...
- .load = i915_driver_load,
- ...
- };
-
-
- The load function has many responsibilities: allocating a driver
- private structure, specifying supported performance counters,
- configuring the device (e.g. mapping registers & command
- buffers), initializing the memory manager, and setting up the
- initial output configuration.
-
-
- If compatibility is a concern (e.g. with drivers converted over
- to the new interfaces from the old ones), care must be taken to
- prevent device initialization and control that is incompatible with
- currently active userspace drivers. For instance, if user
- level mode setting drivers are in use, it would be problematic
- to perform output discovery & configuration at load time.
- Likewise, if user-level drivers unaware of memory management are
- in use, memory management and command buffer setup may need to
- be omitted. These requirements are driver-specific, and care
- needs to be taken to keep both old and new applications and
- libraries working. The i915 driver supports the "modeset"
- module parameter to control whether advanced features are
- enabled at load time or in legacy fashion.
+
+ The drm_driver structure contains static
+ information that describes the driver and features it supports, and
+ pointers to methods that the DRM core will call to implement the DRM API.
+ We will first go through the drm_driver static
+ information fields, and will then describe individual operations in
+ detail as they get used in later sections.
-
- Driver private & performance counters
-
- The driver private hangs off the main drm_device structure and
- can be used for tracking various device-specific bits of
- information, like register offsets, command buffer status,
- register state for suspend/resume, etc. At load time, a
- driver may simply allocate one and set drm_device.dev_priv
- appropriately; it should be freed and drm_device.dev_priv set
- to NULL when the driver is unloaded.
-
+ Driver Information
+
+ Driver Features
+
+ Drivers inform the DRM core about their requirements and supported
+ features by setting appropriate flags in the
+ driver_features field. Since those flags
+ influence the DRM core behaviour since registration time, most of them
+ must be set to registering the drm_driver
+ instance.
+
+ u32 driver_features;
+
+ Driver Feature Flags
+
+ DRIVER_USE_AGP
+
+ Driver uses AGP interface, the DRM core will manage AGP resources.
+
+
+
+ DRIVER_REQUIRE_AGP
+
+ Driver needs AGP interface to function. AGP initialization failure
+ will become a fatal error.
+
+
+
+ DRIVER_USE_MTRR
+
+ Driver uses MTRR interface for mapping memory, the DRM core will
+ manage MTRR resources. Deprecated.
+
+
+
+ DRIVER_PCI_DMA
+
+ Driver is capable of PCI DMA, mapping of PCI DMA buffers to
+ userspace will be enabled. Deprecated.
+
+
+
+ DRIVER_SG
+
+ Driver can perform scatter/gather DMA, allocation and mapping of
+ scatter/gather buffers will be enabled. Deprecated.
+
+
+
+ DRIVER_HAVE_DMA
+
+ Driver supports DMA, the userspace DMA API will be supported.
+ Deprecated.
+
+
+
+ DRIVER_HAVE_IRQDRIVER_IRQ_SHARED
+
+ DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler. The
+ DRM core will automatically register an interrupt handler when the
+ flag is set. DRIVER_IRQ_SHARED indicates whether the device &
+ handler support shared IRQs (note that this is required of PCI
+ drivers).
+
+
+
+ DRIVER_IRQ_VBL
+ Unused. Deprecated.
+
+
+ DRIVER_DMA_QUEUE
+
+ Should be set if the driver queues DMA requests and completes them
+ asynchronously. Deprecated.
+
+
+
+ DRIVER_FB_DMA
+
+ Driver supports DMA to/from the framebuffer, mapping of framebuffer
+ DMA buffers to userspace will be supported. Deprecated.
+
+
+
+ DRIVER_IRQ_VBL2
+ Unused. Deprecated.
+
+
+ DRIVER_GEM
+
+ Driver uses the GEM memory manager.
+
+
+
+ DRIVER_MODESET
+
+ Driver supports mode setting interfaces (KMS).
+
+
+
+ DRIVER_PRIME
+
+ Driver implements DRM PRIME buffer sharing.
+
+
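+
+ As an illustration, a hypothetical KMS driver using GEM could combine
+ the relevant flags when statically initializing its drm_driver
+ instance; the foo_ prefix is made up for the example.
+
+ static struct drm_driver foo_driver = {
+         .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED
+                          | DRIVER_GEM | DRIVER_MODESET,
+         /* operations, fops, version information, ... */
+ };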
+
+
+
+ Major, Minor and Patchlevel
+ int major;
+int minor;
+int patchlevel;
+
+ The DRM core identifies driver versions by a major, minor and patch
+ level triplet. The information is printed to the kernel log at
+ initialization time and passed to userspace through the
+ DRM_IOCTL_VERSION ioctl.
+
+
+ The major and minor numbers are also used to verify the requested driver
+ API version passed to DRM_IOCTL_SET_VERSION. When the driver API changes
+ between minor versions, applications can call DRM_IOCTL_SET_VERSION to
+ select a specific version of the API. If the requested major isn't equal
+ to the driver major, or the requested minor is larger than the driver
+ minor, the DRM_IOCTL_SET_VERSION call will return an error. Otherwise
+ the driver's set_version() method will be called with the requested
+ version.
+
+
+
+ Name, Description and Date
+ char *name;
+char *desc;
+char *date;
+
+ The driver name is printed to the kernel log at initialization time,
+ used for IRQ registration and passed to userspace through
+ DRM_IOCTL_VERSION.
+
+
+ The driver description is a purely informative string passed to
+ userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
+ the kernel.
+
+
+ The driver date, formatted as YYYYMMDD, is meant to identify the date of
+ the latest modification to the driver. However, as most drivers fail to
+ update it, its value is mostly useless. The DRM core prints it to the
+ kernel log at initialization time and passes it to userspace through the
+ DRM_IOCTL_VERSION ioctl.
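+
+ Taken together, the static information fields are usually initialized
+ from driver-wide macros; the values below are purely illustrative.
+
+ static struct drm_driver foo_driver = {
+         ...
+         .name = "foo",
+         .desc = "Foo display controller DRM driver",
+         .date = "20120713",
+         .major = 1,
+         .minor = 0,
+         .patchlevel = 0,
+ };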
+
+
+
+
+ Driver Load
- The DRM supports several counters which may be used for rough
- performance characterization. Note that the DRM stat counter
- system is not often used by applications, and supporting
- additional counters is completely optional.
+ The load method is the driver and device
+ initialization entry point. The method is responsible for allocating and
+ initializing driver private data, specifying supported performance
+ counters, performing resource allocation and mapping (e.g. acquiring
+ clocks, mapping registers or allocating command buffers), initializing
+ the memory manager (), installing
+ the IRQ handler (), setting up
+ vertical blanking handling (), mode
+ setting () and initial output
+ configuration ().
+
+ If compatibility is a concern (e.g. with drivers converted over from
+ User Mode Setting to Kernel Mode Setting), care must be taken to prevent
+ device initialization and control that is incompatible with currently
+ active userspace drivers. For instance, if user level mode setting
+ drivers are in use, it would be problematic to perform output discovery
+ & configuration at load time. Likewise, if user-level drivers
+ unaware of memory management are in use, memory management and command
+ buffer setup may need to be omitted. These requirements are
+ driver-specific, and care needs to be taken to keep both old and new
+ applications and libraries working.
+
+ int (*load) (struct drm_device *, unsigned long flags);
- These interfaces are deprecated and should not be used. If performance
- monitoring is desired, the developer should investigate and
- potentially enhance the kernel perf and tracing infrastructure to export
- GPU related performance information for consumption by performance
- monitoring tools and applications.
+ The method takes two arguments, a pointer to the newly created
+ drm_device and flags. The flags are used to
+ pass the driver_data field of the device id
+ corresponding to the device passed to drm_*_init().
+ Only PCI devices currently use this; USB and platform DRM drivers have
+ their load method called with flags set to 0.
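+
+ The following sketch shows the general shape of a load implementation;
+ the foo_ names are hypothetical and device setup is reduced to a single
+ helper call.
+
+ static int foo_driver_load(struct drm_device *dev, unsigned long flags)
+ {
+         struct foo_drm_private *priv;
+         int ret;
+
+         /* Allocate the driver private data and hang it off the device. */
+         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+         if (!priv)
+                 return -ENOMEM;
+         dev->dev_private = priv;
+
+         /* Map registers, initialize the memory manager, install the IRQ
+          * handler, set up vertical blanking, mode setting and the
+          * initial output configuration.
+          */
+         ret = foo_device_init(dev);
+         if (ret < 0) {
+                 kfree(priv);
+                 dev->dev_private = NULL;
+                 return ret;
+         }
+
+         return 0;
+ }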
+
+ Driver Private & Performance Counters
+
+ The driver private hangs off the main
+ drm_device structure and can be used for
+ tracking various device-specific bits of information, like register
+ offsets, command buffer status, register state for suspend/resume, etc.
+ At load time, a driver may simply allocate one and set
+ drm_device.dev_priv
+ appropriately; it should be freed and
+ drm_device.dev_priv
+ set to NULL when the driver is unloaded.
+
+
+ DRM supports several counters which were used for rough performance
+ characterization. This stat counter system is deprecated and should not
+ be used. If performance monitoring is desired, the developer should
+ investigate and potentially enhance the kernel perf and tracing
+ infrastructure to export GPU related performance information for
+ consumption by performance monitoring tools and applications.
+
+
+
+ IRQ Registration
+
+ The DRM core tries to facilitate IRQ handler registration and
+ unregistration by providing drm_irq_install and
+ drm_irq_uninstall functions. Those functions only
+ support a single interrupt per device.
+
+
+
+ Both functions get the device IRQ by calling
+ drm_dev_to_irq. This inline function will call a
+ bus-specific operation to retrieve the IRQ number. For platform devices,
+ platform_get_irq(..., 0) is used to retrieve the
+ IRQ number.
+
+
+ drm_irq_install starts by calling the
+ irq_preinstall driver operation. The operation
+ is optional and must make sure that the interrupt will not fire, by
+ clearing all pending interrupt flags or by disabling the interrupt.
+
+
+ The IRQ will then be requested by a call to
+ request_irq. If the DRIVER_IRQ_SHARED driver
+ feature flag is set, a shared (IRQF_SHARED) IRQ handler will be
+ requested.
+
+
+ The IRQ handler function must be provided as the mandatory irq_handler
+ driver operation. It will get passed directly to
+ request_irq and thus has the same prototype as all
+ IRQ handlers. It will get called with a pointer to the DRM device as the
+ second argument.
+
+
+ Finally the function calls the optional
+ irq_postinstall driver operation. The operation
+ usually enables interrupts (excluding the vblank interrupt, which is
+ enabled separately), but drivers may choose to enable/disable interrupts
+ at a different time.
+
+
+ drm_irq_uninstall is similarly used to uninstall an
+ IRQ handler. It starts by waking up all processes waiting on a vblank
+ interrupt to make sure they don't hang, and then calls the optional
+ irq_uninstall driver operation. The operation
+ must disable all hardware interrupts. Finally the function frees the IRQ
+ by calling free_irq.
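+
+ A driver using these helpers could thus provide IRQ operations along
+ the following lines; the foo_ register accessors and bit names are
+ assumptions for the sketch.
+
+ static void foo_driver_irq_preinstall(struct drm_device *dev)
+ {
+         struct foo_drm_private *priv = dev->dev_private;
+
+         /* Mask and clear all interrupts before request_irq(). */
+         foo_write(priv, FOO_INT_MASK, 0);
+         foo_write(priv, FOO_INT_STATUS, ~0);
+ }
+
+ static irqreturn_t foo_driver_irq_handler(DRM_IRQ_ARGS)
+ {
+         struct drm_device *dev = arg;
+
+         /* Read and acknowledge the interrupt sources, handle vblank, ... */
+         return IRQ_HANDLED;
+ }
+
+ static int foo_driver_irq_postinstall(struct drm_device *dev)
+ {
+         struct foo_drm_private *priv = dev->dev_private;
+
+         /* Enable the required interrupts; the vblank interrupt is
+          * enabled separately through the vblank handling infrastructure.
+          */
+         foo_write(priv, FOO_INT_MASK, FOO_INT_HOTPLUG);
+         return 0;
+ }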
+
+
+
+ Memory Manager Initialization
+
+ Every DRM driver requires a memory manager which must be initialized at
+ load time. DRM currently contains two memory managers, the Translation
+ Table Manager (TTM) and the Graphics Execution Manager (GEM).
+ This document describes the use of the GEM memory manager only. See
+ for details.
+
+
+
+ Miscellaneous Device Configuration
+
+ Another task that may be necessary for PCI devices during configuration
+ is mapping the video BIOS. On many devices, the VBIOS describes device
+ configuration, LCD panel timings (if any), and contains flags indicating
+ device state. Mapping the BIOS can be done using the pci_map_rom() call,
+ a convenience function that takes care of mapping the actual ROM,
+ whether it has been shadowed into memory (typically at address 0xc0000)
+ or exists on the PCI device in the ROM BAR. Note that after the ROM has
+ been mapped and any necessary information has been extracted, it should
+ be unmapped; on many devices, the ROM address decoder is shared with
+ other BARs, so leaving it mapped could cause undesired behaviour like
+ hangs or memory corruption.
+
+
+
+
-
- Configuring the device
-
- Obviously, device configuration is device-specific.
- However, there are several common operations: finding a
- device's PCI resources, mapping them, and potentially setting
- up an IRQ handler.
-
-
- Finding & mapping resources is fairly straightforward. The
- DRM wrapper functions, drm_get_resource_start() and
- drm_get_resource_len(), may be used to find BARs on the given
- drm_device struct. Once those values have been retrieved, the
- driver load function can call drm_addmap() to create a new
- mapping for the BAR in question. Note that you probably want a
- drm_local_map_t in your driver private structure to track any
- mappings you create.
-
-
-
-
- if compatibility with other operating systems isn't a concern
- (DRM drivers can run under various BSD variants and OpenSolaris),
- native Linux calls may be used for the above, e.g. pci_resource_*
- and iomap*/iounmap. See the Linux device driver book for more
- info.
-
-
- Once you have a register map, you may use the DRM_READn() and
- DRM_WRITEn() macros to access the registers on your device, or
- use driver-specific versions to offset into your MMIO space
- relative to a driver-specific base pointer (see I915_READ for
- an example).
-
-
- If your device supports interrupt generation, you may want to
- set up an interrupt handler when the driver is loaded. This
- is done using the drm_irq_install() function. If your device
- supports vertical blank interrupts, it should call
- drm_vblank_init() to initialize the core vblank handling code before
- enabling interrupts on your device. This ensures the vblank related
- structures are allocated and allows the core to handle vblank events.
-
-
-
- Once your interrupt handler is registered (it uses your
- drm_driver.irq_handler as the actual interrupt handling
- function), you can safely enable interrupts on your device,
- assuming any other state your interrupt handler uses is also
- initialized.
-
-
- Another task that may be necessary during configuration is
- mapping the video BIOS. On many devices, the VBIOS describes
- device configuration, LCD panel timings (if any), and contains
- flags indicating device state. Mapping the BIOS can be done
- using the pci_map_rom() call, a convenience function that
- takes care of mapping the actual ROM, whether it has been
- shadowed into memory (typically at address 0xc0000) or exists
- on the PCI device in the ROM BAR. Note that after the ROM
- has been mapped and any necessary information has been extracted,
- it should be unmapped; on many devices, the ROM address decoder is
- shared with other BARs, so leaving it mapped could cause
- undesired behavior like hangs or memory corruption.
-
-
-
+
+
+ Memory management
+
+ Modern Linux systems require large amounts of graphics memory to store
+ frame buffers, textures, vertices and other graphics-related data. Given
+ the very dynamic nature of much of this data, managing graphics memory
+ efficiently is thus crucial for the graphics stack and plays a central
+ role in the DRM infrastructure.
+
+
+ The DRM core includes two memory managers, namely Translation Table Maps
+ (TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory
+ manager to be developed and tried to be a one-size-fits-all
+ solution. It provides a single userspace API to accommodate the needs of
+ all hardware, supporting both Unified Memory Architecture (UMA) devices
+ and devices with dedicated video RAM (i.e. most discrete video cards).
+ This resulted in a large, complex piece of code that turned out to be
+ hard to use for driver development.
+
+
+ GEM started as an Intel-sponsored project in reaction to TTM's
+ complexity. Its design philosophy is completely different: instead of
+ providing a solution to every graphics memory-related problem, GEM
+ identified common code between drivers and created a support library to
+ share it. GEM has simpler initialization and execution requirements than
+ TTM, but has no video RAM management capabilities and is thus limited to
+ UMA devices.
+
- Memory manager initialization
-
- In order to allocate command buffers, cursor memory, scanout
- buffers, etc., as well as support the latest features provided
- by packages like Mesa and the X.Org X server, your driver
- should support a memory manager.
-
+ The Translation Table Manager (TTM)
- If your driver supports memory management (it should!), you
- need to set that up at load time as well. How you initialize
- it depends on which memory manager you're using: TTM or GEM.
+ TTM design background and information belongs here.
TTM initialization
-
- TTM (for Translation Table Manager) manages video memory and
- aperture space for graphics devices. TTM supports both UMA devices
- and devices with dedicated video RAM (VRAM), i.e. most discrete
- graphics devices. If your device has dedicated RAM, supporting
- TTM is desirable. TTM also integrates tightly with your
- driver-specific buffer execution function. See the radeon
- driver for examples.
-
-
- The core TTM structure is the ttm_bo_driver struct. It contains
- several fields with function pointers for initializing the TTM,
- allocating and freeing memory, waiting for command completion
- and fence synchronization, and memory migration. See the
- radeon_ttm.c file for an example of usage.
+ This section is outdated.
+
+ Drivers wishing to support TTM must fill out a ttm_bo_driver
+ structure. The structure contains several fields with function
+ pointers for initializing the TTM, allocating and freeing memory,
+ waiting for command completion and fence synchronization, and memory
+ migration. See the radeon_ttm.c file for an example of usage.
The ttm_global_reference structure is made up of several fields:
@@ -445,82 +500,1081 @@
count for the TTM, which will call your initialization function.
+
+
+ The Graphics Execution Manager (GEM)
+
+ The GEM design approach has resulted in a memory manager that doesn't
+ provide full coverage of all (or even all common) use cases in its
+ userspace or kernel API. GEM exposes a set of standard memory-related
+ operations to userspace and a set of helper functions to drivers, and lets
+ drivers implement hardware-specific operations with their own private API.
+
+
+ The GEM userspace API is described in the
+ GEM - the Graphics
+ Execution Manager article on LWN. While slightly
+ outdated, the document provides a good overview of the GEM API principles.
+ Buffer allocation and read and write operations, described as part of the
+ common GEM API, are currently implemented using driver-specific ioctls.
+
+
+ GEM is data-agnostic. It manages abstract buffer objects without knowing
+ what individual buffers contain. APIs that require knowledge of buffer
+ contents or purpose, such as buffer allocation or synchronization
+ primitives, are thus outside of the scope of GEM and must be implemented
+ using driver-specific ioctls.
+
+
+ On a fundamental level, GEM involves several operations:
+
+ Memory allocation and freeing
+ Command execution
+ Aperture management at command execution time
+
+ Buffer object allocation is relatively straightforward and largely
+ provided by Linux's shmem layer, which provides memory to back each
+ object.
+
+
+ Device-specific operations, such as command execution, pinning, buffer
+ read & write, mapping, and domain ownership transfers are left to
+ driver-specific ioctls.
+
+
+ GEM Initialization
+
+ Drivers that use GEM must set the DRIVER_GEM bit in the struct
+ drm_driver
+ driver_features field. The DRM core will
+ then automatically initialize the GEM core before calling the
+ load operation. Behind the scenes, this will
+ create a DRM Memory Manager object which provides an address space
+ pool for object allocation.
+
+
+ In a KMS configuration, drivers need to allocate and initialize a
+ command ring buffer following core GEM initialization if required by
+ the hardware. UMA devices usually have what is called a "stolen"
+ memory region, which provides space for the initial framebuffer and
+ large, contiguous memory regions required by the device. This space is
+ typically not managed by GEM, and must be initialized separately into
+ its own DRM MM object.
+
+
- GEM initialization
-
- GEM is an alternative to TTM, designed specifically for UMA
- devices. It has simpler initialization and execution requirements
- than TTM, but has no VRAM management capability. Core GEM
- is initialized by calling drm_mm_init() to create
- a GTT DRM MM object, which provides an address space pool for
- object allocation. In a KMS configuration, the driver
- needs to allocate and initialize a command ring buffer following
- core GEM initialization. A UMA device usually has what is called a
- "stolen" memory region, which provides space for the initial
- framebuffer and large, contiguous memory regions required by the
- device. This space is not typically managed by GEM, and it must
- be initialized separately into its own DRM MM object.
-
-
- Initialization is driver-specific. In the case of Intel
- integrated graphics chips like 965GM, GEM initialization can
- be done by calling the internal GEM init function,
- i915_gem_do_init(). Since the 965GM is a UMA device
- (i.e. it doesn't have dedicated VRAM), GEM manages
- making regular RAM available for GPU operations. Memory set
- aside by the BIOS (called "stolen" memory by the i915
- driver) is managed by the DRM memrange allocator; the
- rest of the aperture is managed by GEM.
-
- /* Basic memrange allocator for stolen space (aka vram) */
- drm_memrange_init(&dev_priv->vram, 0, prealloc_size);
- /* Let GEM Manage from end of prealloc space to end of aperture */
- i915_gem_do_init(dev, prealloc_size, agp_size);
-
-
-
-
- Once the memory manager has been set up, we may allocate the
- command buffer. In the i915 case, this is also done with a
- GEM function, i915_gem_init_ringbuffer().
-
+ GEM Objects Creation
+
+ GEM splits creation of GEM objects and allocation of the memory that
+ backs them into two distinct operations.
+
+
+ GEM objects are represented by an instance of struct
+ drm_gem_object. Drivers usually need to extend
+ GEM objects with private information and thus create a driver-specific
+ GEM object structure type that embeds an instance of struct
+ drm_gem_object.
+
+
+ To create a GEM object, a driver allocates memory for an instance of its
+ specific GEM object type and initializes the embedded struct
+ drm_gem_object with a call to
+ drm_gem_object_init. The function takes a pointer to
+ the DRM device, a pointer to the GEM object and the buffer object size
+ in bytes.
+
+
+ GEM uses shmem to allocate anonymous pageable memory.
+ drm_gem_object_init will create an shmfs file of
+ the requested size and store it into the struct
+ drm_gem_object filp
+ field. The memory is used as either main storage for the object when the
+ graphics hardware uses system memory directly or as a backing store
+ otherwise.
+
+
+ Drivers are responsible for the actual allocation of physical pages by
+ calling shmem_read_mapping_page_gfp for each page.
+ Note that they can decide to allocate pages when initializing the GEM
+ object, or to delay allocation until the memory is needed (for instance
+ when a page fault occurs as a result of a userspace memory access or
+ when the driver needs to start a DMA transfer involving the memory).
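+
+ For instance, a driver could wrap object creation as follows;
+ foo_gem_object and foo_gem_create are hypothetical.
+
+ struct foo_gem_object {
+         struct drm_gem_object base;
+         /* driver-specific fields: GPU address, page list, ... */
+ };
+
+ static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
+                                              size_t size)
+ {
+         struct foo_gem_object *obj;
+
+         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+         if (!obj)
+                 return NULL;
+
+         /* Create the shmfs backing store; pages are allocated later,
+          * for instance at fault time.
+          */
+         if (drm_gem_object_init(dev, &obj->base, size) < 0) {
+                 kfree(obj);
+                 return NULL;
+         }
+
+         return obj;
+ }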
+
+
+ Anonymous pageable memory allocation is not always desired, for instance
+ when the hardware requires physically contiguous system memory as is
+ often the case in embedded devices. Drivers can create GEM objects with
+ no shmfs backing (called private GEM objects) by initializing them with
+ a call to drm_gem_private_object_init instead of
+ drm_gem_object_init. Storage for private GEM
+ objects must be managed by drivers.
+
+
+ Drivers that do not need to extend GEM objects with private information
+ can call the drm_gem_object_alloc function to
+ allocate and initialize a struct drm_gem_object
+ instance. The GEM core will call the optional driver
+ gem_init_object operation after initializing
+ the GEM object with drm_gem_object_init.
+ int (*gem_init_object) (struct drm_gem_object *obj);
+
+
+ No alloc-and-init function exists for private GEM objects.
+
+
+
+ GEM Objects Lifetime
+
+ All GEM objects are reference-counted by the GEM core. References can be
+ acquired and released by calling drm_gem_object_reference
+ and drm_gem_object_unreference respectively. The
+ caller must hold the drm_device
+ struct_mutex lock. As a convenience, GEM
+ provides the drm_gem_object_reference_unlocked and
+ drm_gem_object_unreference_unlocked functions that
+ can be called without holding the lock.
+
+
+ When the last reference to a GEM object is released the GEM core calls
+ the drm_driver
+ gem_free_object operation. That operation is
+ mandatory for GEM-enabled drivers and must free the GEM object and all
+ associated resources.
+
+
+ void (*gem_free_object) (struct drm_gem_object *obj);
+ Drivers are responsible for freeing all GEM object resources, including
+ the resources created by the GEM core. If an mmap offset has been
+ created for the object (in which case
+ drm_gem_object::map_list::map
+ is not NULL) it must be freed by a call to
+ drm_gem_free_mmap_offset. The shmfs backing store
+ must be released by calling drm_gem_object_release
+ (that function can safely be called if no shmfs backing store has been
+ created).
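+
+ A minimal gem_free_object implementation for the hypothetical
+ foo_gem_object type shown earlier could look as follows.
+
+ static void foo_gem_free_object(struct drm_gem_object *gem_obj)
+ {
+         struct foo_gem_object *obj =
+                 container_of(gem_obj, struct foo_gem_object, base);
+
+         /* Release the mmap offset if one has been created. */
+         if (gem_obj->map_list.map)
+                 drm_gem_free_mmap_offset(gem_obj);
+
+         /* Release the shmfs backing store (safe even if none exists). */
+         drm_gem_object_release(gem_obj);
+
+         /* Free driver-specific resources, then the object itself. */
+         kfree(obj);
+ }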
+
+
+
+ GEM Objects Naming
+
+ Communication between userspace and the kernel refers to GEM objects
+ using local handles, global names or, more recently, file descriptors.
+ All of those are 32-bit integer values; the usual Linux kernel limits
+ apply to the file descriptors.
+
+
+ GEM handles are local to a DRM file. Applications get a handle to a GEM
+ object through a driver-specific ioctl, and can use that handle to refer
+ to the GEM object in other standard or driver-specific ioctls. Closing a
+ DRM file handle frees all its GEM handles and dereferences the
+ associated GEM objects.
+
+
+ To create a handle for a GEM object drivers call
+ drm_gem_handle_create. The function takes a pointer
+ to the DRM file and the GEM object and returns a locally unique handle.
+ When the handle is no longer needed drivers delete it with a call to
+ drm_gem_handle_delete. Finally the GEM object
+ associated with a handle can be retrieved by a call to
+ drm_gem_object_lookup.
+
+
+ Handles don't take ownership of GEM objects, they only take a reference
+ to the object that will be dropped when the handle is destroyed. To
+ avoid leaking GEM objects, drivers must make sure they drop the
+ reference(s) they own (such as the initial reference taken at object
+ creation time) as appropriate, without any special consideration for the
+ handle. For example, in the particular case of combined GEM object and
+ handle creation in the implementation of the
+ dumb_create operation, drivers must drop the
+ initial reference to the GEM object before returning the handle.
+
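+
+ The combined create-object-and-handle pattern thus typically looks as
+ follows, reusing the hypothetical foo_gem_create helper from above.
+
+ struct foo_gem_object *obj;
+ u32 handle;
+ int ret;
+
+ obj = foo_gem_create(dev, size);
+ if (!obj)
+         return -ENOMEM;
+
+ ret = drm_gem_handle_create(file_priv, &obj->base, &handle);
+ /* Drop the initial reference; the object now lives on through the
+  * reference held by the handle.
+  */
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret < 0)
+         return ret;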
+
+ GEM names are similar in purpose to handles but are not local to DRM
+ files. They can be passed between processes to reference a GEM object
+ globally. Names can't be used directly to refer to objects in the DRM
+ API, applications must convert handles to names and names to handles
+ using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls
+ respectively. The conversion is handled by the DRM core without any
+ driver-specific support.
+
+
+ Similar to global names, GEM file descriptors are also used to share GEM
+ objects across processes. They offer additional security: as file
+ descriptors must be explicitly sent over UNIX domain sockets to be shared
+ between applications, they can't be guessed like the globally unique GEM
+ names.
+
+
+ Drivers that support GEM file descriptors, also known as the DRM PRIME
+ API, must set the DRIVER_PRIME bit in the struct
+ drm_driver
+ driver_features field, and implement the
+ prime_handle_to_fd and
+ prime_fd_to_handle operations.
+
+
+ int (*prime_handle_to_fd)(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags, int *prime_fd);
+ int (*prime_fd_to_handle)(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle);
+ Those two operations convert a handle to a PRIME file descriptor and
+ vice versa. Drivers must use the kernel dma-buf buffer sharing framework
+ to manage the PRIME file descriptors.
+
+
+ While non-GEM drivers must implement the operations themselves, GEM
+ drivers must use the drm_gem_prime_handle_to_fd
+ and drm_gem_prime_fd_to_handle helper functions.
+ Those helpers rely on the driver
+ gem_prime_export and
+ gem_prime_import operations to create a dma-buf
+ instance from a GEM object (dma-buf exporter role) and to create a GEM
+ object from a dma-buf instance (dma-buf importer role).
+
+
+ struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags);
+ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+ These two operations are mandatory for GEM drivers that support DRM
+ PRIME.
+
+
+
+ GEM Objects Mapping
+
+ Because mapping operations are fairly heavyweight, GEM favours
+ read/write-like access to buffers, implemented through driver-specific
+ ioctls, over mapping buffers to userspace. However, when random access
+ to the buffer is needed (to perform software rendering for instance),
+ direct access to the object can be more efficient.
+
+
+ The mmap system call can't be used directly to map GEM objects, as they
+ don't have their own file handle. Two alternative methods currently
+ co-exist to map GEM objects to userspace. The first method uses a
+ driver-specific ioctl to perform the mapping operation, calling
+ do_mmap under the hood. This is often considered
+ dubious, seems to be discouraged for new GEM-enabled drivers, and will
+ thus not be described here.
+
+
+ The second method uses the mmap system call on the DRM file handle.
+ void *mmap(void *addr, size_t length, int prot, int flags, int fd,
+ off_t offset);
+ DRM identifies the GEM object to be mapped by a fake offset passed
+ through the mmap offset argument. Prior to being mapped, a GEM object
+ must thus be associated with a fake offset. To do so, drivers must call
+ drm_gem_create_mmap_offset on the object. The
+ function allocates a fake offset range from a pool and stores the
+ offset divided by PAGE_SIZE in
+ obj->map_list.hash.key. Care must be taken not to
+ call drm_gem_create_mmap_offset if a fake offset
+ has already been allocated for the object. This can be tested by
+ obj->map_list.map being non-NULL.
+
+
+ Once allocated, the fake offset value
+ (obj->map_list.hash.key << PAGE_SHIFT)
+ must be passed to the application in a driver-specific way and can then
+ be used as the mmap offset argument.
+
+
+ The GEM core provides a helper method drm_gem_mmap
+ to handle object mapping. The method can be set directly as the mmap
+ file operation handler. It will look up the GEM object based on the
+ offset value and set the VMA operations to the
+ drm_driver gem_vm_ops
+ field. Note that drm_gem_mmap doesn't map memory to
+ userspace, but relies on the driver-provided fault handler to map pages
+ individually.
+
+
+ To use drm_gem_mmap, drivers must fill the struct
+ drm_driver gem_vm_ops
+ field with a pointer to VM operations.
+
+
+ struct vm_operations_struct *gem_vm_ops
+
+ struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ };
+
+
+ The open and close
+ operations must update the GEM object reference count. Drivers can use
+ the drm_gem_vm_open and
+ drm_gem_vm_close helper functions directly as open
+ and close handlers.
+
+
+ The fault operation handler is responsible for mapping individual pages
+ to userspace when a page fault occurs. Depending on the memory
+ allocation scheme, drivers can allocate pages at fault time, or can
+ decide to allocate memory for the GEM object at the time the object is
+ created.
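+
+ Under those assumptions, a driver could wire up fault-based mapping as
+ follows; foo_gem_fault and foo_fops are hypothetical.
+
+ static const struct file_operations foo_fops = {
+         .owner = THIS_MODULE,
+         .open = drm_open,
+         .release = drm_release,
+         .unlocked_ioctl = drm_ioctl,
+         .mmap = drm_gem_mmap,
+ };
+
+ static struct vm_operations_struct foo_gem_vm_ops = {
+         .fault = foo_gem_fault,
+         .open = drm_gem_vm_open,
+         .close = drm_gem_vm_close,
+ };
+
+ static struct drm_driver foo_driver = {
+         ...
+         .fops = &foo_fops,
+         .gem_vm_ops = &foo_gem_vm_ops,
+         ...
+ };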
+
+
+ Drivers that want to map the GEM object upfront instead of handling page
+ faults can implement their own mmap file operation handler.
+
+
+
+ Dumb GEM Objects
+
+ The GEM API doesn't standardize GEM objects creation and leaves it to
+ driver-specific ioctls. While not an issue for full-fledged graphics
+ stacks that include device-specific userspace components (in libdrm for
+ instance), this limit makes DRM-based early boot graphics unnecessarily
+ complex.
+
+
+ Dumb GEM objects partly alleviate the problem by providing a standard
+ API to create dumb buffers suitable for scanout, which can then be used
+ to create KMS frame buffers.
+
+
+ To support dumb GEM objects drivers must implement the
+ dumb_create,
+ dumb_destroy and
+ dumb_map_offset operations, described below; a
+ sketch of a possible dumb_create implementation
+ follows the descriptions.
+
+
+
+ int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+ The dumb_create operation creates a GEM
+ object suitable for scanout based on the width, height and depth
+ from the struct drm_mode_create_dumb
+ argument. It fills the argument's handle,
+ pitch and size
+ fields with a handle for the newly created GEM object and its line
+ pitch and size in bytes.
+
+
+
+ int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle);
+
+ The dumb_destroy operation destroys a dumb
+ GEM object created by dumb_create.
+
+
+
+ int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+
+ The dumb_map_offset operation associates an
+ mmap fake offset with the GEM object given by the handle and returns
+ it. Drivers must use the
+ drm_gem_create_mmap_offset function to
+ associate the fake offset as described in
+ .
+
+
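+
+ As an example, the dumb_create sketch below computes the pitch and
+ size, creates a GEM object with the hypothetical foo_gem_create helper
+ and returns a handle to it.
+
+ static int foo_dumb_create(struct drm_file *file_priv,
+                            struct drm_device *dev,
+                            struct drm_mode_create_dumb *args)
+ {
+         struct foo_gem_object *obj;
+         int ret;
+
+         args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+         args->size = args->pitch * args->height;
+
+         obj = foo_gem_create(dev, args->size);
+         if (!obj)
+                 return -ENOMEM;
+
+         ret = drm_gem_handle_create(file_priv, &obj->base, &args->handle);
+         /* Drop the initial reference as explained in the section about
+          * GEM objects naming.
+          */
+         drm_gem_object_unreference_unlocked(&obj->base);
+
+         return ret;
+ }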
+
+
+
+ Memory Coherency
+
+ When mapped to the device or used in a command buffer, backing pages
+ for an object are flushed to memory and marked write combined so as to
+ be coherent with the GPU. Likewise, if the CPU accesses an object
+ after the GPU has finished rendering to the object, then the object
+ must be made coherent with the CPU's view of memory, usually involving
+ GPU cache flushing of various kinds. This core CPU<->GPU
+ coherency management is provided by a device-specific ioctl, which
+ evaluates an object's current domain and performs any necessary
+ flushing or synchronization to put the object into the desired
+ coherency domain (note that the object may be busy, i.e. an active
+ render target; in that case, setting the domain blocks the client and
+ waits for rendering to complete before performing any necessary
+ flushing operations).
+
+
+
+ Command Execution
+
+ Perhaps the most important GEM function for GPU devices is providing a
+ command execution interface to clients. Client programs construct
+ command buffers containing references to previously allocated memory
+ objects, and then submit them to GEM. At that point, GEM takes care to
+ bind all the objects into the GTT, execute the buffer, and provide
+ necessary synchronization between clients accessing the same buffers.
+ This often involves evicting some objects from the GTT and re-binding
+ others (a fairly expensive operation), and providing relocation
+ support which hides fixed GTT offsets from clients. Clients must take
+ care not to submit command buffers that reference more objects than
+ can fit in the GTT; otherwise, GEM will reject them and no rendering
+ will occur. Similarly, if several objects in the buffer require fence
+ registers to be allocated for correct rendering (e.g. 2D blits on
+ pre-965 chips), care must be taken not to require more fence registers
+ than are available to the client. Such resource management should be
+ abstracted from the client in libdrm.
+
+
+
+
+
+ Mode Setting
+
+ Drivers must initialize the mode setting core by calling
+ drm_mode_config_init on the DRM device. The function
+ initializes the drm_device
+ mode_config field and never fails. Once done,
+ mode configuration must be setup by initializing the following fields.
+
+
+
+ int min_width, min_height;
+int max_width, max_height;
+
+ Minimum and maximum width and height of the frame buffers in pixel
+ units.
+
+
+
+ struct drm_mode_config_funcs *funcs;
+ Mode setting functions.
+
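+
+ Putting this together, mode setting initialization in the load
+ operation could look as follows; the limits and foo_mode_funcs are
+ illustrative.
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ dev->mode_config.funcs = &foo_mode_funcs;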
+
- Output configuration
+ Frame Buffer Creation
+ struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd);
- The final initialization task is output configuration. This involves:
-
-
- Finding and initializing the CRTCs, encoders, and connectors
- for the device.
-
-
- Creating an initial configuration.
-
-
- Registering a framebuffer console driver.
-
-
+ Frame buffers are abstract memory objects that provide a source of
+ pixels to scan out to a CRTC. Applications explicitly request the
+ creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and
+ receive an opaque handle that can be passed to the KMS CRTC control,
+ plane configuration and page flip functions.
+
+
+ Frame buffers rely on the underlying memory manager for low-level memory
+ operations. When creating a frame buffer applications pass a memory
+ handle (or a list of memory handles for multi-planar formats) through
+ the drm_mode_fb_cmd2 argument. This document
+ assumes that the driver uses GEM; those handles thus reference GEM
+ objects.
+
+
+ Drivers must first validate the requested frame buffer parameters passed
+ through the mode_cmd argument. In particular this is where invalid
+ sizes, pixel formats or pitches can be caught.
+
+
+ If the parameters are deemed valid, drivers then create, initialize and
+ return an instance of struct drm_framebuffer.
+ If desired the instance can be embedded in a larger driver-specific
+ structure. The new instance is initialized with a call to
+ drm_framebuffer_init which takes a pointer to DRM
+ frame buffer operations (struct
+ drm_framebuffer_funcs). Frame buffer operations are
+
+
+ int (*create_handle)(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int *handle);
+
+ Create a handle to the frame buffer underlying memory object. If
+ the frame buffer uses a multi-plane format, the handle will
+ reference the memory object associated with the first plane.
+
+
+ Drivers call drm_gem_handle_create to create
+ the handle.
+
+
+
+ void (*destroy)(struct drm_framebuffer *framebuffer);
+
+ Destroy the frame buffer object and frees all associated
+ resources. Drivers must call
+ drm_framebuffer_cleanup to free resources
+ allocated by the DRM core for the frame buffer object, and must
+ make sure to unreference all memory objects associated with the
+ frame buffer. Handles created by the
+ create_handle operation are released by
+ the DRM core.
+
+
+
+ int (*dirty)(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv, unsigned flags, unsigned color,
+ struct drm_clip_rect *clips, unsigned num_clips);
+
+ This optional operation notifies the driver that a region of the
+ frame buffer has changed in response to a DRM_IOCTL_MODE_DIRTYFB
+ ioctl call.
+
+
+
+
+
+ After initializing the drm_framebuffer
+ instance drivers must fill its width,
+ height, pitches,
+ offsets, depth,
+ bits_per_pixel and
+ pixel_format fields from the values passed
+ through the drm_mode_fb_cmd2 argument. They
+ should call the drm_helper_mode_fill_fb_struct
+ helper function to do so.
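+
+ The overall flow is sketched below, with foo_framebuffer embedding a
+ struct drm_framebuffer and parameter validation and GEM object lookup
+ reduced to comments.
+
+ static struct drm_framebuffer *
+ foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+               struct drm_mode_fb_cmd2 *mode_cmd)
+ {
+         struct foo_framebuffer *fb;
+         int ret;
+
+         /* Validate sizes, pixel format and pitches, and look up the GEM
+          * object(s) referenced by mode_cmd->handles here.
+          */
+
+         fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+         if (!fb)
+                 return ERR_PTR(-ENOMEM);
+
+         drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+
+         ret = drm_framebuffer_init(dev, &fb->base, &foo_fb_funcs);
+         if (ret < 0) {
+                 kfree(fb);
+                 return ERR_PTR(ret);
+         }
+
+         return &fb->base;
+ }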
+
+
+
+ Output Polling
+ void (*output_poll_changed)(struct drm_device *dev);
+
+ This operation notifies the driver that the status of one or more
+ connectors has changed. Drivers that use the fb helper can just call the
+ drm_fb_helper_hotplug_event function to handle this
+ operation.
+
+
+
+
+
+
+
+ KMS Initialization and Cleanup
+
+ A KMS device is abstracted and exposed as a set of planes, CRTCs, encoders
+ and connectors. KMS drivers must thus create and initialize all those
+ objects at load time after initializing mode setting.
+
+
+ CRTCs (struct drm_crtc)
+
+ A CRTC is an abstraction representing a part of the chip that contains a
+ pointer to a scanout buffer. Therefore, the number of CRTCs available
+ determines how many independent scanout buffers can be active at any
+ given time. The CRTC structure contains several fields to support this:
+ a pointer to some video memory (abstracted as a frame buffer object), a
+ display mode, and an (x, y) offset into the video memory to support
+ panning or configurations where one piece of video memory spans multiple
+ CRTCs.
- Output discovery and initialization
-
- Several core functions exist to create CRTCs, encoders, and
- connectors, namely: drm_crtc_init(), drm_connector_init(), and
- drm_encoder_init(), along with several "helper" functions to
- perform common tasks.
-
-
- Connectors should be registered with sysfs once they've been
- detected and initialized, using the
- drm_sysfs_connector_add() function. Likewise, when they're
- removed from the system, they should be destroyed with
- drm_sysfs_connector_remove().
-
-
-CRTC Initialization
+
+ A KMS device must create and register at least one struct
+ drm_crtc instance. The instance is allocated
+ and zeroed by the driver, possibly as part of a larger structure, and
+ registered with a call to drm_crtc_init with a
+ pointer to CRTC functions.
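+
+ For instance, with a driver-specific structure embedding the CRTC
+ (foo_crtc and foo_crtc_funcs are hypothetical):
+
+ struct foo_crtc {
+         struct drm_crtc base;
+         /* driver-specific CRTC state */
+ };
+
+ static int foo_crtc_create(struct drm_device *dev)
+ {
+         struct foo_crtc *crtc;
+
+         crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+         if (!crtc)
+                 return -ENOMEM;
+
+         drm_crtc_init(dev, &crtc->base, &foo_crtc_funcs);
+         return 0;
+ }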
+
+
+
+ CRTC Operations
+
+ Set Configuration
+ int (*set_config)(struct drm_mode_set *set);
+
+ Apply a new CRTC configuration to the device. The configuration
+ specifies a CRTC, a frame buffer to scan out from, a (x,y) position in
+ the frame buffer, a display mode and an array of connectors to drive
+ with the CRTC if possible.
+
+
+ If the frame buffer specified in the configuration is NULL, the driver
+ must detach all encoders connected to the CRTC and all connectors
+ attached to those encoders and disable them.
+
+
+ This operation is called with the mode config lock held.
+
+
+ FIXME: How should set_config interact with DPMS? If the CRTC is
+ suspended, should it be resumed?
+
+
+
+ Page Flipping
+ int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
+
+ Schedule a page flip to the given frame buffer for the CRTC. This
+ operation is called with the mode config mutex held.
+
+
+ Page flipping is a synchronization mechanism that replaces the frame
+ buffer being scanned out by the CRTC with a new frame buffer during
+ vertical blanking, avoiding tearing. When an application requests a page
+ flip the DRM core verifies that the new frame buffer is large enough to
+ be scanned out by the CRTC in the currently configured mode and then
+ calls the CRTC page_flip operation with a
+ pointer to the new frame buffer.
+
+
+ The page_flip operation schedules a page flip.
+ Once any pending rendering targeting the new frame buffer has
+ completed, the CRTC will be reprogrammed to display that frame buffer
+ after the next vertical refresh. The operation must return immediately
+ without waiting for rendering or page flip to complete and must block
+ any new rendering to the frame buffer until the page flip completes.
+
+
+ If a page flip is already pending, the
+ page_flip operation must return
+ -EBUSY.
+
+
+ To synchronize page flip to vertical blanking the driver will likely
+ need to enable vertical blanking interrupts. It should call
+ drm_vblank_get for that purpose, and call
+ drm_vblank_put after the page flip completes.
+
+
+ If the application has requested to be notified when page flip completes
+ the page_flip operation will be called with a
+ non-NULL event argument pointing to a
+ drm_pending_vblank_event instance. Upon page
+ flip completion the driver must fill the
+ event::event
+ sequence, tv_sec
+ and tv_usec fields with the associated
+ vertical blanking count and timestamp, add the event to the
+ drm_file list of events to be signaled, and wake
+ up any waiting process. This can be performed with
+ event->event.sequence = drm_vblank_count_and_time(..., &now);
+ event->event.tv_sec = now.tv_sec;
+ event->event.tv_usec = now.tv_usec;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_add_tail(&event->base.link, &event->base.file_priv->event_list);
+ wake_up_interruptible(&event->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+
+ FIXME: Could drivers that don't need to wait for rendering to complete
+ just add the event to dev->vblank_event_list and
+ let the DRM core handle everything, as for "normal" vertical blanking
+ events?
+
+
+ While waiting for the page flip to complete, the
+ event->base.link list head can be used freely by
+ the driver to store the pending event in a driver-specific list.
+
+
+ If the file handle is closed before the event is signaled, drivers must
+ take care to destroy the event in their
+ preclose operation (and, if needed, call
+ drm_vblank_put).
+
+
+
+ Miscellaneous
+
+
+ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size);
+
+ Apply a gamma table to the device. The operation is optional.
+
+
+
+ void (*destroy)(struct drm_crtc *crtc);
+
+ Destroy the CRTC when not needed anymore. See
+ .
+
+
+
+
+
+
+
+ Planes (struct drm_plane)
+
+ A plane represents an image source that can be blended with or overlaid
+ on top of a CRTC during the scanout process. Planes are associated with
+ a frame buffer to crop a portion of the image memory (source) and
+ optionally scale it to a destination size. The result is then blended
+ with or overlaid on top of a CRTC.
+
+
+ Plane Initialization
+
+ Planes are optional. To create a plane, a KMS driver allocates and
+ zeroes an instance of struct drm_plane
+ (possibly as part of a larger structure) and registers it with a call
+ to drm_plane_init. The function takes a bitmask
+ of the CRTCs that can be associated with the plane, a pointer to the
+ plane functions and a list of supported formats.
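+
+ For example, a plane restricted to the first CRTC and supporting two
+ illustrative formats could be registered as follows (in kernels of this
+ vintage drm_plane_init also takes a final argument indicating whether
+ the plane is private).
+
+ static const uint32_t foo_plane_formats[] = {
+         DRM_FORMAT_XRGB8888,
+         DRM_FORMAT_YUYV,
+ };
+
+ ret = drm_plane_init(dev, &priv->plane, 1 << 0 /* first CRTC */,
+                      &foo_plane_funcs, foo_plane_formats,
+                      ARRAY_SIZE(foo_plane_formats), false);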
+
+
+
+ Plane Operations
+
+
+ int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+
+ Enable and configure the plane to use the given CRTC and frame buffer.
+
+
+ The source rectangle in frame buffer memory coordinates is given by
+ the src_x, src_y,
+ src_w and src_h
+ parameters (as 16.16 fixed point values). Devices that don't support
+ subpixel plane coordinates can ignore the fractional part.
+
+
+ The destination rectangle in CRTC coordinates is given by the
+ crtc_x, crtc_y,
+ crtc_w and crtc_h
+ parameters (as integer values). Devices scale the source rectangle to
+ the destination rectangle. If scaling is not supported, and the source
+ rectangle size doesn't match the destination rectangle size, the
+ driver must return a -EINVAL error.
+
+
+
+ int (*disable_plane)(struct drm_plane *plane);
+
+ Disable the plane. The DRM core calls this method in response to a
+ DRM_IOCTL_MODE_SETPLANE ioctl call with the frame buffer ID set to 0.
+ Disabled planes must not be processed by the CRTC.
+
+
+
+ void (*destroy)(struct drm_plane *plane);
+
+ Destroy the plane when not needed anymore. See
+ .
+
+
+
+
+
+
+ Encoders (struct drm_encoder)
+
+ An encoder takes pixel data from a CRTC and converts it to a format
+ suitable for any attached connectors. On some devices, it may be
+ possible to have a CRTC send data to more than one encoder. In that
+ case, both encoders would receive data from the same scanout buffer,
+ resulting in a "cloned" display configuration across the connectors
+ attached to each encoder.
+
+
+ Encoder Initialization
+
+ As for CRTCs, a KMS driver must create, initialize and register at
+ least one struct drm_encoder instance. The
+ instance is allocated and zeroed by the driver, possibly as part of a
+ larger structure.
+
+
+ Drivers must initialize the struct drm_encoder
+ possible_crtcs and
+ possible_clones fields before registering the
+ encoder. Both fields are bitmasks of respectively the CRTCs that the
+ encoder can be connected to, and sibling encoders candidate for cloning.
+
+
+ After being initialized, the encoder must be registered with a call to
+ drm_encoder_init. The function takes a pointer to
+ the encoder functions and an encoder type. Supported types are
+
+
+ DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
+
+
+ DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
+
+
+ DRM_MODE_ENCODER_LVDS for display panels
+
+
+ DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, Component,
+ SCART)
+
+
+ DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
+
+
+
+
+ Encoders must be attached to a CRTC to be used. DRM drivers leave
+ encoders unattached at initialization time. Applications (or the fbdev
+ compatibility layer when implemented) are responsible for attaching the
+ encoders they want to use to a CRTC.
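+
+ For example, a TMDS encoder that can be driven by either of the first
+ two CRTCs and doesn't support cloning could be registered as follows
+ (foo_encoder_funcs is hypothetical).
+
+ encoder->possible_crtcs = 0x3;  /* CRTCs 0 and 1 */
+ encoder->possible_clones = 0;   /* no cloning supported */
+ drm_encoder_init(dev, encoder, &foo_encoder_funcs,
+                  DRM_MODE_ENCODER_TMDS);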
+
+
+
+ Encoder Operations
+
+
+ void (*destroy)(struct drm_encoder *encoder);
+
+ Called to destroy the encoder when not needed anymore. See
+ .
+
+
+
+
+
+
+ Connectors (struct drm_connector)
+
+ A connector is the final destination for pixel data on a device, and
+ usually connects directly to an external display device like a monitor
+ or laptop panel. A connector can only be attached to one encoder at a
+ time. The connector is also the structure where information about the
+ attached display is kept, so it contains fields for display data, EDID
+ data, DPMS & connection status, and information about modes
+ supported on the attached displays.
+
+
+ Connector Initialization
+
+ Finally a KMS driver must create, initialize, register and attach at
+ least one struct drm_connector instance. The
+ instance is created as other KMS objects and initialized by setting the
+ following fields.
+
+
+
+ interlace_allowed
+
+ Whether the connector can handle interlaced modes.
+
+
+
+ doublescan_allowed
+
+ Whether the connector can handle doublescan.
+
+
+
+ display_info
+
+
+ Display information is filled from EDID information when a display
+ is detected. For non hot-pluggable displays such as flat panels in
+ embedded systems, the driver should initialize the
+ display_info.width_mm
+ and
+ display_info.height_mm
+ fields with the physical size of the display.
+
+
+
+ polled
+
+ Connector polling mode, a combination of
+
+
+ DRM_CONNECTOR_POLL_HPD
+
+ The connector generates hotplug events and doesn't need to be
+ periodically polled. The CONNECT and DISCONNECT flags must not
+ be set together with the HPD flag.
+
+
+
+ DRM_CONNECTOR_POLL_CONNECT
+
+ Periodically poll the connector for connection.
+
+
+
+ DRM_CONNECTOR_POLL_DISCONNECT
+
+ Periodically poll the connector for disconnection.
+
+
+
+ Set to 0 for connectors that don't support connection status
+ discovery.
+
+
+
+
+ The connector is then registered with a call to
+ drm_connector_init with a pointer to the connector
+ functions and a connector type, and exposed through sysfs with a call to
+ drm_sysfs_connector_add.
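+
+ For example, an HDMI connector with hotplug detection support could be
+ initialized as follows (foo_connector_funcs is hypothetical).
+
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(dev, connector, &foo_connector_funcs,
+                    DRM_MODE_CONNECTOR_HDMIA);
+ drm_sysfs_connector_add(connector);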
+
+
+ Supported connector types are
+
+ DRM_MODE_CONNECTOR_VGA
+ DRM_MODE_CONNECTOR_DVII
+ DRM_MODE_CONNECTOR_DVID
+ DRM_MODE_CONNECTOR_DVIA
+ DRM_MODE_CONNECTOR_Composite
+ DRM_MODE_CONNECTOR_SVIDEO
+ DRM_MODE_CONNECTOR_LVDS
+ DRM_MODE_CONNECTOR_Component
+ DRM_MODE_CONNECTOR_9PinDIN
+ DRM_MODE_CONNECTOR_DisplayPort
+ DRM_MODE_CONNECTOR_HDMIA
+ DRM_MODE_CONNECTOR_HDMIB
+ DRM_MODE_CONNECTOR_TV
+ DRM_MODE_CONNECTOR_eDP
+ DRM_MODE_CONNECTOR_VIRTUAL
+
+
+
+ Connectors must be attached to an encoder to be used. For devices that
+ map connectors to encoders 1:1, the connector should be attached at
+ initialization time with a call to
+ drm_mode_connector_attach_encoder. The driver must
+ also set the drm_connector
+ encoder field to point to the attached
+ encoder.
+
+
+ Finally, drivers must initialize the connectors' state change detection
+ with a call to drm_kms_helper_poll_init. If at
+ least one connector is pollable but can't generate hotplug interrupts
+ (indicated by the DRM_CONNECTOR_POLL_CONNECT and
+ DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
+ automatically be queued to periodically poll for changes. Connectors
+ that can generate hotplug interrupts must be marked with the
+ DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
+ call drm_helper_hpd_irq_event. The function will
+ queue a delayed work to check the state of all connectors, but no
+ periodic polling will be done.
+
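+
+ As a rough sketch of the sequence described above, a minimal connector
+ initialization for a device that maps connectors to encoders 1:1 could
+ look as follows. All foo_ prefixed names are hypothetical driver
+ symbols, not part of the DRM API.
+
+ static int foo_connector_init(struct drm_device *dev,
+                               struct drm_connector *connector,
+                               struct drm_encoder *encoder)
+ {
+         connector->interlace_allowed = false;
+         connector->doublescan_allowed = false;
+         connector->polled = DRM_CONNECTOR_POLL_CONNECT
+                           | DRM_CONNECTOR_POLL_DISCONNECT;
+
+         /* foo_connector_funcs holds the driver's connector operations. */
+         drm_connector_init(dev, connector, &foo_connector_funcs,
+                            DRM_MODE_CONNECTOR_VGA);
+         drm_sysfs_connector_add(connector);
+
+         /* 1:1 mapping, attach the connector to its encoder immediately. */
+         drm_mode_connector_attach_encoder(connector, encoder);
+         connector->encoder = encoder;
+
+         return 0;
+ }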
+
+
+ Connector Operations
+
+ Unless otherwise stated, all operations are mandatory.
+
+
+ DPMS
+ void (*dpms)(struct drm_connector *connector, int mode);
+
+ The DPMS operation sets the power state of a connector. The mode
+ argument is one of
+
+ DRM_MODE_DPMS_ON
+ DRM_MODE_DPMS_STANDBY
+ DRM_MODE_DPMS_SUSPEND
+ DRM_MODE_DPMS_OFF
+
+
+
+ In all but DPMS_ON mode, the encoder to which the connector is attached
+ should put the display in low-power mode by driving its signals
+ appropriately. If more than one connector is attached to the encoder,
+ care should be taken not to change the power state of other displays as
+ a side effect. Low-power mode should be propagated to the encoders and
+ CRTCs when all related connectors are put in low-power mode.
+
+
+
+ Modes
+ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width,
+ uint32_t max_height);
+
+ Fill the mode list with all supported modes for the connector. If the
+ max_width and max_height
+ arguments are non-zero, the implementation must ignore all modes wider
+ than max_width or higher than
+ max_height.
+
+
+ In this operation the connector must also fill its
+ display_info
+ width_mm and
+ height_mm fields with the physical size of the
+ connected display in millimeters. The fields should be set to 0 if the
+ value isn't known or is not applicable (for instance for projector
+ devices).
+
+
+
+ Connection Status
+
+ The connection status is updated through polling or hotplug events when
+ supported (see ). The status
+ value is reported to userspace through ioctls and must not be used
+ inside the driver, as it only gets initialized by a call to
+ drm_mode_getconnector from userspace.
+
+ enum drm_connector_status (*detect)(struct drm_connector *connector,
+ bool force);
+
+ Check to see if anything is attached to the connector. The
+ force parameter is set to false whilst polling or
+ to true when checking the connector due to user request.
+ force can be used by the driver to avoid
+ expensive, destructive operations during automated probing.
+
+
+ Return connector_status_connected if something is connected to the
+ connector, connector_status_disconnected if nothing is connected and
+ connector_status_unknown if the connection state isn't known.
+
+
+ Drivers should only return connector_status_connected if the connection
+ status has really been probed as connected. Connectors that can't detect
+ the connection status, or failed connection status probes, should return
+ connector_status_unknown.
+
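+
+ As an illustration, a driver whose hardware exposes a cheap hotplug
+ status register could implement this operation along the following
+ lines, where foo_hotplug_status is a hypothetical device-specific
+ helper:
+
+ static enum drm_connector_status
+ foo_connector_detect(struct drm_connector *connector, bool force)
+ {
+         /* Reading a status register is cheap and non-destructive, so
+          * the force parameter can be ignored here.
+          */
+         if (foo_hotplug_status(connector))
+                 return connector_status_connected;
+
+         return connector_status_disconnected;
+ }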
+
+
+ Miscellaneous
+
+
+ void (*destroy)(struct drm_connector *connector);
+
+ Destroy the connector when not needed anymore. See
+ .
+
+
+
+
+
+
+
+ Cleanup
+
+ The DRM core manages its objects' lifetime. When an object is not needed
+ anymore the core calls its destroy function, which must clean up and
+ free every resource allocated for the object. Every
+ drm_*_init call must be matched with a
+ corresponding drm_*_cleanup call to clean up CRTCs
+ (drm_crtc_cleanup), planes
+ (drm_plane_cleanup), encoders
+ (drm_encoder_cleanup) and connectors
+ (drm_connector_cleanup). Furthermore, connectors
+ that have been added to sysfs must be removed by a call to
+ drm_sysfs_connector_remove before calling
+ drm_connector_cleanup.
+
+
+ Connector state change detection must be cleaned up with a call to
+ drm_kms_helper_poll_fini.
+
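+
+ For instance, a connector destroy operation following the rules above
+ could be sketched as follows, assuming the connector was registered
+ with sysfs and allocated dynamically at initialization time:
+
+ static void foo_connector_destroy(struct drm_connector *connector)
+ {
+         drm_sysfs_connector_remove(connector);
+         drm_connector_cleanup(connector);
+         kfree(connector);
+ }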
+
+
+ Output discovery and initialization example
+
-
-
- In the example above (again, taken from the i915 driver), a
- CRT connector and encoder combination is created. A device-specific
- i2c bus is also created for fetching EDID data and
- performing monitor detection. Once the process is complete,
- the new connector is registered with sysfs to make its
- properties available to applications.
-
-
- Helper functions and core functions
-
- Since many PC-class graphics devices have similar display output
- designs, the DRM provides a set of helper functions to make
- output management easier. The core helper routines handle
- encoder re-routing and the disabling of unused functions following
- mode setting. Using the helpers is optional, but recommended for
- devices with PC-style architectures (i.e. a set of display planes
- for feeding pixels to encoders which are in turn routed to
- connectors). Devices with more complex requirements needing
- finer grained management may opt to use the core callbacks
- directly.
-
-
- [Insert typical diagram here.] [Insert OMAP style config here.]
-
-
-
- Each encoder object needs to provide:
-
-
- A DPMS (basically on/off) function.
-
-
- A mode-fixup function (for converting requested modes into
- native hardware timings).
-
-
- Functions (prepare, set, and commit) for use by the core DRM
- helper functions.
-
-
- Connector helpers need to provide functions (mode-fetch, validity,
- and encoder-matching) for returning an ideal encoder for a given
- connector. The core connector functions include a DPMS callback,
- save/restore routines (deprecated), detection, mode probing,
- property handling, and cleanup functions.
-
-
-
-
-
+}]]>
+
+ In the example above (taken from the i915 driver), a CRT connector and
+ encoder combination is created. A device-specific i2c bus is also
+ created for fetching EDID data and performing monitor detection. Once
+ the process is complete, the new connector is registered with sysfs to
+ make its properties available to applications.
+
-
+
- VBlank event handling
+ Mid-layer Helper Functions
- The DRM core exposes two vertical blank related ioctls:
-
-
- DRM_IOCTL_WAIT_VBLANK
-
-
- This takes a struct drm_wait_vblank structure as its argument,
- and it is used to block or request a signal when a specified
- vblank event occurs.
-
-
-
-
- DRM_IOCTL_MODESET_CTL
-
-
- This should be called by application level drivers before and
- after mode setting, since on many devices the vertical blank
- counter is reset at that time. Internally, the DRM snapshots
- the last vblank count when the ioctl is called with the
- _DRM_PRE_MODESET command, so that the counter won't go backwards
- (which is dealt with when _DRM_POST_MODESET is used).
-
-
-
-
-
+ The CRTC, encoder and connector functions provided by the drivers
+ implement the DRM API. They're called by the DRM core and ioctl handlers
+ to handle device state changes and configuration requests. As implementing
+ those functions often requires logic that is not driver-specific, mid-layer
+ helper functions are available to avoid duplicating boilerplate code.
+
+
+ The DRM core contains one mid-layer implementation. The mid-layer provides
+ implementations of several CRTC, encoder and connector functions (called
+ from the top of the mid-layer) that pre-process requests and call
+ lower-level functions provided by the driver (at the bottom of the
+ mid-layer). For instance, the
+ drm_crtc_helper_set_config function can be used to
+ fill the struct drm_crtc_funcs
+ set_config field. When called, it will split
+ the set_config operation into smaller, simpler
+ operations and call the driver to handle them.
- To support the functions above, the DRM core provides several
- helper functions for tracking vertical blank counters, and
- requires drivers to provide several callbacks:
- get_vblank_counter(), enable_vblank() and disable_vblank(). The
- core uses get_vblank_counter() to keep the counter accurate
- across interrupt disable periods. It should return the current
- vertical blank event count, which is often tracked in a device
- register. The enable and disable vblank callbacks should enable
- and disable vertical blank interrupts, respectively. In the
- absence of DRM clients waiting on vblank events, the core DRM
- code uses the disable_vblank() function to disable
- interrupts, which saves power. They are re-enabled again when
- a client calls the vblank wait ioctl above.
+ To use the mid-layer, drivers call drm_crtc_helper_add,
+ drm_encoder_helper_add and
+ drm_connector_helper_add functions to install their
+ mid-layer bottom operation handlers, and fill the
+ drm_crtc_funcs,
+ drm_encoder_funcs and
+ drm_connector_funcs structures with pointers to
+ the mid-layer top API functions. Installing the mid-layer bottom operation
+ handlers is best done right after registering the corresponding KMS object.
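+
+ A minimal sketch of this pattern for a CRTC, with foo_crtc_funcs,
+ foo_crtc_helper_funcs and foo_crtc_destroy standing in for hypothetical
+ driver-provided symbols:
+
+ static const struct drm_crtc_funcs foo_crtc_funcs = {
+         /* Top of the mid-layer, point the API at the helper function. */
+         .set_config = drm_crtc_helper_set_config,
+         .destroy = foo_crtc_destroy,
+ };
+
+ drm_crtc_init(dev, crtc, &foo_crtc_funcs);
+ /* Bottom of the mid-layer, install the driver's helper operations. */
+ drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);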
- A device that doesn't provide a count register may simply use an
- internal atomic counter incremented on every vertical blank
- interrupt (and then treat the enable_vblank() and disable_vblank()
- callbacks as no-ops).
+ The mid-layer is not split between CRTC, encoder and connector operations.
+ To use it, a driver must provide bottom functions for all three KMS
+ entities.
+
+ Helper Functions
+
+
+ int drm_crtc_helper_set_config(struct drm_mode_set *set);
+
+ The drm_crtc_helper_set_config helper function
+ is a CRTC set_config implementation. It
+ first tries to locate the best encoder for each connector by calling
+ the connector best_encoder helper
+ operation.
+
+
+ After locating the appropriate encoders, the helper function will
+ call the mode_fixup encoder and CRTC helper
+ operations to adjust the requested mode, or reject it completely, in
+ which case an error will be returned to the application. If the new
+ configuration after mode adjustment is identical to the current
+ configuration the helper function will return without performing any
+ other operation.
+
+
+ If the adjusted mode is identical to the current mode but changes to
+ the frame buffer need to be applied, the
+ drm_crtc_helper_set_config function will call
+ the CRTC mode_set_base helper operation. If
+ the adjusted mode differs from the current mode, or if the
+ mode_set_base helper operation is not
+ provided, the helper function performs a full mode set sequence by
+ calling the prepare,
+ mode_set and
+ commit CRTC and encoder helper operations,
+ in that order.
+
+
+
+ void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+
+ The drm_helper_connector_dpms helper function
+ is a connector dpms implementation that
+ tracks the power state of connectors. To use the function, drivers must
+ provide dpms helper operations for CRTCs
+ and encoders to apply the DPMS state to the device.
+
+
+ The mid-layer doesn't track the power state of CRTCs and encoders.
+ The dpms helper operations can thus be
+ called with a mode identical to the currently active mode.
+
+
+
+ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY);
+
+ The drm_helper_probe_single_connector_modes helper
+ function is a connector fill_modes
+ implementation that updates the connection status for the connector
+ and then retrieves a list of modes by calling the connector
+ get_modes helper operation.
+
+
+ The function filters out modes larger than
+ max_width and max_height
+ if specified. It then calls the connector
+ mode_valid helper operation for each mode in
+ the probed list to check whether the mode is valid for the connector.
+
+
+
+
+
+ CRTC Helper Operations
+
+
+ bool (*mode_fixup)(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ Let CRTCs adjust the requested mode or reject it completely. This
+ operation returns true if the mode is accepted (possibly after being
+ adjusted) or false if it is rejected.
+
+
+ The mode_fixup operation should reject the
+ mode if it can't reasonably use it. The definition of "reasonable"
+ is currently fuzzy in this context. One possible behaviour would be
+ to set the adjusted mode to the panel timings when a fixed-mode
+ panel is used with hardware capable of scaling. Another behaviour
+ would be to accept any input mode and adjust it to the closest mode
+ supported by the hardware (FIXME: This needs to be clarified).
+
+
+
+ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+
+ Move the CRTC on the current frame buffer (stored in
+ crtc->fb) to position (x,y). Any of the frame
+ buffer, x position or y position may have been modified.
+
+
+ This helper operation is optional. If not provided, the
+ drm_crtc_helper_set_config function will fall
+ back to the mode_set helper operation.
+
+
+ FIXME: Why are x and y passed as arguments, as they can be accessed
+ through crtc->x and
+ crtc->y?
+
+
+
+ void (*prepare)(struct drm_crtc *crtc);
+
+ Prepare the CRTC for mode setting. This operation is called after
+ validating the requested mode. Drivers use it to perform
+ device-specific operations required before setting the new mode.
+
+
+
+ int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+
+ Set a new mode, position and frame buffer. Depending on the device
+ requirements, the mode can be stored internally by the driver and
+ applied in the commit operation, or
+ programmed to the hardware immediately.
+
+
+ The mode_set operation returns 0 on success
+ or a negative error code if an error occurs.
+
+
+
+ void (*commit)(struct drm_crtc *crtc);
+
+ Commit a mode. This operation is called after setting the new mode.
+ Upon return the device must use the new mode and be fully
+ operational.
+
+
+
+
+
+ Encoder Helper Operations
+
+
+ bool (*mode_fixup)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ FIXME: The mode argument should be const, but the i915 driver modifies
+ mode->clock in intel_dp_mode_fixup.
+
+
+ Let encoders adjust the requested mode or reject it completely. This
+ operation returns true if the mode is accepted (possibly after being
+ adjusted) or false if it is rejected. See the
+ mode_fixup CRTC helper
+ operation for an explanation of the allowed adjustments.
+
+
+
+ void (*prepare)(struct drm_encoder *encoder);
+
+ Prepare the encoder for mode setting. This operation is called after
+ validating the requested mode. Drivers use it to perform
+ device-specific operations required before setting the new mode.
+
+
+
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ Set a new mode. Depending on the device requirements, the mode can
+ be stored internally by the driver and applied in the
+ commit operation, or programmed to the
+ hardware immediately.
+
+
+
+ void (*commit)(struct drm_encoder *encoder);
+
+ Commit a mode. This operation is called after setting the new mode.
+ Upon return the device must use the new mode and be fully
+ operational.
+
+
+
+
+
+ Connector Helper Operations
+
+
+ struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+
+ Return a pointer to the best encoder for the connector. Devices that
+ map connectors to encoders 1:1 simply return the pointer to the
+ associated encoder. This operation is mandatory.
+
+
+
+ int (*get_modes)(struct drm_connector *connector);
+
+ Fill the connector's probed_modes list
+ by parsing EDID data with drm_add_edid_modes or
+ calling drm_mode_probed_add directly for every
+ supported mode and return the number of modes it has detected. This
+ operation is mandatory.
+
+
+ When adding modes manually the driver creates each mode with a call to
+ drm_mode_create and must fill the following fields.
+
+
+ __u32 type;
+
+ Mode type bitmask, a combination of
+
+
+ DRM_MODE_TYPE_BUILTIN
+ not used?
+
+
+ DRM_MODE_TYPE_CLOCK_C
+ not used?
+
+
+ DRM_MODE_TYPE_CRTC_C
+ not used?
+
+
+
+ DRM_MODE_TYPE_PREFERRED - The preferred mode for the connector
+ not used?
+
+
+ DRM_MODE_TYPE_DEFAULT
+ not used?
+
+
+ DRM_MODE_TYPE_USERDEF
+ not used?
+
+
+ DRM_MODE_TYPE_DRIVER
+
+
+ The mode has been created by the driver (as opposed
+ to user-created modes).
+
+
+
+
+ Drivers must set the DRM_MODE_TYPE_DRIVER bit for all modes they
+ create, and set the DRM_MODE_TYPE_PREFERRED bit for the preferred
+ mode.
+
+
+
+ __u32 clock;
+ Pixel clock frequency in kHz
+
+
+ __u16 hdisplay, hsync_start, hsync_end, htotal;
+ __u16 vdisplay, vsync_start, vsync_end, vtotal;
+ Horizontal and vertical timing information
+              Active                 Front           Sync           Back
+              Region                 Porch                          Porch
+    <-----------------------><----------------><-------------><-------------->
+
+      //////////////////////|
+     ////////////////////// |
+    //////////////////////  |..................               ................
+                                               _______________
+
+    <----- [hv]display ----->
+    <------------- [hv]sync_start ------------>
+    <--------------------- [hv]sync_end --------------------->
+    <-------------------------------- [hv]total ----------------------------->
+]]>
+
+
+ __u16 hskew;
+ __u16 vscan;
+ Unknown
+
+
+ __u32 flags;
+
+ Mode flags, a combination of
+
+
+ DRM_MODE_FLAG_PHSYNC
+
+ Horizontal sync is active high
+
+
+
+ DRM_MODE_FLAG_NHSYNC
+
+ Horizontal sync is active low
+
+
+
+ DRM_MODE_FLAG_PVSYNC
+
+ Vertical sync is active high
+
+
+
+ DRM_MODE_FLAG_NVSYNC
+
+ Vertical sync is active low
+
+
+
+ DRM_MODE_FLAG_INTERLACE
+
+ Mode is interlaced
+
+
+
+ DRM_MODE_FLAG_DBLSCAN
+
+ Mode uses doublescan
+
+
+
+ DRM_MODE_FLAG_CSYNC
+
+ Mode uses composite sync
+
+
+
+ DRM_MODE_FLAG_PCSYNC
+
+ Composite sync is active high
+
+
+
+ DRM_MODE_FLAG_NCSYNC
+
+ Composite sync is active low
+
+
+
+ DRM_MODE_FLAG_HSKEW
+
+ hskew provided (not used?)
+
+
+
+ DRM_MODE_FLAG_BCAST
+
+ not used?
+
+
+
+ DRM_MODE_FLAG_PIXMUX
+
+ not used?
+
+
+
+ DRM_MODE_FLAG_DBLCLK
+
+ not used?
+
+
+
+ DRM_MODE_FLAG_CLKDIV2
+
+ ?
+
+
+
+
+
+ Note that modes marked with the INTERLACE or DBLSCAN flags will be
+ filtered out by
+ drm_helper_probe_single_connector_modes if
+ the connector's interlace_allowed or
+ doublescan_allowed field is set to 0.
+
+
+
+ char name[DRM_DISPLAY_MODE_LEN];
+
+ Mode name. The driver must call
+ drm_mode_set_name to fill the mode name from
+ hdisplay,
+ vdisplay and the interlace flag after
+ filling the corresponding fields.
+
+
+
+
+
+ The vrefresh value is computed by
+ drm_helper_probe_single_connector_modes.
+
+
+ When parsing EDID data, drm_add_edid_modes fills the
+ connector display_info
+ width_mm and
+ height_mm fields. When creating modes
+ manually the get_modes helper operation must
+ set the display_info
+ width_mm and
+ height_mm fields if they haven't been set
+ already (for instance at initialization time when a fixed-size panel is
+ attached to the connector). The mode width_mm
+ and height_mm fields are only used internally
+ during EDID parsing and should not be set when creating modes manually.
+
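+
+ As an illustration, a get_modes implementation for a fixed-mode panel
+ could look roughly as follows. The timings below are the standard
+ 1024x768 values; the physical size values are placeholders.
+
+ static int foo_connector_get_modes(struct drm_connector *connector)
+ {
+         struct drm_display_mode *mode;
+
+         mode = drm_mode_create(connector->dev);
+         if (mode == NULL)
+                 return 0;
+
+         mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+         mode->clock = 65000;                    /* kHz */
+         mode->hdisplay = 1024;
+         mode->hsync_start = 1048;
+         mode->hsync_end = 1184;
+         mode->htotal = 1344;
+         mode->vdisplay = 768;
+         mode->vsync_start = 771;
+         mode->vsync_end = 777;
+         mode->vtotal = 806;
+         drm_mode_set_name(mode);
+
+         drm_mode_probed_add(connector, mode);
+
+         /* Physical size of the fixed panel, in millimeters. */
+         connector->display_info.width_mm = 304;
+         connector->display_info.height_mm = 228;
+
+         return 1;
+ }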
+
+
+ int (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+
+ Verify whether a mode is valid for the connector. Return MODE_OK for
+ supported modes and one of the enum drm_mode_status values (MODE_*)
+ for unsupported modes. This operation is mandatory.
+
+
+ As the mode rejection reason is currently not used except for
+ immediately removing the unsupported mode, an implementation can
+ return MODE_BAD regardless of the exact reason why the mode is not
+ valid.
+
+
+ Note that the mode_valid helper operation is
+ only called for modes detected by the device, and
+ not for modes set by the user through the CRTC
+ set_config operation.
+
+
+
+
-
- Memory management
+
+
+
+ Vertical Blanking
+
+ Vertical blanking plays a major role in graphics rendering. To achieve
+ tear-free display, users must synchronize page flips and/or rendering to
+ vertical blanking. The DRM API offers ioctls to perform page flips
+ synchronized to vertical blanking and wait for vertical blanking.
+
+
+ The DRM core handles most of the vertical blanking management logic, which
+ involves filtering out spurious interrupts, keeping race-free blanking
+ counters, coping with counter wrap-around and resets, and keeping use
+ counts. It relies on the driver to generate vertical blanking interrupts
+ and optionally provide a hardware vertical blanking counter. Drivers must
+ implement the following operations.
+
+
+
+ int (*enable_vblank) (struct drm_device *dev, int crtc);
+void (*disable_vblank) (struct drm_device *dev, int crtc);
+
+ Enable or disable vertical blanking interrupts for the given CRTC.
+
+
+
+ u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
+
+ Retrieve the value of the vertical blanking counter for the given
+ CRTC. If the hardware maintains a vertical blanking counter, its value
+ should be returned. Otherwise drivers can use the
+ drm_vblank_count helper function to handle this
+ operation.
+
+
+
- The memory manager lies at the heart of many DRM operations; it
- is required to support advanced client features like OpenGL
- pbuffers. The DRM currently contains two memory managers: TTM
- and GEM.
+ Drivers must initialize the vertical blanking handling core with a call to
+ drm_vblank_init in their
+ load operation. The function will set the struct
+ drm_device
+ vblank_disable_allowed field to 0. This will
+ keep vertical blanking interrupts enabled permanently until the first mode
+ set operation, where vblank_disable_allowed is
+ set to 1. The reason behind this is not clear. Drivers can set the field
+ to 1 after calling drm_vblank_init to make vertical
+ blanking interrupts dynamically managed from the beginning.
+
+ Vertical blanking interrupts can be enabled by the DRM core or by drivers
+ themselves (for instance to handle page flipping operations). The DRM core
+ maintains a vertical blanking use count to ensure that the interrupts are
+ not disabled while a user still needs them. To increment the use count,
+ drivers call drm_vblank_get. Upon return vertical
+ blanking interrupts are guaranteed to be enabled.
+
+
+ To decrement the use count drivers call
+ drm_vblank_put. Only when the use count drops to zero
+ will the DRM core disable the vertical blanking interrupts after a delay
+ by scheduling a timer. The delay is accessible through the vblankoffdelay
+ module parameter or the drm_vblank_offdelay global
+ variable and expressed in milliseconds. Its default value is 5000 ms.
+
+
+ When a vertical blanking interrupt occurs drivers only need to call the
+ drm_handle_vblank function to account for the
+ interrupt.
+
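+
+ For instance, the vertical blanking part of an interrupt handler could
+ be sketched as follows, where foo_read_and_clear_irq and FOO_IRQ_VBLANK
+ are hypothetical device-specific names:
+
+ static irqreturn_t foo_irq_handler(int irq, void *arg)
+ {
+         struct drm_device *dev = arg;
+         u32 status = foo_read_and_clear_irq(dev);
+
+         /* Account for the vertical blanking interrupt on CRTC 0. */
+         if (status & FOO_IRQ_VBLANK)
+                 drm_handle_vblank(dev, 0);
+
+         return IRQ_HANDLED;
+ }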
+
+ Resources allocated by drm_vblank_init must be freed
+ with a call to drm_vblank_cleanup in the driver
+ unload operation handler.
+
+
+
+
+
+ Open/Close, File Operations and IOCTLs
- The Translation Table Manager (TTM)
+ Open and Close
+ int (*firstopen) (struct drm_device *);
+void (*lastclose) (struct drm_device *);
+int (*open) (struct drm_device *, struct drm_file *);
+void (*preclose) (struct drm_device *, struct drm_file *);
+void (*postclose) (struct drm_device *, struct drm_file *);
+ Open and close handlers. None of those methods are mandatory.
+
- TTM was developed by Tungsten Graphics, primarily by Thomas
- Hellström, and is intended to be a flexible, high performance
- graphics memory manager.
+ The firstopen method is called by the DRM core
+ when an application opens a device that has no other opened file handle.
+ Similarly the lastclose method is called when
+ the last application holding a file handle opened on the device closes
+ it. Both methods are mostly used for UMS (User Mode Setting) drivers to
+ acquire and release device resources, which should be done in the
+ load and unload
+ methods for KMS drivers.
- Drivers wishing to support TTM must fill out a drm_bo_driver
- structure.
+ Note that the lastclose method is also called
+ at module unload time or, for hot-pluggable devices, when the device is
+ unplugged. The firstopen and
+ lastclose calls can thus be unbalanced.
- TTM design background and information belongs here.
+ The open method is called every time the device
+ is opened by an application. Drivers can allocate per-file private data
+ in this method and store it in the struct
+ drm_file driver_priv
+ field. Note that the open method is called
+ before firstopen.
+
+
+ The close operation is split into preclose and
+ postclose methods. Drivers must stop and
+ clean up all per-file operations in the preclose
+ method. For instance, pending vertical blanking and page flip events must
+ be cancelled. No per-file operation is allowed on the file handle after
+ returning from the preclose method.
+
+
+ Finally the postclose method is called as the
+ last step of the close operation, right before calling the
+ lastclose method if no other open file handle
+ exists for the device. Drivers that have allocated per-file private data
+ in the open method should free it here.
+
+
+ The lastclose method should restore CRTC and
+ plane properties to their default values, so that a subsequent open of the
+ device will not inherit state from the previous user.
-
- The Graphics Execution Manager (GEM)
+ File Operations
+ const struct file_operations *fops
+ File operations for the DRM device node.
- GEM is an Intel project, authored by Eric Anholt and Keith
- Packard. It provides simpler interfaces than TTM, and is well
- suited for UMA devices.
+ Drivers must define the file operations structure that forms the DRM
+ userspace API entry point, even though most of those operations are
+ implemented in the DRM core. The open,
+ release and ioctl
+ operations are handled by
+
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ #ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+ #endif
+
- GEM-enabled drivers must provide gem_init_object() and
- gem_free_object() callbacks to support the core memory
- allocation routines. They should also provide several driver-specific
- ioctls to support command execution, pinning, buffer
- read & write, mapping, and domain ownership transfers.
+ Drivers that implement private ioctls that require 32/64-bit
+ compatibility support must provide their own
+ compat_ioctl handler that processes private
+ ioctls and calls drm_compat_ioctl for core ioctls.
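+
+ A sketch of such a handler, with foo_compat_ioctl_private standing in
+ for the hypothetical driver-specific translation code:
+
+ static long foo_compat_ioctl(struct file *filp, unsigned int cmd,
+                              unsigned long arg)
+ {
+         unsigned int nr = DRM_IOCTL_NR(cmd);
+
+         /* Core ioctls are handled by the DRM core. */
+         if (nr < DRM_COMMAND_BASE)
+                 return drm_compat_ioctl(filp, cmd, arg);
+
+         /* Translate and dispatch the driver-private ioctl. */
+         return foo_compat_ioctl_private(filp, cmd, arg);
+ }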
- On a fundamental level, GEM involves several operations:
-
- Memory allocation and freeing
- Command execution
- Aperture management at command execution time
-
- Buffer object allocation is relatively
- straightforward and largely provided by Linux's shmem layer, which
- provides memory to back each object. When mapped into the GTT
- or used in a command buffer, the backing pages for an object are
- flushed to memory and marked write combined so as to be coherent
- with the GPU. Likewise, if the CPU accesses an object after the GPU
- has finished rendering to the object, then the object must be made
- coherent with the CPU's view
- of memory, usually involving GPU cache flushing of various kinds.
- This core CPU<->GPU coherency management is provided by a
- device-specific ioctl, which evaluates an object's current domain and
- performs any necessary flushing or synchronization to put the object
- into the desired coherency domain (note that the object may be busy,
- i.e. an active render target; in that case, setting the domain
- blocks the client and waits for rendering to complete before
- performing any necessary flushing operations).
-
-
- Perhaps the most important GEM function is providing a command
- execution interface to clients. Client programs construct command
- buffers containing references to previously allocated memory objects,
- and then submit them to GEM. At that point, GEM takes care to bind
- all the objects into the GTT, execute the buffer, and provide
- necessary synchronization between clients accessing the same buffers.
- This often involves evicting some objects from the GTT and re-binding
- others (a fairly expensive operation), and providing relocation
- support which hides fixed GTT offsets from clients. Clients must
- take care not to submit command buffers that reference more objects
- than can fit in the GTT; otherwise, GEM will reject them and no rendering
- will occur. Similarly, if several objects in the buffer require
- fence registers to be allocated for correct rendering (e.g. 2D blits
- on pre-965 chips), care must be taken not to require more fence
- registers than are available to the client. Such resource management
- should be abstracted from the client in libdrm.
+ The read and poll
+ operations provide support for reading DRM events and polling them. They
+ are implemented by
+
+ .poll = drm_poll,
+ .read = drm_read,
+ .fasync = drm_fasync,
+ .llseek = no_llseek,
+
+
+
+ The memory mapping implementation varies depending on how the driver
+ manages memory. Pre-GEM drivers will use drm_mmap,
+ while GEM-aware drivers will use drm_gem_mmap. See
+ .
+
+ .mmap = drm_gem_mmap,
+
+
+
+ No other file operation is supported by the DRM API.
+
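+
+ Putting the fragments above together, a complete file operations
+ structure for a GEM-based driver would look like the following sketch,
+ a pointer to which is then stored in the drm_driver
+ fops field:
+
+ static const struct file_operations foo_driver_fops = {
+         .owner = THIS_MODULE,
+         .open = drm_open,
+         .release = drm_release,
+         .unlocked_ioctl = drm_ioctl,
+ #ifdef CONFIG_COMPAT
+         .compat_ioctl = drm_compat_ioctl,
+ #endif
+         .poll = drm_poll,
+         .read = drm_read,
+         .fasync = drm_fasync,
+         .llseek = no_llseek,
+         .mmap = drm_gem_mmap,
+ };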
+
+
+ IOCTLs
+ struct drm_ioctl_desc *ioctls;
+int num_ioctls;
+ Driver-specific ioctl descriptors table.
+
+ Driver-specific ioctl numbers start at DRM_COMMAND_BASE. The ioctl
+ descriptors table is indexed by the ioctl number offset from the base
+ value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize the
+ table entries.
+
+
+ DRM_IOCTL_DEF_DRV(ioctl, func, flags)
+
+ ioctl is the ioctl name. Drivers must define
+ the DRM_##ioctl and DRM_IOCTL_##ioctl macros to the ioctl number
+ offset from DRM_COMMAND_BASE and the ioctl number respectively. The
+ first macro is private to the device while the second must be exposed
+ to userspace in a public header.
+
+
+ func is a pointer to the ioctl handler function
+ compatible with the drm_ioctl_t type.
+ typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+
+ flags is a bitmask combination of the following
+ values. It restricts how the ioctl is allowed to be called.
+
+
+ DRM_AUTH - Only authenticated callers allowed
+
+
+ DRM_MASTER - The ioctl can only be called on the master file
+ handle
+
+
+ DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed
+
+
+ DRM_CONTROL_ALLOW - The ioctl can only be called on a control
+ device
+
+
+ DRM_UNLOCKED - The ioctl handler will be called without locking
+ the DRM global mutex
+
+
+
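+
+ A minimal sketch of a driver ioctl table, assuming hypothetical
+ DRM_FOO_GET_PARAM and DRM_IOCTL_FOO_GET_PARAM macros defined as
+ described above:
+
+ static struct drm_ioctl_desc foo_ioctls[] = {
+         DRM_IOCTL_DEF_DRV(FOO_GET_PARAM, foo_get_param_ioctl,
+                           DRM_AUTH | DRM_UNLOCKED),
+ };
+
+ The table is then assigned to the ioctls field
+ and its size (for instance ARRAY_SIZE(foo_ioctls)) to the
+ num_ioctls field of the drm_driver structure.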
-
-
-
-
-
- Output management
-
- At the core of the DRM output management code is a set of
- structures representing CRTCs, encoders, and connectors.
-
-
- A CRTC is an abstraction representing a part of the chip that
- contains a pointer to a scanout buffer. Therefore, the number
- of CRTCs available determines how many independent scanout
- buffers can be active at any given time. The CRTC structure
- contains several fields to support this: a pointer to some video
- memory, a display mode, and an (x, y) offset into the video
- memory to support panning or configurations where one piece of
- video memory spans multiple CRTCs.
-
-
- An encoder takes pixel data from a CRTC and converts it to a
- format suitable for any attached connectors. On some devices,
- it may be possible to have a CRTC send data to more than one
- encoder. In that case, both encoders would receive data from
- the same scanout buffer, resulting in a "cloned" display
- configuration across the connectors attached to each encoder.
-
-
- A connector is the final destination for pixel data on a device,
- and usually connects directly to an external display device like
- a monitor or laptop panel. A connector can only be attached to
- one encoder at a time. The connector is also the structure
- where information about the attached display is kept, so it
- contains fields for display data, EDID data, DPMS &
- connection status, and information about modes supported on the
- attached displays.
-
-
-
-
-
- Framebuffer management
-
- Clients need to provide a framebuffer object which provides a source
- of pixels for a CRTC to deliver to the encoder(s) and ultimately the
- connector(s). A framebuffer is fundamentally a driver-specific memory
- object, made into an opaque handle by the DRM's addfb() function.
- Once a framebuffer has been created this way, it may be passed to the
- KMS mode setting routines for use in a completed configuration.
-
@@ -812,15 +2355,24 @@ void intel_crt_init(struct drm_device *dev)
+
+
- Suspend/resume
+ Suspend/Resume
+
+ The DRM core provides some suspend/resume code, but drivers wanting full
+ suspend/resume support should provide save() and restore() functions.
+ These are called at suspend, hibernate, or resume time, and should perform
+ any state save or restore required by your device across suspend or
+ hibernate states.
+
+ int (*suspend) (struct drm_device *, pm_message_t state);
+int (*resume) (struct drm_device *);
- The DRM core provides some suspend/resume code, but drivers
- wanting full suspend/resume support should provide save() and
- restore() functions. These are called at suspend,
- hibernate, or resume time, and should perform any state save or
- restore required by your device across suspend or hibernate
- states.
+ These are legacy suspend and resume methods. New drivers should use the
+ power management interface provided by their bus type (usually through
+ the struct device_driver dev_pm_ops) and set
+ these methods to NULL.
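+
+ A sketch of the recommended approach, with foo_pm_suspend and
+ foo_pm_resume as hypothetical driver functions. The resulting
+ dev_pm_ops structure is assigned to the pm field of the bus-specific
+ driver structure (for instance pci_driver.driver.pm):
+
+ static const struct dev_pm_ops foo_pm_ops = {
+         SET_SYSTEM_SLEEP_PM_OPS(foo_pm_suspend, foo_pm_resume)
+ };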
@@ -833,6 +2385,35 @@ void intel_crt_init(struct drm_device *dev)
+
+
@@ -853,6 +2434,42 @@ void intel_crt_init(struct drm_device *dev)
Cover generic ioctls and sysfs layout here. We only need high-level
info, since man pages should cover the rest.
+
+
+
+
+ VBlank event handling
+
+ The DRM core exposes two vertical blank related ioctls:
+
+
+ DRM_IOCTL_WAIT_VBLANK
+
+
+ This takes a struct drm_wait_vblank structure as its argument,
+ and it is used to block or request a signal when a specified
+ vblank event occurs.
+
+
+
+
+ DRM_IOCTL_MODESET_CTL
+
+
+ This should be called by application level drivers before and
+ after mode setting, since on many devices the vertical blank
+ counter is reset at that time. Internally, the DRM snapshots
+ the last vblank count when the ioctl is called with the
+ _DRM_PRE_MODESET command, so that the counter won't go backwards
+ (which is dealt with when _DRM_POST_MODESET is used).
+
+
+
+
+
+
+
+
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1e0a9e17c31d805a201e8dbcbf7933efe8eec111..f94d4c818fc74dc9a076e8f67fe98d7bc6620a61 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1448,8 +1448,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch,
* most likely via hotkey. */
acpi_bus_generate_proc_event(device, event, 0);
- if (!acpi_notifier_call_chain(device, event, 0))
- keycode = KEY_SWITCHVIDEOMODE;
+ keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video
@@ -1479,8 +1478,9 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
break;
}
- if (event != ACPI_VIDEO_NOTIFY_SWITCH)
- acpi_notifier_call_chain(device, event, 0);
+ if (acpi_notifier_call_chain(device, event, 0))
+ /* Something vetoed the keypress. */
+ keycode = 0;
if (keycode) {
input_report_key(input, keycode, 1);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 58e32f7c322956209bd3a63ce16f5ce16135c59b..e01f5eaaec82a9317724c90337d9d8e45da1bb39 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -84,40 +84,33 @@ static struct _intel_private {
#define IS_IRONLAKE intel_private.driver->is_ironlake
#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
-int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
- struct scatterlist **sg_list, int *num_sg)
+static int intel_gtt_map_memory(struct page **pages,
+ unsigned int num_entries,
+ struct sg_table *st)
{
- struct sg_table st;
struct scatterlist *sg;
int i;
- if (*sg_list)
- return 0; /* already mapped (for e.g. resume */
-
DBG("try mapping %lu pages\n", (unsigned long)num_entries);
- if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
+ if (sg_alloc_table(st, num_entries, GFP_KERNEL))
goto err;
- *sg_list = sg = st.sgl;
-
- for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+ for_each_sg(st->sgl, sg, num_entries, i)
sg_set_page(sg, pages[i], PAGE_SIZE, 0);
- *num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
- num_entries, PCI_DMA_BIDIRECTIONAL);
- if (unlikely(!*num_sg))
+ if (!pci_map_sg(intel_private.pcidev,
+ st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
goto err;
return 0;
err:
- sg_free_table(&st);
+ sg_free_table(st);
return -ENOMEM;
}
-EXPORT_SYMBOL(intel_gtt_map_memory);
-void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
+static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
struct sg_table st;
DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
@@ -130,7 +123,6 @@ void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
sg_free_table(&st);
}
-EXPORT_SYMBOL(intel_gtt_unmap_memory);
static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
@@ -674,9 +666,14 @@ static int intel_gtt_init(void)
gtt_map_size = intel_private.base.gtt_total_entries * 4;
- intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
- gtt_map_size);
- if (!intel_private.gtt) {
+ intel_private.gtt = NULL;
+ if (INTEL_GTT_GEN < 6)
+ intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
+ gtt_map_size);
+ if (intel_private.gtt == NULL)
+ intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+ gtt_map_size);
+ if (intel_private.gtt == NULL) {
intel_private.driver->cleanup();
iounmap(intel_private.registers);
return -ENOMEM;
@@ -879,8 +876,7 @@ static bool i830_check_flags(unsigned int flags)
return false;
}
-void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
- unsigned int sg_len,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags)
{
@@ -892,12 +888,11 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
/* sg may merge pages, but we have to separate
* per-page addr for GTT */
- for_each_sg(sg_list, sg, sg_len, i) {
+ for_each_sg(st->sgl, sg, st->nents, i) {
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- intel_private.driver->write_entry(addr,
- j, flags);
+ intel_private.driver->write_entry(addr, j, flags);
j++;
}
}
@@ -905,8 +900,10 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
-void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
- struct page **pages, unsigned int flags)
+static void intel_gtt_insert_pages(unsigned int first_entry,
+ unsigned int num_entries,
+ struct page **pages,
+ unsigned int flags)
{
int i, j;
@@ -917,7 +914,6 @@ void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
}
readl(intel_private.gtt+j-1);
}
-EXPORT_SYMBOL(intel_gtt_insert_pages);
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
off_t pg_start, int type)
@@ -953,13 +949,15 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
global_cache_flush();
if (intel_private.base.needs_dmar) {
- ret = intel_gtt_map_memory(mem->pages, mem->page_count,
- &mem->sg_list, &mem->num_sg);
+ struct sg_table st;
+
+ ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
if (ret != 0)
return ret;
- intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
- pg_start, type);
+ intel_gtt_insert_sg_entries(&st, pg_start, type);
+ mem->sg_list = st.sgl;
+ mem->num_sg = st.nents;
} else
intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
type);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 90e28081712dbd7c68343f88bd10b468c10f604a..18321b68b88069f3e5588bf23fabd83c268e0359 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -22,7 +22,7 @@ menuconfig DRM
config DRM_USB
tristate
depends on DRM
- depends on USB_ARCH_HAS_HCD
+ depends on USB_SUPPORT && USB_ARCH_HAS_HCD
select USB
config DRM_KMS_HELPER
@@ -54,6 +54,21 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
+config DRM_GEM_CMA_HELPER
+ bool
+ depends on DRM
+ help
+ Choose this if you need the GEM CMA helper functions
+
+config DRM_KMS_CMA_HELPER
+ bool
+ select DRM_GEM_CMA_HELPER
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ help
+ Choose this if you need the KMS CMA helper functions
+
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
@@ -193,3 +208,5 @@ source "drivers/gpu/drm/ast/Kconfig"
source "drivers/gpu/drm/mgag200/Kconfig"
source "drivers/gpu/drm/cirrus/Kconfig"
+
+source "drivers/gpu/drm/shmobile/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f65f65ed0ddfc8542f42ea578978ab5db367ceac..2ff5cefe9eadd106fe1e611c19388d097716ccb8 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,11 +15,13 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_trace_points.o drm_global.o drm_prime.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
+drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-usb-y := drm_usb.o
drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -45,4 +47,5 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index aea439760b60db68af0f9fadec8c2834cfe20ce2..5ccf984f063acf484a365ce9f62caca95fd093c2 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -94,7 +94,6 @@ struct ast_private {
struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
- atomic_t validate_sequence;
} ttm;
struct drm_gem_object *cursor_cache;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a6982b86df9b8a0cc62108d272e2bca43c433b64..7fc9f7272b56e7e9ebe846b650e9e2f18f28cc5f 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -582,7 +582,6 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.mode_set_base = ast_crtc_mode_set_base,
.disable = ast_crtc_disable,
.load_lut = ast_crtc_load_lut,
- .disable = ast_crtc_disable,
.prepare = ast_crtc_prepare,
.commit = ast_crtc_commit,
@@ -737,6 +736,7 @@ static int ast_get_modes(struct drm_connector *connector)
if (edid) {
drm_mode_connector_update_edid_property(&ast_connector->base, edid);
ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
return ret;
} else
drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 7f0d71ffba3fc50ba41b814496fb745a8f310016..6e0cc724e5a23635c8f82fbf72b40b442caf42ef 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -143,7 +143,6 @@ struct cirrus_device {
struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
- atomic_t validate_sequence;
} ttm;
bool mm_inited;
};
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index ec4698246213adf94b6c56130c0cff5a452b0bb8..a575cb2e6bdbe0b40320b1b836fb0780d7186aef 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -37,12 +37,13 @@ drm_clflush_page(struct page *page)
{
uint8_t *page_virtual;
unsigned int i;
+ const int size = boot_cpu_data.x86_clflush_size;
if (unlikely(page == NULL))
return;
page_virtual = kmap_atomic(page);
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ for (i = 0; i < PAGE_SIZE; i += size)
clflush(page_virtual + i);
kunmap_atomic(page_virtual);
}
@@ -99,6 +100,31 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
}
EXPORT_SYMBOL(drm_clflush_pages);
+void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+ if (cpu_has_clflush) {
+ struct scatterlist *sg;
+ int i;
+
+ mb();
+ for_each_sg(st->sgl, sg, st->nents, i)
+ drm_clflush_page(sg_page(sg));
+ mb();
+
+ return;
+ }
+
+ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+ printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+ WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_sg);
+
void
drm_clflush_virt_range(char *addr, unsigned long length)
{
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 271ffa4fdb47f3b197fc86f6c3e973def66b36b4..ef1b22144d3707af9cecddee17af4d25b9b834d1 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -293,6 +293,8 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
{
int ret;
+ kref_init(&fb->refcount);
+
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
if (ret)
return ret;
@@ -306,6 +308,38 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(drm_framebuffer_init);
+static void drm_framebuffer_free(struct kref *kref)
+{
+ struct drm_framebuffer *fb =
+ container_of(kref, struct drm_framebuffer, refcount);
+ fb->funcs->destroy(fb);
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ */
+void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ kref_put(&fb->refcount, drm_framebuffer_free);
+}
+EXPORT_SYMBOL(drm_framebuffer_unreference);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ */
+void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ kref_get(&fb->refcount);
+}
+EXPORT_SYMBOL(drm_framebuffer_reference);
+
/**
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
@@ -317,6 +351,32 @@ EXPORT_SYMBOL(drm_framebuffer_init);
* it, setting it to NULL.
*/
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ /*
+ * This could be moved to drm_framebuffer_remove(), but for
+ * debugging is nice to keep around the list of fb's that are
+ * no longer associated w/ a drm_file but are not unreferenced
+ * yet. (i915 and omapdrm have debugfs files which will show
+ * this.)
+ */
+ drm_mode_object_put(dev, &fb->base);
+ list_del(&fb->head);
+ dev->mode_config.num_fb--;
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config. If they're
+ * using @fb, removes it, setting it to NULL.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
@@ -349,11 +409,11 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
}
}
- drm_mode_object_put(dev, &fb->base);
- list_del(&fb->head);
- dev->mode_config.num_fb--;
+ list_del(&fb->filp_head);
+
+ drm_framebuffer_unreference(fb);
}
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
+EXPORT_SYMBOL(drm_framebuffer_remove);
/**
* drm_crtc_init - Initialise a new CRTC object
@@ -376,6 +436,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
crtc->dev = dev;
crtc->funcs = funcs;
+ crtc->invert_dimensions = false;
mutex_lock(&dev->mode_config.mutex);
@@ -1030,11 +1091,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
}
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- fb->funcs->destroy(fb);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
+ drm_framebuffer_remove(fb);
}
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
@@ -1042,6 +1099,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
plane->funcs->destroy(plane);
}
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
idr_remove_all(&dev->mode_config.crtc_idr);
idr_destroy(&dev->mode_config.crtc_idr);
}
@@ -1851,6 +1912,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
+ int hdisplay, vdisplay;
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
@@ -1886,14 +1948,20 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- if (mode->hdisplay > fb->width ||
- mode->vdisplay > fb->height ||
- crtc_req->x > fb->width - mode->hdisplay ||
- crtc_req->y > fb->height - mode->vdisplay) {
- DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n",
- mode->hdisplay, mode->vdisplay,
- crtc_req->x, crtc_req->y,
- fb->width, fb->height);
+ hdisplay = mode->hdisplay;
+ vdisplay = mode->vdisplay;
+
+ if (crtc->invert_dimensions)
+ swap(hdisplay, vdisplay);
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ crtc_req->x > fb->width - hdisplay ||
+ crtc_req->y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height,
+ hdisplay, vdisplay, crtc_req->x, crtc_req->y,
+ crtc->invert_dimensions ? " (inverted)" : "");
ret = -ENOSPC;
goto out;
}
@@ -2168,6 +2236,8 @@ static int format_check(const struct drm_mode_fb_cmd2 *r)
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
@@ -2334,11 +2404,7 @@ int drm_mode_rmfb(struct drm_device *dev,
goto out;
}
- /* TODO release all crtc connected to the framebuffer */
- /* TODO unhock the destructor from the buffer object */
-
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
+ drm_framebuffer_remove(fb);
out:
mutex_unlock(&dev->mode_config.mutex);
@@ -2488,8 +2554,7 @@ void drm_fb_release(struct drm_file *priv)
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
+ drm_framebuffer_remove(fb);
}
mutex_unlock(&dev->mode_config.mutex);
}
@@ -3488,6 +3553,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
struct drm_framebuffer *fb;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
+ int hdisplay, vdisplay;
int ret = -EINVAL;
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3517,14 +3583,19 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
goto out;
fb = obj_to_fb(obj);
- if (crtc->mode.hdisplay > fb->width ||
- crtc->mode.vdisplay > fb->height ||
- crtc->x > fb->width - crtc->mode.hdisplay ||
- crtc->y > fb->height - crtc->mode.vdisplay) {
- DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n",
- fb->width, fb->height,
- crtc->mode.hdisplay, crtc->mode.vdisplay,
- crtc->x, crtc->y);
+ hdisplay = crtc->mode.hdisplay;
+ vdisplay = crtc->mode.vdisplay;
+
+ if (crtc->invert_dimensions)
+ swap(hdisplay, vdisplay);
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ crtc->x > fb->width - hdisplay ||
+ crtc->y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
+ crtc->invert_dimensions ? " (inverted)" : "");
ret = -ENOSPC;
goto out;
}
@@ -3717,6 +3788,8 @@ int drm_format_num_planes(uint32_t format)
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
return 2;
default:
return 1;
@@ -3750,6 +3823,8 @@ int drm_format_plane_cpp(uint32_t format, int plane)
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
return plane ? 2 : 1;
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index c8fdf03f32c22a897bab0c202dc2cdc1fec55a6e..be174cab105a7ccc27b127f307a07652fdc8678a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -140,10 +140,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
@@ -152,19 +152,19 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a2e54769344a45baea38fafd17de1c78569ea930..5dda07cf709796afb222e66a6de23f3e63beadba 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -161,7 +161,7 @@ MODULE_PARM_DESC(edid_fixup,
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
-bool drm_edid_block_valid(u8 *raw_edid, int block)
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
int i;
u8 csum = 0;
@@ -184,7 +184,9 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
if (csum) {
- DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+ if (print_bad_edid) {
+ DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+ }
/* allow CEA to slide through, switches mangle this */
if (raw_edid[0] != 0x02)
@@ -210,7 +212,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
return 1;
bad:
- if (raw_edid) {
+ if (raw_edid && print_bad_edid) {
printk(KERN_ERR "Raw EDID:\n");
print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
raw_edid, EDID_LENGTH, false);
@@ -234,7 +236,7 @@ bool drm_edid_is_valid(struct edid *edid)
return false;
for (i = 0; i <= edid->extensions; i++)
- if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i))
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
return false;
return true;
@@ -257,6 +259,8 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
int block, int len)
{
unsigned char start = block * EDID_LENGTH;
+ unsigned char segment = block >> 1;
+ unsigned char xfers = segment ? 3 : 2;
int ret, retries = 5;
/* The core i2c driver will automatically retry the transfer if the
@@ -268,6 +272,11 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
do {
struct i2c_msg msgs[] = {
{
+ .addr = DDC_SEGMENT_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &segment,
+ }, {
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
@@ -279,15 +288,21 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
.buf = buf,
}
};
- ret = i2c_transfer(adapter, msgs, 2);
+
+ /*
+ * Avoid sending the segment address so as not to upset
+ * non-compliant DDC monitors.
+ */
+ ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
+
if (ret == -ENXIO) {
DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
adapter->name);
break;
}
- } while (ret != 2 && --retries);
+ } while (ret != xfers && --retries);
- return ret == 2 ? 0 : -1;
+ return ret == xfers ? 0 : -1;
}
static bool drm_edid_is_zero(u8 *in_edid, int length)
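
The extra i2c message above implements E-DDC segment addressing: each 256-byte segment holds two 128-byte EDID blocks, and blocks beyond the first segment are reached by writing a segment pointer to DDC_SEGMENT_ADDR (0x30) before the usual offset write and data read at the EDID address. The arithmetic falls straight out of the block number:

	unsigned char segment = block >> 1;        /* two blocks per segment */
	unsigned char start = block * EDID_LENGTH; /* unsigned char: wraps mod 256 */
	/* block 0 -> seg 0 off 0,  block 1 -> seg 0 off 128,
	 * block 2 -> seg 1 off 0,  block 3 -> seg 1 off 128, ... */
	unsigned char xfers = segment ? 3 : 2;

Starting the transfer at &msgs[3 - xfers] skips the segment-pointer write entirely for segment 0, which keeps non-compliant DDC monitors behaving as before.
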
@@ -306,6 +321,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
+ bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
@@ -314,7 +330,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block, 0))
+ if (drm_edid_block_valid(block, 0, print_bad_edid))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
@@ -339,7 +355,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) {
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
valid_extensions++;
break;
}
@@ -362,8 +378,11 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
return block;
carp:
- dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
- drm_get_connector_name(connector), j);
+ if (print_bad_edid) {
+ dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
+ drm_get_connector_name(connector), j);
+ }
+ connector->bad_edid_counter++;
out:
kfree(block);
@@ -402,10 +421,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
if (drm_probe_ddc(adapter))
edid = (struct edid *)drm_do_get_edid(connector, adapter);
- connector->display_info.raw_edid = (char *)edid;
-
return edid;
-
}
EXPORT_SYMBOL(drm_get_edid);
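
The print_bad_edid flag computed in drm_do_get_edid() above rate-limits the noisy EDID dumps: once a connector has produced one bad EDID (bad_edid_counter != 0), further complaints are suppressed unless KMS debugging is enabled. To see every dump again, turn on the DRM_UT_KMS bit (0x04) of the drm.debug module parameter, e.g.:

	echo 0x04 > /sys/module/drm/parameters/debug
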
@@ -1522,6 +1538,40 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
return modes;
}
+static int
+cea_db_payload_len(const u8 *db)
+{
+ return db[0] & 0x1f;
+}
+
+static int
+cea_db_tag(const u8 *db)
+{
+ return db[0] >> 5;
+}
+
+static int
+cea_revision(const u8 *cea)
+{
+ return cea[1];
+}
+
+static int
+cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+ /* Data block offset in CEA extension block */
+ *start = 4;
+ *end = cea[2];
+ if (*end == 0)
+ *end = 127;
+ if (*end < 4 || *end > 127)
+ return -ERANGE;
+ return 0;
+}
+
+#define for_each_cea_db(cea, i, start, end) \
+ for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
@@ -1529,10 +1579,17 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
u8 * db, dbl;
int modes = 0;
- if (cea && cea[1] >= 3) {
- for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
- dbl = db[0] & 0x1f;
- if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
+ if (cea && cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end))
+ return 0;
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ if (cea_db_tag(db) == VIDEO_BLOCK)
modes += do_cea_modes (connector, db+1, dbl);
}
}
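
The new helpers encode the CEA-861 data block header layout: byte 0 of every block carries the tag code in bits 7:5 and the payload length in bits 4:0, and for_each_cea_db() refuses to step past the end offset even when a length field is corrupt. A worked example:

	u8 header = 0x47;
	cea_db_tag(&header);         /* 0x47 >> 5   = 2, VIDEO_BLOCK */
	cea_db_payload_len(&header); /* 0x47 & 0x1f = 7 payload bytes,
	                              * i.e. seven short video descriptors */
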
@@ -1541,19 +1598,28 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
}
static void
-parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
+parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
{
- connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
-
- connector->dvi_dual = db[6] & 1;
- connector->max_tmds_clock = db[7] * 5;
+ u8 len = cea_db_payload_len(db);
- connector->latency_present[0] = db[8] >> 7;
- connector->latency_present[1] = (db[8] >> 6) & 1;
- connector->video_latency[0] = db[9];
- connector->audio_latency[0] = db[10];
- connector->video_latency[1] = db[11];
- connector->audio_latency[1] = db[12];
+ if (len >= 6) {
+ connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
+ connector->dvi_dual = db[6] & 1;
+ }
+ if (len >= 7)
+ connector->max_tmds_clock = db[7] * 5;
+ if (len >= 8) {
+ connector->latency_present[0] = db[8] >> 7;
+ connector->latency_present[1] = (db[8] >> 6) & 1;
+ }
+ if (len >= 9)
+ connector->video_latency[0] = db[9];
+ if (len >= 10)
+ connector->audio_latency[0] = db[10];
+ if (len >= 11)
+ connector->video_latency[1] = db[11];
+ if (len >= 12)
+ connector->audio_latency[1] = db[12];
DRM_LOG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
@@ -1577,6 +1643,21 @@ monitor_name(struct detailed_timing *t, void *data)
*(u8 **)data = t->data.other_data.data.str.str;
}
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+ int hdmi_id;
+
+ if (cea_db_tag(db) != VENDOR_BLOCK)
+ return false;
+
+ if (cea_db_payload_len(db) < 5)
+ return false;
+
+ hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+ return hdmi_id == HDMI_IDENTIFIER;
+}
+
/**
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
@@ -1623,29 +1704,40 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
eld[18] = edid->prod_code[0];
eld[19] = edid->prod_code[1];
- if (cea[1] >= 3)
- for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
- dbl = db[0] & 0x1f;
-
- switch ((db[0] & 0xe0) >> 5) {
+ if (cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ start = 0;
+ end = 0;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ switch (cea_db_tag(db)) {
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
sad_count = dbl / 3;
- memcpy(eld + 20 + mnl, &db[1], dbl);
+ if (dbl >= 1)
+ memcpy(eld + 20 + mnl, &db[1], dbl);
break;
case SPEAKER_BLOCK:
- /* Speaker Allocation Data Block */
- eld[7] = db[1];
+ /* Speaker Allocation Data Block */
+ if (dbl >= 1)
+ eld[7] = db[1];
break;
case VENDOR_BLOCK:
/* HDMI Vendor-Specific Data Block */
- if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+ if (cea_db_is_hdmi_vsdb(db))
parse_hdmi_vsdb(connector, db);
break;
default:
break;
}
}
+ }
eld[5] |= sad_count << 4;
eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
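
ELD byte 2 holds the baseline block size in 4-byte units, so the expression above rounds the 20 fixed header bytes plus the monitor name plus 3 bytes per SAD up to the next multiple of four. With illustrative values mnl = 8 and sad_count = 1:

	(20 + 8 + 1 * 3 + 3) / 4 = 34 / 4 = 8   /* dwords, i.e. 32 bytes
	                                           for a 31-byte baseline block */
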
@@ -1723,38 +1815,26 @@ EXPORT_SYMBOL(drm_select_eld);
bool drm_detect_hdmi_monitor(struct edid *edid)
{
u8 *edid_ext;
- int i, hdmi_id;
+ int i;
int start_offset, end_offset;
- bool is_hdmi = false;
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
- goto end;
+ return false;
- /* Data block offset in CEA extension block */
- start_offset = 4;
- end_offset = edid_ext[2];
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ return false;
/*
* Because HDMI identifier is in Vendor Specific Block,
* search it from all data blocks of CEA extension.
*/
- for (i = start_offset; i < end_offset;
- /* Increased by data block len */
- i += ((edid_ext[i] & 0x1f) + 1)) {
- /* Find vendor specific block */
- if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
- hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
- edid_ext[i + 3] << 16;
- /* Find HDMI identifier */
- if (hdmi_id == HDMI_IDENTIFIER)
- is_hdmi = true;
- break;
- }
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
+ return true;
}
-end:
- return is_hdmi;
+ return false;
}
EXPORT_SYMBOL(drm_detect_hdmi_monitor);
@@ -1786,15 +1866,13 @@ bool drm_detect_monitor_audio(struct edid *edid)
goto end;
}
- /* Data block offset in CEA extension block */
- start_offset = 4;
- end_offset = edid_ext[2];
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ goto end;
- for (i = start_offset; i < end_offset;
- i += ((edid_ext[i] & 0x1f) + 1)) {
- if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
has_audio = true;
- for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+ for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
DRM_DEBUG_KMS("CEA audio format %d\n",
(edid_ext[i + j] >> 3) & 0xf);
goto end;
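
An audio data block is simply a run of 3-byte Short Audio Descriptors, which is why the code above takes dbl / 3 as the SAD count and walks the payload in steps of three, printing bits 6:3 of each first byte as the format code. Decoding one common SAD by hand (values illustrative):

	/* SAD {0x09, 0x07, 0x07}:
	 * byte 0: (0x09 >> 3) & 0xf = 1 -> LPCM, (0x09 & 0x7) + 1 = 2 channels
	 * byte 1: 0x07 -> 32, 44.1 and 48 kHz sample rates supported
	 * byte 2: 0x07 -> 16, 20 and 24 bit samples (LPCM meaning of byte 2)
	 */
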
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9d53e6503f9a961d1fcbfd2e68d7f723f21bd521..38d3943f72defa65c878bf198fe2ac5742de5ad8 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -114,8 +114,8 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
},
};
-static int edid_load(struct drm_connector *connector, char *name,
- char *connector_name)
+static u8 *edid_load(struct drm_connector *connector, char *name,
+ char *connector_name)
{
const struct firmware *fw;
struct platform_device *pdev;
@@ -123,6 +123,7 @@ static int edid_load(struct drm_connector *connector, char *name,
int fwsize, expected;
int builtin = 0, err = 0;
int i, valid_extensions = 0;
+ bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
if (IS_ERR(pdev)) {
@@ -173,7 +174,8 @@ static int edid_load(struct drm_connector *connector, char *name,
}
memcpy(edid, fwdata, fwsize);
- if (!drm_edid_block_valid(edid, 0)) {
+ if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
+ connector->bad_edid_counter++;
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
name);
kfree(edid);
@@ -185,7 +187,7 @@ static int edid_load(struct drm_connector *connector, char *name,
if (i != valid_extensions + 1)
memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
edid + i * EDID_LENGTH, EDID_LENGTH);
- if (drm_edid_block_valid(edid + i * EDID_LENGTH, i))
+ if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid))
valid_extensions++;
}
@@ -205,7 +207,6 @@ static int edid_load(struct drm_connector *connector, char *name,
edid = new_edid;
}
- connector->display_info.raw_edid = edid;
DRM_INFO("Got %s EDID base block and %d extension%s from "
"\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
"external", valid_extensions, valid_extensions == 1 ? "" : "s",
@@ -215,7 +216,10 @@ relfw_out:
release_firmware(fw);
out:
- return err;
+ if (err)
+ return ERR_PTR(err);
+
+ return edid;
}
int drm_load_edid_firmware(struct drm_connector *connector)
@@ -223,6 +227,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
char *connector_name = drm_get_connector_name(connector);
char *edidname = edid_firmware, *last, *colon;
int ret;
+ struct edid *edid;
if (*edidname == '\0')
return 0;
@@ -240,13 +245,13 @@ int drm_load_edid_firmware(struct drm_connector *connector)
if (*last == '\n')
*last = '\0';
- ret = edid_load(connector, edidname, connector_name);
- if (ret)
+ edid = (struct edid *) edid_load(connector, edidname, connector_name);
+ if (IS_ERR_OR_NULL(edid))
return 0;
- drm_mode_connector_update_edid_property(connector,
- (struct edid *) connector->display_info.raw_edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
- return drm_add_edid_modes(connector, (struct edid *)
- connector->display_info.raw_edid);
+ return ret;
}
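
With edid_load() now returning the buffer, the firmware path updates the connector's EDID property and adds modes directly, then frees the EDID instead of parking it in display_info.raw_edid. The override is still selected with the edid_firmware parameter parsed above; assuming the usual drm_kms_helper module name, something like:

	drm_kms_helper.edid_firmware=edid/1280x1024.bin
	drm_kms_helper.edid_firmware=DVI-I-1:edid/1920x1080.bin

where the optional connector prefix is what the colon handling in drm_load_edid_firmware() strips off.
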
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index fbd354c1f1f472a748f71914a0505099d5121c04..5dbf7d2557b4c0b3218bda3d51245b7a78f2d7fe 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -89,7 +89,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
976, 1088, 0, 480, 486, 494, 517, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@43Hz, interlace */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 772, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@@ -395,7 +395,7 @@ static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
@@ -506,17 +506,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 5 - 1920x1080i@60Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 6 - 1440x480i@60Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 7 - 1440x480i@60Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -531,12 +531,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 10 - 2880x480i@60Hz */
- { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 11 - 2880x480i@60Hz */
- { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@@ -573,17 +573,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 20 - 1920x1080i@50Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 21 - 1440x576i@50Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 22 - 1440x576i@50Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -598,12 +598,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 25 - 2880x576i@50Hz */
- { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 26 - 2880x576i@50Hz */
- { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@@ -656,12 +656,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 39 - 1920x1080i@50Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 40 - 1920x1080i@100Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@@ -688,7 +688,7 @@ static const struct drm_display_mode edid_cea_modes[] = {
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 46 - 1920x1080i@120Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@@ -705,12 +705,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 50 - 1440x480i@120Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 51 - 1440x480i@120Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -723,12 +723,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 54 - 1440x576i@200Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 55 - 1440x576i@200Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -741,12 +741,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 58 - 1440x480i@240 */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 59 - 1440x480i@240 */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
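
Appending "i" to the interlaced entries gives every mode a name distinct from its progressive twin, so anything that identifies modes by name (xrandr listings, or a kernel command line such as the sketch below, where the trailing "i" requests interlace) can tell 1920x1080 from 1920x1080i:

	video=HDMI-A-1:1920x1080i
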
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
new file mode 100644
index 0000000000000000000000000000000000000000..09e11a5d921a4487b273bcdc2c479e4a26f90301
--- /dev/null
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -0,0 +1,406 @@
+/*
+ * drm kms/fb cma (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen
+ *
+ * Based on udl_fbdev.c
+ * Copyright (C) 2012 Red Hat
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+struct drm_fb_cma {
+ struct drm_framebuffer fb;
+ struct drm_gem_cma_object *obj[4];
+};
+
+struct drm_fbdev_cma {
+ struct drm_fb_helper fb_helper;
+ struct drm_fb_cma *fb;
+};
+
+static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
+{
+ return container_of(helper, struct drm_fbdev_cma, fb_helper);
+}
+
+static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
+{
+ return container_of(fb, struct drm_fb_cma, fb);
+}
+
+static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (fb_cma->obj[i])
+ drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
+ }
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb_cma);
+}
+
+static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int *handle)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+ return drm_gem_handle_create(file_priv,
+ &fb_cma->obj[0]->base, handle);
+}
+
+static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
+ .destroy = drm_fb_cma_destroy,
+ .create_handle = drm_fb_cma_create_handle,
+};
+
+static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj,
+ unsigned int num_planes)
+{
+ struct drm_fb_cma *fb_cma;
+ int ret;
+ int i;
+
+ fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
+ if (!fb_cma)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
+ if (ret) {
+ dev_err(dev->dev, "Failed to initalize framebuffer: %d\n", ret);
+ kfree(fb_cma);
+ return ERR_PTR(ret);
+ }
+
+ drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb_cma->obj[i] = obj[i];
+
+ return fb_cma;
+}
+
+/**
+ * drm_fb_cma_create() - (struct drm_mode_config_funcs *)->fb_create callback function
+ *
+ * If your hardware has special alignment or pitch requirements these should be
+ * checked before calling this function.
+ */
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_fb_cma *fb_cma;
+ struct drm_gem_cma_object *objs[4];
+ struct drm_gem_object *obj;
+ unsigned int hsub;
+ unsigned int vsub;
+ int ret;
+ int i;
+
+ hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+
+ for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]);
+ if (!obj) {
+ dev_err(dev->dev, "Failed to lookup GEM object\n");
+ ret = -ENXIO;
+ goto err_gem_object_unreference;
+ }
+
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + mode_cmd->offsets[i];
+
+ if (obj->size < min_size) {
+ drm_gem_object_unreference_unlocked(obj);
+ ret = -EINVAL;
+ goto err_gem_object_unreference;
+ }
+ objs[i] = to_drm_gem_cma_obj(obj);
+ }
+
+ fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
+ if (IS_ERR(fb_cma)) {
+ ret = PTR_ERR(fb_cma);
+ goto err_gem_object_unreference;
+ }
+
+ return &fb_cma->fb;
+
+err_gem_object_unreference:
+ for (i--; i >= 0; i--)
+ drm_gem_object_unreference_unlocked(&objs[i]->base);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_create);
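+
+/*
+ * Usage sketch (placeholder "foo" names, not part of this helper): a driver
+ * typically plugs drm_fb_cma_create() into its mode configuration:
+ *
+ *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
+ *		.fb_create = drm_fb_cma_create,
+ *	};
+ */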
+
+/**
+ * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
+ * @fb: The framebuffer
+ * @plane: Which plane
+ *
+ * Return the CMA GEM object for given framebuffer.
+ *
+ * This function will usually be called from the CRTC callback functions.
+ */
+struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+ if (plane >= 4)
+ return NULL;
+
+ return fb_cma->obj[plane];
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
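+
+/*
+ * Usage sketch: a CRTC callback usually fetches the backing object to
+ * program the scanout address, e.g. (the register interface is made up):
+ *
+ *	struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
+ *	if (obj)
+ *		writel(obj->paddr + fb->offsets[0], priv->regs + FOO_FB_ADDR);
+ */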
+
+static struct fb_ops drm_fbdev_cma_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_device *dev = helper->dev;
+ struct drm_gem_cma_object *obj;
+ struct drm_framebuffer *fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ size_t size;
+ int ret;
+
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ obj = drm_gem_cma_create(dev, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_drm_gem_cma_free_object;
+ }
+
+ fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
+ if (IS_ERR(fbdev_cma->fb)) {
+ dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(fbdev_cma->fb);
+ goto err_framebuffer_release;
+ }
+
+ fb = &fbdev_cma->fb->fb;
+ helper->fb = fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &drm_fbdev_cma_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ dev_err(dev->dev, "Failed to allocate color map.\n");
+ goto err_drm_fb_cma_destroy;
+ }
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * fb->pitches[0];
+
+ dev->mode_config.fb_base = (resource_size_t)obj->paddr;
+ fbi->screen_base = obj->vaddr + offset;
+ fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+ fbi->screen_size = size;
+ fbi->fix.smem_len = size;
+
+ return 0;
+
+err_drm_fb_cma_destroy:
+ drm_fb_cma_destroy(fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+ drm_gem_cma_free_object(&obj->base);
+ return ret;
+}
+
+static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int ret = 0;
+
+ if (!helper->fb) {
+ ret = drm_fbdev_cma_create(helper, sizes);
+ if (ret < 0)
+ return ret;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+ .fb_probe = drm_fbdev_cma_probe,
+};
+
+/**
+ * drm_fbdev_cma_init() - Allocates and initializes a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int num_crtc,
+ unsigned int max_conn_count)
+{
+ struct drm_fbdev_cma *fbdev_cma;
+ struct drm_fb_helper *helper;
+ int ret;
+
+ fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
+ if (!fbdev_cma) {
+ dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
+ helper = &fbdev_cma->fb_helper;
+
+ ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+ goto err_free;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to add connectors.\n");
+ goto err_drm_fb_helper_fini;
+
+ }
+
+ ret = drm_fb_helper_initial_config(helper, preferred_bpp);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to set inital hw configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return fbdev_cma;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(helper);
+err_free:
+ kfree(fbdev_cma);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
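+
+/*
+ * Usage sketch, typically from the driver's load callback once the mode
+ * configuration exists ("priv" is placeholder driver state, 16 a sample bpp):
+ *
+ *	priv->fbdev = drm_fbdev_cma_init(drm, 16,
+ *					 drm->mode_config.num_crtc,
+ *					 drm->mode_config.num_connector);
+ *	if (IS_ERR(priv->fbdev))
+ *		return PTR_ERR(priv->fbdev);
+ */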
+
+/**
+ * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
+ * @fbdev_cma: The drm_fbdev_cma struct
+ */
+void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
+{
+ if (fbdev_cma->fb_helper.fbdev) {
+ struct fb_info *info;
+ int ret;
+
+ info = fbdev_cma->fb_helper.fbdev;
+ ret = unregister_framebuffer(info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ if (fbdev_cma->fb)
+ drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+
+ drm_fb_helper_fini(&fbdev_cma->fb_helper);
+ kfree(fbdev_cma);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
+
+/**
+ * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's lastclose callback.
+ */
+void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
+{
+ if (fbdev_cma)
+ drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
+
+/**
+ * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's output_poll_changed
+ * callback.
+ */
+void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
+{
+ if (fbdev_cma)
+ drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
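
The last two helpers deliberately accept a NULL drm_fbdev_cma so drivers can pass their pointer straight through; a typical hookup, as a sketch with placeholder "foo" names:

	static void foo_lastclose(struct drm_device *drm)
	{
		struct foo_priv *priv = drm->dev_private;

		drm_fbdev_cma_restore_mode(priv->fbdev);
	}

	static void foo_output_poll_changed(struct drm_device *drm)
	{
		struct foo_priv *priv = drm->dev_private;

		drm_fbdev_cma_hotplug_event(priv->fbdev);
	}
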
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index dde5c345e75feabfd87813ea0850fd4cc5922b29..4d58d7e6af3f0255b05cb6538a03de016d0fe08d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -236,7 +236,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
-bool drm_fb_helper_force_kernel_mode(void)
+static bool drm_fb_helper_force_kernel_mode(void)
{
bool ret, error = false;
struct drm_fb_helper *helper;
@@ -330,7 +330,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
/* Walk the connectors & encoders on this fb turning them on/off */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
- drm_helper_connector_dpms(connector, dpms_mode);
+ connector->funcs->dpms(connector, dpms_mode);
drm_connector_property_set_value(connector,
dev->mode_config.dpms_property, dpms_mode);
}
@@ -1230,7 +1230,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
- struct drm_encoder *encoder;
struct drm_mode_set *modeset;
bool *enabled;
int width, height;
@@ -1241,11 +1240,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
- /* clean out all the encoder/crtc combos */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder->crtc = NULL;
- }
-
crtcs = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
new file mode 100644
index 0000000000000000000000000000000000000000..1aa8fee1e865804b73bcf42ebbf4c814befa2fa4
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -0,0 +1,251 @@
+/*
+ * drm gem CMA (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ *
+ * Based on Samsung Exynos code
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem_cma_helper.h>
+
+static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+static void drm_gem_cma_buf_destroy(struct drm_device *drm,
+ struct drm_gem_cma_object *cma_obj)
+{
+ dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
+ cma_obj->paddr);
+}
+
+/*
+ * drm_gem_cma_create - allocate an object with the given size
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+ unsigned int size)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ size = round_up(size, PAGE_SIZE);
+
+ cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ if (!cma_obj)
+ return ERR_PTR(-ENOMEM);
+
+ cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
+ &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
+ if (!cma_obj->vaddr) {
+ dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
+
+ gem_obj = &cma_obj->base;
+
+ ret = drm_gem_object_init(drm, gem_obj, size);
+ if (ret)
+ goto err_obj_init;
+
+ ret = drm_gem_create_mmap_offset(gem_obj);
+ if (ret)
+ goto err_create_mmap_offset;
+
+ return cma_obj;
+
+err_create_mmap_offset:
+ drm_gem_object_release(gem_obj);
+
+err_obj_init:
+ drm_gem_cma_buf_destroy(drm, cma_obj);
+
+err_dma_alloc:
+ kfree(cma_obj);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_create);
+
+/*
+ * drm_gem_cma_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
+ struct drm_file *file_priv,
+ struct drm_device *drm, unsigned int size,
+ unsigned int *handle)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ cma_obj = drm_gem_cma_create(drm, size);
+ if (IS_ERR(cma_obj))
+ return cma_obj;
+
+ gem_obj = &cma_obj->base;
+
+ /*
+ * Allocate an id from the idr table under which the object is
+ * registered; the handle holds the id that userspace sees.
+ */
+ ret = drm_gem_handle_create(file_priv, gem_obj, handle);
+ if (ret)
+ goto err_handle_create;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(gem_obj);
+
+ return cma_obj;
+
+err_handle_create:
+ drm_gem_cma_free_object(gem_obj);
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_gem_cma_object *cma_obj;
+
+ if (gem_obj->map_list.map)
+ drm_gem_free_mmap_offset(gem_obj);
+
+ drm_gem_object_release(gem_obj);
+
+ cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+ drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
+
+ kfree(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+
+/*
+ * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required. Wrap
+ * this into your own function if you need bigger alignment.
+ */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_cma_object *cma_obj;
+ int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+ cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
+ args->size, &args->handle);
+ if (IS_ERR(cma_obj))
+ return PTR_ERR(cma_obj);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
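+
+/*
+ * Example of the rounding above: width = 1024, bpp = 16 gives
+ * min_pitch = DIV_ROUND_UP(1024 * 16, 8) = 2048 bytes, and with
+ * height = 768 the buffer is padded to at least 2048 * 768 bytes.
+ */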
+
+/*
+ * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
+ * function
+ */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *drm, uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *gem_obj;
+
+ mutex_lock(&drm->struct_mutex);
+
+ gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
+ if (!gem_obj) {
+ dev_err(drm->dev, "failed to lookup gem object\n");
+ mutex_unlock(&drm->struct_mutex);
+ return -EINVAL;
+ }
+
+ *offset = get_gem_mmap_offset(gem_obj);
+
+ drm_gem_object_unreference(gem_obj);
+
+ mutex_unlock(&drm->struct_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
+
+const struct vm_operations_struct drm_gem_cma_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
+
+/*
+ * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
+ */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gem_obj;
+ struct drm_gem_cma_object *cma_obj;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ gem_obj = vma->vm_private_data;
+ cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+ ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
+
+/*
+ * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
+ */
+int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *drm, unsigned int handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
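
Taken together, a KMS driver built on the CMA GEM helpers wires them up roughly as follows; a sketch with placeholder "foo" names, not a complete driver:

	static const struct file_operations foo_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,
		.mmap		= drm_gem_cma_mmap,
	};

	static struct drm_driver foo_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_MODESET,
		.gem_free_object	= drm_gem_cma_free_object,
		.gem_vm_ops		= &drm_gem_cma_vm_ops,
		.dumb_create		= drm_gem_cma_dumb_create,
		.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
		.dumb_destroy		= drm_gem_cma_dumb_destroy,
		.fops			= &foo_fops,
	};
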
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 09975ba1a8f79038971f04156f820ec7819eb920..3a3d0ce891b9a12586d2670463f73002a64dad6e 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1236,7 +1236,7 @@ done:
return ret;
}
-void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 85a8fa6e09fe24ad472f6d9ef6fce0627a78a456..23a824e6a22ab8a5a067ba857772bc0d7581f85f 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -62,7 +62,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__) || defined(__arm__)
+#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
tmp = pgprot_noncached(tmp);
#endif
return tmp;
@@ -619,20 +619,11 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
offset = drm_core_get_reg_ofs(dev);
vma->vm_flags |= VM_IO; /* not in core dump */
vma->vm_page_prot = drm_io_prot(map->type, vma);
-#if !defined(__arm__)
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
-#else
- if (remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-#endif
-
DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
" offset = 0x%llx\n",
map->type,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index ad01d3a09c11f06230c1b3bdda97d285c6916318..c2b1b1441ed04df7380971a887e0e040c5df9ed7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -147,9 +147,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
-
- kfree(connector->display_info.raw_edid);
- connector->display_info.raw_edid = edid;
+ kfree(edid);
} else {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
struct exynos_drm_panel_info *panel;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index be879c0793462acfb2cc7e1abab430fb4149cc68..bd4ff6348239ea9d36339745f7001c45153c9405 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -266,8 +266,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
- if (fb && fb->funcs->destroy)
- fb->funcs->destroy(fb);
+ if (fb)
+ drm_framebuffer_remove(fb);
}
/* release linux framebuffer */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 3e933c91101755105ccb9786add9112b4dc14976..8fe431ae537b1a0d155d10de3978611098cbcb6d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -102,7 +102,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
u8 *edid, int len)
{
struct vidi_context *ctx = get_vidi_context(dev);
- struct edid *raw_edid;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -115,18 +114,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
return -EFAULT;
}
- raw_edid = kzalloc(len, GFP_KERNEL);
- if (!raw_edid) {
- DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
- return -ENOMEM;
- }
-
- memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
- * EDID_LENGTH, len));
-
- /* attach the edid data to connector. */
- connector->display_info.raw_edid = (char *)raw_edid;
-
memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
* EDID_LENGTH, len));
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index abfa2a93f0d0326bba0b8d7e14c9c0b69909d56c..7a2d40a5c1e181e9406fdd76c1b6403d11a32a73 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -3,7 +3,7 @@
#
ccflags-y += -I$(srctree)/include/drm
-gma500_gfx-y += gem_glue.o \
+gma500_gfx-y += \
accel_2d.o \
backlight.o \
framebuffer.o \
@@ -30,7 +30,8 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
cdv_intel_crt.o \
cdv_intel_display.o \
cdv_intel_hdmi.o \
- cdv_intel_lvds.o
+ cdv_intel_lvds.o \
+ cdv_intel_dp.o
gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
oaktrail_crtc.o \
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 20793951fcac777134300fbe4b890df06b1c9980..143eba3309c5fc7685a48a9ec56a875580f4f038 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -26,10 +26,55 @@
#include "intel_bios.h"
#include "power.h"
+static void do_gma_backlight_set(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ backlight_update_status(dev_priv->backlight_device);
+#endif
+}
+
+void gma_backlight_enable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_enabled = true;
+ if (dev_priv->backlight_device) {
+ dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
+ do_gma_backlight_set(dev);
+ }
+#endif
+}
+
+void gma_backlight_disable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_enabled = false;
+ if (dev_priv->backlight_device) {
+ dev_priv->backlight_device->props.brightness = 0;
+ do_gma_backlight_set(dev);
+ }
+#endif
+}
+
+void gma_backlight_set(struct drm_device *dev, int v)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_level = v;
+ if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
+ dev_priv->backlight_device->props.brightness = v;
+ do_gma_backlight_set(dev);
+ }
+#endif
+}
+
int gma_backlight_init(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_enabled = true;
return dev_priv->ops->backlight_init(dev);
#else
return 0;
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 7db0e3bf5a5b443ea603e8f9fc903ed85ccd4bee..1ceca3d13b6560561b3da7bd791d4ba8a043539c 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -58,10 +58,17 @@ static int cdv_output_init(struct drm_device *dev)
cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
/* These bits indicate HDMI not SDVO on CDV */
- if (REG_READ(SDVOB) & SDVO_DETECTED)
+ if (REG_READ(SDVOB) & SDVO_DETECTED) {
cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
- if (REG_READ(SDVOC) & SDVO_DETECTED)
+ if (REG_READ(DP_B) & DP_DETECTED)
+ cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B);
+ }
+
+ if (REG_READ(SDVOC) & SDVO_DETECTED) {
cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+ if (REG_READ(DP_C) & DP_DETECTED)
+ cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C);
+ }
return 0;
}
@@ -163,6 +170,7 @@ static int cdv_backlight_init(struct drm_device *dev)
cdv_get_brightness(cdv_backlight_device);
backlight_update_status(cdv_backlight_device);
dev_priv->backlight_device = cdv_backlight_device;
+ dev_priv->backlight_enabled = true;
return 0;
}
@@ -449,6 +457,7 @@ static void cdv_get_core_freq(struct drm_device *dev)
case 6:
case 7:
dev_priv->core_freq = 266;
+ break;
default:
dev_priv->core_freq = 0;
}
@@ -488,6 +497,65 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on)
}
}
+static const char *force_audio_names[] = {
+ "off",
+ "auto",
+ "on",
+};
+
+void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+ int i;
+
+ prop = dev_priv->force_audio_property;
+ if (prop == NULL) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "audio",
+ ARRAY_SIZE(force_audio_names));
+ if (prop == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
+ drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+
+ dev_priv->force_audio_property = prop;
+ }
+ drm_connector_attach_property(connector, prop, 0);
+}
+
+
+static const char *broadcast_rgb_names[] = {
+ "Full",
+ "Limited 16:235",
+};
+
+void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+ int i;
+
+ prop = dev_priv->broadcast_rgb_property;
+ if (prop == NULL) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ ARRAY_SIZE(broadcast_rgb_names));
+ if (prop == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+ drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
+ dev_priv->broadcast_rgb_property = prop;
+ }
+
+ drm_connector_attach_property(connector, prop, 0);
+}
+
/* Cedarview */
static const struct psb_offset cdv_regmap[2] = {
{
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index a68509ba22a8ae705421307e17c6a9c13d08331c..3cfd0931fbfb1ce41013475ef70c62343b8b4cd8 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -57,15 +57,26 @@ struct cdv_intel_clock_t {
struct cdv_intel_limit_t {
struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
struct cdv_intel_p2_t p2;
+ bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
+ int, int, struct cdv_intel_clock_t *);
};
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+ struct drm_crtc *crtc, int target, int refclk,
+ struct cdv_intel_clock_t *best_clock);
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+ int refclk,
+ struct cdv_intel_clock_t *best_clock);
+
#define CDV_LIMIT_SINGLE_LVDS_96 0
#define CDV_LIMIT_SINGLE_LVDS_100 1
#define CDV_LIMIT_DAC_HDMI_27 2
#define CDV_LIMIT_DAC_HDMI_96 3
+#define CDV_LIMIT_DP_27 4
+#define CDV_LIMIT_DP_100 5
static const struct cdv_intel_limit_t cdv_intel_limits[] = {
- { /* CDV_SIGNLE_LVDS_96MHz */
+ { /* CDV_SINGLE_LVDS_96MHz */
.dot = {.min = 20000, .max = 115500},
.vco = {.min = 1800000, .max = 3600000},
.n = {.min = 2, .max = 6},
@@ -76,6 +87,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.p1 = {.min = 2, .max = 10},
.p2 = {.dot_limit = 200000,
.p2_slow = 14, .p2_fast = 14},
+ .find_pll = cdv_intel_find_best_PLL,
},
{ /* CDV_SINGLE_LVDS_100MHz */
.dot = {.min = 20000, .max = 115500},
@@ -90,6 +102,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
* is 80-224MHz. Prefer single channel as much as possible.
*/
.p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+ .find_pll = cdv_intel_find_best_PLL,
},
{ /* CDV_DAC_HDMI_27MHz */
.dot = {.min = 20000, .max = 400000},
@@ -101,6 +114,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.p = {.min = 5, .max = 90},
.p1 = {.min = 1, .max = 9},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ .find_pll = cdv_intel_find_best_PLL,
},
{ /* CDV_DAC_HDMI_96MHz */
.dot = {.min = 20000, .max = 400000},
@@ -112,7 +126,32 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.p = {.min = 5, .max = 100},
.p1 = {.min = 1, .max = 10},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ .find_pll = cdv_intel_find_best_PLL,
+ },
+ { /* CDV_DP_27MHz */
+ .dot = {.min = 160000, .max = 272000},
+ .vco = {.min = 1809000, .max = 3564000},
+ .n = {.min = 1, .max = 1},
+ .m = {.min = 67, .max = 132},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 65, .max = 130},
+ .p = {.min = 5, .max = 90},
+ .p1 = {.min = 1, .max = 9},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+ .find_pll = cdv_intel_find_dp_pll,
},
+ { /* CDV_DP_100MHz */
+ .dot = {.min = 160000, .max = 272000},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 164},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 162},
+ .p = {.min = 5, .max = 100},
+ .p1 = {.min = 1, .max = 10},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+ .find_pll = cdv_intel_find_dp_pll,
+ }
};
#define _wait_for(COND, MS, W) ({ \
@@ -132,7 +171,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
-static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
{
int ret;
@@ -159,7 +198,7 @@ static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
return 0;
}
-static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
{
int ret;
static bool dpio_debug = true;
@@ -201,7 +240,7 @@ static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
/* Reset the DPIO configuration register. The BIOS does this at every
* mode set.
*/
-static void cdv_sb_reset(struct drm_device *dev)
+void cdv_sb_reset(struct drm_device *dev)
{
REG_WRITE(DPIO_CFG, 0);
@@ -216,7 +255,7 @@ static void cdv_sb_reset(struct drm_device *dev)
*/
static int
cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
- struct cdv_intel_clock_t *clock, bool is_lvds)
+ struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select)
{
struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_crtc->pipe;
@@ -259,7 +298,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
ref_value &= ~(REF_CLK_MASK);
/* use DPLL_A for pipeB on CRT/HDMI */
- if (pipe == 1 && !is_lvds) {
+ if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) {
DRM_DEBUG_KMS("use DPLLA for pipe B\n");
ref_value |= REF_CLK_DPLLA;
} else {
@@ -336,30 +375,33 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
if (ret)
return ret;
- lane_reg = PSB_LANE0;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE1;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE2;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE3;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
- cdv_sb_write(dev, lane_reg, lane_value);
-
+ if (ddi_select) {
+ if ((ddi_select & DDI_MASK) == DDI0_SELECT) {
+ lane_reg = PSB_LANE0;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE1;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+ } else {
+ lane_reg = PSB_LANE2;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE3;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+ }
+ }
return 0;
}
@@ -396,6 +438,12 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
else
limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+ } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ if (refclk == 27000)
+ limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
+ else
+ limit = &cdv_intel_limits[CDV_LIMIT_DP_100];
} else {
if (refclk == 27000)
limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
@@ -438,13 +486,12 @@ static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
return true;
}
-static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
- int refclk,
- struct cdv_intel_clock_t *best_clock)
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+ struct drm_crtc *crtc, int target, int refclk,
+ struct cdv_intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct cdv_intel_clock_t clock;
- const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
int err = target;
@@ -498,6 +545,49 @@ static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
return err != target;
}
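+
+/*
+ * For DP/eDP the dividers are not searched: these are fixed,
+ * pre-validated PLL settings for the 27MHz and 100MHz reference
+ * clocks, keyed on whether the target link clock is 162MHz
+ * (target < 200000) or 270MHz.
+ */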
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+ int refclk,
+ struct cdv_intel_clock_t *best_clock)
+{
+ struct cdv_intel_clock_t clock;
+ if (refclk == 27000) {
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 0;
+ clock.m2 = 118;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 0;
+ clock.m2 = 98;
+ }
+ } else if (refclk == 100000) {
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 5;
+ clock.m1 = 0;
+ clock.m2 = 160;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 5;
+ clock.m1 = 0;
+ clock.m2 = 133;
+ }
+ } else
+ return false;
+ clock.m = clock.m2 + 2;
+ clock.p = clock.p1 * clock.p2;
+ clock.vco = (refclk * clock.m) / clock.n;
+ clock.dot = clock.vco / clock.p;
+ memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t));
+ return true;
+}
+
static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
@@ -791,7 +881,7 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
if (psb_intel_crtc->active)
- return;
+ break;
psb_intel_crtc->active = true;
@@ -835,17 +925,15 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(map->status, temp);
REG_READ(map->status);
- cdv_intel_update_watermark(dev, crtc);
cdv_intel_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, true); TODO */
- psb_intel_crtc->crtc_enable = true;
break;
case DRM_MODE_DPMS_OFF:
if (!psb_intel_crtc->active)
- return;
+ break;
psb_intel_crtc->active = false;
@@ -892,10 +980,9 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Wait for the clocks to turn off. */
udelay(150);
- cdv_intel_update_watermark(dev, crtc);
- psb_intel_crtc->crtc_enable = false;
break;
}
+ cdv_intel_update_watermark(dev, crtc);
/*Set FIFO Watermarks*/
REG_WRITE(DSPARB, 0x3F3E);
}
@@ -952,9 +1039,12 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll = 0, dspcntr, pipeconf;
bool ok;
bool is_crt = false, is_lvds = false, is_tv = false;
- bool is_hdmi = false;
+ bool is_hdmi = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ const struct cdv_intel_limit_t *limit;
+ u32 ddi_select = 0;
+ bool is_edp = false;
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct psb_intel_encoder *psb_intel_encoder =
@@ -964,6 +1054,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
|| connector->encoder->crtc != crtc)
continue;
+ ddi_select = psb_intel_encoder->ddi_select;
switch (psb_intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -977,6 +1068,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_HDMI:
is_hdmi = true;
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_edp = true;
+ break;
+ default:
+ DRM_ERROR("invalid output type.\n");
+ return 0;
}
}
@@ -986,6 +1086,20 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
else
/* high-end sku, 27/100 mhz */
refclk = 27000;
+ if (is_dp || is_edp) {
+	/*
+	 * Based on the spec the low-end SKU has only CRT/LVDS, so it need
+	 * not be considered for DP/eDP.
+	 * The high-end SKU uses the 27/100MHz reference clock for DP/eDP:
+	 * 100MHz when the SSC clock is in use, 27MHz otherwise. From the
+	 * VBIOS code it appears that pipe A chooses 27MHz for DP/eDP while
+	 * pipe B chooses 100MHz.
+	 */
+ if (pipe == 0)
+ refclk = 27000;
+ else
+ refclk = 100000;
+ }
if (is_lvds && dev_priv->lvds_use_ssc) {
refclk = dev_priv->lvds_ssc_freq * 1000;
@@ -993,8 +1107,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
drm_mode_debug_printmodeline(adjusted_mode);
+
+ limit = cdv_intel_limit(crtc, refclk);
- ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
&clock);
if (!ok) {
dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
@@ -1009,6 +1125,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* dpll |= PLL_REF_INPUT_DREFCLK; */
+ if (is_dp || is_edp) {
+ cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ } else {
+ REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0);
+ REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0);
+ REG_WRITE(PIPE_DP_LINK_M(pipe), 0);
+ REG_WRITE(PIPE_DP_LINK_N(pipe), 0);
+ }
+
dpll |= DPLL_SYNCLOCK_ENABLE;
/* if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
@@ -1019,6 +1144,31 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
/* setup pipeconf */
pipeconf = REG_READ(map->conf);
+ pipeconf &= ~(PIPE_BPC_MASK);
+ if (is_edp) {
+ switch (dev_priv->edp.bpp) {
+ case 24:
+ pipeconf |= PIPE_8BPC;
+ break;
+ case 18:
+ pipeconf |= PIPE_6BPC;
+ break;
+ case 30:
+ pipeconf |= PIPE_10BPC;
+ break;
+ default:
+ pipeconf |= PIPE_8BPC;
+ break;
+ }
+ } else if (is_lvds) {
+ /* the BPC will be 6 if it is 18-bit LVDS panel */
+ if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ pipeconf |= PIPE_8BPC;
+ else
+ pipeconf |= PIPE_6BPC;
+ } else
+ pipeconf |= PIPE_8BPC;
+
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -1033,7 +1183,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
REG_READ(map->dpll);
- cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds);
+ cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select);
udelay(150);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
new file mode 100644
index 0000000000000000000000000000000000000000..e3a3978cf320b52ca6b0d84ae05b1a120e683088
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -0,0 +1,1950 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include <drm/drm_dp_helper.h>
+
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (! (COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && !in_dbg_master()) msleep(W); \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
+#define DP_LINK_STATUS_SIZE 6
+#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+#define CDV_FAST_LINK_TRAIN 1
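+/* When set, the sideband vswing/pre-emphasis programming is skipped
+ * during link training (see cdv_intel_dp_set_vswing_premph()). */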
+
+struct cdv_intel_dp {
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ int force_audio;
+ uint32_t color_range;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[4];
+ struct psb_intel_encoder *encoder;
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+ uint8_t train_set[4];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct drm_display_mode *panel_fixed_mode; /* for eDP */
+ bool panel_on;
+};
+
+struct ddi_regoff {
+ uint32_t PreEmph1;
+ uint32_t PreEmph2;
+ uint32_t VSwing1;
+ uint32_t VSwing2;
+ uint32_t VSwing3;
+ uint32_t VSwing4;
+ uint32_t VSwing5;
+};
+
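+/* Sideband register offsets for the two DP DDIs: index 0 is DP_B,
+ * index 1 is DP_C. */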
+static struct ddi_regoff ddi_DP_train_table[] = {
+ {.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154,
+ .VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150,
+ .VSwing5 = 0x8158,},
+ {.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254,
+ .VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250,
+ .VSwing5 = 0x8258,},
+};
+
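+/* Paired sideband values: even entries program the voltage swing
+ * (VSwing2), odd entries the pre-emphasis (PreEmph2). */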
+static uint32_t dp_vswing_premph_table[] = {
+ 0x55338954, 0x4000,
+ 0x554d8954, 0x2000,
+ 0x55668954, 0,
+ 0x559ac0d4, 0x6000,
+};
+
+/**
+ * is_edp - is the given encoder attached to an eDP panel
+ * @encoder: GMA encoder struct
+ *
+ * Returns true if the encoder drives an eDP panel, false otherwise.
+ */
+static bool is_edp(struct psb_intel_encoder *encoder)
+{
+ return encoder->type == INTEL_OUTPUT_EDP;
+}
+
+
+static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder);
+
+static int
+cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int max_lane_count = 4;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+ max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
+ }
+ }
+ return max_lane_count;
+}
+
+static int
+cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+ default:
+ max_link_bw = DP_LINK_BW_1_62;
+ break;
+ }
+ return max_link_bw;
+}
+
+static int
+cdv_intel_dp_link_clock(uint8_t link_bw)
+{
+ if (link_bw == DP_LINK_BW_2_7)
+ return 270000;
+ else
+ return 162000;
+}
+
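+/* Bandwidth a mode needs: pixel clock (kHz) times bytes per pixel */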
+static int
+cdv_intel_dp_link_required(int pixel_clock, int bpp)
+{
+ return (pixel_clock * bpp + 7) / 8;
+}
+
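+/* Usable DP bandwidth: treat 95% of the raw link rate as payload */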
+static int
+cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+ return (max_link_clock * max_lanes * 19) / 20;
+}
+
+static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ u32 pp;
+
+ if (intel_dp->panel_on) {
+ DRM_DEBUG_KMS("Skip VDD on because of panel on\n");
+ return;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ pp = REG_READ(PP_CONTROL);
+
+ pp |= EDP_FORCE_VDD;
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+ msleep(intel_dp->panel_power_up_delay);
+}
+
+static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ u32 pp;
+
+ DRM_DEBUG_KMS("\n");
+ pp = REG_READ(PP_CONTROL);
+
+ pp &= ~EDP_FORCE_VDD;
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+
+}
+
+/* Returns true if the panel was already on when called */
+static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE;
+
+ if (intel_dp->panel_on)
+ return true;
+
+ DRM_DEBUG_KMS("\n");
+ pp = REG_READ(PP_CONTROL);
+ pp &= ~PANEL_UNLOCK_MASK;
+
+ pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON);
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+
+ if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask), 1000)) {
+ DRM_DEBUG_KMS("Error in Powering up eDP panel, status %x\n", REG_READ(PP_STATUS));
+ intel_dp->panel_on = false;
+ } else
+ intel_dp->panel_on = true;
+ msleep(intel_dp->panel_power_up_delay);
+
+ return false;
+}
+
+static void cdv_intel_edp_panel_off(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp, idle_off_mask = PP_ON;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+
+ DRM_DEBUG_KMS("\n");
+
+ pp = REG_READ(PP_CONTROL);
+
+ if ((pp & POWER_TARGET_ON) == 0)
+ return;
+
+ intel_dp->panel_on = false;
+ pp &= ~PANEL_UNLOCK_MASK;
+ /* ILK workaround: disable reset around power sequence */
+
+ pp &= ~POWER_TARGET_ON;
+ pp &= ~EDP_FORCE_VDD;
+ pp &= ~EDP_BLC_ENABLE;
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+ DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS));
+
+ if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000)) {
+ DRM_DEBUG_KMS("Error in turning off Panel\n");
+ }
+
+ msleep(intel_dp->panel_power_cycle_delay);
+ DRM_DEBUG_KMS("Over\n");
+}
+
+static void cdv_intel_edp_backlight_on(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ u32 pp;
+
+ DRM_DEBUG_KMS("\n");
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ msleep(300);
+ pp = REG_READ(PP_CONTROL);
+
+ pp |= EDP_BLC_ENABLE;
+ REG_WRITE(PP_CONTROL, pp);
+ gma_backlight_enable(dev);
+}
+
+static void cdv_intel_edp_backlight_off(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ u32 pp;
+
+ DRM_DEBUG_KMS("\n");
+ gma_backlight_disable(dev);
+ msleep(10);
+ pp = REG_READ(PP_CONTROL);
+
+ pp &= ~EDP_BLC_ENABLE;
+ REG_WRITE(PP_CONTROL, pp);
+ msleep(intel_dp->backlight_off_delay);
+}
+
+static int
+cdv_intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
+ int max_lanes = cdv_intel_dp_max_lane_count(encoder);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+ if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ /* only refuse the mode on non eDP since we have seen some weird eDP panels
+ which are outside spec tolerances but somehow work by magic */
+ if (!is_edp(encoder) &&
+ (cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp)
+ > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)))
+ return MODE_CLOCK_HIGH;
+
+ if (is_edp(encoder)) {
+ if (cdv_intel_dp_link_required(mode->clock, 24)
+ > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))
+ return MODE_CLOCK_HIGH;
+
+ }
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
+
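+/* The AUX channel data registers are big-endian: byte 0 lands in the MSB */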
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+ int i;
+ uint32_t v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+ int i;
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3-i) * 8);
+}
+
+static int
+cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint32_t output_reg = intel_dp->output_reg;
+ struct drm_device *dev = encoder->base.dev;
+ uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_data = ch_ctl + 4;
+ int i;
+ int recv_bytes;
+ uint32_t status;
+ uint32_t aux_clock_divider;
+ int try, precharge;
+
+	/* The AUX channel bit clock is derived from hrawclk and should
+	 * run at 2MHz, so divide hrawclk (in MHz) by 2. The CDV platform
+	 * uses a 200MHz hrawclk.
+	 */
+	aux_clock_divider = 200 / 2;
+
+ precharge = 4;
+ if (is_edp(encoder))
+ precharge = 10;
+
+ if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+ DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+ REG_READ(ch_ctl));
+ return -EBUSY;
+ }
+
+	/* The DP spec requires at least 3 retries; we allow up to 5 */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ REG_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ REG_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+ for (;;) {
+ status = REG_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ udelay(100);
+ }
+
+ /* Clear done status and any errors */
+ REG_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+ if (status & DP_AUX_CH_CTL_DONE)
+ break;
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Check for timeout or receive error.
+ * Timeouts occur when the sink is not connected
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+ return -EIO;
+ }
+
+ /* Timeouts occur when the device isn't connected, so they're
+ * "normal" -- don't fill the kernel log with these */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+ return -ETIMEDOUT;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4)
+ unpack_aux(REG_READ(ch_data + i),
+ recv + i, recv_bytes - i);
+
+ return recv_bytes;
+}
+
+/* Write data to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
+ uint16_t address, uint8_t *send, int send_bytes)
+{
+ int ret;
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
+
+ if (send_bytes > 16)
+ return -1;
+ msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = send_bytes - 1;
+ memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+ for (;;) {
+ ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ break;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+ return send_bytes;
+}
+
+/* Write a single byte to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
+ uint16_t address, uint8_t byte)
+{
+ return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder,
+ uint16_t address, uint8_t *recv, int recv_bytes)
+{
+ uint8_t msg[4];
+ int msg_bytes;
+ uint8_t reply[20];
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
+
+ msg[0] = AUX_NATIVE_READ << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = recv_bytes - 1;
+
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+ for (;;) {
+ ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+ return -EPROTO;
+ if (ret < 0)
+ return ret;
+ ack = reply[0];
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ memcpy(recv, reply + 1, ret - 1);
+ return ret - 1;
+ }
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+}
+
+static int
+cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ struct cdv_intel_dp *intel_dp = container_of(adapter,
+ struct cdv_intel_dp,
+ adapter);
+ struct psb_intel_encoder *encoder = intel_dp->encoder;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ unsigned retry;
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
+
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
+ for (retry = 0; retry < 5; retry++) {
+ ret = cdv_intel_dp_aux_ch(encoder,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+ return ret;
+ }
+
+ switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+ case AUX_NATIVE_REPLY_ACK:
+ /* I2C-over-AUX Reply field is only valid
+ * when paired with AUX ACK.
+ */
+ break;
+ case AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return -EREMOTEIO;
+ case AUX_NATIVE_REPLY_DEFER:
+ udelay(100);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+ reply[0]);
+ return -EREMOTEIO;
+ }
+
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_i2c nack\n");
+ return -EREMOTEIO;
+ case AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("aux_i2c defer\n");
+ udelay(100);
+ break;
+ default:
+ DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
+ return -EREMOTEIO;
+ }
+ }
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EREMOTEIO;
+}
+
+static int
+cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int ret;
+
+ DRM_DEBUG_KMS("i2c_init %s\n", name);
+
+ intel_dp->algo.running = false;
+ intel_dp->algo.address = 0;
+ intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch;
+
+ memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
+ intel_dp->adapter.owner = THIS_MODULE;
+ intel_dp->adapter.class = I2C_CLASS_DDC;
+	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+ intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+ intel_dp->adapter.algo_data = &intel_dp->algo;
+ intel_dp->adapter.dev.parent = &connector->base.kdev;
+
+ if (is_edp(encoder))
+ cdv_intel_edp_panel_vdd_on(encoder);
+ ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+ if (is_edp(encoder))
+ cdv_intel_edp_panel_vdd_off(encoder);
+
+ return ret;
+}
+
+void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+
+ adjusted_mode->clock = fixed_mode->clock;
+
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+}
+
+static bool
+cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ int lane_count, clock;
+ int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
+ int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+ int refclock = mode->clock;
+ int bpp = 24;
+
+ if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) {
+ cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
+ refclock = intel_dp->panel_fixed_mode->clock;
+ bpp = dev_priv->edp.bpp;
+ }
+
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = max_clock; clock >= 0; clock--) {
+ int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count);
+
+ if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) {
+ intel_dp->link_bw = bws[clock];
+ intel_dp->lane_count = lane_count;
+ adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+ return true;
+ }
+ }
+ }
+ if (is_edp(intel_encoder)) {
+ /* okay we failed just pick the highest */
+ intel_dp->lane_count = max_lane_count;
+ intel_dp->link_bw = bws[max_clock];
+ adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+
+ return true;
+ }
+ return false;
+}
+
+struct cdv_intel_dp_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+static void
+cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+	/*
+	 * Scale the ratio so that the denominator is a fixed 2^23,
+	 * keeping both values within the 24-bit M/N register fields.
+	 */
+	uint64_t value, m;
+	m = *num;
+	value = m * (0x800000);
+	m = do_div(value, *den);
+	*num = value;
+	*den = 0x800000;
+}
+
+static void
+cdv_intel_dp_compute_m_n(int bpp,
+ int nlanes,
+ int pixel_clock,
+ int link_clock,
+ struct cdv_intel_dp_m_n *m_n)
+{
+ m_n->tu = 64;
+ m_n->gmch_m = (pixel_clock * bpp + 7) >> 3;
+ m_n->gmch_n = link_clock * nlanes;
+ cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
+ cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
+
+void
+cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+ struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ int lane_count = 4, bpp = 24;
+ struct cdv_intel_dp_m_n m_n;
+ int pipe = intel_crtc->pipe;
+
+ /*
+ * Find the lane count in the intel_encoder private
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct psb_intel_encoder *intel_encoder;
+ struct cdv_intel_dp *intel_dp;
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ intel_encoder = to_psb_intel_encoder(encoder);
+ intel_dp = intel_encoder->dev_priv;
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = intel_dp->lane_count;
+ break;
+ } else if (is_edp(intel_encoder)) {
+ lane_count = intel_dp->lane_count;
+ bpp = dev_priv->edp.bpp;
+ break;
+ }
+ }
+
+	/*
+	 * Compute the GMCH and Link ratios. bpp defaults to 24 (8 bits
+	 * each of R/G/B post-LUT); an eDP panel may override it above.
+	 */
+ cdv_intel_dp_compute_m_n(bpp, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
+
+	REG_WRITE(PIPE_GMCH_DATA_M(pipe),
+		  ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+		  m_n.gmch_m);
+	REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+	REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+	REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+}
+
+static void
+cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct drm_crtc *crtc = encoder->crtc;
+ struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ struct drm_device *dev = encoder->dev;
+
+ intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ intel_dp->DP |= intel_dp->color_range;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+ switch (intel_dp->lane_count) {
+ case 1:
+ intel_dp->DP |= DP_PORT_WIDTH_1;
+ break;
+ case 2:
+ intel_dp->DP |= DP_PORT_WIDTH_2;
+ break;
+ case 4:
+ intel_dp->DP |= DP_PORT_WIDTH_4;
+ break;
+ }
+ if (intel_dp->has_audio)
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
+
+	/*
+	 * Check for DPCD version >= 1.1 and enhanced framing support
+	 */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+ }
+
+	/* Route the port to pipe B when the CRTC sits on pipe 1 */
+ if (intel_crtc->pipe == 1)
+ intel_dp->DP |= DP_PIPEB_SELECT;
+
+ REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
+ DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP);
+ if (is_edp(intel_encoder)) {
+ uint32_t pfit_control;
+ cdv_intel_edp_panel_on(intel_encoder);
+
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay)
+ pfit_control = PFIT_ENABLE;
+ else
+ pfit_control = 0;
+
+ pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
+ REG_WRITE(PFIT_CONTROL, pfit_control);
+ }
+}
+
+
+/* If the sink supports it, try to set the power state appropriately */
+static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ if (ret != 1)
+ DRM_DEBUG_DRIVER("failed to write sink power state\n");
+ } else {
+		/*
+		 * When turning on, retry a few times with a 1ms delay to
+		 * give the sink time to wake up.
+		 */
+ for (i = 0; i < 3; i++) {
+ ret = cdv_intel_dp_aux_native_write_1(encoder,
+ DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ udelay(1000);
+ }
+ }
+}
+
+static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
+{
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ int edp = is_edp(intel_encoder);
+
+ if (edp) {
+ cdv_intel_edp_backlight_off(intel_encoder);
+ cdv_intel_edp_panel_off(intel_encoder);
+ cdv_intel_edp_panel_vdd_on(intel_encoder);
+ }
+ /* Wake up the sink first */
+ cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON);
+ cdv_intel_dp_link_down(intel_encoder);
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(intel_encoder);
+}
+
+static void cdv_intel_dp_commit(struct drm_encoder *encoder)
+{
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ int edp = is_edp(intel_encoder);
+
+ if (edp)
+ cdv_intel_edp_panel_on(intel_encoder);
+ cdv_intel_dp_start_link_train(intel_encoder);
+ cdv_intel_dp_complete_link_train(intel_encoder);
+ if (edp)
+ cdv_intel_edp_backlight_on(intel_encoder);
+}
+
+static void
+cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ struct drm_device *dev = encoder->dev;
+ uint32_t dp_reg = REG_READ(intel_dp->output_reg);
+ int edp = is_edp(intel_encoder);
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ if (edp) {
+ cdv_intel_edp_backlight_off(intel_encoder);
+ cdv_intel_edp_panel_vdd_on(intel_encoder);
+ }
+ cdv_intel_dp_sink_dpms(intel_encoder, mode);
+ cdv_intel_dp_link_down(intel_encoder);
+ if (edp) {
+ cdv_intel_edp_panel_vdd_off(intel_encoder);
+ cdv_intel_edp_panel_off(intel_encoder);
+ }
+ } else {
+ if (edp)
+ cdv_intel_edp_panel_on(intel_encoder);
+ cdv_intel_dp_sink_dpms(intel_encoder, mode);
+ if (!(dp_reg & DP_PORT_EN)) {
+ cdv_intel_dp_start_link_train(intel_encoder);
+ cdv_intel_dp_complete_link_train(intel_encoder);
+ }
+ if (edp)
+ cdv_intel_edp_backlight_on(intel_encoder);
+ }
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address,
+ uint8_t *recv, int recv_bytes)
+{
+ int ret, i;
+
+ /*
+ * Sinks are *supposed* to come up within 1ms from an off state,
+ * but we're also supposed to retry 3 times per the spec.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = cdv_intel_dp_aux_native_read(encoder, address, recv,
+ recv_bytes);
+ if (ret == recv_bytes)
+ return true;
+ udelay(1000);
+ }
+
+ return false;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ return cdv_intel_dp_aux_native_read_retry(encoder,
+ DP_LANE0_1_STATUS,
+ intel_dp->link_status,
+ DP_LINK_STATUS_SIZE);
+}
+
+static uint8_t
+cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+
+#if 0
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char *link_train_names[] = {
+ "pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
+/*
+static uint8_t
+cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+}
+*/
+static void
+cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint8_t v = 0;
+ uint8_t p = 0;
+ int lane;
+
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+ uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ if (v >= CDV_DP_VOLTAGE_MAX)
+ v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+ if (p == DP_TRAIN_PRE_EMPHASIS_MASK)
+ p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (lane = 0; lane < 4; lane++)
+ intel_dp->train_set[lane] = v | p;
+}
+
+
+static uint8_t
+cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+ return (l >> s) & 0xf;
+}
+
+/* Check for clock recovery is done on all channels */
+static bool
+cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ int lane;
+ uint8_t lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = cdv_intel_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+/* Check to see if channel eq is done on all channels */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+ DP_LANE_CHANNEL_EQ_DONE|\
+ DP_LANE_SYMBOL_LOCKED)
+static bool
+cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint8_t lane_align;
+ uint8_t lane_status;
+ int lane;
+
+ lane_align = cdv_intel_dp_link_status(intel_dp->link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ lane_status = cdv_intel_get_lane_status(intel_dp->link_status, lane);
+ if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+static bool
+cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
+ uint32_t dp_reg_value,
+ uint8_t dp_train_pat)
+{
+
+ struct drm_device *dev = encoder->base.dev;
+ int ret;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+ REG_WRITE(intel_dp->output_reg, dp_reg_value);
+ REG_READ(intel_dp->output_reg);
+
+ ret = cdv_intel_dp_aux_native_write_1(encoder,
+ DP_TRAINING_PATTERN_SET,
+ dp_train_pat);
+
+ if (ret != 1) {
+ DRM_DEBUG_KMS("Failure in setting link pattern %x\n",
+ dp_train_pat);
+ return false;
+ }
+
+ return true;
+}
+
+
+static bool
+cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
+ uint8_t dp_train_pat)
+{
+
+ int ret;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+ ret = cdv_intel_dp_aux_native_write(encoder,
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+
+ if (ret != intel_dp->lane_count) {
+ DRM_DEBUG_KMS("Failure in setting level %d, lane_cnt= %d\n",
+ intel_dp->train_set[0], intel_dp->lane_count);
+ return false;
+ }
+ return true;
+}
+
+static void
+cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ struct ddi_regoff *ddi_reg;
+ int vswing, premph, index;
+
+ if (intel_dp->output_reg == DP_B)
+ ddi_reg = &ddi_DP_train_table[0];
+ else
+ ddi_reg = &ddi_DP_train_table[1];
+
+ vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK);
+ premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (vswing + premph > 3)
+ return;
+#ifdef CDV_FAST_LINK_TRAIN
+ return;
+#endif
+	DRM_DEBUG_KMS("programming sideband vswing/pre-emphasis\n");
+ cdv_sb_reset(dev);
+ /* ;Swing voltage programming
+ ;gfx_dpio_set_reg(0xc058, 0x0505313A) */
+ cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A);
+
+ /* ;gfx_dpio_set_reg(0x8154, 0x43406055) */
+ cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055);
+
+ /* ;gfx_dpio_set_reg(0x8148, 0x55338954)
+ * The VSwing_PreEmph table is also considered based on the vswing/premp
+ */
+ index = (vswing + premph) * 2;
+ if (premph == 1 && vswing == 1) {
+ cdv_sb_write(dev, ddi_reg->VSwing2, 0x055738954);
+ } else
+ cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);
+
+ /* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
+ if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200)
+ cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
+ else
+ cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
+
+ /* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */
+ /* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */
+
+ /* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */
+ cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055);
+
+ /* ;Pre emphasis programming
+ * ;gfx_dpio_set_reg(0xc02c, 0x1f030040)
+ */
+ cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040);
+
+ /* ;gfx_dpio_set_reg(0x8124, 0x00004000) */
+ index = 2 * premph + 1;
+ cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]);
+ return;
+}
+
+
+/* Enable corresponding port and start training pattern 1 */
+static void
+cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int i;
+ uint8_t voltage;
+ bool clock_recovery = false;
+ int tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+
+ DP |= DP_PORT_EN;
+ DP &= ~DP_LINK_TRAIN_MASK;
+
+ reg = DP;
+ reg |= DP_LINK_TRAIN_PAT_1;
+ /* Enable output, wait for it to become active */
+ REG_WRITE(intel_dp->output_reg, reg);
+ REG_READ(intel_dp->output_reg);
+ psb_intel_wait_for_vblank(dev);
+
+ DRM_DEBUG_KMS("Link config\n");
+ /* Write the link configuration data */
+ cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ 2);
+
+ memset(intel_dp->train_set, 0, 4);
+ voltage = 0;
+ tries = 0;
+ clock_recovery = false;
+
+ DRM_DEBUG_KMS("Start train\n");
+ reg = DP | DP_LINK_TRAIN_PAT_1;
+
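+	/* Loop until clock recovery succeeds on every lane, the maximum
+	 * voltage swing has been tried on all lanes, or the same voltage
+	 * has been retried five times. */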
+ for (;;) {
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+ DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+ intel_dp->train_set[0],
+ intel_dp->link_configuration[0],
+ intel_dp->link_configuration[1]);
+
+ if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) {
+ DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n");
+ }
+ cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+ /* Set training pattern 1 */
+
+ cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_1);
+
+ udelay(200);
+ if (!cdv_intel_dp_get_link_status(encoder))
+ break;
+
+ DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+ intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+ intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+ if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("PT1 train is done\n");
+ clock_recovery = true;
+ break;
+ }
+
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ if (i == intel_dp->lane_count)
+ break;
+
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new intel_dp->train_set as requested by target */
+ cdv_intel_get_adjust_train(encoder);
+
+ }
+
+ if (!clock_recovery) {
+		DRM_DEBUG_KMS("failure in DP pattern 1 training, train set %x\n", intel_dp->train_set[0]);
+ }
+
+ intel_dp->DP = DP;
+}
+
+static void
+cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ bool channel_eq = false;
+ int tries, cr_tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+
+ /* channel equalization */
+ tries = 0;
+ cr_tries = 0;
+ channel_eq = false;
+
+ DRM_DEBUG_KMS("\n");
+ reg = DP | DP_LINK_TRAIN_PAT_2;
+
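+	/* Loop until channel equalization succeeds, restarting clock
+	 * recovery from scratch if it drops out; give up after too many
+	 * restarts (cr_tries). */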
+ for (;;) {
+
+ DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+ intel_dp->train_set[0],
+ intel_dp->link_configuration[0],
+ intel_dp->link_configuration[1]);
+ /* channel eq pattern */
+
+ if (!cdv_intel_dp_set_link_train(encoder, reg,
+ DP_TRAINING_PATTERN_2)) {
+ DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n");
+ }
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+
+ if (cr_tries > 5) {
+ DRM_ERROR("failed to train DP, aborting\n");
+ cdv_intel_dp_link_down(encoder);
+ break;
+ }
+
+ cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+
+ cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_2);
+
+ udelay(1000);
+ if (!cdv_intel_dp_get_link_status(encoder))
+ break;
+
+ DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+ intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+ intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+ /* Make sure clock is still ok */
+ if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ cdv_intel_dp_start_link_train(encoder);
+ cr_tries++;
+ continue;
+ }
+
+ if (cdv_intel_channel_eq_ok(encoder)) {
+ DRM_DEBUG_KMS("PT2 train is done\n");
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times, then try clock recovery if that fails */
+ if (tries > 5) {
+ cdv_intel_dp_link_down(encoder);
+ cdv_intel_dp_start_link_train(encoder);
+ tries = 0;
+ cr_tries++;
+ continue;
+ }
+
+ /* Compute new intel_dp->train_set as requested by target */
+ cdv_intel_get_adjust_train(encoder);
+ ++tries;
+
+ }
+
+ reg = DP | DP_LINK_TRAIN_OFF;
+
+ REG_WRITE(intel_dp->output_reg, reg);
+ REG_READ(intel_dp->output_reg);
+ cdv_intel_dp_aux_native_write_1(encoder,
+ DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint32_t DP = intel_dp->DP;
+
+ if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+	DP &= ~DP_LINK_TRAIN_MASK;
+	REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ REG_READ(intel_dp->output_reg);
+
+ msleep(17);
+
+ REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ REG_READ(intel_dp->output_reg);
+}
+
+static enum drm_connector_status
+cdv_dp_detect(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ enum drm_connector_status status;
+
+ status = connector_status_disconnected;
+	if (cdv_intel_dp_aux_native_read(encoder, 0x000, intel_dp->dpcd,
+					 sizeof(intel_dp->dpcd)) == sizeof(intel_dp->dpcd)) {
+ if (intel_dp->dpcd[DP_DPCD_REV] != 0)
+ status = connector_status_connected;
+ }
+ if (status == connector_status_connected)
+ DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+ intel_dp->dpcd[0], intel_dp->dpcd[1],
+ intel_dp->dpcd[2], intel_dp->dpcd[3]);
+ return status;
+}
+
+/**
+ * cdv_intel_dp_detect - detect a DP/eDP connection
+ *
+ * Detect the connection by reading the DPCD over the AUX channel;
+ * the port is reported connected when a valid DPCD revision is read.
+ */
+static enum drm_connector_status
+cdv_intel_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ enum drm_connector_status status;
+ struct edid *edid = NULL;
+ int edp = is_edp(encoder);
+
+ intel_dp->has_audio = false;
+
+ if (edp)
+ cdv_intel_edp_panel_vdd_on(encoder);
+ status = cdv_dp_detect(encoder);
+ if (status != connector_status_connected) {
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(encoder);
+ return status;
+ }
+
+ if (intel_dp->force_audio) {
+ intel_dp->has_audio = intel_dp->force_audio > 0;
+ } else {
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid);
+ }
+ }
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(encoder);
+
+ return connector_status_connected;
+}
+
+static int cdv_intel_dp_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ struct edid *edid = NULL;
+ int ret = 0;
+ int edp = is_edp(intel_encoder);
+
+
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ if (is_edp(intel_encoder)) {
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ cdv_intel_edp_panel_vdd_off(intel_encoder);
+ if (ret) {
+ if (edp && !intel_dp->panel_fixed_mode) {
+ struct drm_display_mode *newmode;
+ list_for_each_entry(newmode, &connector->probed_modes,
+ head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ intel_dp->panel_fixed_mode =
+ drm_mode_duplicate(dev, newmode);
+ break;
+ }
+ }
+ }
+
+ return ret;
+ }
+ if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+ intel_dp->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (intel_dp->panel_fixed_mode) {
+ intel_dp->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ }
+ }
+ if (intel_dp->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+ }
+
+ return ret;
+}
+
+static bool
+cdv_intel_dp_detect_audio(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ struct edid *edid;
+ bool has_audio = false;
+ int edp = is_edp(encoder);
+
+ if (edp)
+ cdv_intel_edp_panel_vdd_on(encoder);
+
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid);
+ }
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(encoder);
+
+ return has_audio;
+}
+
+static int
+cdv_intel_dp_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_dp->force_audio)
+ return 0;
+
+ intel_dp->force_audio = i;
+
+ if (i == 0)
+ has_audio = cdv_intel_dp_detect_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == intel_dp->has_audio)
+ return 0;
+
+ intel_dp->has_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_dp->color_range)
+ return 0;
+
+ intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (encoder->base.crtc) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->fb);
+ }
+
+ return 0;
+}
+
+static void
+cdv_intel_dp_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
+
+ if (is_edp(psb_intel_encoder)) {
+ /* cdv_intel_panel_destroy_backlight(connector->dev); */
+ if (intel_dp->panel_fixed_mode) {
+ kfree(intel_dp->panel_fixed_mode);
+ intel_dp->panel_fixed_mode = NULL;
+ }
+ }
+ i2c_del_adapter(&intel_dp->adapter);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
+ .dpms = cdv_intel_dp_dpms,
+ .mode_fixup = cdv_intel_dp_mode_fixup,
+ .prepare = cdv_intel_dp_prepare,
+ .mode_set = cdv_intel_dp_mode_set,
+ .commit = cdv_intel_dp_commit,
+};
+
+static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cdv_intel_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = cdv_intel_dp_set_property,
+ .destroy = cdv_intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
+ .get_modes = cdv_intel_dp_get_modes,
+ .mode_valid = cdv_intel_dp_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
+ .destroy = cdv_intel_dp_encoder_destroy,
+};
+
+
+static void cdv_intel_dp_add_properties(struct drm_connector *connector)
+{
+ cdv_intel_attach_force_audio_property(connector);
+ cdv_intel_attach_broadcast_rgb_property(connector);
+}
+
+/* check the VBT to see whether the eDP is on the DP-C port */
+static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+
+ if (p_child->dvo_port == PORT_IDPC &&
+ p_child->device_type == DEVICE_TYPE_eDP)
+ return true;
+ }
+ return false;
+}
+
+/* Cedarview display clock gating
+
+   We need to disable this to get correct behaviour while enabling
+   DP/eDP. TODO - investigate whether we can turn it back to normality
+   after enabling */
+static void cdv_disable_intel_clock_gating(struct drm_device *dev)
+{
+ u32 reg_value;
+ reg_value = REG_READ(DSPCLK_GATE_D);
+
+ reg_value |= (DPUNIT_PIPEB_GATE_DISABLE |
+ DPUNIT_PIPEA_GATE_DISABLE |
+ DPCUNIT_CLOCK_GATE_DISABLE |
+ DPLSUNIT_CLOCK_GATE_DISABLE |
+ DPOUNIT_CLOCK_GATE_DISABLE |
+ DPIOUNIT_CLOCK_GATE_DISABLE);
+
+ REG_WRITE(DSPCLK_GATE_D, reg_value);
+
+ udelay(500);
+}
+
+void
+cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct cdv_intel_dp *intel_dp;
+ const char *name = NULL;
+ int type = DRM_MODE_CONNECTOR_DisplayPort;
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+ if (!psb_intel_encoder)
+ return;
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto err_connector;
+ intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
+ if (!intel_dp)
+ goto err_priv;
+
+ if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
+ type = DRM_MODE_CONNECTOR_eDP;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+
+ drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
+ drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder);
+
+ if (type == DRM_MODE_CONNECTOR_DisplayPort)
+ psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ else
+ psb_intel_encoder->type = INTEL_OUTPUT_EDP;
+
+	psb_intel_encoder->dev_priv = intel_dp;
+ intel_dp->encoder = psb_intel_encoder;
+ intel_dp->output_reg = output_reg;
+
+ drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
+ drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_sysfs_connector_add(connector);
+
+ /* Set up the DDC bus. */
+ switch (output_reg) {
+ case DP_B:
+ name = "DPDDC-B";
+ psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
+ break;
+ case DP_C:
+ name = "DPDDC-C";
+ psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
+ break;
+ }
+
+ cdv_disable_intel_clock_gating(dev);
+
+ cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name);
+ /* FIXME: check for failure */
+ cdv_intel_dp_add_properties(connector);
+
+ if (is_edp(psb_intel_encoder)) {
+ int ret;
+ struct edp_power_seq cur;
+ u32 pp_on, pp_off, pp_div;
+ u32 pwm_ctrl;
+
+ pp_on = REG_READ(PP_CONTROL);
+ pp_on &= ~PANEL_UNLOCK_MASK;
+ pp_on |= PANEL_UNLOCK_REGS;
+
+ REG_WRITE(PP_CONTROL, pp_on);
+
+ pwm_ctrl = REG_READ(BLC_PWM_CTL2);
+ pwm_ctrl |= PWM_PIPE_B;
+ REG_WRITE(BLC_PWM_CTL2, pwm_ctrl);
+
+ pp_on = REG_READ(PP_ON_DELAYS);
+ pp_off = REG_READ(PP_OFF_DELAYS);
+ pp_div = REG_READ(PP_DIVISOR);
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
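+ /* Delay fields count 100us units, so /10 yields ms; the power cycle field counts 100ms units */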
+ intel_dp->panel_power_up_delay = cur.t1_t3 / 10;
+ intel_dp->backlight_on_delay = cur.t8 / 10;
+ intel_dp->backlight_off_delay = cur.t9 / 10;
+ intel_dp->panel_power_down_delay = cur.t10 / 10;
+ intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100;
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ cdv_intel_edp_panel_vdd_on(psb_intel_encoder);
+ ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV,
+ intel_dp->dpcd,
+ sizeof(intel_dp->dpcd));
+ cdv_intel_edp_panel_vdd_off(psb_intel_encoder);
+ if (ret == 0) {
+ /* if this fails, presume the device is a ghost */
+ DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ cdv_intel_dp_encoder_destroy(encoder);
+ cdv_intel_dp_destroy(connector);
+ goto err_priv;
+ } else {
+ DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+ intel_dp->dpcd[0], intel_dp->dpcd[1],
+ intel_dp->dpcd[2], intel_dp->dpcd[3]);
+
+ }
+ /* The CDV reference driver moves panel backlight setup into the displays
+    that have a backlight: this is a good idea and one we should probably
+    adopt, but we need to migrate all the drivers before we can do that */
+ /*cdv_intel_panel_setup_backlight(dev); */
+ }
+ return;
+
+err_priv:
+ kfree(psb_intel_connector);
+err_connector:
+ kfree(psb_intel_encoder);
+}
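
To make the delay decoding in cdv_intel_dp_init() concrete, here is a minimal stand-alone sketch of the PP_ON_DELAYS arithmetic. The mask and shift mirror the PANEL_POWER_UP_DELAY_* definitions used above; the register value is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define PANEL_POWER_UP_DELAY_MASK	0x1fff0000
#define PANEL_POWER_UP_DELAY_SHIFT	16

int main(void)
{
	/* Hypothetical PP_ON_DELAYS readout with t1_t3 = 400 */
	uint32_t pp_on = 400u << PANEL_POWER_UP_DELAY_SHIFT;
	uint32_t t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
			 PANEL_POWER_UP_DELAY_SHIFT;

	/* The field counts 100us units, so /10 yields milliseconds */
	printf("panel power up delay: %u ms\n", (unsigned)(t1_t3 / 10));
	return 0;
}

With t1_t3 = 400 this prints 40 ms, matching the /10 conversion performed on the cur.* values above.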
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index a86f87b9ddde8cc1595c3980263a1261c8aefc4b..7272a461edfe8f91fb0686db7118024efbac4e55 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -139,8 +139,6 @@ static enum drm_connector_status cdv_hdmi_detect(
{
struct psb_intel_encoder *psb_intel_encoder =
psb_intel_attached_encoder(connector);
- struct psb_intel_connector *psb_intel_connector =
- to_psb_intel_connector(connector);
struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
@@ -157,8 +155,6 @@ static enum drm_connector_status cdv_hdmi_detect(
hdmi_priv->has_hdmi_audio =
drm_detect_monitor_audio(edid);
}
-
- psb_intel_connector->base.display_info.raw_edid = NULL;
kfree(edid);
}
return status;
@@ -352,9 +348,11 @@ void cdv_hdmi_init(struct drm_device *dev,
switch (reg) {
case SDVOB:
ddc_bus = GPIOE;
+ psb_intel_encoder->ddi_select = DDI0_SELECT;
break;
case SDVOC:
ddc_bus = GPIOD;
+ psb_intel_encoder->ddi_select = DDI1_SELECT;
break;
default:
DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index c7f9468b74ba8d5fab7b5cc6d1066dca10a97d8f..b362dd39bf5aab2462a15ce9872e8744263a1ab7 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -506,16 +506,8 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
property,
value))
return -1;
- else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv =
- encoder->dev->dev_private;
- struct backlight_device *bd =
- dev_priv->backlight_device;
- bd->props.brightness = value;
- backlight_update_status(bd);
-#endif
- }
+ else
+ gma_backlight_set(encoder->dev, value);
} else if (!strcmp(property->name, "DPMS") && encoder) {
struct drm_encoder_helper_funcs *helpers =
encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 5732b5702e1cef5f002ac707fcb34302c33f53de..884ba73ac6ce0f42dbafd82799110cd2fdfcd15f 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -764,6 +764,13 @@ static void psb_setup_outputs(struct drm_device *dev)
crtc_mask = dev_priv->ops->hdmi_mask;
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ crtc_mask = (1 << 0) | (1 << 1);
+ clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+ break;
+ case INTEL_OUTPUT_EDP:
+ crtc_mask = (1 << 1);
+ clone_mask = (1 << INTEL_OUTPUT_EDP);
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones =
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index f3a1ae8eb77babb19465a88d40dcaf1b682294eb..eefd6cc5b80d3c70abf84bc635fd5b668f504959 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -36,7 +36,12 @@ int psb_gem_init_object(struct drm_gem_object *obj)
void psb_gem_free_object(struct drm_gem_object *obj)
{
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
- drm_gem_object_release_wrap(obj);
+
+ /* Remove the list map if one is present */
+ if (obj->map_list.map)
+ drm_gem_free_mmap_offset(obj);
+ drm_gem_object_release(obj);
+
/* This must occur last as it frees up the memory of the GEM object */
psb_gtt_free_range(obj->dev, gtt);
}
@@ -77,7 +82,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
/* Make it mmapable */
if (!obj->map_list.map) {
- ret = gem_create_mmap_offset(obj);
+ ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
deleted file mode 100644
index 3c17634f60611b79f8ff37720300f33eaca79d59..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/gem_glue.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/**************************************************************************
- * Copyright (c) 2011, Intel Corporation.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- **************************************************************************/
-
-#include
-#include
-#include "gem_glue.h"
-
-void drm_gem_object_release_wrap(struct drm_gem_object *obj)
-{
- /* Remove the list map if one is present */
- if (obj->map_list.map) {
- struct drm_gem_mm *mm = obj->dev->mm_private;
- struct drm_map_list *list = &obj->map_list;
- drm_ht_remove_item(&mm->offset_hash, &list->hash);
- drm_mm_put_block(list->file_offset_node);
- kfree(list->map);
- list->map = NULL;
- }
- drm_gem_object_release(obj);
-}
-
-/**
- * gem_create_mmap_offset - invent an mmap offset
- * @obj: our object
- *
- * Standard implementation of offset generation for mmap as is
- * duplicated in several drivers. This belongs in GEM.
- */
-int gem_create_mmap_offset(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
- int ret;
-
- list = &obj->map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (list->map == NULL)
- return -ENOMEM;
- map = list->map;
- map->type = _DRM_GEM;
- map->size = obj->size;
- map->handle = obj;
-
- list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, 0);
- if (!list->file_offset_node) {
- dev_err(dev->dev, "failed to allocate offset for bo %d\n",
- obj->name);
- ret = -ENOSPC;
- goto free_it;
- }
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->size / PAGE_SIZE, 0);
- if (!list->file_offset_node) {
- ret = -ENOMEM;
- goto free_it;
- }
- list->hash.key = list->file_offset_node->start;
- ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
- if (ret) {
- dev_err(dev->dev, "failed to add to map hash\n");
- goto free_mm;
- }
- return 0;
-
-free_mm:
- drm_mm_put_block(list->file_offset_node);
-free_it:
- kfree(list->map);
- list->map = NULL;
- return ret;
-}
diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
deleted file mode 100644
index ce5ce30f74dbff75ea343c0936795311d8ae40fe..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/gma500/gem_glue.h
+++ /dev/null
@@ -1,2 +0,0 @@
-extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
-extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index a837ee97787c2631363c7d383ad7d24a014ef162..403fffb03abd5c7d9594a92c0569206ea5d4edb8 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -54,6 +54,98 @@ static void *find_section(struct bdb_header *bdb, int section_id)
return NULL;
}
+static void
+parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_edp *edp;
+ struct edp_power_seq *edp_pps;
+ struct edp_link_params *edp_link_params;
+ uint8_t panel_type;
+
+ edp = find_section(bdb, BDB_EDP);
+
+ dev_priv->edp.bpp = 18;
+ if (!edp) {
+ if (dev_priv->edp.support) {
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n",
+ dev_priv->edp.bpp);
+ }
+ return;
+ }
+
+ panel_type = dev_priv->panel_type;
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+ dev_priv->edp.bpp = 18;
+ break;
+ case EDP_24BPP:
+ dev_priv->edp.bpp = 24;
+ break;
+ case EDP_30BPP:
+ dev_priv->edp.bpp = 30;
+ break;
+ }
+
+ /* Get the eDP sequencing and link info */
+ edp_pps = &edp->power_seqs[panel_type];
+ edp_link_params = &edp->link_params[panel_type];
+
+ dev_priv->edp.pps = *edp_pps;
+
+ DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
+ dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
+ dev_priv->edp.pps.t11_t12);
+
+ dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+ DP_LINK_BW_1_62;
+ switch (edp_link_params->lanes) {
+ case 0:
+ dev_priv->edp.lanes = 1;
+ break;
+ case 1:
+ dev_priv->edp.lanes = 2;
+ break;
+ case 3:
+ default:
+ dev_priv->edp.lanes = 4;
+ break;
+ }
+ DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n",
+ dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp);
+
+ switch (edp_link_params->preemphasis) {
+ case 0:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+ break;
+ case 1:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+ break;
+ case 2:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+ break;
+ case 3:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+ break;
+ }
+ switch (edp_link_params->vswing) {
+ case 0:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+ break;
+ case 1:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+ break;
+ case 2:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+ break;
+ case 3:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+ break;
+ }
+ DRM_DEBUG_KMS("VBT reports EDP: VSwing %d, Preemph %d\n",
+ dev_priv->edp.vswing, dev_priv->edp.preemphasis);
+}
+
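
As a worked illustration of the color-depth decode in parse_edp() above, here is a self-contained sketch. The two-bit-per-panel packing follows the switch statement; the sample dword and panel index are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t color_depth = 0x00000009;	/* invented VBT dword */
	uint8_t panel_type = 1;			/* one of the 16 panel slots */

	/* Two bits per panel: 0 = 18bpp, 1 = 24bpp, 2 = 30bpp */
	switch ((color_depth >> (panel_type * 2)) & 3) {
	case 0:
		printf("18 bpp\n");
		break;
	case 1:
		printf("24 bpp\n");
		break;
	case 2:
		printf("30 bpp\n");
		break;
	}
	return 0;
}

Here 0x9 is binary 1001, so panel slot 1 holds the two bits 10 and the sketch prints 30 bpp.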
static u16
get_blocksize(void *p)
{
@@ -154,6 +246,8 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
return;
dev_priv->lvds_dither = lvds_options->pixel_dither;
+ dev_priv->panel_type = lvds_options->panel_type;
+
if (lvds_options->panel_type == 0xff)
return;
@@ -340,6 +434,9 @@ parse_driver_features(struct drm_psb_private *dev_priv,
if (!driver)
return;
+ if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->edp.support = 1;
+
/* This bit means to use 96Mhz for DPLL_A or not */
if (driver->primary_lfp_id)
dev_priv->dplla_96mhz = true;
@@ -437,6 +534,9 @@ int psb_intel_init_bios(struct drm_device *dev)
size_t size;
int i;
+
+ dev_priv->panel_type = 0xff;
+
/* XXX Should this validation be moved to intel_opregion.c? */
if (dev_priv->opregion.vbt) {
struct vbt_header *vbt = dev_priv->opregion.vbt;
@@ -477,6 +577,7 @@ int psb_intel_init_bios(struct drm_device *dev)
parse_sdvo_device_mapping(dev_priv, bdb);
parse_device_mapping(dev_priv, bdb);
parse_backlight_data(dev_priv, bdb);
+ parse_edp(dev_priv, bdb);
if (bios)
pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 2e95523b84b19803aeab511a4d70fa15923d726a..c6267c98c9e733079803f550daaff99b407a15f1 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -23,6 +23,7 @@
#define _I830_BIOS_H_
#include
+#include
struct vbt_header {
u8 signature[20]; /**< Always starts with 'VBT$' */
@@ -93,6 +94,7 @@ struct vbios_data {
#define BDB_SDVO_LVDS_PNP_IDS 24
#define BDB_SDVO_LVDS_POWER_SEQ 25
#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
#define BDB_LVDS_OPTIONS 40
#define BDB_LVDS_LFP_DATA_PTRS 41
#define BDB_LVDS_LFP_DATA 42
@@ -391,6 +393,11 @@ struct bdb_sdvo_lvds_options {
u8 panel_misc_bits_4;
} __attribute__((packed));
+#define BDB_DRIVER_FEATURE_NO_LVDS 0
+#define BDB_DRIVER_FEATURE_INT_LVDS 1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
+#define BDB_DRIVER_FEATURE_EDP 3
+
struct bdb_driver_features {
u8 boot_dev_algorithm:1;
u8 block_display_switch:1;
@@ -431,6 +438,45 @@ struct bdb_driver_features {
u8 custom_vbt_version;
} __attribute__((packed));
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+struct edp_power_seq {
+ u16 t1_t3;
+ u16 t8;
+ u16 t9;
+ u16 t10;
+ u16 t11_t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ u32 sdrrs_msa_timing_delay;
+ struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
extern int psb_intel_init_bios(struct drm_device *dev);
extern void psb_intel_destroy_bios(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 5675d93b420567b3c3c5f2730c915434da9be21a..32dba2ab53e1dbf2e33476f7d3a226a7fb1e3d44 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -299,17 +299,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
if (drm_connector_property_set_value(connector, property,
value))
goto set_prop_error;
- else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct backlight_device *psb_bd;
-
- psb_bd = mdfld_get_backlight_device();
- if (psb_bd) {
- psb_bd->props.brightness = value;
- mdfld_set_brightness(psb_bd);
- }
-#endif
- }
+ else
+ gma_backlight_set(encoder->dev, value);
}
set_prop_done:
return 0;
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 64d18a37da40e2f77e2c5be6d6be12956dff5008..a97e38e284fa3714b08ef29a9ee2c07d0b7f826c 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,20 +118,20 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
dev_priv->platform_rev_id);
}
-struct vbt_header {
+struct mid_vbt_header {
u32 signature;
u8 revision;
} __packed;
/* The same for r0 and r1 */
struct vbt_r0 {
- struct vbt_header vbt_header;
+ struct mid_vbt_header vbt_header;
u8 size;
u8 checksum;
} __packed;
struct vbt_r10 {
- struct vbt_header vbt_header;
+ struct mid_vbt_header vbt_header;
u8 checksum;
u16 size;
u8 panel_count;
@@ -281,7 +281,7 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
u32 addr;
u8 __iomem *vbt_virtual;
- struct vbt_header vbt_header;
+ struct mid_vbt_header vbt_header;
struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
int ret = -1;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 2eb3dc4e9c9b03186c2734a5818aacec70892f47..69e51e903f3585f3600e46c6f3baee6a3fed1ab0 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -252,7 +252,6 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
- connector->display_info.raw_edid = NULL;
}
/*
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index c430bd424681c0b28625972a796aeb96b0524423..ad0d6de938f3654aae0ccef2f02ba2d4abb6a1ef 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -166,8 +166,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
int max = bd->props.max_brightness;
- bd->props.brightness = bclp * max / 255;
- backlight_update_status(bd);
+ gma_backlight_set(dev, bclp * max / 255);
}
asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 7563cd51851a2f28b6855c9d20b8deb030889083..b58c4701c4e8f50d98c1ca9464e1399a0b4d4c94 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -290,6 +290,7 @@ static void psb_get_core_freq(struct drm_device *dev)
case 6:
case 7:
dev_priv->core_freq = 266;
+ break;
default:
dev_priv->core_freq = 0;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index b15282fdbf97601d071767e8fb3094e6bb952e01..a7fd6c48b793ea95806b107f67969666153d7136 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -24,10 +24,10 @@
#include
#include
-#include "gem_glue.h"
#include
#include "psb_reg.h"
#include "psb_intel_drv.h"
+#include "intel_bios.h"
#include "gtt.h"
#include "power.h"
#include "opregion.h"
@@ -613,6 +613,8 @@ struct drm_psb_private {
*/
struct backlight_device *backlight_device;
struct drm_property *backlight_property;
+ bool backlight_enabled;
+ int backlight_level;
uint32_t blc_adj1;
uint32_t blc_adj2;
@@ -640,6 +642,19 @@ struct drm_psb_private {
int mdfld_panel_id;
bool dplla_96mhz; /* DPLL data from the VBT */
+
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ uint8_t panel_type;
};
@@ -796,6 +811,9 @@ extern int psb_fbdev_init(struct drm_device *dev);
/* backlight.c */
int gma_backlight_init(struct drm_device *dev);
void gma_backlight_exit(struct drm_device *dev);
+void gma_backlight_disable(struct drm_device *dev);
+void gma_backlight_enable(struct drm_device *dev);
+void gma_backlight_set(struct drm_device *dev, int v);
/* oaktrail_crtc.c */
extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
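
The gma_backlight_set() helper declared above centralizes the open-coded CONFIG_BACKLIGHT_CLASS_DEVICE blocks that this patch removes from the LVDS/DSI set_property paths. Its body lives in backlight.c and is not part of this hunk; a plausible sketch, assuming it caches the requested level in the new backlight_level field and only pokes the backlight class device when one is registered and enabled:

void gma_backlight_set(struct drm_device *dev, int v)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
	struct drm_psb_private *dev_priv = dev->dev_private;

	dev_priv->backlight_level = v;
	if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
		dev_priv->backlight_device->props.brightness = v;
		backlight_update_status(dev_priv->backlight_device);
	}
#endif
}

Caching the level is what makes the matching gma_backlight_disable()/gma_backlight_enable() pair possible: disable can drop to zero and enable can restore the last requested brightness.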
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index ebe1a28f60e1502240c798a0eccdb4a6b799c800..90f2d11e686ba3f18a9ea9e4c08849abc8ab8914 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -29,10 +29,6 @@
* Display related stuff
*/
-/* store information about an Ixxx DVO */
-/* The i830->i865 use multiple DVOs with multiple i2cs */
-/* the i915, i945 have a single sDVO i2c bus - which is different */
-#define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */
#define INTELFB_CONN_LIMIT 4
@@ -69,6 +65,8 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_MIPI 7
#define INTEL_OUTPUT_MIPI2 8
+#define INTEL_OUTPUT_DISPLAYPORT 9
+#define INTEL_OUTPUT_EDP 10
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -133,6 +131,11 @@ struct psb_intel_encoder {
void (*hot_plug)(struct psb_intel_encoder *);
int crtc_mask;
int clone_mask;
+ u32 ddi_select; /* Channel info */
+#define DDI0_SELECT 0x01
+#define DDI1_SELECT 0x02
+#define DP_MASK 0x8000
+#define DDI_MASK 0x03
void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
/* FIXME: Either make SDVO and LVDS store it's i2c here or give CDV it's
@@ -190,7 +193,6 @@ struct psb_intel_crtc {
u32 mode_flags;
bool active;
- bool crtc_enable;
/* Saved Crtc HW states */
struct psb_intel_crtc_state *crtc_state;
@@ -285,4 +287,20 @@ extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
extern void gma_intel_teardown_gmbus(struct drm_device *dev);
+/* DP support */
+extern void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg);
+extern void cdv_intel_dp_set_m_n(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+extern void psb_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void psb_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+extern int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val);
+extern int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val);
+extern void cdv_sb_reset(struct drm_device *dev);
+
+extern void cdv_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 37adc9edf9744ec44e779503eaefc336e34013ca..2a4c3a9e33e37a5e9c756e1b5bc17afa1fd394c6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -630,17 +630,8 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
property,
value))
goto set_prop_error;
- else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *devp =
- encoder->dev->dev_private;
- struct backlight_device *bd = devp->backlight_device;
- if (bd) {
- bd->props.brightness = value;
- backlight_update_status(bd);
- }
-#endif
- }
+ else
+ gma_backlight_set(encoder->dev, value);
} else if (!strcmp(property->name, "DPMS")) {
struct drm_encoder_helper_funcs *hfuncs
= encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 8e8c8efb0a89852ec2c9d2b623f43ca25ac2273e..d914719c4b6010d7f596f982050d70c0de4e0b9c 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -173,15 +173,46 @@
#define PP_SEQUENCE_ON (1 << 28)
#define PP_SEQUENCE_OFF (2 << 28)
#define PP_SEQUENCE_MASK 0x30000000
+#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
+#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
+#define PP_SEQUENCE_STATE_MASK 0x0000000f
+
#define PP_CONTROL 0x61204
#define POWER_TARGET_ON (1 << 0)
-
+#define PANEL_UNLOCK_REGS (0xabcd << 16)
+#define PANEL_UNLOCK_MASK (0xffff << 16)
+#define EDP_FORCE_VDD (1 << 3)
+#define EDP_BLC_ENABLE (1 << 2)
+#define PANEL_POWER_RESET (1 << 1)
+#define PANEL_POWER_OFF (0 << 0)
+#define PANEL_POWER_ON (1 << 0)
+
+/* Poulsbo/Oaktrail */
#define LVDSPP_ON 0x61208
#define LVDSPP_OFF 0x6120c
#define PP_CYCLE 0x61210
+/* Cedartrail */
#define PP_ON_DELAYS 0x61208 /* Cedartrail */
+#define PANEL_PORT_SELECT_MASK (3 << 30)
+#define PANEL_PORT_SELECT_LVDS (0 << 30)
+#define PANEL_PORT_SELECT_EDP (1 << 30)
+#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_UP_DELAY_SHIFT 16
+#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_ON_DELAY_SHIFT 0
+
#define PP_OFF_DELAYS 0x6120c /* Cedartrail */
+#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_DOWN_DELAY_SHIFT 16
+#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
+
+#define PP_DIVISOR 0x61210 /* Cedartrail */
+#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
+#define PP_REFERENCE_DIVIDER_SHIFT 8
+#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
+#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
#define PFIT_CONTROL 0x61230
#define PFIT_ENABLE (1 << 31)
@@ -1282,6 +1313,10 @@ No status bits are changed.
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* Fixed value on CDV */
# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6)
+# define DPUNIT_PIPEB_GATE_DISABLE (1 << 30)
+# define DPUNIT_PIPEA_GATE_DISABLE (1 << 25)
+# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24)
+# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13)
#define RAMCLK_GATE_D 0x6210
@@ -1347,5 +1382,165 @@ No status bits are changed.
#define LANE_PLL_ENABLE (0x3 << 20)
#define LANE_PLL_PIPE(p) (((p) == 0) ? (1 << 21) : (0 << 21))
+#define DP_B 0x64100
+#define DP_C 0x64200
+
+#define DP_PORT_EN (1 << 31)
+#define DP_PIPEB_SELECT (1 << 30)
+#define DP_PIPE_MASK (1 << 30)
+
+/* Link training mode - select a suitable mode for each stage */
+#define DP_LINK_TRAIN_PAT_1 (0 << 28)
+#define DP_LINK_TRAIN_PAT_2 (1 << 28)
+#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
+#define DP_LINK_TRAIN_OFF (3 << 28)
+#define DP_LINK_TRAIN_MASK (3 << 28)
+#define DP_LINK_TRAIN_SHIFT 28
+
+/* Signal voltages. These are mostly controlled by the other end */
+#define DP_VOLTAGE_0_4 (0 << 25)
+#define DP_VOLTAGE_0_6 (1 << 25)
+#define DP_VOLTAGE_0_8 (2 << 25)
+#define DP_VOLTAGE_1_2 (3 << 25)
+#define DP_VOLTAGE_MASK (7 << 25)
+#define DP_VOLTAGE_SHIFT 25
+
+/* Signal pre-emphasis levels, like voltages, the other end tells us what
+ * they want
+ */
+#define DP_PRE_EMPHASIS_0 (0 << 22)
+#define DP_PRE_EMPHASIS_3_5 (1 << 22)
+#define DP_PRE_EMPHASIS_6 (2 << 22)
+#define DP_PRE_EMPHASIS_9_5 (3 << 22)
+#define DP_PRE_EMPHASIS_MASK (7 << 22)
+#define DP_PRE_EMPHASIS_SHIFT 22
+
+/* How many wires to use. I guess 3 was too hard */
+#define DP_PORT_WIDTH_1 (0 << 19)
+#define DP_PORT_WIDTH_2 (1 << 19)
+#define DP_PORT_WIDTH_4 (3 << 19)
+#define DP_PORT_WIDTH_MASK (7 << 19)
+
+/* Mystic DPCD version 1.1 special mode */
+#define DP_ENHANCED_FRAMING (1 << 18)
+
+/** locked once port is enabled */
+#define DP_PORT_REVERSAL (1 << 15)
+
+/** sends the clock on lane 15 of the PEG for debug */
+#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
+
+#define DP_SCRAMBLING_DISABLE (1 << 12)
+#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
+
+/** limit RGB values to avoid confusing TVs */
+#define DP_COLOR_RANGE_16_235 (1 << 8)
+
+/** Turn on the audio link */
+#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
+
+/** vs and hs sync polarity */
+#define DP_SYNC_VS_HIGH (1 << 4)
+#define DP_SYNC_HS_HIGH (1 << 3)
+
+/** A fantasy */
+#define DP_DETECTED (1 << 2)
+
+/** The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
+#define DPB_AUX_CH_CTL 0x64110
+#define DPB_AUX_CH_DATA1 0x64114
+#define DPB_AUX_CH_DATA2 0x64118
+#define DPB_AUX_CH_DATA3 0x6411c
+#define DPB_AUX_CH_DATA4 0x64120
+#define DPB_AUX_CH_DATA5 0x64124
+
+#define DPC_AUX_CH_CTL 0x64210
+#define DPC_AUX_CH_DATA1 0x64214
+#define DPC_AUX_CH_DATA2 0x64218
+#define DPC_AUX_CH_DATA3 0x6421c
+#define DPC_AUX_CH_DATA4 0x64220
+#define DPC_AUX_CH_DATA5 0x64224
+
+#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
+#define DP_AUX_CH_CTL_DONE (1 << 30)
+#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
+#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28)
+#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
+#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
+#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
+#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
+#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
+#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14)
+#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13)
+#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12)
+#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
+ *
+ * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+
+#define _PIPEA_GMCH_DATA_M 0x70050
+#define _PIPEB_GMCH_DATA_M 0x71050
+
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
+#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
+
+#define PIPE_GMCH_DATA_M_MASK (0xffffff)
+
+#define _PIPEA_GMCH_DATA_N 0x70054
+#define _PIPEB_GMCH_DATA_N 0x71054
+#define PIPE_GMCH_DATA_N_MASK (0xffffff)
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+
+#define _PIPEA_DP_LINK_M 0x70060
+#define _PIPEB_DP_LINK_M 0x71060
+#define PIPEA_DP_LINK_M_MASK (0xffffff)
+
+#define _PIPEA_DP_LINK_N 0x70064
+#define _PIPEB_DP_LINK_N 0x71064
+#define PIPEA_DP_LINK_N_MASK (0xffffff)
+
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+
+#define PIPE_BPC_MASK (7 << 5)
+#define PIPE_8BPC (0 << 5)
+#define PIPE_10BPC (1 << 5)
+#define PIPE_6BPC (2 << 5)
#endif
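
The GMCH data M/N and link M/N comments above boil down to reducing a ratio so that it fits the 24-bit M and N register fields. A stand-alone sketch of the arithmetic (the reduce-by-gcd approach is an assumption; a real driver may scale the ratio instead of reducing it exactly):

#include <stdint.h>
#include <stdio.h>

static uint64_t gcd64(uint64_t a, uint64_t b)
{
	while (b) {
		uint64_t t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	uint64_t pixel_clock = 148500;	/* kHz: 1080p dot clock */
	uint64_t bpp_bytes = 3;		/* bytes out of the plane */
	uint64_t ls_clk = 270000;	/* kHz: 2.7 link clock */
	uint64_t lanes = 4;

	/* GMCH M/N = dot clock * bytes per pixel / (ls_clk * lane count) */
	uint64_t m = pixel_clock * bpp_bytes;
	uint64_t n = ls_clk * lanes;
	uint64_t g = gcd64(m, n);

	printf("data M/N = %llu/%llu\n",
	       (unsigned long long)(m / g), (unsigned long long)(n / g));
	return 0;
}

For this example the reduced ratio is 33/80, comfortably within the 24-bit fields masked by PIPE_GMCH_DATA_M_MASK and PIPE_GMCH_DATA_N_MASK.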
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index c148d92229fd17c34d3c985a5f9dc25a9eb0a785..fc9292705dbf7b7f552e8b9910ac93ed5eaec5f4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1291,7 +1291,6 @@ psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
return drm_get_edid(connector,
&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
- return NULL;
}
static enum drm_connector_status
@@ -1342,7 +1341,6 @@ psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
}
} else
status = connector_status_disconnected;
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1403,7 +1401,6 @@ psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
else
ret = connector_status_connected;
- connector->display_info.raw_edid = NULL;
kfree(edid);
} else
ret = connector_status_connected;
@@ -1452,7 +1449,6 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
drm_add_edid_modes(connector, edid);
}
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
}
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 36d952280c506d0e808fc924f9b15751862d5395..599099fe76e305372e1494fa67b5a9030db6a399 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -427,15 +427,10 @@ static int ch7006_remove(struct i2c_client *client)
return 0;
}
-static int ch7006_suspend(struct i2c_client *client, pm_message_t mesg)
+static int ch7006_resume(struct device *dev)
{
- ch7006_dbg(client, "\n");
-
- return 0;
-}
+ struct i2c_client *client = to_i2c_client(dev);
-static int ch7006_resume(struct i2c_client *client)
-{
ch7006_dbg(client, "\n");
ch7006_write(client, 0x3d, 0x0);
@@ -499,15 +494,18 @@ static struct i2c_device_id ch7006_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, ch7006_ids);
+static const struct dev_pm_ops ch7006_pm_ops = {
+ .resume = ch7006_resume,
+};
+
static struct drm_i2c_encoder_driver ch7006_driver = {
.i2c_driver = {
.probe = ch7006_probe,
.remove = ch7006_remove,
- .suspend = ch7006_suspend,
- .resume = ch7006_resume,
.driver = {
.name = "ch7006",
+ .pm = &ch7006_pm_ops,
},
.id_table = ch7006_ids,
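
Note the conversion pattern here: the legacy i2c_driver suspend/resume methods are dropped in favour of dev_pm_ops, so PM callbacks now take a struct device rather than an i2c_client. ch7006 only needs a resume hook, but if a suspend hook were ever needed again it would slot into the same ops table; a hedged sketch:

static int ch7006_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	ch7006_dbg(client, "\n");
	return 0;	/* nothing to save; resume reprograms the chip */
}

static const struct dev_pm_ops ch7006_pm_ops = {
	.suspend = ch7006_suspend,
	.resume  = ch7006_resume,
};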
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0bacdba6d7e98da5bdd22a9b72dfc1eb10349fc..0f2c5493242b2a7ff6522f8f0424a0717f347a57 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
dvo_ivch.o \
dvo_tfp410.o \
dvo_sil164.o \
+ dvo_ns2501.o \
i915_gem_dmabuf.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 573de82c9f5a55b970ef5f44eabf9875e00e2651..33a62ad80100f8912e7e56f46a06c147e5203736 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -57,13 +57,12 @@ struct intel_dvo_dev_ops {
void (*create_resources)(struct intel_dvo_device *dvo);
/*
- * Turn on/off output or set intermediate power levels if available.
+ * Turn on/off output.
*
- * Unsupported intermediate modes drop to the lower power setting.
- * If the mode is DPMSModeOff, the output must be disabled,
- * as the DPLL may be disabled afterwards.
+ * Because none of our dvo drivers support intermediate power levels,
+ * we don't expose this in the interface.
*/
- void (*dpms)(struct intel_dvo_device *dvo, int mode);
+ void (*dpms)(struct intel_dvo_device *dvo, bool enable);
/*
* Callback for testing a video mode for a given output.
@@ -114,6 +113,12 @@ struct intel_dvo_dev_ops {
*/
enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+ /*
+ * Probe the current hw status, returning true if the connected output
+ * is active.
+ */
+ bool (*get_hw_state)(struct intel_dvo_device *dev);
+
/**
* Query the device for the modes it provides.
*
@@ -139,5 +144,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops;
extern struct intel_dvo_dev_ops ivch_ops;
extern struct intel_dvo_dev_ops tfp410_ops;
extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
#endif /* _INTEL_DVO_H */
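
The new get_hw_state hook gives the core a way to cross-check its software state against what the DVO chip is actually doing. A hypothetical caller sketch, assuming the ops table is reachable where the check runs (the function and parameter names are illustrative, not part of this header):

/* Hypothetical sketch: warn when the chip disagrees with our bookkeeping */
static void dvo_sanity_check(struct intel_dvo_device *dvo,
			     const struct intel_dvo_dev_ops *ops,
			     bool expected_on)
{
	if (ops->get_hw_state && ops->get_hw_state(dvo) != expected_on)
		DRM_ERROR("DVO hw state (%d) != tracked state (%d)\n",
			  !expected_on, expected_on);
}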
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1ca799a1e1fcd6abb19cf50be0cc51b796f20b5e..86b27d1d90c22d5c899d4da78410ce80752c7bd6 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -163,7 +163,7 @@ struct ch7017_priv {
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
@@ -309,7 +309,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
(mode->hdisplay & 0x0700) >> 8;
- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
+ ch7017_dpms(dvo, false);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
horizontal_active_pixel_input);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
@@ -331,7 +331,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
}
/* set the CH7017 power state */
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t val;
@@ -345,7 +345,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
CH7017_DAC3_POWER_DOWN |
CH7017_TV_POWER_DOWN_EN);
- if (mode == DRM_MODE_DPMS_ON) {
+ if (enable) {
/* Turn on the LVDS */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
val & ~CH7017_LVDS_POWER_DOWN_EN);
@@ -359,6 +359,18 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
msleep(20);
}
+static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ if (val & CH7017_LVDS_POWER_DOWN_EN)
+ return false;
+ else
+ return true;
+}
+
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
@@ -396,6 +408,7 @@ struct intel_dvo_dev_ops ch7017_ops = {
.mode_valid = ch7017_mode_valid,
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
+ .get_hw_state = ch7017_get_hw_state,
.dump_regs = ch7017_dump_regs,
.destroy = ch7017_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 4a036600e806bf34260d3930d923772a2c73a92e..38f3a6cb8c7d72272c9cffae2466818b440282e9 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -289,14 +289,26 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
}
/* set the CH7xxx power state */
-static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
{
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
else
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
}
+static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
+{
+ u8 val;
+
+ ch7xxx_readb(dvo, CH7xxx_PM, &val);
+
+ if (val & CH7xxx_PM_FPD)
+ return false;
+ else
+ return true;
+}
+
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
int i;
@@ -326,6 +338,7 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
.mode_valid = ch7xxx_mode_valid,
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
+ .get_hw_state = ch7xxx_get_hw_state,
.dump_regs = ch7xxx_dump_regs,
.destroy = ch7xxx_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 04f2893d5e3ce91b039d86cb3a77c7eca7d0169e..baaf65bf0bdd1a1430b565fce6c60302c4f95a2f 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -288,7 +288,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
}
/** Sets the power state of the panel connected to the ivch */
-static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
+static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
uint16_t vr01, vr30, backlight;
@@ -297,13 +297,13 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
if (!ivch_read(dvo, VR01, &vr01))
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
backlight = 1;
else
backlight = 0;
ivch_write(dvo, VR80, backlight);
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
else
vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
@@ -315,7 +315,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
if (!ivch_read(dvo, VR30, &vr30))
break;
- if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
+ if (((vr30 & VR30_PANEL_ON) != 0) == enable)
break;
udelay(1000);
}
@@ -323,6 +323,20 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
udelay(16 * 1000);
}
+static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint16_t vr01;
+
+ /* Read the current power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return false;
+
+ if (vr01 & VR01_LCD_ENABLE)
+ return true;
+ else
+ return false;
+}
+
static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -413,6 +427,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
struct intel_dvo_dev_ops ivch_ops = {
.init = ivch_init,
.dpms = ivch_dpms,
+ .get_hw_state = ivch_get_hw_state,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
new file mode 100644
index 0000000000000000000000000000000000000000..c4a255be697921caade4bf3dd064217e82230763
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -0,0 +1,588 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+struct ns2501_priv {
+ bool quiet;
+ int reg_8_shadow;
+ int reg_8_set;
+ /* Shadow registers for i915 */
+ int dvoc;
+ int pll_a;
+ int srcdim;
+ int fw_blc;
+};
+
+/*
+ * For reasons unclear to me, the ns2501, at least on the Fujitsu/Siemens
+ * laptops, does not respond on the i2c bus unless both the PLL is running
+ * and the display is configured in its native resolution.
+ * This function forces the DVO on, and stores the registers it touches.
+ * Afterwards, the registers are restored to their regular values.
+ *
+ * This is pretty much a hack, though it works.
+ * Without it, ns2501_readb and ns2501_writeb fail
+ * when switching the resolution.
+ */
+
+static void enable_dvo(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
+
+ ns->dvoc = I915_READ(DVO_C);
+ ns->pll_a = I915_READ(_DPLL_A);
+ ns->srcdim = I915_READ(DVOC_SRCDIM);
+ ns->fw_blc = I915_READ(FW_BLC);
+
+ I915_WRITE(DVOC, 0x10004084);
+ I915_WRITE(_DPLL_A, 0xd0820000);
+ I915_WRITE(DVOC_SRCDIM, 0x400300); /* 1024x768 */
+ I915_WRITE(FW_BLC, 0x1080304);
+
+ I915_WRITE(DVOC, 0x90004084);
+}
+
+/*
+ * Restore the I915 registers modified by the above
+ * trigger function.
+ */
+static void restore_dvo(struct intel_dvo_device *dvo)
+{
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ I915_WRITE(DVOC, ns->dvoc);
+ I915_WRITE(_DPLL_A, ns->pll_a);
+ I915_WRITE(DVOC_SRCDIM, ns->srcdim);
+ I915_WRITE(FW_BLC, ns->fw_blc);
+}
+
+/*
+ * Read a register from the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO with the above function.
+ */
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS
+ ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
+ adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/*
+ * Write a register to the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO with the above function.
+ */
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1) {
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* National Semiconductor 2501 driver for chip on i2c bus:
+ * scan for the chip on the bus.
+ * Hope the VBIOS initialized the PLL correctly so we can
+ * talk to it. If not, the chip will not be seen, and hence not detected.
+ * Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the NS2501 chip on the specified i2c bus */
+ struct ns2501_priv *ns;
+ unsigned char ch;
+
+ ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+ if (ns == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ns;
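+ /* Stay quiet about i2c errors until we know the chip is actually there */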
+ ns->quiet = true;
+
+ if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_VID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_DID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+ ns->quiet = false;
+ ns->reg_8_set = 0;
+ ns->reg_8_shadow =
+ NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
+
+ DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
+ return true;
+
+out:
+ kfree(ns);
+ return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+ /*
+ * This is a laptop display; it doesn't have hotplugging.
+ * Even if it did, the detection bit of the 2501 is unreliable, as
+ * it only works for some display types.
+ * It is even more unreliable as the PLL must be active for
+ * reading from the chip to work at all.
+ */
+ return connector_status_connected;
+}
+
+static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS
+ ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+ __func__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Currently, these are all the modes I have data from.
+ * More might exist. Unclear how to find the native resolution
+ * of the panel in here so we could always accept it
+ * by disabling the scaler.
+ */
+ if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
+ (mode->hdisplay == 640 && mode->vdisplay == 480) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+ return MODE_OK;
+ } else {
+ return MODE_ONE_SIZE; /* Is this a reasonable error? */
+ }
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ DRM_DEBUG_KMS
+ ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+ __func__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Where do we find the native resolution for which scaling is not required?
+ *
+ * First trigger the DVO on as otherwise the chip does not appear on the i2c
+ * bus.
+ */
+ do {
+ ok = true;
+
+ if (mode->hdisplay == 800 && mode->vdisplay == 600) {
+ /* mode 277 */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+ DRM_DEBUG_KMS("%s: switching to 800x600\n",
+ __func__);
+
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+ * I'm just copying it over here.
+ * This also means that I cannot support any modes
+ * other than the ones supported by the bios.
+ */
+ ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
+ ok &= ns2501_writeb(dvo, 0x1b, 0x19);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
+ ok &= ns2501_writeb(dvo, 0x1d, 0x02);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0x27);
+ ok &= ns2501_writeb(dvo, 0x81, 0x03);
+ ok &= ns2501_writeb(dvo, 0x82, 0x41);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x04);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x80);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x73);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+ /* mode 274 */
+ DRM_DEBUG_KMS("%s: switching to 640x480\n",
+ __func__);
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+ * I'm just copying it over here.
+ * This also means that I cannot support any modes
+ * other than the ones supported by the bios.
+ */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+
+ ok &= ns2501_writeb(dvo, 0x11, 0xa0);
+ ok &= ns2501_writeb(dvo, 0x1b, 0x11);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x54);
+ ok &= ns2501_writeb(dvo, 0x1d, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0xff);
+ ok &= ns2501_writeb(dvo, 0x81, 0x07);
+ ok &= ns2501_writeb(dvo, 0x82, 0x3d);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x10);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x24);
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x84);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x09);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0x90);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x16);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x02);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
+ /* mode 280 */
+ DRM_DEBUG_KMS("%s: switching to 1024x768\n",
+ __func__);
+ /*
+ * This might or might not work, actually. I'm silently
+ * assuming here that the native panel resolution is
+ * 1024x768. If not, this leaves the scaler disabled,
+ * generating a picture that is likely not what was expected.
+ *
+ * The problem is that I do not know where to take the
+ * panel dimensions from.
+ *
+ * Enable the bypass, scaling not required.
+ *
+ * The scaler registers are irrelevant here.
+ *
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ ok &= ns2501_writeb(dvo, 0x37, 0x44);
+ } else {
+ /*
+ * Data not known. Bummer!
+ * We should never get here, as ns2501_mode_valid
+ * accepted no other modes.
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ }
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
+
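+ /* A failed write means the chip dropped off the bus: force the DVO on and retry the sequence */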
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+ /*
+ * Restore the old i915 registers before
+ * forcing the ns2501 on.
+ */
+ if (restore)
+ restore_dvo(dvo);
+}
+
+/* read the NS2501 power state */
+static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+ unsigned char ch;
+
+ if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+ return false;
+
+ if (ch & NS2501_8_PD)
+ return true;
+ else
+ return false;
+}
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ unsigned char ch;
+
+ DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
+ __FUNCTION__, enable);
+
+ ch = ns->reg_8_shadow;
+
+ if (enable)
+ ch |= NS2501_8_PD;
+ else
+ ch &= ~NS2501_8_PD;
+
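+ /* Only touch the chip on first use or when the power state actually changes */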
+ if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
+ ns->reg_8_set = 1;
+ ns->reg_8_shadow = ch;
+
+ do {
+ ok = true;
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
+ ok &=
+ ns2501_writeb(dvo, 0x34,
+ enable ? 0x03 : 0x00);
+ ok &=
+ ns2501_writeb(dvo, 0x35,
+ enable ? 0xff : 0x00);
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+
+ if (restore)
+ restore_dvo(dvo);
+ }
+}
+
+static void ns2501_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ns2501_readb(dvo, NS2501_FREQ_LO, &val);
+ DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_FREQ_HI, &val);
+ DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG8, &val);
+ DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG9, &val);
+ DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REGC, &val);
+ DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+
+ if (ns) {
+ kfree(ns);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+ .init = ns2501_init,
+ .detect = ns2501_detect,
+ .mode_valid = ns2501_mode_valid,
+ .mode_set = ns2501_mode_set,
+ .dpms = ns2501_dpms,
+ .get_hw_state = ns2501_get_hw_state,
+ .dump_regs = ns2501_dump_regs,
+ .destroy = ns2501_destroy,
+};
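+
+/*
+ * A minimal usage sketch (assuming the dev_ops vtable pointer declared in
+ * dvo.h; the surrounding code is hypothetical): the new ->get_hw_state()
+ * hook lets a caller adopt the transmitter state the firmware left behind
+ * instead of blindly forcing a modeset:
+ *
+ *	if (dvo->dev_ops->get_hw_state(dvo))
+ *		take over the running configuration
+ *	else
+ *		dvo->dev_ops->dpms(dvo, false);	keep shadow state in sync
+ */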
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index a0b13a6f619d585eb85d6c307966b7dd137192f6..4debd32e3e4ce195e201b8869170a2533a7fd73e 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -208,7 +208,7 @@ static void sil164_mode_set(struct intel_dvo_device *dvo,
}
/* set the SIL164 power state */
-static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
+static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
{
int ret;
unsigned char ch;
@@ -217,7 +217,7 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
if (ret == false)
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ch |= SIL164_8_PD;
else
ch &= ~SIL164_8_PD;
@@ -226,6 +226,21 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
return;
}
+static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+ int ret;
+ unsigned char ch;
+
+ ret = sil164_readb(dvo, SIL164_REG8, &ch);
+ if (ret == false)
+ return false;
+
+ if (ch & SIL164_8_PD)
+ return true;
+ else
+ return false;
+}
+
static void sil164_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
@@ -258,6 +273,7 @@ struct intel_dvo_dev_ops sil164_ops = {
.mode_valid = sil164_mode_valid,
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
+ .get_hw_state = sil164_get_hw_state,
.dump_regs = sil164_dump_regs,
.destroy = sil164_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index aa2cd3ec54aa97d59bd736d67591048ad2da56a5..e17f1b07e915f924244032bc77c3ba2a5919adc8 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -234,14 +234,14 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
}
/* set the tfp410 power state */
-static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
+static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ctl1 |= TFP410_CTL_1_PD;
else
ctl1 &= ~TFP410_CTL_1_PD;
@@ -249,6 +249,19 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
}
+static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return false;
+
+ if (ctl1 & TFP410_CTL_1_PD)
+ return true;
+ else
+ return false;
+}
+
static void tfp410_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val, val2;
@@ -299,6 +312,7 @@ struct intel_dvo_dev_ops tfp410_ops = {
.mode_valid = tfp410_mode_valid,
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
+ .get_hw_state = tfp410_get_hw_state,
.dump_regs = tfp410_dump_regs,
.destroy = tfp410_destroy,
};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 63f01e29c1fa91ac69be334c6392faf361a8e6e8..dde8b505bf7ffdcf3f4aa0ac3b6b169630831e75 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -43,7 +43,6 @@
enum {
ACTIVE_LIST,
- FLUSHING_LIST,
INACTIVE_LIST,
PINNED_LIST,
};
@@ -61,28 +60,11 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", info->gen);
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
-#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
- B(is_mobile);
- B(is_i85x);
- B(is_i915g);
- B(is_i945gm);
- B(is_g33);
- B(need_gfx_hws);
- B(is_g4x);
- B(is_pineview);
- B(is_broadwater);
- B(is_crestline);
- B(has_fbc);
- B(has_pipe_cxsr);
- B(has_hotplug);
- B(cursor_needs_physical);
- B(has_overlay);
- B(overlay_needs_physical);
- B(supports_tv);
- B(has_bsd_ring);
- B(has_blt_ring);
- B(has_llc);
-#undef B
+#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+#define DEV_INFO_SEP ;
+ DEV_INFO_FLAGS;
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
return 0;
}
@@ -120,20 +102,23 @@ static const char *cache_level_str(int type)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+ seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
- obj->last_rendering_seqno,
+ obj->last_read_seqno,
+ obj->last_write_seqno,
obj->last_fenced_seqno,
cache_level_str(obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
+ if (obj->pin_count)
+ seq_printf(m, " (pinned x %d)", obj->pin_count);
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
if (obj->gtt_space != NULL)
@@ -176,10 +161,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
- case FLUSHING_LIST:
- seq_printf(m, "Flushing:\n");
- head = &dev_priv->mm.flushing_list;
- break;
default:
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
@@ -217,8 +198,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 count, mappable_count;
- size_t size, mappable_size;
+ u32 count, mappable_count, purgeable_count;
+ size_t size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
int ret;
@@ -231,13 +212,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
dev_priv->mm.object_memory);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.gtt_list, gtt_list);
+ count_objects(&dev_priv->mm.bound_list, gtt_list);
seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.active_list, mm_list);
- count_objects(&dev_priv->mm.flushing_list, mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -246,8 +226,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
+ size = count = purgeable_size = purgeable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
+ size += obj->base.size, ++count;
+ if (obj->madv == I915_MADV_DONTNEED)
+ purgeable_size += obj->base.size, ++purgeable_count;
+ }
+ seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+
size = count = mappable_size = mappable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
if (obj->fault_mappable) {
size += obj->gtt_space->size;
++count;
@@ -256,7 +244,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
mappable_size += obj->gtt_space->size;
++mappable_count;
}
+ if (obj->madv == I915_MADV_DONTNEED) {
+ purgeable_size += obj->base.size;
+ ++purgeable_count;
+ }
}
+ seq_printf(m, "%u purgeable objects, %zu bytes\n",
+ purgeable_count, purgeable_size);
seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
mappable_count, mappable_size);
seq_printf(m, "%u fault mappable objects, %zu bytes\n",
@@ -285,7 +279,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
if (list == PINNED_LIST && obj->pin_count == 0)
continue;
@@ -358,40 +352,22 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
struct drm_i915_gem_request *gem_request;
- int ret, count;
+ int ret, count, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
count = 0;
- if (!list_empty(&dev_priv->ring[RCS].request_list)) {
- seq_printf(m, "Render requests:\n");
- list_for_each_entry(gem_request,
- &dev_priv->ring[RCS].request_list,
- list) {
- seq_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
- }
- count++;
- }
- if (!list_empty(&dev_priv->ring[VCS].request_list)) {
- seq_printf(m, "BSD requests:\n");
- list_for_each_entry(gem_request,
- &dev_priv->ring[VCS].request_list,
- list) {
- seq_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
- }
- count++;
- }
- if (!list_empty(&dev_priv->ring[BCS].request_list)) {
- seq_printf(m, "BLT requests:\n");
+ for_each_ring(ring, dev_priv, i) {
+ if (list_empty(&ring->request_list))
+ continue;
+
+ seq_printf(m, "%s requests:\n", ring->name);
list_for_each_entry(gem_request,
- &dev_priv->ring[BCS].request_list,
+ &ring->request_list,
list) {
seq_printf(m, " %d @ %d\n",
gem_request->seqno,
@@ -412,7 +388,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
{
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %d\n",
- ring->name, ring->get_seqno(ring));
+ ring->name, ring->get_seqno(ring, false));
}
}
@@ -421,14 +397,15 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_ring_seqno_info(m, &dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_seqno_info(m, ring);
mutex_unlock(&dev->struct_mutex);
@@ -441,6 +418,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -518,13 +496,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for_each_ring(ring, dev_priv, i) {
if (IS_GEN6(dev) || IS_GEN7(dev)) {
- seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
- dev_priv->ring[i].name,
- I915_READ_IMR(&dev_priv->ring[i]));
+ seq_printf(m,
+ "Graphics Interrupt mask (%s): %08x\n",
+ ring->name, I915_READ_IMR(ring));
}
- i915_ring_seqno_info(m, &dev_priv->ring[i]);
+ i915_ring_seqno_info(m, ring);
}
mutex_unlock(&dev->struct_mutex);
@@ -547,7 +525,8 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
- seq_printf(m, "Fenced object[%2d] = ", i);
+ seq_printf(m, "Fence %d, pin count = %d, object = ",
+ i, dev_priv->fence_regs[i].pin_count);
if (obj == NULL)
seq_printf(m, "unused");
else
@@ -629,12 +608,12 @@ static void print_error_buffers(struct seq_file *m,
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
err->write_domain,
- err->seqno,
+ err->rseqno, err->wseqno,
pin_flag(err->pinned),
tiling_flag(err->tiling),
dirty_flag(err->dirty),
@@ -666,10 +645,9 @@ static void i915_ring_error_state(struct seq_file *m,
seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
- if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
- seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
- }
+
if (INTEL_INFO(dev)->gen >= 4)
seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -718,11 +696,17 @@ static int i915_error_state(struct seq_file *m, void *unused)
for (i = 0; i < dev_priv->num_fence_regs; i++)
seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+ for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+ seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
+
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, "ERROR: 0x%08x\n", error->error);
seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
+ if (INTEL_INFO(dev)->gen == 7)
+ seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
for_each_ring(ring, dev_priv, i)
i915_ring_error_state(m, dev, error, i);
@@ -798,10 +782,14 @@ i915_error_state_write(struct file *filp,
struct seq_file *m = filp->private_data;
struct i915_error_state_file_priv *error_priv = m->private;
struct drm_device *dev = error_priv->dev;
+ int ret;
DRM_DEBUG_DRIVER("Resetting error state\n");
- mutex_lock(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
i915_destroy_error_state(dev);
mutex_unlock(&dev->struct_mutex);
@@ -925,7 +913,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
- GEN6_CAGF_SHIFT) * 50);
+ GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -941,15 +929,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = (rp_state_cap & 0xff00) >> 8;
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
} else {
seq_printf(m, "no P-state info available\n");
}
@@ -1291,7 +1279,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
- for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+ for (gpu_freq = dev_priv->rps.min_delay;
+ gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1302,7 +1291,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
continue;
}
ia_freq = I915_READ(GEN6_PCODE_DATA);
- seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+ seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
mutex_unlock(&dev->struct_mutex);
@@ -1471,8 +1460,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
@@ -1519,9 +1512,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
if (INTEL_INFO(dev)->gen == 6)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ring = &dev_priv->ring[i];
-
+ for_each_ring(ring, dev_priv, i) {
seq_printf(m, "%s\n", ring->name);
if (INTEL_INFO(dev)->gen == 7)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
@@ -1673,7 +1664,7 @@ i915_ring_stop_write(struct file *filp,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
- int val = 0;
+ int val = 0, ret;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
@@ -1688,7 +1679,10 @@ i915_ring_stop_write(struct file *filp,
DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
- mutex_lock(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
dev_priv->stop_rings = val;
mutex_unlock(&dev->struct_mutex);
@@ -1712,10 +1706,18 @@ i915_max_freq_read(struct file *filp,
struct drm_device *dev = filp->private_data;
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
- int len;
+ int len, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
len = snprintf(buf, sizeof(buf),
- "max freq: %d\n", dev_priv->max_delay * 50);
+ "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1732,7 +1734,10 @@ i915_max_freq_write(struct file *filp,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
- int val = 1;
+ int val = 1, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
@@ -1747,12 +1752,17 @@ i915_max_freq_write(struct file *filp,
DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
/*
* Turbo will still be enabled, but won't go above the set value.
*/
- dev_priv->max_delay = val / 50;
+ dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
- gen6_set_rps(dev, val / 50);
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
@@ -1772,10 +1782,18 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
struct drm_device *dev = filp->private_data;
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
- int len;
+ int len, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
len = snprintf(buf, sizeof(buf),
- "min freq: %d\n", dev_priv->min_delay * 50);
+ "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1790,7 +1808,10 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
- int val = 1;
+ int val = 1, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
@@ -1805,12 +1826,17 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
/*
* Turbo will still be enabled, but won't go below the set value.
*/
- dev_priv->min_delay = val / 50;
+ dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
- gen6_set_rps(dev, val / 50);
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
@@ -1833,9 +1859,15 @@ i915_cache_sharing_read(struct file *filp,
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
u32 snpcr;
- int len;
+ int len, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev_priv->dev->struct_mutex);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1861,6 +1893,9 @@ i915_cache_sharing_write(struct file *filp,
u32 snpcr;
int val = 1;
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
return -EINVAL;
@@ -1924,16 +1959,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
if (INTEL_INFO(dev)->gen < 6)
return 0;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
gen6_gt_force_wake_get(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1946,16 +1976,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- /*
- * It's bad that we can potentially hang userspace if struct_mutex gets
- * forever stuck. However, if we cannot acquire this lock it means that
- * almost certainly the driver has hung, is not unload-able. Therefore
- * hanging here is probably a minor inconvenience not to be seen my
- * almost every user.
- */
- mutex_lock(&dev->struct_mutex);
gen6_gt_force_wake_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -2005,7 +2026,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
- {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
@@ -2066,6 +2086,7 @@ int i915_debugfs_init(struct drm_minor *minor)
&i915_cache_sharing_fops);
if (ret)
return ret;
+
ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_ring_stop",
&i915_ring_stop_fops);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 804f1c98e279cf2ee75991915ba1a2c921000165..c9bfd83dde64eb6d83cf181fb6c27b3cfcf0a038 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -234,10 +234,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
}
- dev_priv->cpp = init->cpp;
- dev_priv->back_offset = init->back_offset;
- dev_priv->front_offset = init->front_offset;
- dev_priv->current_page = 0;
+ dev_priv->dri1.cpp = init->cpp;
+ dev_priv->dri1.back_offset = init->back_offset;
+ dev_priv->dri1.front_offset = init->front_offset;
+ dev_priv->dri1.current_page = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->pf_current_page = 0;
@@ -574,7 +574,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
__func__,
- dev_priv->current_page,
+ dev_priv->dri1.current_page,
master_priv->sarea_priv->pf_current_page);
i915_kernel_lost_context(dev);
@@ -588,12 +588,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
+ if (dev_priv->dri1.current_page == 0) {
+ OUT_RING(dev_priv->dri1.back_offset);
+ dev_priv->dri1.current_page = 1;
} else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
+ OUT_RING(dev_priv->dri1.front_offset);
+ dev_priv->dri1.current_page = 0;
}
OUT_RING(0);
@@ -612,7 +612,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
}
- master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+ master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
return 0;
}
@@ -1008,6 +1008,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_WAIT_TIMEOUT:
value = 1;
break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = i915_semaphore_is_enabled(dev);
+ break;
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1424,6 +1430,21 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
kfree(ap);
}
+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+ const struct intel_device_info *info = dev_priv->info;
+
+#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
+#define DEV_INFO_SEP ,
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ info->gen,
+ dev_priv->dev->pdev->device,
+ DEV_INFO_FLAGS);
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
+}
+
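+/*
+ * DEV_INFO_FLAGS is an X-macro: a single flag list that each use site
+ * expands with its own definitions of DEV_INFO_FLAG and DEV_INFO_SEP.
+ * A standalone sketch of the pattern, with hypothetical flag names:
+ *
+ *	#define FLAGS	FLAG(is_mobile) SEP FLAG(has_llc)
+ *
+ *	#define FLAG(x)	seq_printf(m, #x ": %s\n", yesno(info->x))
+ *	#define SEP	;
+ *	FLAGS;				one debugfs line per flag
+ *	#undef FLAG
+ *	#undef SEP
+ *
+ *	#define FLAG(x)	(info->x ? #x "," : "")
+ *	#define SEP	,
+ *	printk("flags=%s%s\n", FLAGS);	all flags in one format call
+ *	#undef FLAG
+ *	#undef SEP
+ *
+ * i915_dump_device_info() above and the i915_capabilities() debugfs dump
+ * are exactly these two expansions.
+ */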
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -1439,7 +1460,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
struct intel_device_info *info;
- int ret = 0, mmio_bar;
+ int ret = 0, mmio_bar, mmio_size;
uint32_t aperture_size;
info = (struct intel_device_info *) flags;
@@ -1448,7 +1469,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
-
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -1464,6 +1484,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = info;
+ i915_dump_device_info(dev_priv);
+
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
@@ -1503,7 +1525,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
mmio_bar = IS_GEN2(dev) ? 1 : 0;
- dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
+	/* Before gen4, the registers and the GTT are behind different BARs.
+	 * However, from gen4 onwards, the registers and the GTT are shared
+	 * in the same BAR, so we want to restrict this ioremap so that it
+	 * does not clobber the GTT, which we want to map with ioremap_wc
+	 * instead. Fortunately, the register BAR remains the same size for
+	 * all the earlier generations up to Ironlake.
+	 */
+ if (info->gen < 5)
+ mmio_size = 512*1024;
+ else
+ mmio_size = 2*1024*1024;
+
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
@@ -1535,11 +1569,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
- * workqueue at any time: max_active = 1 and NON_REENTRANT.
+ * workqueue at any time. Use an ordered one.
*/
- dev_priv->wq = alloc_workqueue("i915",
- WQ_UNBOUND | WQ_NON_REENTRANT,
- 1);
+ dev_priv->wq = alloc_ordered_workqueue("i915", 0);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
@@ -1585,7 +1617,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
- spin_lock_init(&dev_priv->rps_lock);
+ spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
@@ -1835,6 +1867,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -1857,6 +1891,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f6825324e72de6501585e98ecd5ce11f0dac8643..aac4e5e1a5b9b3533f36f801be7c553c2e60c8a1 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -469,6 +469,9 @@ static int i915_drm_freeze(struct drm_device *dev)
"GEM idle failed, resume might fail\n");
return error;
}
+
+ intel_modeset_disable(dev);
+
drm_irq_uninstall(dev);
}
@@ -542,13 +545,9 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
intel_modeset_init_hw(dev);
+ intel_modeset_setup_hw_state(dev);
drm_mode_config_reset(dev);
drm_irq_install(dev);
-
- /* Resume the modeset for every activated CRTC */
- mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
}
intel_opregion_init(dev);
@@ -1059,7 +1058,7 @@ static bool IS_DISPLAYREG(u32 reg)
* This should make it easier to transition modules over to the
* new register block scheme, since we can do it incrementally.
*/
- if (reg >= 0x180000)
+ if (reg >= VLV_DISPLAY_BASE)
return false;
if (reg >= RENDER_RING_BASE &&
@@ -1173,9 +1172,59 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
+ if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+ DRM_ERROR("Unclaimed write to %x\n", reg); \
+ writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \
+ } \
}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
+
+static const struct register_whitelist {
+ uint64_t offset;
+ uint32_t size;
+	uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+} whitelist[] = {
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reg_read *reg = data;
+ struct register_whitelist const *entry = whitelist;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+ if (entry->offset == reg->offset &&
+ (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(whitelist))
+ return -EINVAL;
+
+ switch (entry->size) {
+ case 8:
+ reg->val = I915_READ64(reg->offset);
+ break;
+ case 4:
+ reg->val = I915_READ(reg->offset);
+ break;
+ case 2:
+ reg->val = I915_READ16(reg->offset);
+ break;
+ case 1:
+ reg->val = I915_READ8(reg->offset);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
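+
+/*
+ * A hedged userspace sketch of the new interface, assuming libdrm's
+ * drmIoctl() and the DRM_IOCTL_I915_REG_READ / struct drm_i915_reg_read
+ * definitions from the matching i915_drm.h header:
+ *
+ *	struct drm_i915_reg_read reg = {
+ *		.offset = 0x2358,	RING_TIMESTAMP(RENDER_RING_BASE)
+ *	};
+ *
+ *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
+ *		printf("render timestamp: %llu\n",
+ *		       (unsigned long long)reg.val);
+ *
+ * Offsets outside the whitelist above are rejected with -EINVAL.
+ */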
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 627fe35781b4b92b1272a8bec3282dae2a3a8bc9..4f2831aa5fedd517a4f49189bc19b4803e69b71a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -109,6 +109,7 @@ struct intel_pch_pll {
#define WATCH_COHERENCY 0
#define WATCH_LISTS 0
+#define WATCH_GTT 0
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
@@ -195,9 +196,10 @@ struct drm_i915_error_state {
u32 cpu_ring_head[I915_NUM_RINGS];
u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
+ u32 err_int; /* gen7 */
u32 instpm[I915_NUM_RINGS];
u32 instps[I915_NUM_RINGS];
- u32 instdone1;
+ u32 extra_instdone[I915_NUM_INSTDONE_REG];
u32 seqno[I915_NUM_RINGS];
u64 bbaddr;
u32 fault_reg[I915_NUM_RINGS];
@@ -221,7 +223,7 @@ struct drm_i915_error_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 seqno;
+ u32 rseqno, wseqno;
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
@@ -239,7 +241,6 @@ struct drm_i915_error_state {
};
struct drm_i915_display_funcs {
- void (*dpms)(struct drm_crtc *crtc, int mode);
bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
@@ -248,7 +249,6 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
- void (*sanitize_pm)(struct drm_device *dev);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
int (*crtc_mode_set)(struct drm_crtc *crtc,
@@ -256,6 +256,8 @@ struct drm_i915_display_funcs {
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
+ void (*crtc_enable)(struct drm_crtc *crtc);
+ void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
@@ -279,6 +281,32 @@ struct drm_i915_gt_funcs {
void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
+#define DEV_INFO_FLAGS \
+ DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
+ DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
+ DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
+ DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
+ DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_llc)
+
struct intel_device_info {
u8 gen;
u8 is_mobile:1;
@@ -402,12 +430,6 @@ typedef struct drm_i915_private {
struct resource mch_res;
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
-
atomic_t irq_received;
/* protects the irq masks */
@@ -425,7 +447,6 @@ typedef struct drm_i915_private {
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
- unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int num_pipe;
int num_pch_pll;
@@ -434,8 +455,7 @@ typedef struct drm_i915_private {
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t last_instdone;
- uint32_t last_instdone1;
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
unsigned int stop_rings;
@@ -666,7 +686,13 @@ typedef struct drm_i915_private {
struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
- struct list_head gtt_list;
+ struct list_head bound_list;
+ /**
+ * List of objects which are not bound to the GTT (thus
+ * are idle and not used by the GPU) but still have
+ * (presumably uncached) pages still attached.
+ */
+ struct list_head unbound_list;
/** Usable portion of the GTT for GEM */
unsigned long gtt_start;
@@ -695,17 +721,6 @@ typedef struct drm_i915_private {
*/
struct list_head active_list;
- /**
- * List of objects which are not in the ringbuffer but which
- * still have a write_domain which needs to be flushed before
- * unbinding.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head flushing_list;
-
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
@@ -775,6 +790,12 @@ typedef struct drm_i915_private {
struct {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
} dri1;
/* Kernel Modesetting */
@@ -796,9 +817,6 @@ typedef struct drm_i915_private {
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
- struct work_struct idle_work;
- struct timer_list idle_timer;
- bool busy;
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
@@ -807,26 +825,41 @@ typedef struct drm_i915_private {
bool mchbar_need_disable;
- struct work_struct rps_work;
- spinlock_t rps_lock;
- u32 pm_iir;
-
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
-
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- int c_m;
- int r_t;
- u8 corr;
- spinlock_t *mchdev_lock;
+ /* gen6+ rps state */
+ struct {
+ struct work_struct work;
+ u32 pm_iir;
+		/* lock - irqsave spinlock that protects the work_struct and
+		 * pm_iir. */
+ spinlock_t lock;
+
+		/* The variables below and all the RPS hw state are protected
+		 * by dev->struct_mutex. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ } rps;
+
+ /* ilk-only ips/rps state. Everything in here is protected by the global
+ * mchdev_lock in intel_pm.c */
+ struct {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+ } ips;
enum no_fbc_reason no_fbc_reason;
@@ -861,30 +894,48 @@ enum hdmi_force_audio {
};
enum i915_cache_level {
- I915_CACHE_NONE,
+ I915_CACHE_NONE = 0,
I915_CACHE_LLC,
- I915_CACHE_LLC_MLC, /* gen6+ */
+ I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+struct drm_i915_gem_object_ops {
+ /* Interface between the GEM object and its backing storage.
+	 * get_pages() is called once prior to the use of the associated set
+	 * of pages before binding them into the GTT, and put_pages() is
+	 * called after we no longer need them. As we expect there to be an
+	 * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ int (*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *);
};
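+/*
+ * A sketch of a hypothetical backing-storage provider plugged in through
+ * this table (all names invented for illustration):
+ *
+ *	static int my_get_pages(struct drm_i915_gem_object *obj)
+ *	{
+ *		allocate an sg_table, populate and dma-map it,
+ *		then publish it through obj->pages
+ *		return 0;
+ *	}
+ *
+ *	static void my_put_pages(struct drm_i915_gem_object *obj)
+ *	{
+ *		write back dirty pages and release obj->pages;
+ *		only reached once pages_pin_count has dropped to zero
+ *	}
+ *
+ *	static const struct drm_i915_gem_object_ops my_ops = {
+ *		.get_pages = my_get_pages,
+ *		.put_pages = my_put_pages,
+ *	};
+ */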
struct drm_i915_gem_object {
struct drm_gem_object base;
+ const struct drm_i915_gem_object_ops *ops;
+
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
struct list_head gtt_list;
- /** This object's place on the active/flushing/inactive lists */
+ /** This object's place on the active/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
- /** This object's place on GPU write list */
- struct list_head gpu_write_list;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/**
- * This is set if the object is on the active or flushing lists
- * (has pending rendering), and is not set if it's on inactive (ready
- * to be unbound).
+ * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * the inactive (ready to be unbound) list.
*/
unsigned int active:1;
@@ -894,12 +945,6 @@ struct drm_i915_gem_object {
*/
unsigned int dirty:1;
- /**
- * This is set if the object has been written to since the last
- * GPU flush.
- */
- unsigned int pending_gpu_write:1;
-
/**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
@@ -961,17 +1006,12 @@ struct drm_i915_gem_object {
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
+ unsigned int has_dma_mapping:1;
- struct page **pages;
-
- /**
- * DMAR support
- */
- struct scatterlist *sg_list;
- int num_sg;
+ struct sg_table *pages;
+ int pages_pin_count;
/* prime dma-buf support */
- struct sg_table *sg_table;
void *dma_buf_vmapping;
int vmapping_count;
@@ -992,7 +1032,8 @@ struct drm_i915_gem_object {
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
+ uint32_t last_read_seqno;
+ uint32_t last_write_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
@@ -1135,6 +1176,10 @@ struct drm_i915_file_private {
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
+#define GT_FREQUENCY_MULTIPLIER 50
+
#include "i915_trace.h"
/**
@@ -1256,6 +1301,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1274,24 +1323,42 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
- uint32_t invalidate_domains,
- uint32_t flush_domains);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
- bool map_and_fenceable);
+ bool map_and_fenceable,
+ bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
- gfp_t gfpmask);
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+ struct scatterlist *sg = obj->pages->sgl;
+ while (n >= SG_MAX_SINGLE_ALLOC) {
+ sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
+ n -= SG_MAX_SINGLE_ALLOC - 1;
+ }
+ return sg_page(sg+n);
+}
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages == NULL);
+ obj->pages_pin_count++;
+}
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages_pin_count == 0);
+ obj->pages_pin_count--;
+}
+
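+/*
+ * A short sketch of the pinning discipline these helpers establish (the
+ * surrounding code is hypothetical): any path that dereferences obj->pages
+ * must pin them first, so the shrinker cannot invoke put_pages() underneath:
+ *
+ *	ret = i915_gem_object_get_pages(obj);	populate obj->pages
+ *	if (ret)
+ *		return ret;
+ *	i915_gem_object_pin_pages(obj);		block the shrinker
+ *	page = i915_gem_object_get_page(obj, 0);
+ *	... access the page ...
+ *	i915_gem_object_unpin_pages(obj);	pages reapable again
+ */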
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
@@ -1358,9 +1425,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct intel_ring_buffer *ring,
- struct drm_file *file,
- struct drm_i915_gem_request *request);
+int i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_request *request);
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1429,8 +1496,11 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, bool mappable);
-int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+ unsigned alignment,
+ unsigned cache_level,
+ bool mappable,
+ bool nonblock);
+int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
@@ -1519,6 +1589,7 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1529,6 +1600,8 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* overlay */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e2c93f7be8edea400941a30926acbe36fd50bcaa..e957f3740f682badb8e385bbc8608f3fe60ce7b5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -36,12 +36,12 @@
#include
#include
-static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
- bool map_and_fenceable);
+ bool map_and_fenceable,
+ bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -55,6 +55,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
+static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
@@ -140,7 +142,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return !obj->active;
+ return obj->gtt_space && !obj->active;
}
int
@@ -179,7 +181,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
if (obj->pin_count)
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
@@ -340,7 +342,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap_atomic(vaddr);
- return ret;
+ return ret ? -EFAULT : 0;
}
static void
@@ -391,7 +393,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap(page);
- return ret;
+	return ret ? -EFAULT : 0;
}
static int
@@ -400,7 +402,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
struct drm_i915_gem_pread *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
char __user *user_data;
ssize_t remain;
loff_t offset;
@@ -409,7 +410,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
int hit_slowpath = 0;
int prefaulted = 0;
int needs_clflush = 0;
- int release_page;
+ struct scatterlist *sg;
+ int i;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -423,16 +425,30 @@ i915_gem_shmem_pread(struct drm_device *dev,
* anyway again before the next pread happens. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush = 1;
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- if (ret)
- return ret;
+ if (obj->gtt_space) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ return ret;
+ }
}
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
offset = args->offset;
- while (remain > 0) {
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
+ if (i < offset >> PAGE_SHIFT)
+ continue;
+
+ if (remain <= 0)
+ break;
+
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
@@ -443,18 +459,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- if (obj->pages) {
- page = obj->pages[offset >> PAGE_SHIFT];
- release_page = 0;
- } else {
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
- release_page = 1;
- }
-
+ page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -465,7 +470,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
goto next_page;
hit_slowpath = 1;
- page_cache_get(page);
mutex_unlock(&dev->struct_mutex);
if (!prefaulted) {
@@ -483,16 +487,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
needs_clflush);
mutex_lock(&dev->struct_mutex);
- page_cache_release(page);
+
next_page:
mark_page_accessed(page);
- if (release_page)
- page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
+ if (ret)
goto out;
- }
remain -= page_length;
user_data += page_length;
@@ -500,6 +500,8 @@ next_page:
}
out:
+ i915_gem_object_unpin_pages(obj);
+
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
@@ -605,7 +607,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
char __user *user_data;
int page_offset, page_length, ret;
- ret = i915_gem_object_pin(obj, 0, true);
+ ret = i915_gem_object_pin(obj, 0, true, true);
if (ret)
goto out;
@@ -685,7 +687,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap_atomic(vaddr);
- return ret;
+ return ret ? -EFAULT : 0;
}
/* Only difference to the fast-path function is that this can handle bit17
@@ -719,7 +721,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
page_do_bit17_swizzling);
kunmap(page);
- return ret;
+ return ret ? -EFAULT : 0;
}
static int
@@ -728,7 +730,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
@@ -737,7 +738,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int hit_slowpath = 0;
int needs_clflush_after = 0;
int needs_clflush_before = 0;
- int release_page;
+ int i;
+ struct scatterlist *sg;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -751,9 +753,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* right away and we therefore have to clflush anyway. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush_after = 1;
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- return ret;
+ if (obj->gtt_space) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+ }
}
/* Same trick applies for invalidate partially written cachelines before
* writing. */
@@ -761,13 +765,25 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
&& obj->cache_level == I915_CACHE_NONE)
needs_clflush_before = 1;
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
offset = args->offset;
obj->dirty = 1;
- while (remain > 0) {
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
int partial_cacheline_write;
+ if (i < offset >> PAGE_SHIFT)
+ continue;
+
+ if (remain <= 0)
+ break;
+
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
@@ -786,18 +802,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
((shmem_page_offset | page_length)
& (boot_cpu_data.x86_clflush_size - 1));
- if (obj->pages) {
- page = obj->pages[offset >> PAGE_SHIFT];
- release_page = 0;
- } else {
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
- release_page = 1;
- }
-
+ page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -809,26 +814,20 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
goto next_page;
hit_slowpath = 1;
- page_cache_get(page);
mutex_unlock(&dev->struct_mutex);
-
ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
needs_clflush_after);
mutex_lock(&dev->struct_mutex);
- page_cache_release(page);
+
next_page:
set_page_dirty(page);
mark_page_accessed(page);
- if (release_page)
- page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
+ if (ret)
goto out;
- }
remain -= page_length;
user_data += page_length;
@@ -836,6 +835,8 @@ next_page:
}
out:
+ i915_gem_object_unpin_pages(obj);
+
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
@@ -919,10 +920,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (obj->gtt_space &&
- obj->cache_level == I915_CACHE_NONE &&
+ if (obj->cache_level == I915_CACHE_NONE &&
obj->tiling_mode == I915_TILING_NONE &&
- obj->map_and_fenceable &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
/* Note that the gtt paths might fail with non-page-backed user
@@ -930,7 +929,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* textures). Fallback to the shmem path in that case. */
}
- if (ret == -EFAULT)
+ if (ret == -EFAULT || ret == -ENOSPC)
ret = i915_gem_shmem_pwrite(dev, obj, args, file);
out:
@@ -940,6 +939,240 @@ unlock:
return ret;
}
+int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ bool interruptible)
+{
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ struct completion *x = &dev_priv->error_completion;
+ bool recovery_complete;
+ unsigned long flags;
+
+ /* Give the error handler a chance to run. */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ recovery_complete = x->done > 0;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+
+ /* Non-interruptible callers can't handle -EAGAIN, hence return
+ * -EIO unconditionally for these. */
+ if (!interruptible)
+ return -EIO;
+
+ /* Recovery complete, but still wedged means reset failure. */
+ if (recovery_complete)
+ return -EIO;
+
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+ ret = 0;
+ if (seqno == ring->outstanding_lazy_request)
+ ret = i915_add_request(ring, NULL, NULL);
+
+ return ret;
+}
+
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the sequence number to wait for
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Returns 0 if the seqno was found within the allotted time. Otherwise returns
+ * the errno, with the remaining time filled into the timeout argument.
+ */
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+ bool interruptible, struct timespec *timeout)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct timespec before, now, wait_time={1,0};
+ unsigned long timeout_jiffies;
+ long end;
+ bool wait_forever = true;
+ int ret;
+
+ if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ return 0;
+
+ trace_i915_gem_request_wait_begin(ring, seqno);
+
+ if (timeout != NULL) {
+ wait_time = *timeout;
+ wait_forever = false;
+ }
+
+ timeout_jiffies = timespec_to_jiffies(&wait_time);
+
+ if (WARN_ON(!ring->irq_get(ring)))
+ return -ENODEV;
+
+ /* Record current time in case interrupted by signal, or wedged */
+ getrawmonotonic(&before);
+
+#define EXIT_COND \
+ (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
+ atomic_read(&dev_priv->mm.wedged))
+ do {
+ if (interruptible)
+ end = wait_event_interruptible_timeout(ring->irq_queue,
+ EXIT_COND,
+ timeout_jiffies);
+ else
+ end = wait_event_timeout(ring->irq_queue, EXIT_COND,
+ timeout_jiffies);
+
+ ret = i915_gem_check_wedge(dev_priv, interruptible);
+ if (ret)
+ end = ret;
+ } while (end == 0 && wait_forever);
+
+ getrawmonotonic(&now);
+
+ ring->irq_put(ring);
+ trace_i915_gem_request_wait_end(ring, seqno);
+#undef EXIT_COND
+
+ if (timeout) {
+ struct timespec sleep_time = timespec_sub(now, before);
+ *timeout = timespec_sub(*timeout, sleep_time);
+ }
+
+ switch (end) {
+ case -EIO:
+ case -EAGAIN: /* Wedged */
+ case -ERESTARTSYS: /* Signal */
+ return (int)end;
+ case 0: /* Timeout */
+ if (timeout)
+ set_normalized_timespec(timeout, 0, 0);
+ return -ETIME;
+ default: /* Completed */
+ WARN_ON(end < 0); /* We're not aware of other errors */
+ return 0;
+ }
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool interruptible = dev_priv->mm.interruptible;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(seqno == 0);
+
+ ret = i915_gem_check_wedge(dev_priv, interruptible);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ return __wait_seqno(ring, seqno, interruptible, NULL);
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct intel_ring_buffer *ring = obj->ring;
+ u32 seqno;
+ int ret;
+
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests_ring(ring);
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
+ */
+ if (obj->last_write_seqno &&
+ i915_seqno_passed(seqno, obj->last_write_seqno)) {
+ obj->last_write_seqno = 0;
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ return 0;
+}
+
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = obj->ring;
+ u32 seqno;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!dev_priv->mm.interruptible);
+
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_gem_check_wedge(dev_priv, true);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ mutex_unlock(&dev->struct_mutex);
+ ret = __wait_seqno(ring, seqno, true, NULL);
+ mutex_lock(&dev->struct_mutex);
+
+ i915_gem_retire_requests_ring(ring);
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
+ */
+ if (obj->last_write_seqno &&
+ i915_seqno_passed(seqno, obj->last_write_seqno)) {
+ obj->last_write_seqno = 0;
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ return ret;
+}
+
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
@@ -977,6 +1210,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
+ /* Try to flush the object off the GPU without holding the lock.
+ * We will repeat the flush holding the lock in the normal manner
+ * to catch cases where we are gazumped.
+ */
+ ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+ if (ret)
+ goto unref;
+
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
@@ -990,6 +1231,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
+unref:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
@@ -1109,7 +1351,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
if (!obj->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0, true);
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
if (ret)
goto unlock;
@@ -1270,8 +1512,44 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
-int
-i915_gem_mmap_gtt(struct drm_file *file,
+static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int ret;
+
+ if (obj->base.map_list.map)
+ return 0;
+
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret != -ENOSPC)
+ return ret;
+
+ /* Badly fragmented mmap space? The only way we can recover
+ * space is by destroying unwanted objects. We can't randomly release
+ * mmap_offsets as userspace expects them to be persistent for the
+ * lifetime of the objects. The closest we can get is to release the
+ * offsets on purgeable objects by truncating the object and marking it
+ * purged, which prevents userspace from ever using that object again.
+ */
+ i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret != -ENOSPC)
+ return ret;
+
+ i915_gem_shrink_all(dev_priv);
+ return drm_gem_create_mmap_offset(&obj->base);
+}
+
+static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ if (!obj->base.map_list.map)
+ return;
+
+ drm_gem_free_mmap_offset(&obj->base);
+}
+
+int
+i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
uint32_t handle,
uint64_t *offset)
@@ -1301,11 +1579,9 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto out;
}
- if (!obj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(&obj->base);
- if (ret)
- goto out;
- }
+ ret = i915_gem_object_create_mmap_offset(obj);
+ if (ret)
+ goto out;
*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
@@ -1340,83 +1616,245 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-int
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
- gfp_t gfpmask)
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- int page_count, i;
- struct address_space *mapping;
struct inode *inode;
- struct page *page;
- if (obj->pages || obj->sg_table)
- return 0;
+ i915_gem_object_free_mmap_offset(obj);
- /* Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- */
- page_count = obj->base.size / PAGE_SIZE;
- BUG_ON(obj->pages != NULL);
- obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
- if (obj->pages == NULL)
- return -ENOMEM;
+ if (obj->base.filp == NULL)
+ return;
+ /* Our goal here is to return as much of the memory as
+ * is possible back to the system as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
inode = obj->base.filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
- gfpmask |= mapping_gfp_mask(mapping);
-
- for (i = 0; i < page_count; i++) {
- page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
- if (IS_ERR(page))
- goto err_pages;
-
- obj->pages[i] = page;
- }
-
- if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_do_bit_17_swizzle(obj);
-
- return 0;
+ shmem_truncate_range(inode, 0, (loff_t)-1);
-err_pages:
- while (i--)
- page_cache_release(obj->pages[i]);
+ obj->madv = __I915_MADV_PURGED;
+}
- drm_free_large(obj->pages);
- obj->pages = NULL;
- return PTR_ERR(page);
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+ return obj->madv == I915_MADV_DONTNEED;
}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
int page_count = obj->base.size / PAGE_SIZE;
- int i;
-
- if (!obj->pages)
- return;
+ struct scatterlist *sg;
+ int ret, i;
BUG_ON(obj->madv == __I915_MADV_PURGED);
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
+ WARN_ON(ret != -EIO);
+ i915_gem_clflush_object(obj);
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
- for (i = 0; i < page_count; i++) {
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+
if (obj->dirty)
- set_page_dirty(obj->pages[i]);
+ set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
- mark_page_accessed(obj->pages[i]);
+ mark_page_accessed(page);
- page_cache_release(obj->pages[i]);
+ page_cache_release(page);
}
obj->dirty = 0;
- drm_free_large(obj->pages);
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+ if (obj->pages == NULL)
+ return 0;
+
+ BUG_ON(obj->gtt_space);
+
+ if (obj->pages_pin_count)
+ return -EBUSY;
+
+ ops->put_pages(obj);
obj->pages = NULL;
+
+ list_del(&obj->gtt_list);
+ if (i915_gem_object_is_purgeable(obj))
+ i915_gem_object_truncate(obj);
+
+ return 0;
+}
+
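+/* Scan the unbound and inactive lists, dropping the backing pages of
+ * purgeable objects until at least @target pages have been reclaimed;
+ * returns the number of pages actually freed.
+ */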
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+ struct drm_i915_gem_object *obj, *next;
+ long count = 0;
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.unbound_list,
+ gtt_list) {
+ if (i915_gem_object_is_purgeable(obj) &&
+ i915_gem_object_put_pages(obj) == 0) {
+ count += obj->base.size >> PAGE_SHIFT;
+ if (count >= target)
+ return count;
+ }
+ }
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (i915_gem_object_is_purgeable(obj) &&
+ i915_gem_object_unbind(obj) == 0 &&
+ i915_gem_object_put_pages(obj) == 0) {
+ count += obj->base.size >> PAGE_SHIFT;
+ if (count >= target)
+ return count;
+ }
+ }
+
+ return count;
+}
+
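+/* Last-resort reclaim: evict everything from the GTT, then release the
+ * backing pages of all objects left on the unbound list.
+ */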
+static void
+i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj, *next;
+
+ i915_gem_evict_everything(dev_priv->dev);
+
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+ i915_gem_object_put_pages(obj);
+}
+
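+/* Allocate the shmem backing pages for the object and chain them into a
+ * scatterlist. Allocation is first attempted with lightweight gfp flags,
+ * then retried after purging and finally shrinking our own caches.
+ */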
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int page_count, i;
+ struct address_space *mapping;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ struct page *page;
+ gfp_t gfp;
+
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+
+ page_count = obj->base.size / PAGE_SIZE;
+ if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+ sg_free_table(st);
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ *
+ * Fail silently without starting the shrinker
+ */
+ mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ gfp = mapping_gfp_mask(mapping);
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+ for_each_sg(st->sgl, sg, page_count, i) {
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page)) {
+ i915_gem_purge(dev_priv, page_count);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ }
+ if (IS_ERR(page)) {
+ /* We've tried hard to allocate the memory by reaping
+ * our own buffer, now let the real VM do its job and
+ * go down in flames if truly OOM.
+ */
+ gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
+ gfp |= __GFP_IO | __GFP_WAIT;
+
+ i915_gem_shrink_all(dev_priv);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page))
+ goto err_pages;
+
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+ }
+
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ }
+
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_do_bit_17_swizzle(obj);
+
+ obj->pages = st;
+ return 0;
+
+err_pages:
+ for_each_sg(st->sgl, sg, i, page_count)
+ page_cache_release(sg_page(sg));
+ sg_free_table(st);
+ kfree(st);
+ return PTR_ERR(page);
+}
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+ int ret;
+
+ if (obj->pages)
+ return 0;
+
+ BUG_ON(obj->pages_pin_count);
+
+ ret = ops->get_pages(obj);
+ if (ret)
+ return ret;
+
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+ return 0;
}
void
@@ -1440,7 +1878,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
- obj->last_rendering_seqno = seqno;
+ obj->last_read_seqno = seqno;
if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
@@ -1457,97 +1895,35 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
static void
-i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
-{
- list_del_init(&obj->ring_list);
- obj->last_rendering_seqno = 0;
- obj->last_fenced_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
- i915_gem_object_move_off_active(obj);
-}
-
-static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ if (obj->pin_count) /* are we a framebuffer? */
+ intel_mark_fb_idle(obj);
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- BUG_ON(!list_empty(&obj->gpu_write_list));
- BUG_ON(!obj->active);
+ list_del_init(&obj->ring_list);
obj->ring = NULL;
- i915_gem_object_move_off_active(obj);
+ obj->last_read_seqno = 0;
+ obj->last_write_seqno = 0;
+ obj->base.write_domain = 0;
+
+ obj->last_fenced_seqno = 0;
obj->fenced_gpu_access = false;
obj->active = 0;
- obj->pending_gpu_write = false;
drm_gem_object_unreference(&obj->base);
WARN_ON(i915_verify_lists(dev));
}
-/* Immediately discard the backing storage */
-static void
-i915_gem_object_truncate(struct drm_i915_gem_object *obj)
-{
- struct inode *inode;
-
- /* Our goal here is to return as much of the memory as
- * is possible back to the system as we are called from OOM.
- * To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*.
- */
- inode = obj->base.filp->f_path.dentry->d_inode;
- shmem_truncate_range(inode, 0, (loff_t)-1);
-
- if (obj->base.map_list.map)
- drm_gem_free_mmap_offset(&obj->base);
-
- obj->madv = __I915_MADV_PURGED;
-}
-
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
- return obj->madv == I915_MADV_DONTNEED;
-}
-
-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
- uint32_t flush_domains)
-{
- struct drm_i915_gem_object *obj, *next;
-
- list_for_each_entry_safe(obj, next,
- &ring->gpu_write_list,
- gpu_write_list) {
- if (obj->base.write_domain & flush_domains) {
- uint32_t old_write_domain = obj->base.write_domain;
-
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
- i915_gem_object_move_to_active(obj, ring,
- i915_gem_next_request_seqno(ring));
-
- trace_i915_gem_object_change_domain(obj,
- obj->base.read_domains,
- old_write_domain);
- }
- }
-}
-
static u32
i915_gem_get_seqno(struct drm_device *dev)
{
@@ -1588,15 +1964,16 @@ i915_add_request(struct intel_ring_buffer *ring,
* is that the flush _must_ happen before the next request, no matter
* what.
*/
- if (ring->gpu_caches_dirty) {
- ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
- ring->gpu_caches_dirty = false;
+ if (request == NULL) {
+ request = kmalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
}
- BUG_ON(request == NULL);
seqno = i915_gem_next_request_seqno(ring);
/* Record the position of the start of the request so that
@@ -1607,8 +1984,10 @@ i915_add_request(struct intel_ring_buffer *ring,
request_ring_position = intel_ring_get_tail(ring);
ret = ring->add_request(ring, &seqno);
- if (ret)
- return ret;
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
trace_i915_gem_request_add(ring, seqno);
@@ -1618,6 +1997,7 @@ i915_add_request(struct intel_ring_buffer *ring,
request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
+ request->file_priv = NULL;
if (file) {
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1637,13 +2017,13 @@ i915_add_request(struct intel_ring_buffer *ring,
jiffies +
msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
- if (was_empty)
+ if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
+ intel_mark_busy(dev_priv->dev);
+ }
}
- WARN_ON(!list_empty(&ring->gpu_write_list));
-
return 0;
}
@@ -1685,8 +2065,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object,
ring_list);
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_inactive(obj);
}
}
@@ -1722,20 +2100,6 @@ void i915_gem_reset(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
- /* Remove anything from the flushing lists. The GPU cache is likely
- * to be lost on reset along with the data, so simply move the
- * lost bo to the inactive list.
- */
- while (!list_empty(&dev_priv->mm.flushing_list)) {
- obj = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- mm_list);
-
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
- i915_gem_object_move_to_inactive(obj);
- }
-
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
@@ -1764,7 +2128,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
WARN_ON(i915_verify_lists(ring->dev));
- seqno = ring->get_seqno(ring);
+ seqno = ring->get_seqno(ring, true);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
if (seqno >= ring->sync_seqno[i])
@@ -1803,13 +2167,10 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
struct drm_i915_gem_object,
ring_list);
- if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
- if (obj->base.write_domain != 0)
- i915_gem_object_move_to_flushing(obj);
- else
- i915_gem_object_move_to_inactive(obj);
+ i915_gem_object_move_to_inactive(obj);
}
if (unlikely(ring->trace_irq_seqno &&
@@ -1846,226 +2207,30 @@ i915_gem_retire_work_handler(struct work_struct *work)
dev = dev_priv->dev;
/* Come back later if the device is busy... */
- if (!mutex_trylock(&dev->struct_mutex)) {
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
- return;
- }
-
- i915_gem_retire_requests(dev);
-
- /* Send a periodic flush down the ring so we don't hold onto GEM
- * objects indefinitely.
- */
- idle = true;
- for_each_ring(ring, dev_priv, i) {
- if (ring->gpu_caches_dirty) {
- struct drm_i915_gem_request *request;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL ||
- i915_add_request(ring, NULL, request))
- kfree(request);
- }
-
- idle &= list_empty(&ring->request_list);
- }
-
- if (!dev_priv->mm.suspended && !idle)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
-
- mutex_unlock(&dev->struct_mutex);
-}
-
-int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
- bool interruptible)
-{
- if (atomic_read(&dev_priv->mm.wedged)) {
- struct completion *x = &dev_priv->error_completion;
- bool recovery_complete;
- unsigned long flags;
-
- /* Give the error handler a chance to run. */
- spin_lock_irqsave(&x->wait.lock, flags);
- recovery_complete = x->done > 0;
- spin_unlock_irqrestore(&x->wait.lock, flags);
-
- /* Non-interruptible callers can't handle -EAGAIN, hence return
- * -EIO unconditionally for these. */
- if (!interruptible)
- return -EIO;
-
- /* Recovery complete, but still wedged means reset failure. */
- if (recovery_complete)
- return -EIO;
-
- return -EAGAIN;
- }
-
- return 0;
-}
-
-/*
- * Compare seqno against outstanding lazy request. Emit a request if they are
- * equal.
- */
-static int
-i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
-{
- int ret = 0;
-
- BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-
- if (seqno == ring->outstanding_lazy_request) {
- struct drm_i915_gem_request *request;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
- ret = i915_add_request(ring, NULL, request);
- if (ret) {
- kfree(request);
- return ret;
- }
-
- BUG_ON(seqno != request->seqno);
- }
-
- return ret;
-}
-
-/**
- * __wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @interruptible: do an interruptible wait (normally yes)
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- *
- * Returns 0 if the seqno was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
- */
-static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
- bool interruptible, struct timespec *timeout)
-{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- struct timespec before, now, wait_time={1,0};
- unsigned long timeout_jiffies;
- long end;
- bool wait_forever = true;
- int ret;
-
- if (i915_seqno_passed(ring->get_seqno(ring), seqno))
- return 0;
-
- trace_i915_gem_request_wait_begin(ring, seqno);
-
- if (timeout != NULL) {
- wait_time = *timeout;
- wait_forever = false;
- }
-
- timeout_jiffies = timespec_to_jiffies(&wait_time);
-
- if (WARN_ON(!ring->irq_get(ring)))
- return -ENODEV;
-
- /* Record current time in case interrupted by signal, or wedged * */
- getrawmonotonic(&before);
-
-#define EXIT_COND \
- (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
- atomic_read(&dev_priv->mm.wedged))
- do {
- if (interruptible)
- end = wait_event_interruptible_timeout(ring->irq_queue,
- EXIT_COND,
- timeout_jiffies);
- else
- end = wait_event_timeout(ring->irq_queue, EXIT_COND,
- timeout_jiffies);
-
- ret = i915_gem_check_wedge(dev_priv, interruptible);
- if (ret)
- end = ret;
- } while (end == 0 && wait_forever);
-
- getrawmonotonic(&now);
-
- ring->irq_put(ring);
- trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
-
- if (timeout) {
- struct timespec sleep_time = timespec_sub(now, before);
- *timeout = timespec_sub(*timeout, sleep_time);
- }
-
- switch (end) {
- case -EIO:
- case -EAGAIN: /* Wedged */
- case -ERESTARTSYS: /* Signal */
- return (int)end;
- case 0: /* Timeout */
- if (timeout)
- set_normalized_timespec(timeout, 0, 0);
- return -ETIME;
- default: /* Completed */
- WARN_ON(end < 0); /* We're not aware of other errors */
- return 0;
- }
-}
-
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
-{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- int ret = 0;
-
- BUG_ON(seqno == 0);
-
- ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
- if (ret)
- return ret;
-
- ret = i915_gem_check_olr(ring, seqno);
- if (ret)
- return ret;
-
- ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
-
- return ret;
-}
-
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
-{
- int ret;
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ return;
+ }
- /* This function only exists to support waiting for existing rendering,
- * not for emitting required flushes.
- */
- BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
+ i915_gem_retire_requests(dev);
- /* If there is rendering queued on the buffer being evicted, wait for
- * it.
+ /* Send a periodic flush down the ring so we don't hold onto GEM
+ * objects indefinitely.
*/
- if (obj->active) {
- ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
- if (ret)
- return ret;
- i915_gem_retire_requests_ring(obj->ring);
+ idle = true;
+ for_each_ring(ring, dev_priv, i) {
+ if (ring->gpu_caches_dirty)
+ i915_add_request(ring, NULL, NULL);
+
+ idle &= list_empty(&ring->request_list);
}
- return 0;
+ if (!dev_priv->mm.suspended && !idle)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ if (idle)
+ intel_mark_idle(dev);
+
+ mutex_unlock(&dev->struct_mutex);
}
/**
@@ -2079,14 +2244,10 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
int ret;
if (obj->active) {
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
if (ret)
return ret;
- ret = i915_gem_check_olr(obj->ring,
- obj->last_rendering_seqno);
- if (ret)
- return ret;
i915_gem_retire_requests_ring(obj->ring);
}
@@ -2146,7 +2307,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto out;
if (obj->active) {
- seqno = obj->last_rendering_seqno;
+ seqno = obj->last_read_seqno;
ring = obj->ring;
}
@@ -2201,11 +2362,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
return 0;
if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
- return i915_gem_object_wait_rendering(obj);
+ return i915_gem_object_wait_rendering(obj, false);
idx = intel_ring_sync_index(from, to);
- seqno = obj->last_rendering_seqno;
+ seqno = obj->last_read_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;
@@ -2259,6 +2420,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (obj->pin_count)
return -EBUSY;
+ BUG_ON(obj->pages == NULL);
+
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
@@ -2269,22 +2432,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
i915_gem_object_finish_gtt(obj);
- /* Move the object to the CPU domain to ensure that
- * any possible CPU writes while it's not in the GTT
- * are flushed when we go to remap it.
- */
- if (ret == 0)
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret == -ERESTARTSYS)
- return ret;
- if (ret) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- i915_gem_clflush_object(obj);
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
-
/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
if (ret)
@@ -2300,10 +2447,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
}
i915_gem_gtt_finish_object(obj);
- i915_gem_object_put_pages_gtt(obj);
-
- list_del_init(&obj->gtt_list);
- list_del_init(&obj->mm_list);
+ list_del(&obj->mm_list);
+ list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
@@ -2311,48 +2456,14 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
obj->gtt_space = NULL;
obj->gtt_offset = 0;
- if (i915_gem_object_is_purgeable(obj))
- i915_gem_object_truncate(obj);
-
- return ret;
-}
-
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- int ret;
-
- if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
-
- trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
-
- ret = ring->flush(ring, invalidate_domains, flush_domains);
- if (ret)
- return ret;
-
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- i915_gem_process_flushing_list(ring, flush_domains);
-
return 0;
}
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
- int ret;
-
- if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+ if (list_empty(&ring->active_list))
return 0;
- if (!list_empty(&ring->gpu_write_list)) {
- ret = i915_gem_flush_ring(ring,
- I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
- }
-
return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
@@ -2371,10 +2482,6 @@ int i915_gpu_idle(struct drm_device *dev)
ret = i915_ring_idle(ring);
if (ret)
return ret;
-
- /* Is the device fubar? */
- if (WARN_ON(!list_empty(&ring->gpu_write_list)))
- return -EBUSY;
}
return 0;
@@ -2547,21 +2654,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
- int ret;
-
- if (obj->fenced_gpu_access) {
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring,
- 0, obj->base.write_domain);
- if (ret)
- return ret;
- }
-
- obj->fenced_gpu_access = false;
- }
-
if (obj->last_fenced_seqno) {
- ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+ int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
if (ret)
return ret;
@@ -2574,6 +2668,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
mb();
+ obj->fenced_gpu_access = false;
return 0;
}
@@ -2693,18 +2788,88 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;
}
+static bool i915_gem_valid_gtt_space(struct drm_device *dev,
+ struct drm_mm_node *gtt_space,
+ unsigned long cache_level)
+{
+ struct drm_mm_node *other;
+
+ /* On non-LLC machines we have to be careful when putting differing
+ * types of snoopable memory together to avoid the prefetcher
+ * crossing memory domains and dying.
+ */
+ if (HAS_LLC(dev))
+ return true;
+
+ if (gtt_space == NULL)
+ return true;
+
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
+
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
+}
+
+static void i915_gem_verify_gtt(struct drm_device *dev)
+{
+#if WATCH_GTT
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (obj->gtt_space == NULL) {
+ printk(KERN_ERR "object found on GTT list with no space reserved\n");
+ err++;
+ continue;
+ }
+
+ if (obj->cache_level != obj->gtt_space->color) {
+ printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level,
+ obj->gtt_space->color);
+ err++;
+ continue;
+ }
+
+ if (!i915_gem_valid_gtt_space(dev,
+ obj->gtt_space,
+ obj->cache_level)) {
+ printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level);
+ err++;
+ continue;
+ }
+ }
+
+ WARN_ON(err);
+#endif
+}
+
/**
* Finds free space in the GTT aperture and binds the object there.
*/
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
- bool map_and_fenceable)
+ bool map_and_fenceable,
+ bool nonblocking)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_mm_node *free_space;
- gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
int ret;
@@ -2744,89 +2909,67 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return -E2BIG;
}
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
search_free:
if (map_and_fenceable)
free_space =
- drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
- size, alignment,
- 0, dev_priv->mm.gtt_mappable_end,
- 0);
+ drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+ size, alignment, obj->cache_level,
+ 0, dev_priv->mm.gtt_mappable_end,
+ false);
else
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- size, alignment, 0);
+ free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
+ size, alignment, obj->cache_level,
+ false);
if (free_space != NULL) {
if (map_and_fenceable)
obj->gtt_space =
drm_mm_get_block_range_generic(free_space,
- size, alignment, 0,
+ size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
- 0);
+ false);
else
obj->gtt_space =
- drm_mm_get_block(free_space, size, alignment);
+ drm_mm_get_block_generic(free_space,
+ size, alignment, obj->cache_level,
+ false);
}
if (obj->gtt_space == NULL) {
- /* If the gtt is empty and we're still having trouble
- * fitting our object in, we're out of memory.
- */
ret = i915_gem_evict_something(dev, size, alignment,
- map_and_fenceable);
+ obj->cache_level,
+ map_and_fenceable,
+ nonblocking);
if (ret)
return ret;
goto search_free;
}
-
- ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
- if (ret) {
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev,
+ obj->gtt_space,
+ obj->cache_level))) {
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
-
- if (ret == -ENOMEM) {
- /* first try to reclaim some memory by clearing the GTT */
- ret = i915_gem_evict_everything(dev, false);
- if (ret) {
- /* now try to shrink everyone else */
- if (gfpmask) {
- gfpmask = 0;
- goto search_free;
- }
-
- return -ENOMEM;
- }
-
- goto search_free;
- }
-
- return ret;
+ return -EINVAL;
}
+
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
- i915_gem_object_put_pages_gtt(obj);
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
-
- if (i915_gem_evict_everything(dev, false))
- return ret;
-
- goto search_free;
+ return ret;
}
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
- list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+ list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- /* Assert that the object is not currently in any GPU domain. As it
- * wasn't in the GTT, there shouldn't be any way it could have been in
- * a GPU cache
- */
- BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
-
obj->gtt_offset = obj->gtt_space->start;
fenceable =
@@ -2839,6 +2982,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->map_and_fenceable = mappable && fenceable;
trace_i915_gem_object_bind(obj, map_and_fenceable);
+ i915_gem_verify_gtt(dev);
return 0;
}
@@ -2865,18 +3009,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
trace_i915_gem_object_clflush(obj);
- drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
-}
-
-/** Flushes any GPU write domain for the object if it's dirty. */
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
-{
- if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
-
- /* Queue the GPU write cache flushing we need. */
- return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
+ drm_clflush_sg(obj->pages);
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2945,16 +3078,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
return 0;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
- if (obj->pending_gpu_write || write) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
-
i915_gem_object_flush_cpu_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -2997,6 +3124,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
+ if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
+
if (obj->gtt_space) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
@@ -3008,7 +3141,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* registers with snooped memory, so relinquish any fences
* currently pointing to our region in the aperture.
*/
- if (INTEL_INFO(obj->base.dev)->gen < 6) {
+ if (INTEL_INFO(dev)->gen < 6) {
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
@@ -3019,6 +3152,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
+
+ obj->gtt_space->color = cache_level;
}
if (cache_level == I915_CACHE_NONE) {
@@ -3045,9 +3180,72 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
obj->cache_level = cache_level;
+ i915_gem_verify_gtt(dev);
return 0;
}
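+/* Report to userspace whether the object is currently cached (snooped),
+ * i.e. whether its cache level is anything other than I915_CACHE_NONE.
+ */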
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ args->caching = obj->cache_level != I915_CACHE_NONE;
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
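+/* Allow userspace to select the cache level (uncached or LLC) used for
+ * the object's GTT mapping.
+ */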
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ enum i915_cache_level level;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ switch (args->caching) {
+ case I915_CACHING_NONE:
+ level = I915_CACHE_NONE;
+ break;
+ case I915_CACHING_CACHED:
+ level = I915_CACHE_LLC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, level);
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
@@ -3061,10 +3259,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 old_read_domains, old_write_domain;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
if (pipelined != obj->ring) {
ret = i915_gem_object_sync(obj, pipelined);
if (ret)
@@ -3088,7 +3282,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_object_pin(obj, alignment, true);
+ ret = i915_gem_object_pin(obj, alignment, true, false);
if (ret)
return ret;
@@ -3100,7 +3294,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->base.write_domain = 0;
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
@@ -3118,13 +3312,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
return 0;
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
- if (ret)
- return ret;
- }
-
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
@@ -3148,16 +3336,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return 0;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
- if (write || obj->pending_gpu_write) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
-
i915_gem_object_flush_gtt_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -3237,7 +3419,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
- bool map_and_fenceable)
+ bool map_and_fenceable,
+ bool nonblocking)
{
int ret;
@@ -3262,7 +3445,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (obj->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment,
- map_and_fenceable);
+ map_and_fenceable,
+ nonblocking);
if (ret)
return ret;
}
@@ -3320,7 +3504,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
obj->user_pin_count++;
obj->pin_filp = file;
if (obj->user_pin_count == 1) {
- ret = i915_gem_object_pin(obj, args->alignment, true);
+ ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
@@ -3400,6 +3584,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_flush_active(obj);
args->busy = obj->active;
+ if (obj->ring) {
+ BUILD_BUG_ON(I915_NUM_RINGS > 16);
+ args->busy |= intel_ring_flag(obj->ring) << 16;
+ }
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3448,9 +3636,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (obj->madv != __I915_MADV_PURGED)
obj->madv = args->madv;
- /* if the object is no longer bound, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj) &&
- obj->gtt_space == NULL)
+ /* if the object is no longer attached, discard its backing storage */
+ if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
i915_gem_object_truncate(obj);
args->retained = obj->madv != __I915_MADV_PURGED;
@@ -3462,10 +3649,32 @@ unlock:
return ret;
}
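+/* Common initialisation for all GEM object types; wiring up @ops here is
+ * what lets shmem-backed and dma-buf-backed objects share one code path.
+ */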
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops)
+{
+ INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->gtt_list);
+ INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->exec_list);
+
+ obj->ops = ops;
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ obj->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj->map_and_fenceable = true;
+
+ i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+ .get_pages = i915_gem_object_get_pages_gtt,
+ .put_pages = i915_gem_object_put_pages_gtt,
+};
+
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
u32 mask;
@@ -3489,7 +3698,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
mapping_set_gfp_mask(mapping, mask);
- i915_gem_info_add_obj(dev_priv, size);
+ i915_gem_object_init(obj, &i915_gem_object_ops);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3511,17 +3720,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
} else
obj->cache_level = I915_CACHE_NONE;
- obj->base.driver_private = NULL;
- obj->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj->mm_list);
- INIT_LIST_HEAD(&obj->gtt_list);
- INIT_LIST_HEAD(&obj->ring_list);
- INIT_LIST_HEAD(&obj->exec_list);
- INIT_LIST_HEAD(&obj->gpu_write_list);
- obj->madv = I915_MADV_WILLNEED;
- /* Avoid an unnecessary call to unbind on the first bind. */
- obj->map_and_fenceable = true;
-
return obj;
}
@@ -3540,9 +3738,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
trace_i915_gem_object_destroy(obj);
- if (gem_obj->import_attach)
- drm_prime_gem_destroy(gem_obj, obj->sg_table);
-
if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);
@@ -3558,8 +3753,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
dev_priv->mm.interruptible = was_interruptible;
}
- if (obj->base.map_list.map)
- drm_gem_free_mmap_offset(&obj->base);
+ obj->pages_pin_count = 0;
+ i915_gem_object_put_pages(obj);
+ i915_gem_object_free_mmap_offset(obj);
+
+ BUG_ON(obj->pages);
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -3590,7 +3791,7 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_gem_evict_everything(dev, false);
+ i915_gem_evict_everything(dev);
i915_gem_reset_fences(dev);
@@ -3891,7 +4092,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
mutex_unlock(&dev->struct_mutex);
@@ -3939,7 +4139,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
}
void
@@ -3949,10 +4148,10 @@ i915_gem_load(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+ INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
@@ -4196,18 +4395,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
-static int
-i915_gpu_is_active(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int lists_empty;
-
- lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list);
-
- return !lists_empty;
-}
-
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -4216,60 +4403,27 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj, *next;
+ struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
int cnt;
if (!mutex_trylock(&dev->struct_mutex))
return 0;
- /* "fast-path" to count number of available objects */
- if (nr_to_scan == 0) {
- cnt = 0;
- list_for_each_entry(obj,
- &dev_priv->mm.inactive_list,
- mm_list)
- cnt++;
- mutex_unlock(&dev->struct_mutex);
- return cnt / 100 * sysctl_vfs_cache_pressure;
- }
-
-rescan:
- /* first scan for clean buffers */
- i915_gem_retire_requests(dev);
-
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (i915_gem_object_is_purgeable(obj)) {
- if (i915_gem_object_unbind(obj) == 0 &&
- --nr_to_scan == 0)
- break;
- }
+ if (nr_to_scan) {
+ nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+ if (nr_to_scan > 0)
+ i915_gem_shrink_all(dev_priv);
}
- /* second pass, evict/count anything still on the inactive list */
cnt = 0;
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (nr_to_scan &&
- i915_gem_object_unbind(obj) == 0)
- nr_to_scan--;
- else
- cnt++;
- }
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+ if (obj->pages_pin_count == 0)
+ cnt += obj->base.size >> PAGE_SHIFT;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+ if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+ cnt += obj->base.size >> PAGE_SHIFT;
- if (nr_to_scan && i915_gpu_is_active(dev)) {
- /*
- * We are desperate for pages, so as a last resort, wait
- * for the GPU to finish and discard whatever we can.
- * This has a dramatic impact to reduce the number of
- * OOM-killer events whilst running the GPU aggressively.
- */
- if (i915_gpu_idle(dev) == 0)
- goto rescan;
- }
mutex_unlock(&dev->struct_mutex);
- return cnt / 100 * sysctl_vfs_cache_pressure;
+ return cnt;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a21c3dccf4361c39f35b66c1536f230c4ef7ae92..1eb48faf741b6f9b4332dd93f71c152ef12ed686 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -97,8 +97,7 @@
static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct drm_i915_gem_object *from_obj,
- struct i915_hw_context *to, u32 seqno);
+static int do_switch(struct i915_hw_context *to);
static int get_context_size(struct drm_device *dev)
{
@@ -113,7 +112,10 @@ static int get_context_size(struct drm_device *dev)
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
- ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+ if (IS_HASWELL(dev))
+ ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+ else
+ ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
default:
BUG();
@@ -219,20 +221,21 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
- ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
- if (ret) {
- do_destroy(ctx);
- return ret;
- }
+ ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+ if (ret)
+ goto err_destroy;
- ret = do_switch(NULL, ctx, 0);
- if (ret) {
- i915_gem_object_unpin(ctx->obj);
- do_destroy(ctx);
- } else {
- DRM_DEBUG_DRIVER("Default HW context loaded\n");
- }
+ ret = do_switch(ctx);
+ if (ret)
+ goto err_unpin;
+ DRM_DEBUG_DRIVER("Default HW context loaded\n");
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(ctx->obj);
+err_destroy:
+ do_destroy(ctx);
return ret;
}
@@ -359,18 +362,19 @@ mi_set_context(struct intel_ring_buffer *ring,
return ret;
}
-static int do_switch(struct drm_i915_gem_object *from_obj,
- struct i915_hw_context *to,
- u32 seqno)
+static int do_switch(struct i915_hw_context *to)
{
- struct intel_ring_buffer *ring = NULL;
+ struct intel_ring_buffer *ring = to->ring;
+ struct drm_i915_gem_object *from_obj = ring->last_context_obj;
u32 hw_flags = 0;
int ret;
- BUG_ON(to == NULL);
BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
- ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
+ if (from_obj == to->obj)
+ return 0;
+
+ ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
@@ -393,7 +397,6 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
- ring = to->ring;
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
i915_gem_object_unpin(to->obj);
@@ -407,6 +410,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
+ u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring, seqno);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
@@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
* swapped, but there is no way to do that yet.
*/
from_obj->dirty = 1;
- BUG_ON(from_obj->ring != to->ring);
+ BUG_ON(from_obj->ring != ring);
i915_gem_object_unpin(from_obj);
drm_gem_object_unreference(&from_obj->base);
@@ -448,9 +452,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
int to_id)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct drm_i915_file_private *file_priv = NULL;
struct i915_hw_context *to;
- struct drm_i915_gem_object *from_obj = ring->last_context_obj;
if (dev_priv->hw_contexts_disabled)
return 0;
@@ -458,21 +460,18 @@ int i915_switch_context(struct intel_ring_buffer *ring,
if (ring != &dev_priv->ring[RCS])
return 0;
- if (file)
- file_priv = file->driver_priv;
-
if (to_id == DEFAULT_CONTEXT_ID) {
to = ring->default_context;
} else {
- to = i915_gem_context_get(file_priv, to_id);
+ if (file == NULL)
+ return -EINVAL;
+
+ to = i915_gem_context_get(file->driver_priv, to_id);
if (to == NULL)
return -ENOENT;
}
- if (from_obj == to->obj)
- return 0;
-
- return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
+ return do_switch(to);
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index af199596e79206948517dfcf0b8ef3d7ad6fbadc..773ef77b6c22cc047af54ba70dd491039d7473c0 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,35 +28,62 @@
#include <linux/dma-buf.h>
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
- struct drm_device *dev = obj->base.dev;
- int npages = obj->base.size / PAGE_SIZE;
- struct sg_table *sg = NULL;
- int ret;
- int nents;
+ struct sg_table *st;
+ struct scatterlist *src, *dst;
+ int ret, i;
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
if (ret)
return ERR_PTR(ret);
- if (!obj->pages) {
- ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
- if (ret)
- goto out;
+ ret = i915_gem_object_get_pages(obj);
+ if (ret) {
+ st = ERR_PTR(ret);
+ goto out;
+ }
+
+ /* Copy sg so that we make an independent mapping */
+ st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (st == NULL) {
+ st = ERR_PTR(-ENOMEM);
+ goto out;
}
- /* link the pages into an SG then map the sg */
- sg = drm_prime_pages_to_sg(obj->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ if (ret) {
+ kfree(st);
+ st = ERR_PTR(ret);
+ goto out;
+ }
+
+ src = obj->pages->sgl;
+ dst = st->sgl;
+ for (i = 0; i < obj->pages->nents; i++) {
+ sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+ dst = sg_next(dst);
+ src = sg_next(src);
+ }
+
+ if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ sg_free_table(st);
+ kfree(st);
+ st = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ i915_gem_object_pin_pages(obj);
+
out:
- mutex_unlock(&dev->struct_mutex);
- return sg;
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return st;
}
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
+ struct sg_table *sg,
+ enum dma_data_direction dir)
{
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
@@ -78,7 +105,9 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->base.dev;
- int ret;
+ struct scatterlist *sg;
+ struct page **pages;
+ int ret, i;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -89,24 +118,34 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
goto out_unlock;
}
- if (!obj->pages) {
- ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ERR_PTR(ret);
- }
- }
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ goto error;
- obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
- if (!obj->dma_buf_vmapping) {
- DRM_ERROR("failed to vmap object\n");
- goto out_unlock;
- }
+ ret = -ENOMEM;
+
+ pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+ if (pages == NULL)
+ goto error;
+
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
+ pages[i] = sg_page(sg);
+
+ obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+ drm_free_large(pages);
+
+ if (!obj->dma_buf_vmapping)
+ goto error;
obj->vmapping_count = 1;
+ i915_gem_object_pin_pages(obj);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;
+
+error:
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
}
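/*
 * vmap() requires a struct page array, so the scatterlist is flattened
 * into a temporary drm_malloc_ab() allocation that is freed right after
 * the mapping is built; only the vmapping and the page pin survive,
 * refcounted via obj->vmapping_count and undone in the vunmap hook.
 */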
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -119,10 +158,11 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
if (ret)
return;
- --obj->vmapping_count;
- if (obj->vmapping_count == 0) {
+ if (--obj->vmapping_count == 0) {
vunmap(obj->dma_buf_vmapping);
obj->dma_buf_vmapping = NULL;
+
+ i915_gem_object_unpin_pages(obj);
}
mutex_unlock(&dev->struct_mutex);
}
@@ -151,6 +191,22 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
return -EINVAL;
}
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+ bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, write);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
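/*
 * begin_cpu_access lets a dma-buf importer make the object coherent
 * before CPU access; start/length are accepted but unused here. A
 * hedged importer-side sketch, using the dma-buf API of this era:
 *
 *	if (!dma_buf_begin_cpu_access(buf, 0, buf->size, DMA_FROM_DEVICE))
 *		memcpy(out, vaddr, len);	// CPU domain now coherent
 */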
+
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -162,25 +218,47 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
+ .begin_cpu_access = i915_gem_begin_cpu_access,
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags)
+ struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- return dma_buf_export(obj, &i915_dmabuf_ops,
- obj->base.size, 0600);
+ return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+}
+
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *sg;
+
+ sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+
+ obj->pages = sg;
+ obj->has_dma_mapping = true;
+ return 0;
}
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ dma_buf_unmap_attachment(obj->base.import_attach,
+ obj->pages, DMA_BIDIRECTIONAL);
+ obj->has_dma_mapping = false;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+ .get_pages = i915_gem_object_get_pages_dmabuf,
+ .put_pages = i915_gem_object_put_pages_dmabuf,
+};
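/*
 * These ops plug imported dma-bufs into the generic get_pages/put_pages
 * machinery: get_pages maps the attachment and publishes the exporter's
 * sg_table as obj->pages, while has_dma_mapping tells the GTT code to
 * skip its own dma_map_sg().
 */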
+
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+ struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
- struct sg_table *sg;
struct drm_i915_gem_object *obj;
- int npages;
- int size;
int ret;
/* is this one of our own objects? */
@@ -198,34 +276,24 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- size = dma_buf->size;
- npages = size / PAGE_SIZE;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL) {
ret = -ENOMEM;
- goto fail_unmap;
+ goto fail_detach;
}
- ret = drm_gem_private_object_init(dev, &obj->base, size);
+ ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
if (ret) {
kfree(obj);
- goto fail_unmap;
+ goto fail_detach;
}
- obj->sg_table = sg;
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
return &obj->base;
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
return ERR_PTR(ret);
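/*
 * Import no longer maps the attachment eagerly; the sg_table is first
 * fetched on use via i915_gem_object_dmabuf_ops.get_pages, which is why
 * the old fail_unmap error path could be dropped.
 */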
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index fd408995a78355364200ae822d289cb2379a4f39..776a3225184ce48db7b00f89b8522b956bfd3bef 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -43,7 +43,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, bool mappable)
+ unsigned alignment, unsigned cache_level,
+ bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
@@ -78,11 +79,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
- min_size, alignment, 0,
+ min_size, alignment, cache_level,
0, dev_priv->mm.gtt_mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space,
- min_size, alignment, 0);
+ min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -90,29 +91,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto found;
}
- /* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- /* Does the object require an outstanding flush? */
- if (obj->base.write_domain)
- continue;
-
- if (mark_free(obj, &unwind_list))
- goto found;
- }
+ if (nonblocking)
+ goto none;
- /* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
- if (mark_free(obj, &unwind_list))
- goto found;
- }
+ /* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (!obj->base.write_domain)
- continue;
-
if (mark_free(obj, &unwind_list))
goto found;
}
+none:
/* Nothing found, clean up and bail out! */
while (!list_empty(&unwind_list)) {
obj = list_first_entry(&unwind_list,
@@ -163,7 +151,7 @@ found:
}
int
-i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
+i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
@@ -171,12 +159,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
int ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return -ENOSPC;
- trace_i915_gem_evict_everything(dev, purgeable_only);
+ trace_i915_gem_evict_everything(dev);
/* The gpu_idle will flush everything in the write domain to the
* active list. Then we must move everything off the active list
@@ -188,16 +175,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
i915_gem_retire_requests(dev);
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list, mm_list) {
- if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
- if (obj->pin_count == 0)
- WARN_ON(i915_gem_object_unbind(obj));
- }
- }
+ &dev_priv->mm.inactive_list, mm_list)
+ if (obj->pin_count == 0)
+ WARN_ON(i915_gem_object_unbind(obj));
return 0;
}
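/*
 * With purgeable_only gone, evict_everything simply idles the GPU,
 * retires requests and unbinds every unpinned inactive object;
 * purgeable-only reclaim is presumably left to the shrinker paths.
 */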
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8dd9a6f47db864e7824b7f43db8d69e23d1da53f..3eea143749f6ccb07751843a01e910657877e57f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,180 +33,6 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>
-struct change_domains {
- uint32_t invalidate_domains;
- uint32_t flush_domains;
- uint32_t flush_rings;
- uint32_t flips;
-};
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- * 3. Mapped by GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * wait_rendering (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- * 6. Read written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- struct change_domains *cd)
-{
- uint32_t invalidate_domains = 0, flush_domains = 0;
-
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (obj->base.pending_write_domain == 0)
- obj->base.pending_read_domains |= obj->base.read_domains;
-
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->base.write_domain &&
- (((obj->base.write_domain != obj->base.pending_read_domains ||
- obj->ring != ring)) ||
- (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
- flush_domains |= obj->base.write_domain;
- invalidate_domains |=
- obj->base.pending_read_domains & ~obj->base.write_domain;
- }
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
- i915_gem_clflush_object(obj);
-
- if (obj->base.pending_write_domain)
- cd->flips |= atomic_read(&obj->pending_flip);
-
- /* The actual obj->write_domain will be updated with
- * pending_write_domain after we emit the accumulated flush for all
- * of our domain changes in execbuffers (which clears objects'
- * write_domains). So if we have a current write domain that we
- * aren't changing, set pending_write_domain to that.
- */
- if (flush_domains == 0 && obj->base.pending_write_domain == 0)
- obj->base.pending_write_domain = obj->base.write_domain;
-
- cd->invalidate_domains |= invalidate_domains;
- cd->flush_domains |= flush_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= intel_ring_flag(obj->ring);
- if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= intel_ring_flag(ring);
-}
-
struct eb_objects {
int and;
struct hlist_head buckets[0];
@@ -217,6 +43,7 @@ eb_create(int size)
{
struct eb_objects *eb;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
while (count > size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
@@ -268,6 +95,7 @@ eb_destroy(struct eb_objects *eb)
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ !obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
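/*
 * Falling back to CPU relocations whenever the object is not mappable
 * avoids rebinding it into the mappable aperture just to patch a
 * single dword through an ioremapped GTT window.
 */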
@@ -382,7 +210,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
} else {
@@ -503,7 +332,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
return ret;
}
-#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
@@ -513,9 +343,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
}
static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
@@ -527,15 +358,17 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+ ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
if (ret)
return ret;
+ entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
ret = i915_gem_object_get_fence(obj);
if (ret)
- goto err_unpin;
+ return ret;
if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -544,12 +377,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
}
}
+ /* Ensure ppgtt mapping exists if needed */
+ if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, obj->cache_level);
+
+ obj->has_aliasing_ppgtt_mapping = 1;
+ }
+
entry->offset = obj->gtt_offset;
return 0;
+}
-err_unpin:
- i915_gem_object_unpin(obj);
- return ret;
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_gem_exec_object2 *entry;
+
+ if (!obj->gtt_space)
+ return;
+
+ entry = obj->exec_entry;
+
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+ i915_gem_object_unpin_fence(obj);
+
+ if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+ i915_gem_object_unpin(obj);
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
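/*
 * Reserve and unreserve are now symmetric: __EXEC_OBJECT_HAS_PIN and
 * __EXEC_OBJECT_HAS_FENCE record exactly what reserve_object acquired,
 * so a single error path can walk the full list and release only what
 * was actually taken.
 */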
static int
@@ -557,11 +413,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
- int ret, retry;
- bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
struct list_head ordered_objects;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ int retry;
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
@@ -586,6 +441,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
+ obj->pending_fenced_gpu_access = false;
}
list_splice(&ordered_objects, objects);
@@ -598,12 +454,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
* 2. Bind new objects.
* 3. Decrement pin count.
*
- * This avoid unnecessary unbinding of later objects in order to makr
+ * This avoids unnecessary unbinding of later objects in order to make
* room for the earlier objects *unless* we need to defragment.
*/
retry = 0;
do {
- ret = 0;
+ int ret = 0;
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(obj, objects, exec_list) {
@@ -623,7 +479,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
- ret = pin_and_fence_object(obj, ring);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring);
if (ret)
goto err;
}
@@ -633,77 +489,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (obj->gtt_space)
continue;
- ret = pin_and_fence_object(obj, ring);
- if (ret) {
- int ret_ignore;
-
- /* This can potentially raise a harmless
- * -EINVAL if we failed to bind in the above
- * call. It cannot raise -EINTR since we know
- * that the bo is freshly bound and so will
- * not need to be flushed or waited upon.
- */
- ret_ignore = i915_gem_object_unbind(obj);
- (void)ret_ignore;
- WARN_ON(obj->gtt_space);
- break;
- }
+ ret = i915_gem_execbuffer_reserve_object(obj, ring);
+ if (ret)
+ goto err;
}
- /* Decrement pin count for bound objects */
- list_for_each_entry(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry;
-
- if (!obj->gtt_space)
- continue;
-
- entry = obj->exec_entry;
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- i915_gem_object_unpin_fence(obj);
- entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
- }
-
- i915_gem_object_unpin(obj);
-
- /* ... and ensure ppgtt mapping exist if needed. */
- if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, obj->cache_level);
+err: /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list)
+ i915_gem_execbuffer_unreserve_object(obj);
- obj->has_aliasing_ppgtt_mapping = 1;
- }
- }
-
- if (ret != -ENOSPC || retry > 1)
+ if (ret != -ENOSPC || retry++)
return ret;
- /* First attempt, just clear anything that is purgeable.
- * Second attempt, clear the entire GTT.
- */
- ret = i915_gem_evict_everything(ring->dev, retry == 0);
+ ret = i915_gem_evict_everything(ring->dev);
if (ret)
return ret;
-
- retry++;
} while (1);
-
-err:
- list_for_each_entry_continue_reverse(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry;
-
- if (!obj->gtt_space)
- continue;
-
- entry = obj->exec_entry;
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- i915_gem_object_unpin_fence(obj);
- entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
- }
-
- i915_gem_object_unpin(obj);
- }
-
- return ret;
}
static int
@@ -809,18 +610,6 @@ err:
return ret;
}
-static void
-i915_gem_execbuffer_flush(struct drm_device *dev,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- intel_gtt_chipset_flush();
-
- if (flush_domains & I915_GEM_DOMAIN_GTT)
- wmb();
-}
-
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
@@ -853,48 +642,45 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
return 0;
}
-
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- struct change_domains cd;
+ uint32_t flush_domains = 0;
+ uint32_t flips = 0;
int ret;
- memset(&cd, 0, sizeof(cd));
- list_for_each_entry(obj, objects, exec_list)
- i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
-
- if (cd.invalidate_domains | cd.flush_domains) {
- i915_gem_execbuffer_flush(ring->dev,
- cd.invalidate_domains,
- cd.flush_domains);
- }
-
- if (cd.flips) {
- ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
+ list_for_each_entry(obj, objects, exec_list) {
+ ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
+
+ if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+
+ if (obj->base.pending_write_domain)
+ flips |= atomic_read(&obj->pending_flip);
+
+ flush_domains |= obj->base.write_domain;
}
- list_for_each_entry(obj, objects, exec_list) {
- ret = i915_gem_object_sync(obj, ring);
+ if (flips) {
+ ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
if (ret)
return ret;
}
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ intel_gtt_chipset_flush();
+
+ if (flush_domains & I915_GEM_DOMAIN_GTT)
+ wmb();
+
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- ret = i915_gem_flush_ring(ring,
- I915_GEM_GPU_DOMAINS,
- ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
- if (ret)
- return ret;
-
- ring->gpu_caches_dirty = false;
- return 0;
+ return intel_ring_invalidate_all_caches(ring);
}
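/*
 * Per-object domain handling collapses to three steps: sync with any
 * other ring, clflush when the CPU write domain is dirty, and collect
 * pending flips plus write domains for one chipset flush/wmb() at the
 * end, followed by an unconditional ring cache invalidate.
 */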
static bool
@@ -942,9 +728,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
- u32 old_read = obj->base.read_domains;
- u32 old_write = obj->base.write_domain;
-
+ u32 old_read = obj->base.read_domains;
+ u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
@@ -953,17 +738,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->pending_gpu_write = true;
- list_move_tail(&obj->gpu_write_list,
- &ring->gpu_write_list);
+ obj->last_write_seqno = seqno;
if (obj->pin_count) /* check for potential scanout */
- intel_mark_busy(ring->dev, obj);
+ intel_mark_fb_busy(obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
-
- intel_mark_busy(ring->dev, NULL);
}
static void
@@ -971,16 +752,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
{
- struct drm_i915_gem_request *request;
-
/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL || i915_add_request(ring, file, request)) {
- kfree(request);
- }
+ (void)i915_add_request(ring, file, NULL);
}
static int
@@ -1326,8 +1102,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ (void __user *)(uintptr_t)args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1366,8 +1141,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
@@ -1421,8 +1195,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 69261acb94b3f137d56d9b2b022a12b766127289..df470b5e8d36a0dac1daeaa71576cbfa066a2958 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -166,8 +166,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
}
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
- struct scatterlist *sg_list,
- unsigned sg_len,
+ const struct sg_table *pages,
unsigned first_entry,
uint32_t pte_flags)
{
@@ -179,12 +178,12 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
struct scatterlist *sg;
/* init sg walking */
- sg = sg_list;
+ sg = pages->sgl;
i = 0;
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
- while (i < sg_len) {
+ while (i < pages->nents) {
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
@@ -193,13 +192,11 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
pt_vaddr[j] = pte | pte_flags;
/* grab the next page */
- m++;
- if (m == segment_len) {
- sg = sg_next(sg);
- i++;
- if (i == sg_len)
+ if (++m == segment_len) {
+ if (++i == pages->nents)
break;
+ sg = sg_next(sg);
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
}
@@ -212,44 +209,10 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
}
}
-static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
- unsigned first_entry, unsigned num_entries,
- struct page **pages, uint32_t pte_flags)
-{
- uint32_t *pt_vaddr, pte;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned last_pte, i;
- dma_addr_t page_addr;
-
- while (num_entries) {
- last_pte = first_pte + num_entries;
- last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
-
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (i = first_pte; i < last_pte; i++) {
- page_addr = page_to_phys(*pages);
- pte = GEN6_PTE_ADDR_ENCODE(page_addr);
- pt_vaddr[i] = pte | pte_flags;
-
- pages++;
- }
-
- kunmap_atomic(pt_vaddr);
-
- num_entries -= last_pte - first_pte;
- first_pte = 0;
- act_pd++;
- }
-}
-
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t pte_flags = GEN6_PTE_VALID;
switch (cache_level) {
@@ -260,7 +223,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(obj->base.dev))
pte_flags |= HSW_PTE_UNCACHED;
else
pte_flags |= GEN6_PTE_UNCACHED;
@@ -269,26 +232,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
BUG();
}
- if (obj->sg_table) {
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->sg_table->sgl,
- obj->sg_table->nents,
- obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
- } else if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
- } else
- i915_ppgtt_insert_pages(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- pte_flags);
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -350,7 +297,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
@@ -360,44 +307,26 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* don't map imported dma buf objects */
- if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
- return intel_gtt_map_memory(obj->pages,
- obj->base.size >> PAGE_SHIFT,
- &obj->sg_list,
- &obj->num_sg);
- else
+ if (obj->has_dma_mapping)
return 0;
+
+ if (!dma_map_sg(&obj->base.dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return -ENOSPC;
+
+ return 0;
}
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
- if (obj->sg_table) {
- intel_gtt_insert_sg_entries(obj->sg_table->sgl,
- obj->sg_table->nents,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
-
+ intel_gtt_insert_sg_entries(obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
obj->has_global_gtt_mapping = 1;
}
@@ -417,14 +346,31 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
- if (obj->sg_list) {
- intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
- obj->sg_list = NULL;
- }
+ if (!obj->has_dma_mapping)
+ dma_unmap_sg(&dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
undo_idling(dev_priv, interruptible);
}
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+ unsigned long color,
+ unsigned long *start,
+ unsigned long *end)
+{
+ if (node->color != color)
+ *start += 4096;
+
+ if (!list_empty(&node->node_list)) {
+ node = list_entry(node->node_list.next,
+ struct drm_mm_node,
+ node_list);
+ if (node->allocated && node->color != color)
+ *end -= 4096;
+ }
+}
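/*
 * cache_level doubles as the drm_mm node "color" on non-LLC parts:
 * this callback shrinks the usable range so that one 4096-byte guard
 * page always separates neighbouring nodes of different colors, e.g.
 *
 *	| snooped A | guard page | uncached B |
 *
 * keeping snooped and unsnooped objects off the same GTT page.
 */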
+
void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
@@ -434,6 +380,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
/* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+ if (!HAS_LLC(dev))
+ dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c2b7b67e410d26eec2a6754efd4102df0b652501..3208650a235c2a426f4d05a6e23ab4df81f961d7 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -469,18 +469,20 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL)
return;
- for (i = 0; i < page_count; i++) {
- char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+ char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
- i915_gem_swizzle_page(obj->pages[i]);
- set_page_dirty(obj->pages[i]);
+ i915_gem_swizzle_page(page);
+ set_page_dirty(page);
}
}
}
@@ -488,6 +490,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
@@ -501,8 +504,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
}
}
- for (i = 0; i < page_count; i++) {
- if (page_to_phys(obj->pages[i]) & (1 << 17))
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+ if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 505357886bbb1fe871c22bb530d373fb413e6306..4e9888388c0c5064ef2c9bcc2832fca1d6d23424 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -295,11 +295,21 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}
-static void i915_handle_rps_change(struct drm_device *dev)
+/* defined in intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+static void ironlake_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
- u8 new_delay = dev_priv->cur_delay;
+ u8 new_delay;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mchdev_lock, flags);
+
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+ new_delay = dev_priv->ips.cur_delay;
I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -309,19 +319,21 @@ static void i915_handle_rps_change(struct drm_device *dev)
/* Handle RCS change request from hw */
if (busy_up > max_avg) {
- if (dev_priv->cur_delay != dev_priv->max_delay)
- new_delay = dev_priv->cur_delay - 1;
- if (new_delay < dev_priv->max_delay)
- new_delay = dev_priv->max_delay;
+ if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.cur_delay - 1;
+ if (new_delay < dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.max_delay;
} else if (busy_down < min_avg) {
- if (dev_priv->cur_delay != dev_priv->min_delay)
- new_delay = dev_priv->cur_delay + 1;
- if (new_delay > dev_priv->min_delay)
- new_delay = dev_priv->min_delay;
+ if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.cur_delay + 1;
+ if (new_delay > dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.min_delay;
}
if (ironlake_set_drps(dev, new_delay))
- dev_priv->cur_delay = new_delay;
+ dev_priv->ips.cur_delay = new_delay;
+
+ spin_unlock_irqrestore(&mchdev_lock, flags);
return;
}
@@ -334,7 +346,7 @@ static void notify_ring(struct drm_device *dev,
if (ring->obj == NULL)
return;
- trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
+ trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
@@ -348,16 +360,16 @@ static void notify_ring(struct drm_device *dev,
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- rps_work);
+ rps.work);
u32 pm_iir, pm_imr;
u8 new_delay;
- spin_lock_irq(&dev_priv->rps_lock);
- pm_iir = dev_priv->pm_iir;
- dev_priv->pm_iir = 0;
+ spin_lock_irq(&dev_priv->rps.lock);
+ pm_iir = dev_priv->rps.pm_iir;
+ dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_unlock_irq(&dev_priv->rps.lock);
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
@@ -365,11 +377,17 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->dev->struct_mutex);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
- new_delay = dev_priv->cur_delay + 1;
+ new_delay = dev_priv->rps.cur_delay + 1;
else
- new_delay = dev_priv->cur_delay - 1;
+ new_delay = dev_priv->rps.cur_delay - 1;
- gen6_set_rps(dev_priv->dev, new_delay);
+ /* sysfs frequency interfaces may have snuck in while servicing the
+ * interrupt
+ */
+ if (!(new_delay > dev_priv->rps.max_delay ||
+ new_delay < dev_priv->rps.min_delay)) {
+ gen6_set_rps(dev_priv->dev, new_delay);
+ }
mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -443,7 +461,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long flags;
- if (!IS_IVYBRIDGE(dev))
+ if (!HAS_L3_GPU_CACHE(dev))
return;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -487,19 +505,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
- * dev_priv->pm_iir. Although missing an interrupt of the same
+ * dev_priv->rps.pm_iir. Although missing an interrupt of the same
* type is not a problem, it displays a problem in the logic.
*
- * The mask bit in IMR is cleared by rps_work.
+ * The mask bit in IMR is cleared by dev_priv->rps.work.
*/
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ spin_lock_irqsave(&dev_priv->rps.lock, flags);
+ dev_priv->rps.pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -792,10 +810,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
ibx_irq_handler(dev, pch_iir);
}
- if (de_iir & DE_PCU_EVENT) {
- I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
- i915_handle_rps_change(dev);
- }
+ if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+ ironlake_handle_rps_change(dev);
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
@@ -842,26 +858,55 @@ static void i915_error_work_func(struct work_struct *work)
}
}
+/* NB: please notice the memset */
+static void i915_get_extra_instdone(struct drm_device *dev,
+ uint32_t *instdone)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+ switch(INTEL_INFO(dev)->gen) {
+ case 2:
+ case 3:
+ instdone[0] = I915_READ(INSTDONE);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ instdone[0] = I915_READ(INSTDONE_I965);
+ instdone[1] = I915_READ(INSTDONE1);
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported platform\n");
+ case 7:
+ instdone[0] = I915_READ(GEN7_INSTDONE_1);
+ instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+ instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+ break;
+ }
+}
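/*
 * Gen2/3 have a single INSTDONE register, gen4-6 add INSTDONE1, and
 * gen7 splits the state across four registers. The fallthrough from
 * the WARN_ONCE default into case 7 is deliberate, so unknown future
 * gens still dump the gen7 set; unused slots stay zero thanks to the
 * memset above.
 */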
+
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src)
{
struct drm_i915_error_object *dst;
- int page, page_count;
+ int i, count;
u32 reloc_offset;
if (src == NULL || src->pages == NULL)
return NULL;
- page_count = src->base.size / PAGE_SIZE;
+ count = src->base.size / PAGE_SIZE;
- dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
+ dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
reloc_offset = src->gtt_offset;
- for (page = 0; page < page_count; page++) {
+ for (i = 0; i < count; i++) {
unsigned long flags;
void *d;
@@ -884,30 +929,33 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
} else {
+ struct page *page;
void *s;
- drm_clflush_pages(&src->pages[page], 1);
+ page = i915_gem_object_get_page(src, i);
+
+ drm_clflush_pages(&page, 1);
- s = kmap_atomic(src->pages[page]);
+ s = kmap_atomic(page);
memcpy(d, s, PAGE_SIZE);
kunmap_atomic(s);
- drm_clflush_pages(&src->pages[page], 1);
+ drm_clflush_pages(&page, 1);
}
local_irq_restore(flags);
- dst->pages[page] = d;
+ dst->pages[i] = d;
reloc_offset += PAGE_SIZE;
}
- dst->page_count = page_count;
+ dst->page_count = count;
dst->gtt_offset = src->gtt_offset;
return dst;
unwind:
- while (page--)
- kfree(dst->pages[page]);
+ while (i--)
+ kfree(dst->pages[i]);
kfree(dst);
return NULL;
}
@@ -948,7 +996,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
{
err->size = obj->base.size;
err->name = obj->base.name;
- err->seqno = obj->last_rendering_seqno;
+ err->rseqno = obj->last_read_seqno;
+ err->wseqno = obj->last_write_seqno;
err->gtt_offset = obj->gtt_offset;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
@@ -1038,12 +1087,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (!ring->get_seqno)
return NULL;
- seqno = ring->get_seqno(ring);
+ seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
continue;
- if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -1079,10 +1128,8 @@ static void i915_record_ring_state(struct drm_device *dev,
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- if (ring->id == RCS) {
- error->instdone1 = I915_READ(INSTDONE1);
+ if (ring->id == RCS)
error->bbaddr = I915_READ64(BB_ADDR);
- }
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -1092,7 +1139,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
- error->seqno[ring->id] = ring->get_seqno(ring);
+ error->seqno[ring->id] = ring->get_seqno(ring, false);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
@@ -1198,6 +1245,11 @@ static void i915_capture_error_state(struct drm_device *dev)
error->done_reg = I915_READ(DONE_REG);
}
+ if (INTEL_INFO(dev)->gen == 7)
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
@@ -1209,7 +1261,7 @@ static void i915_capture_error_state(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
i++;
error->active_bo_count = i;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
if (obj->pin_count)
i++;
error->pinned_bo_count = i - error->active_bo_count;
@@ -1234,7 +1286,7 @@ static void i915_capture_error_state(struct drm_device *dev)
error->pinned_bo_count =
capture_pinned_bo(error->pinned_bo,
error->pinned_bo_count,
- &dev_priv->mm.gtt_list);
+ &dev_priv->mm.bound_list);
do_gettimeofday(&error->time);
@@ -1273,24 +1325,26 @@ void i915_destroy_error_state(struct drm_device *dev)
static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t instdone[I915_NUM_INSTDONE_REG];
u32 eir = I915_READ(EIR);
- int pipe;
+ int pipe, i;
if (!eir)
return;
pr_err("render error detected, EIR: 0x%08x\n", eir);
+ i915_get_extra_instdone(dev, instdone);
+
if (IS_G4X(dev)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
@@ -1324,12 +1378,13 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (eir & I915_ERROR_INSTRUCTION) {
pr_err("instruction error\n");
pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
- pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
POSTING_READ(IPEIR);
@@ -1338,10 +1393,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
@@ -1589,7 +1641,8 @@ ring_last_seqno(struct intel_ring_buffer *ring)
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
if (list_empty(&ring->request_list) ||
- i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+ i915_seqno_passed(ring->get_seqno(ring, false),
+ ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */
if (waitqueue_active(&ring->irq_queue)) {
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
@@ -1655,7 +1708,7 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+ uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
struct intel_ring_buffer *ring;
bool err = false, idle;
int i;
@@ -1683,25 +1736,16 @@ void i915_hangcheck_elapsed(unsigned long data)
return;
}
- if (INTEL_INFO(dev)->gen < 4) {
- instdone = I915_READ(INSTDONE);
- instdone1 = 0;
- } else {
- instdone = I915_READ(INSTDONE_I965);
- instdone1 = I915_READ(INSTDONE1);
- }
-
+ i915_get_extra_instdone(dev, instdone);
if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
- dev_priv->last_instdone == instdone &&
- dev_priv->last_instdone1 == instdone1) {
+ memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
if (i915_hangcheck_hung(dev))
return;
} else {
dev_priv->hangcheck_count = 0;
memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
- dev_priv->last_instdone = instdone;
- dev_priv->last_instdone1 = instdone1;
+ memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
}
repeat:
@@ -2646,7 +2690,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+ INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 28725ce5b82cc052a4469828166a7f9c8d987236..7637824c6a7d5f8975ebeac1ccfaff4ed24c383e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -450,6 +450,7 @@
#define RING_ACTHD(base) ((base)+0x74)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
+#define RING_TIMESTAMP(base) ((base)+0x358)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -478,6 +479,11 @@
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
+#define GEN7_INSTDONE_1 0x0206c
+#define GEN7_SC_INSTDONE 0x07100
+#define GEN7_SAMPLER_INSTDONE 0x0e160
+#define GEN7_ROW_INSTDONE 0x0e164
+#define I915_NUM_INSTDONE_REG 4
#define RING_IPEIR(base) ((base)+0x64)
#define RING_IPEHR(base) ((base)+0x68)
#define RING_INSTDONE(base) ((base)+0x6c)
@@ -500,6 +506,8 @@
#define DMA_FADD_I8XX 0x020d0
#define ERROR_GEN6 0x040a0
+#define GEN7_ERR_INT 0x44040
+#define ERR_INT_MMIO_UNCLAIMED (1<<13)
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
@@ -529,6 +537,8 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
+#define VLV_DISPLAY_BASE 0x180000
+
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
@@ -1496,6 +1506,14 @@
GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_GT1_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
+#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
+#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
+#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
+ HSW_CXT_RING_SIZE(ctx_reg) + \
+ HSW_CXT_RENDER_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
/*
* Overlay regs
@@ -1549,12 +1567,35 @@
/* VGA port control */
#define ADPA 0x61100
+#define PCH_ADPA 0xe1100
+#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
+
#define ADPA_DAC_ENABLE (1<<31)
#define ADPA_DAC_DISABLE 0
#define ADPA_PIPE_SELECT_MASK (1<<30)
#define ADPA_PIPE_A_SELECT 0
#define ADPA_PIPE_B_SELECT (1<<30)
#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 29:30 for pch transcoder select */
+#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1753,6 +1794,10 @@
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
@@ -3889,31 +3934,6 @@
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
-/* CRT */
-#define PCH_ADPA 0xe1100
-#define ADPA_TRANS_SELECT_MASK (1<<30)
-#define ADPA_TRANS_A_SELECT 0
-#define ADPA_TRANS_B_SELECT (1<<30)
-#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
@@ -4021,6 +4041,8 @@
#define PORT_TRANS_C_SEL_CPT (2<<29)
#define PORT_TRANS_SEL_MASK (3<<29)
#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
+#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
+#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
@@ -4239,7 +4261,15 @@
#define G4X_HDMIW_HDMIEDID 0x6210C
#define IBX_HDMIW_HDMIEDID_A 0xE2050
+#define IBX_HDMIW_HDMIEDID_B 0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ IBX_HDMIW_HDMIEDID_A, \
+ IBX_HDMIW_HDMIEDID_B)
#define IBX_AUD_CNTL_ST_A 0xE20B4
+#define IBX_AUD_CNTL_ST_B 0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ IBX_AUD_CNTL_ST_A, \
+ IBX_AUD_CNTL_ST_B)
#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
#define IBX_ELD_ADDRESS (0x1f << 5)
#define IBX_ELD_ACK (1 << 4)
@@ -4248,7 +4278,15 @@
#define IBX_CP_READYB (1 << 1)
#define CPT_HDMIW_HDMIEDID_A 0xE5050
+#define CPT_HDMIW_HDMIEDID_B 0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ CPT_HDMIW_HDMIEDID_A, \
+ CPT_HDMIW_HDMIEDID_B)
#define CPT_AUD_CNTL_ST_A 0xE50B4
+#define CPT_AUD_CNTL_ST_B 0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ CPT_AUD_CNTL_ST_A, \
+ CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
/* These are the 4 32-bit write offset registers for each stream
@@ -4258,7 +4296,15 @@
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
#define IBX_AUD_CONFIG_A 0xe2000
+#define IBX_AUD_CONFIG_B 0xe2100
+#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
+ IBX_AUD_CONFIG_A, \
+ IBX_AUD_CONFIG_B)
#define CPT_AUD_CONFIG_A 0xe5000
+#define CPT_AUD_CONFIG_B 0xe5100
+#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
+ CPT_AUD_CONFIG_A, \
+ CPT_AUD_CONFIG_B)
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
@@ -4269,195 +4315,233 @@
#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+/* HSW Audio */
+#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
+#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
+#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
+ HSW_AUD_CONFIG_A, \
+ HSW_AUD_CONFIG_B)
+
+#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
+#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
+#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_MISC_CTRL_A, \
+ HSW_AUD_MISC_CTRL_B)
+
+#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
+#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
+#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_DIP_ELD_CTRL_ST_A, \
+ HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
+#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 2 */
+#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+ HSW_AUD_DIG_CNVT_1, \
+ HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define HSW_AUD_EDID_DATA_A 0x65050
+#define HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+ HSW_AUD_EDID_DATA_A, \
+ HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
+#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
+#define AUDIO_INACTIVE_C (1<<11)
+#define AUDIO_INACTIVE_B (1<<7)
+#define AUDIO_INACTIVE_A (1<<3)
+#define AUDIO_OUTPUT_ENABLE_A (1<<2)
+#define AUDIO_OUTPUT_ENABLE_B (1<<6)
+#define AUDIO_OUTPUT_ENABLE_C (1<<10)
+#define AUDIO_ELD_VALID_A (1<<0)
+#define AUDIO_ELD_VALID_B (1<<4)
+#define AUDIO_ELD_VALID_C (1<<8)
+#define AUDIO_CP_READY_A (1<<1)
+#define AUDIO_CP_READY_B (1<<5)
+#define AUDIO_CP_READY_C (1<<9)
+
/* HSW Power Wells */
-#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
-#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
-#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
-#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
-#define HSW_PWR_WELL_ENABLE (1<<31)
-#define HSW_PWR_WELL_STATE (1<<30)
-#define HSW_PWR_WELL_CTL5 0x45410
+#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
+#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
+#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
+#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
+#define HSW_PWR_WELL_ENABLE (1<<31)
+#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_CTL5 0x45410
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
-#define HSW_PWR_WELL_FORCE_ON (1<<19)
-#define HSW_PWR_WELL_CTL6 0x45414
+#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_CTL6 0x45414
/* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A 0x60400
-#define PIPE_DDI_FUNC_CTL_B 0x61400
-#define PIPE_DDI_FUNC_CTL_C 0x62400
+#define PIPE_DDI_FUNC_CTL_A 0x60400
+#define PIPE_DDI_FUNC_CTL_B 0x61400
+#define PIPE_DDI_FUNC_CTL_C 0x62400
#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
- PIPE_DDI_FUNC_CTL_A, \
- PIPE_DDI_FUNC_CTL_B)
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
+ PIPE_DDI_FUNC_CTL_B)
#define PIPE_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define PIPE_DDI_PORT_MASK (7<<28)
-#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
-#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
-#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
+#define PIPE_DDI_PORT_MASK (7<<28)
+#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
+#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
+#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
+#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
-#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
-#define PIPE_DDI_BPC_8 (0<<20)
-#define PIPE_DDI_BPC_10 (1<<20)
-#define PIPE_DDI_BPC_6 (2<<20)
-#define PIPE_DDI_BPC_12 (3<<20)
-#define PIPE_DDI_BFI_ENABLE (1<<4)
-#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
-#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
-#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
+#define PIPE_DDI_BPC_MASK (7<<20)
+#define PIPE_DDI_BPC_8 (0<<20)
+#define PIPE_DDI_BPC_10 (1<<20)
+#define PIPE_DDI_BPC_6 (2<<20)
+#define PIPE_DDI_BPC_12 (3<<20)
+#define PIPE_DDI_PVSYNC (1<<17)
+#define PIPE_DDI_PHSYNC (1<<16)
+#define PIPE_DDI_BFI_ENABLE (1<<4)
+#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
+#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
+#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
#define DP_TP_CTL_B 0x64140
-#define DP_TP_CTL(port) _PORT(port, \
- DP_TP_CTL_A, \
- DP_TP_CTL_B)
-#define DP_TP_CTL_ENABLE (1<<31)
-#define DP_TP_CTL_MODE_SST (0<<27)
-#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define DP_TP_CTL_ENABLE (1<<31)
+#define DP_TP_CTL_MODE_SST (0<<27)
+#define DP_TP_CTL_MODE_MST (1<<27)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
-#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
-#define DP_TP_STATUS(port) _PORT(port, \
- DP_TP_STATUS_A, \
- DP_TP_STATUS_B)
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
/* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000
#define DDI_BUF_CTL_B 0x64100
-#define DDI_BUF_CTL(port) _PORT(port, \
- DDI_BUF_CTL_A, \
- DDI_BUF_CTL_B)
-#define DDI_BUF_CTL_ENABLE (1<<31)
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE (1<<31)
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
-#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
-#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
-#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
-#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
-#define DDI_BUF_EMP_MASK (0xf<<24)
-#define DDI_BUF_IS_IDLE (1<<7)
-#define DDI_PORT_WIDTH_X1 (0<<1)
-#define DDI_PORT_WIDTH_X2 (1<<1)
-#define DDI_PORT_WIDTH_X4 (3<<1)
+#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
+#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_PORT_WIDTH_X1 (0<<1)
+#define DDI_PORT_WIDTH_X2 (1<<1)
+#define DDI_PORT_WIDTH_X4 (3<<1)
#define DDI_INIT_DISPLAY_DETECTED (1<<0)
/* DDI Buffer Translations */
#define DDI_BUF_TRANS_A 0x64E00
#define DDI_BUF_TRANS_B 0x64E60
-#define DDI_BUF_TRANS(port) _PORT(port, \
- DDI_BUF_TRANS_A, \
- DDI_BUF_TRANS_B)
+#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
/* Sideband Interface (SBI) is programmed indirectly, via
* SBI_ADDR, which contains the register offset; and SBI_DATA,
* which contains the payload */
-#define SBI_ADDR 0xC6000
-#define SBI_DATA 0xC6004
+#define SBI_ADDR 0xC6000
+#define SBI_DATA 0xC6004
#define SBI_CTL_STAT 0xC6008
#define SBI_CTL_OP_CRRD (0x6<<8)
#define SBI_CTL_OP_CRWR (0x7<<8)
#define SBI_RESPONSE_FAIL (0x1<<1)
-#define SBI_RESPONSE_SUCCESS (0x0<<1)
-#define SBI_BUSY (0x1<<0)
-#define SBI_READY (0x0<<0)
+#define SBI_RESPONSE_SUCCESS (0x0<<1)
+#define SBI_BUSY (0x1<<0)
+#define SBI_READY (0x0<<0)
/* SBI offsets */
-#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE6 0x0600
#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
-#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
-#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
-#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
-#define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0 0x2a00
/* LPT PIXCLK_GATE */
-#define PIXCLK_GATE 0xC6020
-#define PIXCLK_GATE_UNGATE 1<<0
-#define PIXCLK_GATE_GATE 0<<0
+#define PIXCLK_GATE 0xC6020
+#define PIXCLK_GATE_UNGATE (1<<0)
+#define PIXCLK_GATE_GATE (0<<0)
/* SPLL */
-#define SPLL_CTL 0x46020
+#define SPLL_CTL 0x46020
#define SPLL_PLL_ENABLE (1<<31)
#define SPLL_PLL_SCC (1<<28)
#define SPLL_PLL_NON_SCC (2<<28)
-#define SPLL_PLL_FREQ_810MHz (0<<26)
-#define SPLL_PLL_FREQ_1350MHz (1<<26)
+#define SPLL_PLL_FREQ_810MHz (0<<26)
+#define SPLL_PLL_FREQ_1350MHz (1<<26)
/* WRPLL */
-#define WRPLL_CTL1 0x46040
-#define WRPLL_CTL2 0x46060
-#define WRPLL_PLL_ENABLE (1<<31)
-#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
+#define WRPLL_CTL1 0x46040
+#define WRPLL_CTL2 0x46060
+#define WRPLL_PLL_ENABLE (1<<31)
+#define WRPLL_PLL_SELECT_SSC (0x01<<28)
+#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
-#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
-#define WRPLL_DIVIDER_POST(x) ((x)<<8)
-#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
/* Port clock selection */
#define PORT_CLK_SEL_A 0x46100
#define PORT_CLK_SEL_B 0x46104
-#define PORT_CLK_SEL(port) _PORT(port, \
- PORT_CLK_SEL_A, \
- PORT_CLK_SEL_B)
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
-#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
/* Pipe clock selection */
#define PIPE_CLK_SEL_A 0x46140
#define PIPE_CLK_SEL_B 0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
- PIPE_CLK_SEL_A, \
- PIPE_CLK_SEL_B)
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
/* For each pipe, we need to select the corresponding port clock */
-#define PIPE_CLK_SEL_DISABLED (0x0<<29)
-#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
+#define PIPE_CLK_SEL_DISABLED (0x0<<29)
+#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
/* LCPLL Control */
-#define LCPLL_CTL 0x130040
+#define LCPLL_CTL 0x130040
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
-#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
#define PIPE_WM_LINETIME_B 0x45274
-#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
- PIPE_WM_LINETIME_A, \
- PIPE_WM_LINETIME_B)
-#define PIPE_WM_LINETIME_MASK (0x1ff)
-#define PIPE_WM_LINETIME_TIME(x) ((x))
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
+ PIPE_WM_LINETIME_B)
+#define PIPE_WM_LINETIME_MASK (0x1ff)
+#define PIPE_WM_LINETIME_TIME(x) ((x))
#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
-#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
/* SFUSE_STRAP */
-#define SFUSE_STRAP 0xc2014
+#define SFUSE_STRAP 0xc2014
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
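The macros added above all use the same parameterized-register trick: given the
pipe A and pipe B instances of a register, the pipe N instance sits at a fixed
stride from the base. A minimal, compilable sketch of how this resolves,
assuming the usual i915 definition of _PIPE() (base plus pipe times the A/B
distance):

#include <stdio.h>

/* Assumed definition: i915_reg.h derives the per-pipe offset from the
 * distance between the pipe A and pipe B instances of a register. */
#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

#define HSW_AUD_CONFIG_A	0x65000
#define HSW_AUD_CONFIG_B	0x65100
#define HSW_AUD_CFG(pipe)	_PIPE(pipe, HSW_AUD_CONFIG_A, HSW_AUD_CONFIG_B)

int main(void)
{
	int pipe;

	/* Pipes A..C map to 0x65000, 0x65100 and 0x65200. */
	for (pipe = 0; pipe < 3; pipe++)
		printf("HSW_AUD_CFG(%d) = 0x%05x\n", pipe, HSW_AUD_CFG(pipe));
	return 0;
}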
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 7631807a27886e2483d77de1046461350f9c7d15..903eebd2117a41437837131adb46811ce858fbdd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -46,32 +46,32 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
}
static ssize_t
-show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
}
static ssize_t
-show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
}
static ssize_t
-show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
}
static ssize_t
-show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
}
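The dev -> kdev rename makes the container_of() calls easier to read: the
struct device handed to the show functions is the one embedded inside
struct drm_minor, and container_of() walks back from the member to the
containing structure. A self-contained toy version of the pattern (the types
here are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };
struct drm_minor { int index; struct device kdev; };

int main(void)
{
	struct drm_minor minor = { .index = 0, .kdev = { .id = 226 } };
	struct device *kdev = &minor.kdev;
	struct drm_minor *m = container_of(kdev, struct drm_minor, kdev);

	printf("minor index %d, device id %d\n", m->index, m->kdev.id);
	return 0;
}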
@@ -93,6 +93,7 @@ static struct attribute_group rc6_attr_group = {
.name = power_group_name,
.attrs = rc6_attrs
};
+#endif
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
@@ -202,37 +203,214 @@ static struct bin_attribute dpf_attrs = {
.mmap = NULL
};
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap, hw_max, hw_min;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = (rp_state_cap & 0xff);
+ hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+ if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ if (dev_priv->rps.cur_delay > val)
+ gen6_set_rps(dev_priv->dev, val);
+
+ dev_priv->rps.max_delay = val;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_min_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap, hw_max, hw_min;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = (rp_state_cap & 0xff);
+ hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+ if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ if (dev_priv->rps.cur_delay < val)
+ gen6_set_rps(dev_priv->dev, val);
+
+ dev_priv->rps.min_delay = val;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
+static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+
+
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+
+/* For now we have a static number of RP states */
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap;
+ ssize_t ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (attr == &dev_attr_gt_RP0_freq_mhz) {
+ val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
+ val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
+ val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ } else {
+ BUG();
+ }
+ return snprintf(buf, PAGE_SIZE, "%d", val);
+}
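As a worked example of the GEN6_RP_STATE_CAP decoding used here and in the
store functions above: byte 0 holds the RP0 (maximum) ratio, byte 1 RP1 and
byte 2 RPn (minimum), each in units of GT_FREQUENCY_MULTIPLIER MHz (assumed to
be 50 here). A standalone sketch with a hypothetical readback value:

#include <stdio.h>
#include <stdint.h>

#define GT_FREQUENCY_MULTIPLIER 50	/* assumption: 50 MHz ratio steps */

int main(void)
{
	uint32_t rp_state_cap = 0x071016;	/* hypothetical register value */

	printf("RP0 = %u MHz\n", ((rp_state_cap >>  0) & 0xff) * GT_FREQUENCY_MULTIPLIER);
	printf("RP1 = %u MHz\n", ((rp_state_cap >>  8) & 0xff) * GT_FREQUENCY_MULTIPLIER);
	printf("RPn = %u MHz\n", ((rp_state_cap >> 16) & 0xff) * GT_FREQUENCY_MULTIPLIER);
	return 0;
}

With 0x071016 this prints RP0 = 1100 MHz, RP1 = 800 MHz and RPn = 350 MHz,
matching the hw_max/hw_min extraction in the store functions.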
+
+static const struct attribute *gen6_attrs[] = {
+ &dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_max_freq_mhz.attr,
+ &dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_gt_RP0_freq_mhz.attr,
+ &dev_attr_gt_RP1_freq_mhz.attr,
+ &dev_attr_gt_RPn_freq_mhz.attr,
+ NULL,
+};
+
void i915_setup_sysfs(struct drm_device *dev)
{
int ret;
+#ifdef CONFIG_PM
if (INTEL_INFO(dev)->gen >= 6) {
ret = sysfs_merge_group(&dev->primary->kdev.kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
-
- if (IS_IVYBRIDGE(dev)) {
+#endif
+ if (HAS_L3_GPU_CACHE(dev)) {
ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
}
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+ if (ret)
+ DRM_ERROR("gen6 sysfs setup failed\n");
+ }
}
void i915_teardown_sysfs(struct drm_device *dev)
{
+ sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
+#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+#endif
}
-#else
-void i915_setup_sysfs(struct drm_device *dev)
-{
- return;
-}
-
-void i915_teardown_sysfs(struct drm_device *dev)
-{
- return;
-}
-#endif /* CONFIG_PM */
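The new attributes hang off the primary drm minor's device, so they surface in
sysfs next to the card node. A hypothetical userspace reader (the
/sys/class/drm/card0 path assumes the first card; values are already scaled to
MHz by the show functions):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/drm/card0/gt_cur_freq_mhz", "r");
	int mhz;

	if (!f) {
		perror("gt_cur_freq_mhz");
		return 1;
	}
	if (fscanf(f, "%d", &mhz) == 1)
		printf("current GT frequency: %d MHz\n", mhz);
	fclose(f);
	return 0;
}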
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fe90b3a84a6d9db3b7cac327173749fe0fc0b0a9..8134421b89a6c582b9862a252e246fb9f69ae12f 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -214,22 +214,18 @@ TRACE_EVENT(i915_gem_evict,
);
TRACE_EVENT(i915_gem_evict_everything,
- TP_PROTO(struct drm_device *dev, bool purgeable),
- TP_ARGS(dev, purgeable),
+ TP_PROTO(struct drm_device *dev),
+ TP_ARGS(dev),
TP_STRUCT__entry(
__field(u32, dev)
- __field(bool, purgeable)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
- __entry->purgeable = purgeable;
),
- TP_printk("dev=%d%s",
- __entry->dev,
- __entry->purgeable ? ", purgeable only" : "")
+ TP_printk("dev=%d", __entry->dev)
);
TRACE_EVENT(i915_gem_ring_dispatch,
@@ -434,6 +430,21 @@ TRACE_EVENT(i915_reg_rw,
(u32)(__entry->val >> 32))
);
+TRACE_EVENT(intel_gpu_freq_change,
+ TP_PROTO(u32 freq),
+ TP_ARGS(freq),
+
+ TP_STRUCT__entry(
+ __field(u32, freq)
+ ),
+
+ TP_fast_assign(
+ __entry->freq = freq;
+ ),
+
+ TP_printk("new_freq=%u", __entry->freq)
+);
+
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
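Because the event is declared under TRACE_SYSTEM i915, it can be consumed
through tracefs once the driver emits it (presumably from the RPS frequency
code). A hedged userspace sketch; the debugfs mount point and event path are
assumptions based on the usual tracing layout:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/tracing/events/i915/"
		  "intel_gpu_freq_change/enable", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable tracepoint");
		return 1;
	}
	close(fd);

	fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
	while (fd >= 0 && (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* lines end in new_freq=<MHz> */
	}
	return 0;
}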
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c8f1c0db446d1c9e9e9acfe743907f76cd947309..893f30164b7ec70a5cd2308db6db1e3e55222d6f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -46,6 +46,7 @@
struct intel_crt {
struct intel_encoder base;
bool force_hotplug_required;
+ u32 adpa_reg;
};
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@@ -54,42 +55,68 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
struct intel_crt, base);
}
-static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ return container_of(encoder, struct intel_crt, base);
+}
+
+static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 tmp;
+
+ tmp = I915_READ(crt->adpa_reg);
+
+ if (!(tmp & ADPA_DAC_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
- temp = I915_READ(PCH_ADPA);
+ temp = I915_READ(crt->adpa_reg);
+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
+ I915_WRITE(crt->adpa_reg, temp);
+}
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- temp |= ADPA_DAC_ENABLE;
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- /* Just leave port enable cleared */
- break;
- }
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 temp;
- I915_WRITE(PCH_ADPA, temp);
+ temp = I915_READ(crt->adpa_reg);
+ temp |= ADPA_DAC_ENABLE;
+ I915_WRITE(crt->adpa_reg, temp);
}
-static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
- temp = I915_READ(ADPA);
+ temp = I915_READ(crt->adpa_reg);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
- if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
switch (mode) {
case DRM_MODE_DPMS_ON:
temp |= ADPA_DAC_ENABLE;
@@ -105,7 +132,51 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
break;
}
- I915_WRITE(ADPA, temp);
+ I915_WRITE(crt->adpa_reg, temp);
+}
+
+static void intel_crt_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_crtc *crtc;
+ int old_dpms;
+
+ /* PCH platforms and VLV only support on/off. */
+ if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = encoder->base.crtc;
+ if (!crtc) {
+ encoder->connectors_active = false;
+ return;
+ }
+
+ /* We need the pipe to run for anything but OFF. */
+ if (mode == DRM_MODE_DPMS_OFF)
+ encoder->connectors_active = false;
+ else
+ encoder->connectors_active = true;
+
+ if (mode < old_dpms) {
+ /* From off to on, enable the pipe first. */
+ intel_crtc_update_dpms(crtc);
+
+ intel_crt_set_dpms(encoder, mode);
+ } else {
+ intel_crt_set_dpms(encoder, mode);
+
+ intel_crtc_update_dpms(crtc);
+ }
+
+ intel_modeset_check_state(connector->dev);
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -144,19 +215,15 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crt *crt =
+ intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_md_reg;
u32 adpa, dpll_md;
- u32 adpa_reg;
dpll_md_reg = DPLL_MD(intel_crtc->pipe);
- if (HAS_PCH_SPLIT(dev))
- adpa_reg = PCH_ADPA;
- else
- adpa_reg = ADPA;
-
/*
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
@@ -184,7 +251,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
- I915_WRITE(adpa_reg, adpa);
+ I915_WRITE(crt->adpa_reg, adpa);
}
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -544,14 +611,12 @@ intel_crt_detect(struct drm_connector *connector, bool force)
return connector->status;
/* for pre-945g platforms use load detect */
- if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
- &tmp)) {
+ if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(&crt->base, connector,
- &tmp);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
status = connector_status_unknown;
@@ -602,25 +667,15 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
-static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
+static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
- .prepare = intel_encoder_prepare,
- .commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
- .dpms = pch_crt_dpms,
-};
-
-static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
- .mode_fixup = intel_crt_mode_fixup,
- .prepare = intel_encoder_prepare,
- .commit = intel_encoder_commit,
- .mode_set = intel_crt_mode_set,
- .dpms = gmch_crt_dpms,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_crt_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
@@ -661,7 +716,6 @@ void intel_crt_init(struct drm_device *dev)
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct drm_encoder_helper_funcs *encoder_helper_funcs;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
@@ -687,13 +741,11 @@ void intel_crt_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG;
- crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
- 1 << INTEL_ANALOG_CLONE_BIT |
- 1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ crt->base.cloneable = true;
if (IS_HASWELL(dev))
crt->base.crtc_mask = (1 << 0);
else
- crt->base.crtc_mask = (1 << 0) | (1 << 1);
+ crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
@@ -702,11 +754,18 @@ void intel_crt_init(struct drm_device *dev)
connector->doublescan_allowed = 0;
if (HAS_PCH_SPLIT(dev))
- encoder_helper_funcs = &pch_encoder_funcs;
+ crt->adpa_reg = PCH_ADPA;
+ else if (IS_VALLEYVIEW(dev))
+ crt->adpa_reg = VLV_ADPA;
else
- encoder_helper_funcs = &gmch_encoder_funcs;
+ crt->adpa_reg = ADPA;
+
+ crt->base.disable = intel_disable_crt;
+ crt->base.enable = intel_enable_crt;
+ crt->base.get_hw_state = intel_crt_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
- drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
+ drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
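The ordering logic in intel_crt_dpms() deserves spelling out: DPMS modes are
numerically ordered with ON smallest, so moving towards ON must bring the pipe
up before the port is touched, while moving towards OFF cuts the port first.
A minimal sketch of that rule; the helper names are illustrative, not the
driver's:

#include <stdbool.h>
#include <stdio.h>

enum { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

static void update_pipe(bool active)
{
	printf("pipe %s\n", active ? "running" : "stopped");
}

static void update_port(int mode)
{
	printf("port -> dpms %d\n", mode);
}

static void set_dpms(int old_mode, int new_mode)
{
	if (new_mode < old_mode) {	/* towards ON: pipe first */
		update_pipe(true);
		update_port(new_mode);
	} else {			/* towards OFF: port first */
		update_port(new_mode);
		update_pipe(new_mode != DPMS_OFF);
	}
}

int main(void)
{
	set_dpms(DPMS_OFF, DPMS_ON);
	set_dpms(DPMS_ON, DPMS_OFF);
	return 0;
}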
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 933c7485917264d2eb07b6c42a8cd336876b070b..bfe375466a0e03b953022248e65cfafdf3a17874 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
case PORT_B:
case PORT_C:
case PORT_D:
- intel_hdmi_init(dev, DDI_BUF_CTL(port));
+ intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
break;
default:
DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
@@ -267,7 +267,8 @@ struct wrpll_tmds_clock {
u16 r2; /* Reference divider */
};
-/* Table of matching values for WRPLL clocks programming for each frequency */
+/* Table of matching values for WRPLL clock programming for each frequency.
+ * The code assumes this table is sorted by frequency. */
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{19750, 38, 25, 18},
{20000, 48, 32, 18},
@@ -277,7 +278,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{23000, 36, 23, 15},
{23500, 40, 40, 23},
{23750, 26, 16, 14},
- {23750, 26, 16, 14},
{24000, 36, 24, 15},
{25000, 36, 25, 15},
{25175, 26, 40, 33},
@@ -437,7 +437,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{108000, 8, 24, 15},
{108108, 8, 173, 108},
{109000, 6, 23, 19},
- {109000, 6, 23, 19},
{110000, 6, 22, 18},
{110013, 6, 22, 18},
{110250, 8, 49, 30},
@@ -614,7 +613,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{218250, 4, 42, 26},
{218750, 4, 34, 21},
{219000, 4, 47, 29},
- {219000, 4, 47, 29},
{220000, 4, 44, 27},
{220640, 4, 49, 30},
{220750, 4, 36, 22},
@@ -658,7 +656,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int port = intel_hdmi->ddi_port;
int pipe = intel_crtc->pipe;
- int p, n2, r2, valid=0;
+ int p, n2, r2;
u32 temp, i;
/* On Haswell, we need to enable the clocks and prepare DDI function to
@@ -666,26 +664,23 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
*/
DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
- for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
- if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
- p = wrpll_tmds_clock_table[i].p;
- n2 = wrpll_tmds_clock_table[i].n2;
- r2 = wrpll_tmds_clock_table[i].r2;
+ for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
+ if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
+ break;
- DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
- crtc->mode.clock,
- p, n2, r2);
+ if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
+ i--;
- valid = 1;
- break;
- }
- }
+ p = wrpll_tmds_clock_table[i].p;
+ n2 = wrpll_tmds_clock_table[i].n2;
+ r2 = wrpll_tmds_clock_table[i].r2;
- if (!valid) {
- DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
- crtc->mode.clock);
- return;
- }
+ if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
+ DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
+ wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+
+ DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+ crtc->mode.clock, p, n2, r2);
/* Enable LCPLL if disabled */
temp = I915_READ(LCPLL_CTL);
@@ -718,46 +713,107 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
/* Proper support for digital audio needs a new logic and a new set
* of registers, so we leave it for future patch bombing.
*/
- DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
}
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
- temp = I915_READ(DDI_FUNC_CTL(pipe));
- temp &= ~PIPE_DDI_PORT_MASK;
- temp &= ~PIPE_DDI_BPC_12;
- temp |= PIPE_DDI_SELECT_PORT(port) |
- PIPE_DDI_MODE_SELECT_HDMI |
- ((intel_crtc->bpp > 24) ?
- PIPE_DDI_BPC_12 :
- PIPE_DDI_BPC_8) |
- PIPE_DDI_FUNC_ENABLE;
+ temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+
+ switch (intel_crtc->bpp) {
+ case 18:
+ temp |= PIPE_DDI_BPC_6;
+ break;
+ case 24:
+ temp |= PIPE_DDI_BPC_8;
+ break;
+ case 30:
+ temp |= PIPE_DDI_BPC_10;
+ break;
+ case 36:
+ temp |= PIPE_DDI_BPC_12;
+ break;
+ default:
+ WARN(1, "%d bpp unsupported by pipe DDI function\n",
+ intel_crtc->bpp);
+ }
+
+ if (intel_hdmi->has_hdmi_sink)
+ temp |= PIPE_DDI_MODE_SELECT_HDMI;
+ else
+ temp |= PIPE_DDI_MODE_SELECT_DVI;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= PIPE_DDI_PVSYNC;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= PIPE_DDI_PHSYNC;
I915_WRITE(DDI_FUNC_CTL(pipe), temp);
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
-void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 tmp;
+ int i;
+
+ tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
+
+ if (!(tmp & DDI_BUF_CTL_ENABLE))
+ return false;
+
+ for_each_pipe(i) {
+ tmp = I915_READ(DDI_FUNC_CTL(i));
+
+ if ((tmp & PIPE_DDI_PORT_MASK)
+ == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
+ *pipe = i;
+ return true;
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
+
+ return true;
+}
+
+void intel_enable_ddi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
int port = intel_hdmi->ddi_port;
u32 temp;
temp = I915_READ(DDI_BUF_CTL(port));
-
- if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~DDI_BUF_CTL_ENABLE;
- } else {
- temp |= DDI_BUF_CTL_ENABLE;
- }
+ temp |= DDI_BUF_CTL_ENABLE;
/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
* and swing/emphasis values are ignored so nothing special needs
* to be done besides enabling the port.
*/
- I915_WRITE(DDI_BUF_CTL(port),
- temp);
+ I915_WRITE(DDI_BUF_CTL(port), temp);
+}
+
+void intel_disable_ddi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ int port = intel_hdmi->ddi_port;
+ u32 temp;
+
+ temp = I915_READ(DDI_BUF_CTL(port));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+
+ I915_WRITE(DDI_BUF_CTL(port), temp);
}
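The rewritten WRPLL lookup no longer insists on an exact frequency match: it
takes the first table row at or above the target clock and clamps to the last
row when the target is out of range, which is why the table must stay sorted.
A standalone demonstration using an abbreviated copy of the table:

#include <stdio.h>

struct wrpll_tmds_clock {
	unsigned int clock;	/* in KHz */
	unsigned short p;	/* post divider */
	unsigned short n2;	/* feedback divider */
	unsigned short r2;	/* reference divider */
};

/* Four rows of the real, much larger table; still sorted by clock. */
static const struct wrpll_tmds_clock table[] = {
	{19750, 38, 25, 18},
	{20000, 48, 32, 18},
	{24000, 36, 24, 15},
	{25175, 26, 40, 33},
};

int main(void)
{
	unsigned int target = 24500;	/* mode clock with no exact match */
	unsigned int i, n = sizeof(table) / sizeof(table[0]);

	for (i = 0; i < n; i++)
		if (target <= table[i].clock)
			break;
	if (i == n)		/* out of range: clamp to the last row */
		i--;

	printf("%u KHz -> using %u KHz: p=%u n2=%u r2=%u\n", target,
	       table[i].clock, table[i].p, table[i].n2, table[i].r2);
	return 0;
}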
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7ea9a3ceb2698b9b188955eb4008e5fa599dbf42..e3c02655d36ff4a944ae6fd8e3723ee5f9915d29 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1006,7 +1006,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
100))
- DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ WARN(1, "pipe_off wait timed out\n");
} else {
u32 last_line, line_mask;
int reg = PIPEDSL(pipe);
@@ -1024,7 +1024,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
} while (((I915_READ(reg) & line_mask) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ WARN(1, "pipe_off wait timed out\n");
}
}
@@ -1431,6 +1431,8 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
* protect mechanism may be enabled.
*
* Note! This is for pre-ILK only.
+ *
+ * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
*/
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -1860,59 +1862,6 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
intel_wait_for_vblank(dev_priv->dev, pipe);
}
-static void disable_pch_dp(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg, u32 port_sel)
-{
- u32 val = I915_READ(reg);
- if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
- DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
- I915_WRITE(reg, val & ~DP_PORT_EN);
- }
-}
-
-static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
-{
- u32 val = I915_READ(reg);
- if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
- DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
- reg, pipe);
- I915_WRITE(reg, val & ~PORT_ENABLE);
- }
-}
-
-/* Disable any ports connected to this transcoder */
-static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 reg, val;
-
- val = I915_READ(PCH_PP_CONTROL);
- I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
-
- disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
-
- reg = PCH_ADPA;
- val = I915_READ(reg);
- if (adpa_pipe_enabled(dev_priv, pipe, val))
- I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
-
- reg = PCH_LVDS;
- val = I915_READ(reg);
- if (lvds_pipe_enabled(dev_priv, pipe, val)) {
- DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
- I915_WRITE(reg, val & ~LVDS_PORT_EN);
- POSTING_READ(reg);
- udelay(100);
- }
-
- disable_pch_hdmi(dev_priv, pipe, HDMIB);
- disable_pch_hdmi(dev_priv, pipe, HDMIC);
- disable_pch_hdmi(dev_priv, pipe, HDMID);
-}
-
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -2201,16 +2150,17 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_framebuffer *old_fb;
int ret;
/* no fb bound */
- if (!crtc->fb) {
+ if (!fb) {
DRM_ERROR("No FB bound\n");
return 0;
}
@@ -2224,7 +2174,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(crtc->fb)->obj,
+ to_intel_framebuffer(fb)->obj,
NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
@@ -2232,17 +2182,22 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- if (old_fb)
- intel_finish_fb(old_fb);
+ if (crtc->fb)
+ intel_finish_fb(crtc->fb);
- ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
+ ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret) {
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("failed to update base address\n");
return ret;
}
+ old_fb = crtc->fb;
+ crtc->fb = fb;
+ crtc->x = x;
+ crtc->y = y;
+
if (old_fb) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
@@ -2709,11 +2664,10 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
+static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
@@ -2754,6 +2708,35 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
}
}
+static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
+ udelay(100);
+}
+
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2838,13 +2821,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
+ struct intel_encoder *intel_encoder;
/*
* If there's a non-PCH eDP on this crtc, it must be DP_A, and that
* must be driven by its own crtc; no sharing is possible.
*/
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
* CPU handles all others */
@@ -2852,19 +2835,19 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
/* It is still unclear how this will work on PPT, so throw up a warning */
WARN_ON(!HAS_PCH_LPT(dev));
- if (encoder->type == DRM_MODE_ENCODER_DAC) {
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
return true;
} else {
DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
- encoder->type);
+ intel_encoder->type);
return false;
}
}
- switch (encoder->type) {
+ switch (intel_encoder->type) {
case INTEL_OUTPUT_EDP:
- if (!intel_encoder_is_pch_edp(&encoder->base))
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
return false;
continue;
}
@@ -3181,11 +3164,14 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 temp;
bool is_pch_port;
+ WARN_ON(!crtc->enabled);
+
if (intel_crtc->active)
return;
@@ -3200,10 +3186,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
is_pch_port = intel_crtc_driving_pch(crtc);
- if (is_pch_port)
- ironlake_fdi_pll_enable(crtc);
- else
- ironlake_fdi_disable(crtc);
+ if (is_pch_port) {
+ ironlake_fdi_pll_enable(intel_crtc);
+ } else {
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+ }
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -3234,6 +3226,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ if (HAS_PCH_CPT(dev))
+ intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -3241,13 +3239,18 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
+
if (!intel_crtc->active)
return;
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
intel_crtc_update_cursor(crtc, false);
@@ -3263,14 +3266,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
- ironlake_fdi_disable(crtc);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
- /* This is a horrible layering violation; we should be doing this in
- * the connector/encoder ->prepare instead, but we don't always have
- * enough information there about the config to know whether it will
- * actually be necessary or just cause undesired flicker.
- */
- intel_disable_pch_ports(dev_priv, pipe);
+ ironlake_fdi_disable(crtc);
intel_disable_transcoder(dev_priv, pipe);
@@ -3304,26 +3304,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
/* disable PCH DPLL */
intel_disable_pch_pll(intel_crtc);
- /* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_PCDCLK);
-
- /* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
-
- POSTING_READ(reg);
- udelay(100);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
-
- /* Wait for the clocks to turn off. */
- POSTING_READ(reg);
- udelay(100);
+ ironlake_fdi_pll_disable(intel_crtc);
intel_crtc->active = false;
intel_update_watermarks(dev);
@@ -3333,30 +3314,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
-
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
- ironlake_crtc_enable(crtc);
- break;
-
- case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
- ironlake_crtc_disable(crtc);
- break;
- }
-}
-
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -3386,9 +3343,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ WARN_ON(!crtc->enabled);
+
if (intel_crtc->active)
return;
@@ -3405,6 +3365,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
/* Give the overlay scaler a chance to enable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, true);
intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
}
static void i9xx_crtc_disable(struct drm_crtc *crtc)
@@ -3412,12 +3375,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+
if (!intel_crtc->active)
return;
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
/* Give the overlay scaler a chance to disable if it's on this pipe */
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
@@ -3436,45 +3404,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_update_watermarks(dev);
}
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- i9xx_crtc_enable(crtc);
- break;
- case DRM_MODE_DPMS_OFF:
- i9xx_crtc_disable(crtc);
- break;
- }
-}
-
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
-/**
- * Sets the power management mode of the pipe and plane.
- */
-static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void intel_crtc_update_sarea(struct drm_crtc *crtc,
+ bool enabled)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- bool enabled;
-
- if (intel_crtc->dpms_mode == mode)
- return;
-
- intel_crtc->dpms_mode = mode;
-
- dev_priv->display.dpms(crtc, mode);
if (!dev->primary->master)
return;
@@ -3483,8 +3423,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
if (!master_priv->sarea_priv)
return;
- enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
-
switch (pipe) {
case 0:
master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
@@ -3500,13 +3438,42 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+void intel_crtc_update_dpms(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ bool enable = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ enable |= intel_encoder->connectors_active;
+
+ if (enable)
+ dev_priv->display.crtc_enable(crtc);
+ else
+ dev_priv->display.crtc_disable(crtc);
+
+ intel_crtc_update_sarea(crtc, enable);
+}
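intel_crtc_update_dpms() captures the new model's core invariant: a crtc runs
exactly when at least one encoder on it still has active connectors. A toy
version of that decision (types illustrative):

#include <stdbool.h>
#include <stdio.h>

struct encoder { bool connectors_active; };

static bool crtc_should_run(const struct encoder *e, int n)
{
	bool enable = false;
	int i;

	for (i = 0; i < n; i++)
		enable |= e[i].connectors_active;
	return enable;
}

int main(void)
{
	struct encoder on_crtc[2] = { { false }, { true } };

	printf("crtc enabled: %d\n", crtc_should_run(on_crtc, 2));
	on_crtc[1].connectors_active = false;
	printf("crtc enabled: %d\n", crtc_should_run(on_crtc, 2));
	return 0;
}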
+
+static void intel_crtc_noop(struct drm_crtc *crtc)
+{
+}
+
static void intel_crtc_disable(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ /* crtc should still be enabled when we disable it. */
+ WARN_ON(!crtc->enabled);
+
+ dev_priv->display.crtc_disable(crtc);
+ intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
@@ -3516,63 +3483,128 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
+ crtc->fb = NULL;
+ }
+
+ /* Update computed state. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ if (connector->encoder->crtc != crtc)
+ continue;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ to_intel_encoder(connector->encoder)->connectors_active = false;
}
}
-/* Prepare for a mode set.
- *
- * Note we could be a lot smarter here. We need to figure out which outputs
- * will be enabled, which disabled (in short, how the config will changes)
- * and perform the minimum necessary steps to accomplish that, e.g. updating
- * watermarks, FBC configuration, making sure PLLs are programmed correctly,
- * panel fitting is in the proper state, etc.
- */
-static void i9xx_crtc_prepare(struct drm_crtc *crtc)
+void intel_modeset_disable(struct drm_device *dev)
{
- i9xx_crtc_disable(crtc);
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled)
+ intel_crtc_disable(crtc);
+ }
}
-static void i9xx_crtc_commit(struct drm_crtc *crtc)
+void intel_encoder_noop(struct drm_encoder *encoder)
{
- i9xx_crtc_enable(crtc);
}
-static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+void intel_encoder_destroy(struct drm_encoder *encoder)
{
- ironlake_crtc_disable(crtc);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
-static void ironlake_crtc_commit(struct drm_crtc *crtc)
+/* Simple dpms helper for encoders with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
- ironlake_crtc_enable(crtc);
+ if (mode == DRM_MODE_DPMS_ON) {
+ encoder->connectors_active = true;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ } else {
+ encoder->connectors_active = false;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ }
}
-void intel_encoder_prepare(struct drm_encoder *encoder)
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- /* lvds has its own version of prepare see intel_lvds_prepare */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+ if (connector->get_hw_state(connector)) {
+ struct intel_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc;
+ bool encoder_enabled;
+ enum pipe pipe;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+
+ WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+ "wrong connector dpms state\n");
+ WARN(connector->base.encoder != &encoder->base,
+ "active connector not linked to encoder\n");
+ WARN(!encoder->connectors_active,
+ "encoder->connectors_active not set\n");
+
+ encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+ WARN(!encoder_enabled, "encoder not enabled\n");
+ if (WARN_ON(!encoder->base.crtc))
+ return;
+
+ crtc = encoder->base.crtc;
+
+ WARN(!crtc->enabled, "crtc not enabled\n");
+ WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ WARN(pipe != to_intel_crtc(crtc)->pipe,
+ "encoder active on the wrong pipe\n");
+ }
}
-void intel_encoder_commit(struct drm_encoder *encoder)
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void intel_connector_dpms(struct drm_connector *connector, int mode)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- struct drm_device *dev = encoder->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
- /* lvds has its own version of commit see intel_lvds_commit */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+ /* All the simple cases only support two dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
- if (HAS_PCH_CPT(dev))
- intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ if (encoder->base.crtc)
+ intel_encoder_dpms(encoder, mode);
+ else
+ WARN_ON(encoder->connectors_active != false);
+
+ intel_modeset_check_state(connector->dev);
}
-void intel_encoder_destroy(struct drm_encoder *encoder)
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning and hence the encoder state determines the state
+ * of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ enum pipe pipe = 0;
+ struct intel_encoder *encoder = connector->encoder;
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
+ return encoder->get_hw_state(encoder, &pipe);
}
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -3593,6 +3625,13 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
drm_mode_set_crtcinfo(adjusted_mode, 0);
+ /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
+ * with a hsync front porch of 0.
+ */
+ if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+ adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+ return false;
+
return true;
}
@@ -3728,6 +3767,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
unsigned int *pipe_bpp,
struct drm_display_mode *mode)
{
@@ -3797,7 +3837,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
* also stays within the max display bpc discovered above.
*/
- switch (crtc->fb->depth) {
+ switch (fb->depth) {
case 8:
bpc = 8; /* since we go through a colormap */
break;
@@ -4216,7 +4256,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4406,7 +4446,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ ret = intel_pipe_set_base(crtc, x, y, fb);
intel_update_watermarks(dev);
@@ -4560,63 +4600,78 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
return 120000;
}
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+static void ironlake_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+ bool dither)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int refclk, num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
- bool ok, has_reduced_clock = false, is_sdvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct intel_encoder *encoder, *edp_encoder = NULL;
+ uint32_t val;
+
+ val = I915_READ(PIPECONF(pipe));
+
+ val &= ~PIPE_BPC_MASK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ val |= PIPE_6BPC;
+ break;
+ case 24:
+ val |= PIPE_8BPC;
+ break;
+ case 30:
+ val |= PIPE_10BPC;
+ break;
+ case 36:
+ val |= PIPE_12BPC;
+ break;
+ default:
+ val |= PIPE_8BPC;
+ break;
+ }
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(pipe), val);
+ POSTING_READ(PIPECONF(pipe));
+}
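+
+/* Note the bpp -> bpc mapping above: 18 bpp selects 6 bpc per channel,
+ * 24 -> 8, 30 -> 10 and 36 -> 12, with anything else falling back to
+ * 8 bpc. */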
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock,
+ bool *has_reduced_clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ int refclk;
const intel_limit_t *limit;
- int ret;
- struct fdi_m_n m_n = {0};
- u32 temp;
- int target_clock, pixel_multiplier, lane, link_bw, factor;
- unsigned int pipe_bpp;
- bool dither;
- bool is_cpu_edp = false, is_pch_edp = false;
+ bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- switch (encoder->type) {
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
- if (encoder->needs_tv_clock)
+ if (intel_encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
- case INTEL_OUTPUT_DISPLAYPORT:
- is_dp = true;
- break;
- case INTEL_OUTPUT_EDP:
- is_dp = true;
- if (intel_encoder_is_pch_edp(&encoder->base))
- is_pch_edp = true;
- else
- is_cpu_edp = true;
- edp_encoder = encoder;
- break;
}
-
- num_connectors++;
}
refclk = ironlake_get_refclk(crtc);
@@ -4627,15 +4682,10 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
- &clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
+ ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ clock);
+ if (!ret)
+ return false;
if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
@@ -4644,16 +4694,86 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
* by using the FP0/FP1. In such case we will disable the LVDS
* downclock feature.
*/
- has_reduced_clock = limit->find_pll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk,
- &clock,
- &reduced_clock);
+ *has_reduced_clock = limit->find_pll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk,
+ clock,
+ reduced_clock);
}
if (is_sdvo && is_tv)
- i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+
+ return true;
+}
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false, is_sdvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+ struct intel_encoder *encoder, *edp_encoder = NULL;
+ int ret;
+ struct fdi_m_n m_n = {0};
+ u32 temp;
+ int target_clock, pixel_multiplier, lane, link_bw, factor;
+ unsigned int pipe_bpp;
+ bool dither;
+ bool is_cpu_edp = false, is_pch_edp = false;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (intel_encoder_is_pch_edp(&encoder->base))
+ is_pch_edp = true;
+ else
+ is_cpu_edp = true;
+ edp_encoder = encoder;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock, &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
@@ -4682,32 +4802,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
target_clock = adjusted_mode->clock;
/* determine panel color depth */
- temp = I915_READ(PIPECONF(pipe));
- temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
- switch (pipe_bpp) {
- case 18:
- temp |= PIPE_6BPC;
- break;
- case 24:
- temp |= PIPE_8BPC;
- break;
- case 30:
- temp |= PIPE_10BPC;
- break;
- case 36:
- temp |= PIPE_12BPC;
- break;
- default:
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
+ pipe_bpp != 36) {
WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
- pipe_bpp);
- temp |= PIPE_8BPC;
+ pipe_bpp);
pipe_bpp = 24;
- break;
}
-
intel_crtc->bpp = pipe_bpp;
- I915_WRITE(PIPECONF(pipe), temp);
if (!lane) {
/*
@@ -4791,12 +4896,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- /* setup pipeconf */
- pipeconf = I915_READ(PIPECONF(pipe));
-
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
-
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
@@ -4856,12 +4955,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PCH_LVDS, temp);
}
- pipeconf &= ~PIPECONF_DITHER_EN;
- pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
- if ((is_lvds && dev_priv->lvds_dither) || dither) {
- pipeconf |= PIPECONF_DITHER_EN;
- pipeconf |= PIPECONF_DITHER_TYPE_SP;
- }
if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
@@ -4897,9 +4990,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
- pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- pipeconf |= PIPECONF_INTERLACED_ILK;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_end -= 1;
@@ -4907,7 +4998,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal/2);
} else {
- pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(VSYNCSHIFT(pipe), 0);
}
@@ -4945,15 +5035,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- I915_WRITE(PIPECONF(pipe), pipeconf);
- POSTING_READ(PIPECONF(pipe));
+ ironlake_set_pipeconf(crtc, adjusted_mode, dither);
intel_wait_for_vblank(dev, pipe);
- I915_WRITE(DSPCNTR(plane), dspcntr);
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
- ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ ret = intel_pipe_set_base(crtc, x, y, fb);
intel_update_watermarks(dev);
@@ -4966,7 +5056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4977,14 +5067,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
- x, y, old_fb);
+ x, y, fb);
drm_vblank_post_modeset(dev, pipe);
- if (ret)
- intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
- else
- intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
-
return ret;
}
@@ -5057,6 +5142,91 @@ static void g4x_write_eld(struct drm_connector *connector,
I915_WRITE(G4X_AUD_CNTL_ST, i);
}
+static void haswell_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ struct drm_device *dev = crtc->dev;
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int pipe = to_intel_crtc(crtc)->pipe;
+ int tmp;
+
+ int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
+ int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
+ int aud_config = HSW_AUD_CFG(pipe);
+ int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
+
+ DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
+
+ /* Audio output enable */
+ DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Wait for 1 vertical blank */
+ intel_wait_for_vblank(dev, pipe);
+
+ /* Set ELD valid state */
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+ tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+
+ /* Enable HDMI mode */
+ tmp = I915_READ(aud_config);
+ DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+ /* clear N_programming_enable and N_value_index */
+ tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
+ I915_WRITE(aud_config, tmp);
+
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+ eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
+
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+ if (!eld[0])
+ return;
+
+ i = I915_READ(aud_cntl_st);
+ i &= ~IBX_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
+ DRM_DEBUG_DRIVER("port num:%d\n", i);
+
+ len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+}
+
static void ironlake_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
@@ -5069,28 +5239,24 @@ static void ironlake_write_eld(struct drm_connector *connector,
int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
+ int pipe = to_intel_crtc(crtc)->pipe;
if (HAS_PCH_IBX(connector->dev)) {
- hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
- aud_config = IBX_AUD_CONFIG_A;
- aud_cntl_st = IBX_AUD_CNTL_ST_A;
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
- hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
- aud_config = CPT_AUD_CONFIG_A;
- aud_cntl_st = CPT_AUD_CNTL_ST_A;
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
- i = to_intel_crtc(crtc)->pipe;
- hdmiw_hdmiedid += i * 0x100;
- aud_cntl_st += i * 0x100;
- aud_config += i * 0x100;
-
- DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
i = I915_READ(aud_cntl_st);
- i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
@@ -5337,8 +5503,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t addr;
int ret;
- DRM_DEBUG_KMS("\n");
-
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
@@ -5584,17 +5748,18 @@ mode_fits_in_fbdev(struct drm_device *dev,
return fb;
}
-bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old)
{
struct intel_crtc *intel_crtc;
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
struct drm_crtc *possible_crtc;
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
- struct drm_framebuffer *old_fb;
+ struct drm_framebuffer *fb;
int i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -5615,21 +5780,12 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
if (encoder->crtc) {
crtc = encoder->crtc;
- intel_crtc = to_intel_crtc(crtc);
- old->dpms_mode = intel_crtc->dpms_mode;
+ old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
/* Make sure the crtc and connector are running */
- if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_crtc_helper_funcs *crtc_funcs;
-
- crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
- encoder_funcs = encoder->helper_private;
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- }
+ if (connector->dpms != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
return true;
}
@@ -5653,19 +5809,17 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
return false;
}
- encoder->crtc = crtc;
- connector->encoder = encoder;
+ intel_encoder->new_crtc = to_intel_crtc(crtc);
+ to_intel_connector(connector)->new_encoder = intel_encoder;
intel_crtc = to_intel_crtc(crtc);
- old->dpms_mode = intel_crtc->dpms_mode;
+ old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
if (!mode)
mode = &load_detect_mode;
- old_fb = crtc->fb;
-
/* We need a framebuffer large enough to accommodate all accesses
* that the plane may generate whilst we perform load detection.
* We can not rely on the fbcon either being present (we get called
@@ -5673,50 +5827,52 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
* not even exist) or that it is large enough to satisfy the
* requested mode.
*/
- crtc->fb = mode_fits_in_fbdev(dev, mode);
- if (crtc->fb == NULL) {
+ fb = mode_fits_in_fbdev(dev, mode);
+ if (fb == NULL) {
DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
- crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
- old->release_fb = crtc->fb;
+ fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+ old->release_fb = fb;
} else
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
- if (IS_ERR(crtc->fb)) {
+ if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- crtc->fb = old_fb;
- return false;
+ goto fail;
}
- if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
+ if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
- crtc->fb = old_fb;
- return false;
+ goto fail;
}
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
return true;
+fail:
+ connector->encoder = NULL;
+ encoder->crtc = NULL;
+ return false;
}
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old)
{
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_crtc *crtc = encoder->crtc;
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
if (old->load_detect_temp) {
- connector->encoder = NULL;
- drm_helper_disable_unused_functions(dev);
+ struct drm_crtc *crtc = encoder->crtc;
+
+ to_intel_connector(connector)->new_encoder = NULL;
+ intel_encoder->new_crtc = NULL;
+ intel_set_mode(crtc, NULL, 0, 0, NULL);
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -5725,10 +5881,8 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
}
/* Switch crtc and encoder back off if necessary */
- if (old->dpms_mode != DRM_MODE_DPMS_ON) {
- encoder_funcs->dpms(encoder, old->dpms_mode);
- crtc_funcs->dpms(crtc, old->dpms_mode);
- }
+ if (old->dpms_mode != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, old->dpms_mode);
}
/* Returns the clock of the currently programmed mode of the given pipe. */
@@ -5850,46 +6004,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-#define GPU_IDLE_TIMEOUT 500 /* ms */
-
-/* When this timer fires, we've been idle for awhile */
-static void intel_gpu_idle_timer(unsigned long arg)
-{
- struct drm_device *dev = (struct drm_device *)arg;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!list_empty(&dev_priv->mm.active_list)) {
- /* Still processing requests, so just re-arm the timer. */
- mod_timer(&dev_priv->idle_timer, jiffies +
- msecs_to_jiffies(GPU_IDLE_TIMEOUT));
- return;
- }
-
- dev_priv->busy = false;
- queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
-#define CRTC_IDLE_TIMEOUT 1000 /* ms */
-
-static void intel_crtc_idle_timer(unsigned long arg)
-{
- struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
- struct drm_crtc *crtc = &intel_crtc->base;
- drm_i915_private_t *dev_priv = crtc->dev->dev_private;
- struct intel_framebuffer *intel_fb;
-
- intel_fb = to_intel_framebuffer(crtc->fb);
- if (intel_fb && intel_fb->obj->active) {
- /* The framebuffer is still being accessed by the GPU. */
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
- return;
- }
-
- intel_crtc->busy = false;
- queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -5919,10 +6033,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
}
-
- /* Schedule downclock */
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -5961,89 +6071,46 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
}
-/**
- * intel_idle_update - adjust clocks for idleness
- * @work: work struct
- *
- * Either the GPU or display (or both) went idle. Check the busy status
- * here and adjust the CRTC and GPU clocks as necessary.
- */
-static void intel_idle_update(struct work_struct *work)
+void intel_mark_busy(struct drm_device *dev)
+{
+ i915_update_gfx_val(dev->dev_private);
+}
+
+void intel_mark_idle(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- idle_work);
- struct drm_device *dev = dev_priv->dev;
+}
+
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
if (!i915_powersave)
return;
- mutex_lock(&dev->struct_mutex);
-
- i915_update_gfx_val(dev_priv);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- /* Skip inactive CRTCs */
if (!crtc->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc->busy)
- intel_decrease_pllclock(crtc);
+ if (to_intel_framebuffer(crtc->fb)->obj == obj)
+ intel_increase_pllclock(crtc);
}
-
-
- mutex_unlock(&dev->struct_mutex);
}
-/**
- * intel_mark_busy - mark the GPU and possibly the display busy
- * @dev: drm device
- * @obj: object we're operating on
- *
- * Callers can use this function to indicate that the GPU is busy processing
- * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
- * buffer), we'll also mark the display as busy, so we know to increase its
- * clock frequency.
- */
-void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
+void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL;
- struct intel_framebuffer *intel_fb;
- struct intel_crtc *intel_crtc;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- if (!dev_priv->busy) {
- intel_sanitize_pm(dev);
- dev_priv->busy = true;
- } else
- mod_timer(&dev_priv->idle_timer, jiffies +
- msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ struct drm_device *dev = obj->base.dev;
+ struct drm_crtc *crtc;
- if (obj == NULL)
+ if (!i915_powersave)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
- intel_fb = to_intel_framebuffer(crtc->fb);
- if (intel_fb->obj == obj) {
- if (!intel_crtc->busy) {
- /* Non-busy -> busy, upclock */
- intel_increase_pllclock(crtc);
- intel_crtc->busy = true;
- } else {
- /* Busy -> busy, put off timer */
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
- }
- }
+ if (to_intel_framebuffer(crtc->fb)->obj == obj)
+ intel_decrease_pllclock(crtc);
}
}
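+
+/* Expected call pattern (a sketch; the exact call sites live in the GEM
+ * code): intel_mark_fb_busy() when a scanout object starts being written
+ * by the GPU, so the pipe clock is bumped up, and intel_mark_fb_idle()
+ * once the last request on it retires, so the clock can drop again. */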
@@ -6394,7 +6461,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
default:
WARN_ONCE(1, "unknown plane in flip command\n");
ret = -ENODEV;
- goto err;
+ goto err_unpin;
}
ret = intel_ring_begin(ring, 4);
@@ -6502,7 +6569,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
intel_disable_fbc(dev);
- intel_mark_busy(dev, obj);
+ intel_mark_fb_busy(obj);
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6527,81 +6594,807 @@ free_work:
return ret;
}
-static void intel_sanitize_modesetting(struct drm_device *dev,
- int pipe, int plane)
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_noop,
+};
+
+bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg, val;
- int i;
+ struct intel_encoder *other_encoder;
+ struct drm_crtc *crtc = &encoder->new_crtc->base;
- /* Clear any frame start delays used for debugging left by the BIOS */
- for_each_pipe(i) {
- reg = PIPECONF(i);
- I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ if (WARN_ON(!crtc))
+ return false;
+
+ list_for_each_entry(other_encoder,
+ &crtc->dev->mode_config.encoder_list,
+ base.head) {
+
+ if (&other_encoder->new_crtc->base != crtc ||
+ encoder == other_encoder)
+ continue;
+ else
+ return true;
}
- if (HAS_PCH_SPLIT(dev))
- return;
+ return false;
+}
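+
+/* In other words, an encoder counts as cloned as soon as any other encoder
+ * is staged on the same crtc. */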
- /* Who knows what state these registers were left in by the BIOS or
- * grub?
- *
- * If we leave the registers in a conflicting state (e.g. with the
- * display plane reading from the other pipe than the one we intend
- * to use) then when we attempt to teardown the active mode, we will
- * not disable the pipes and planes in the correct order -- leaving
- * a plane reading from a disabled pipe and possibly leading to
- * undefined behaviour.
+static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+ WARN(!crtc, "checking null crtc?\n");
+
+ dev = crtc->dev;
+
+ list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ break;
+ crtc_mask <<= 1;
+ }
+
+ if (encoder->possible_crtcs & crtc_mask)
+ return true;
+ return false;
+}
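+
+/* possible_crtcs is a bitmask indexed by the crtc's position in the mode
+ * config list, so on a two-pipe device (illustrative):
+ *
+ *	0x1 - the encoder can drive the first crtc only
+ *	0x2 - the second crtc only
+ *	0x3 - either crtc
+ */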
+
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->new_encoder =
+ to_intel_encoder(connector->base.encoder);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ }
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the staged display pipe configuration to the real one.
+ */
+static void intel_modeset_commit_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->base.encoder = &connector->new_encoder->base;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->base.crtc = &encoder->new_crtc->base;
+ }
+}
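+
+/* Staging (new_encoder/new_crtc) and committing (base.encoder/base.crtc)
+ * are the two halves of the check-then-commit flow: compute the new
+ * configuration on the staged pointers first, and only copy it over once
+ * it is known to be valid. */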
+
+static struct drm_display_mode *
+intel_modeset_adjusted_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *adjusted_mode;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
+
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+ if (!adjusted_mode)
+ return ERR_PTR(-ENOMEM);
+
+ /* Pass our mode to the encoders and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
*/
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
- reg = DSPCNTR(plane);
- val = I915_READ(reg);
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+ encoder_funcs = encoder->base.helper_private;
+ if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
+ adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
+ goto fail;
+ }
+ }
- if ((val & DISPLAY_PLANE_ENABLE) == 0)
- return;
- if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
- return;
+ if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
+ goto fail;
+ }
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
- /* This display plane is active and attached to the other CPU pipe. */
- pipe = !pipe;
+ return adjusted_mode;
+fail:
+ drm_mode_destroy(dev, adjusted_mode);
+ return ERR_PTR(-EINVAL);
+}
- /* Disable the plane and wait for it to stop reading from the pipe. */
- intel_disable_plane(dev_priv, plane, pipe);
- intel_disable_pipe(dev_priv, pipe);
+/* Computes which crtcs are affected and sets the relevant bits in the mask. For
+ * simplicity we use the crtc's pipe number (because it's easier to obtain). */
+static void
+intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
+ unsigned *prepare_pipes, unsigned *disable_pipes)
+{
+ struct intel_crtc *intel_crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_crtc *tmp_crtc;
+
+ *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
+
+ /* Check which crtcs have changed outputs connected to them; these need
+ * to be part of the prepare_pipes mask. We don't (yet) support global
+ * modeset across multiple crtcs, so modeset_pipes will only have one
+ * bit set at most. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder == &connector->new_encoder->base)
+ continue;
+
+ if (connector->base.encoder) {
+ tmp_crtc = connector->base.encoder->crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (connector->new_encoder)
+ *prepare_pipes |=
+ 1 << connector->new_encoder->new_crtc->pipe;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc == &encoder->new_crtc->base)
+ continue;
+
+ if (encoder->base.crtc) {
+ tmp_crtc = encoder->base.crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (encoder->new_crtc)
+ *prepare_pipes |= 1 << encoder->new_crtc->pipe;
+ }
+
+ /* Check for any pipes that will be fully disabled ... */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool used = false;
+
+ /* Don't try to disable disabled crtcs. */
+ if (!intel_crtc->base.enabled)
+ continue;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->new_crtc == intel_crtc)
+ used = true;
+ }
+
+ if (!used)
+ *disable_pipes |= 1 << intel_crtc->pipe;
+ }
+
+ /* set_mode is also used to update properties on live display pipes. */
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled)
+ *prepare_pipes |= 1 << intel_crtc->pipe;
+
+ /* We only support modeset on one single crtc, hence we need to do that
+ * only for the passed in crtc iff we change anything else than just
+ * disable crtcs.
+ *
+ * This is actually not true; to be fully compatible with the old crtc
+ * helper we automatically disable _any_ output (i.e. it doesn't need to
+ * be connected to the crtc we're modesetting on) if it's disconnected.
+ * Which is a rather nutty api (since changing the output configuration
+ * without userspace's explicit request can lead to confusion), but
+ * alas. Hence we currently need to modeset on all pipes we prepare. */
+ if (*prepare_pipes)
+ *modeset_pipes = *prepare_pipes;
+
+ /* ... and mask these out. */
+ *modeset_pipes &= ~(*disable_pipes);
+ *prepare_pipes &= ~(*disable_pipes);
}
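+
+/* Worked example (illustrative): moving the only connector from pipe B to
+ * pipe A first collects prepare_pipes = 0x3; pipe B is then left without
+ * an encoder, so disable_pipes = 0x2, and after masking both modeset_pipes
+ * and prepare_pipes end up as 0x1. */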
-static void intel_crtc_reset(struct drm_crtc *crtc)
+static bool intel_crtc_in_use(struct drm_crtc *crtc)
{
+ struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* Reset flags back to the 'unknown' status so that they
- * will be correctly set on the initial modeset.
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc)
+ return true;
+
+ return false;
+}
+
+static void
+intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+{
+ struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc;
+ struct drm_connector *connector;
+
+ list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (!intel_encoder->base.crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe))
+ intel_encoder->connectors_active = false;
+ }
+
+ intel_modeset_commit_output_state(dev);
+
+ /* Update computed state. */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(connector->encoder->crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe)) {
+ struct drm_property *dpms_property =
+ dev->mode_config.dpms_property;
+
+ connector->dpms = DRM_MODE_DPMS_ON;
+ drm_connector_property_set_value(connector,
+ dpms_property,
+ DRM_MODE_DPMS_ON);
+
+ intel_encoder = to_intel_encoder(connector->encoder);
+ intel_encoder->connectors_active = true;
+ }
+ }
+
+
+#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
+ list_for_each_entry((intel_crtc), \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ if (mask & (1 <<(intel_crtc)->pipe)) \
+
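+/* For example (sketch), enabling every crtc in a pipe mask:
+ *
+ *	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+ *		dev_priv->display.crtc_enable(&intel_crtc->base);
+ */
+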
+void
+intel_modeset_check_state(struct drm_device *dev)
+{
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* This also checks the encoder/connector hw state with the
+ * ->get_hw_state callbacks. */
+ intel_connector_check_state(connector);
+
+ WARN(&connector->new_encoder->base != connector->base.encoder,
+ "connector's staged encoder doesn't match current encoder\n");
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+ enum pipe pipe, tracked_pipe;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ WARN(&encoder->new_crtc->base != encoder->base.crtc,
+ "encoder's stage crtc doesn't match current crtc\n");
+ WARN(encoder->connectors_active && !encoder->base.crtc,
+ "encoder's active_connectors set, but no crtc\n");
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder != &encoder->base)
+ continue;
+ enabled = true;
+ if (connector->base.dpms != DRM_MODE_DPMS_OFF)
+ active = true;
+ }
+ WARN(!!encoder->base.crtc != enabled,
+ "encoder's enabled state mismatch "
+ "(expected %i, found %i)\n",
+ !!encoder->base.crtc, enabled);
+ WARN(active && !encoder->base.crtc,
+ "active encoder with no crtc\n");
+
+ WARN(encoder->connectors_active != active,
+ "encoder's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, encoder->connectors_active);
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ WARN(active != encoder->connectors_active,
+ "encoder's hw state doesn't match sw tracking "
+ "(expected %i, found %i)\n",
+ encoder->connectors_active, active);
+
+ if (!encoder->base.crtc)
+ continue;
+
+ tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ WARN(active && pipe != tracked_pipe,
+ "active encoder's pipe doesn't match"
+ "(expected %i, found %i)\n",
+ tracked_pipe, pipe);
+
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+
+ DRM_DEBUG_KMS("[CRTC:%d]\n",
+ crtc->base.base.id);
+
+ WARN(crtc->active && !crtc->base.enabled,
+ "active crtc, but not enabled in sw tracking\n");
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc != &crtc->base)
+ continue;
+ enabled = true;
+ if (encoder->connectors_active)
+ active = true;
+ }
+ WARN(active != crtc->active,
+ "crtc's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, crtc->active);
+ WARN(enabled != crtc->base.enabled,
+ "crtc's computed enabled state doesn't match tracked enabled state "
+ "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+
+ assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
+ }
+}
+
+bool intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_encoder *encoder;
+ struct intel_crtc *intel_crtc;
+ unsigned disable_pipes, prepare_pipes, modeset_pipes;
+ bool ret = true;
+
+ intel_modeset_affected_pipes(crtc, &modeset_pipes,
+ &prepare_pipes, &disable_pipes);
+
+ DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+ modeset_pipes, prepare_pipes, disable_pipes);
+
+ for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+ intel_crtc_disable(&intel_crtc->base);
+
+ saved_hwmode = crtc->hwmode;
+ saved_mode = crtc->mode;
+
+ /* Hack: Because we don't (yet) support global modeset on multiple
+ * crtcs, we don't keep track of the new mode for more than one crtc.
+ * Hence simply check whether any bit is set in modeset_pipes in all the
+ * pieces of code that are not yet converted to deal with multiple crtcs
+ * changing their mode at the same time. */
+ adjusted_mode = NULL;
+ if (modeset_pipes) {
+ adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
+ if (IS_ERR(adjusted_mode)) {
+ return false;
+ }
+ }
+
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+ if (intel_crtc->base.enabled)
+ dev_priv->display.crtc_disable(&intel_crtc->base);
+ }
+
+ /* crtc->mode is already used by the ->mode_set callbacks, hence we need
+ * to set it here already despite that we pass it down the callchain.
*/
- intel_crtc->dpms_mode = -1;
+ if (modeset_pipes)
+ crtc->mode = *mode;
+
+ /* Only after disabling all output pipelines that will be changed can we
+ * update the output configuration. */
+ intel_modeset_update_state(dev, prepare_pipes);
- /* We need to fix up any BIOS configuration that conflicts with
- * our expectations.
+ /* Set up the DPLL and any encoder state that needs to adjust or depend
+ * on the DPLL.
*/
- intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ ret = !intel_crtc_mode_set(&intel_crtc->base,
+ mode, adjusted_mode,
+ x, y, fb);
+ if (!ret)
+ goto done;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != &intel_crtc->base)
+ continue;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.id, drm_get_encoder_name(encoder),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+ }
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+ dev_priv->display.crtc_enable(&intel_crtc->base);
+
+ if (modeset_pipes) {
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+ }
+
+ /* FIXME: add subpixel order */
+done:
+ drm_mode_destroy(dev, adjusted_mode);
+ if (!ret && crtc->enabled) {
+ crtc->hwmode = saved_hwmode;
+ crtc->mode = saved_mode;
+ } else {
+ intel_modeset_check_state(dev);
+ }
+
+ return ret;
}
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
- .dpms = intel_crtc_dpms,
- .mode_fixup = intel_crtc_mode_fixup,
- .mode_set = intel_crtc_mode_set,
- .mode_set_base = intel_pipe_set_base,
- .mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_disable,
-};
+#undef for_each_intel_crtc_masked
+
+static void intel_set_config_free(struct intel_set_config *config)
+{
+ if (!config)
+ return;
+
+ kfree(config->save_connector_encoders);
+ kfree(config->save_encoder_crtcs);
+ kfree(config);
+}
+
+static int intel_set_config_save_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int count;
+
+ config->save_encoder_crtcs =
+ kcalloc(dev->mode_config.num_encoder,
+ sizeof(struct drm_crtc *), GFP_KERNEL);
+ if (!config->save_encoder_crtcs)
+ return -ENOMEM;
+
+ config->save_connector_encoders =
+ kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_encoder *), GFP_KERNEL);
+ if (!config->save_connector_encoders)
+ return -ENOMEM;
+
+ /* Copy data. Note that driver private data is not affected.
+ * Should anything bad happen, only the expected state is
+ * restored, not the driver's personal bookkeeping.
+ */
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ config->save_encoder_crtcs[count++] = encoder->crtc;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ config->save_connector_encoders[count++] = connector->encoder;
+ }
+
+ return 0;
+}
+
+static void intel_set_config_restore_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ int count;
+
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(config->save_encoder_crtcs[count++]);
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ connector->new_encoder =
+ to_intel_encoder(config->save_connector_encoders[count++]);
+ }
+}
+
+static void
+intel_set_config_compute_mode_changes(struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (set->crtc->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+ if (set->crtc->fb == NULL) {
+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ config->mode_changed = true;
+ } else if (set->fb == NULL) {
+ config->mode_changed = true;
+ } else if (set->fb->depth != set->crtc->fb->depth) {
+ config->mode_changed = true;
+ } else if (set->fb->bits_per_pixel !=
+ set->crtc->fb->bits_per_pixel) {
+ config->mode_changed = true;
+ } else
+ config->fb_changed = true;
+ }
+
+ if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
+ config->fb_changed = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG_KMS("modes are different, full mode set\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ config->mode_changed = true;
+ }
+}
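+
+/* Summing up the decision (sketch): depth, bpp or mode changes force a
+ * full modeset, whereas a changed fb or panning offset only requires a
+ * base update through intel_pipe_set_base(). */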
+
+static int
+intel_modeset_stage_output_state(struct drm_device *dev,
+ struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+ struct drm_crtc *new_crtc;
+ struct intel_connector *connector;
+ struct intel_encoder *encoder;
+ int count, ro;
+
+ /* The upper layers ensure that we either disable a crtc or have a list
+ * of connectors. For paranoia, double-check this. */
+ WARN_ON(!set->fb && (set->num_connectors != 0));
+ WARN_ON(set->fb && (set->num_connectors == 0));
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* Traverse the passed-in connector list and get encoders
+ * for them. */
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base) {
+ connector->new_encoder = connector->encoder;
+ break;
+ }
+ }
+
+ /* If we disable the crtc, disable all its connectors. Also, if
+ * the connector is on the changing crtc but not on the new
+ * connector list, disable it. */
+ if ((!set->fb || ro == set->num_connectors) &&
+ connector->base.encoder &&
+ connector->base.encoder->crtc == set->crtc) {
+ connector->new_encoder = NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+ }
+
+ if (&connector->new_encoder->base != connector->base.encoder) {
+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+
+ /* Disable all disconnected encoders. */
+ if (connector->base.status == connector_status_disconnected)
+ connector->new_encoder = NULL;
+ }
+ /* connector->new_encoder is now updated for all connectors. */
+
+ /* Update crtc of enabled connectors. */
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (!connector->new_encoder)
+ continue;
+
+ new_crtc = connector->new_encoder->base.crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base)
+ new_crtc = set->crtc;
+ }
+
+ /* Make sure the new CRTC will work with the encoder */
+ if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
+ new_crtc)) {
+ return -EINVAL;
+ }
+ connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ new_crtc->base.id);
+ }
+
+ /* Check for any encoders that needs to be disabled. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->new_encoder == encoder) {
+ WARN_ON(!connector->new_encoder->new_crtc);
+
+ goto next_encoder;
+ }
+ }
+ encoder->new_crtc = NULL;
+next_encoder:
+ /* Only now check for crtc changes so we don't miss encoders
+ * that will be disabled. */
+ if (&encoder->new_crtc->base != encoder->base.crtc) {
+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+ }
+ /* Now we've also updated encoder->new_crtc for all encoders. */
+
+ return 0;
+}
+
+static int intel_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_mode_set save_set;
+ struct intel_set_config *config;
+ int ret;
+
+ BUG_ON(!set);
+ BUG_ON(!set->crtc);
+ BUG_ON(!set->crtc->helper_private);
+
+ if (!set->mode)
+ set->fb = NULL;
+
+ /* The fb helper likes to play gross jokes with ->mode_set_config.
+ * Unfortunately the crtc helper doesn't do much at all for this case,
+ * so we have to cope with this madness until the fb helper is fixed up. */
+ if (set->fb && set->num_connectors == 0)
+ return 0;
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ }
+
+ dev = set->crtc->dev;
+
+ ret = -ENOMEM;
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ goto out_config;
+
+ ret = intel_set_config_save_state(dev, config);
+ if (ret)
+ goto out_config;
+
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->fb;
+
+ /* Compute whether we need a full modeset, only an fb base update or no
+ * change at all. In the future we might also check whether only the
+ * mode changed, e.g. for LVDS where we only change the panel fitter in
+ * such cases. */
+ intel_set_config_compute_mode_changes(set, config);
+
+ ret = intel_modeset_stage_output_state(dev, set, config);
+ if (ret)
+ goto fail;
+
+ if (config->mode_changed) {
+ if (set->mode) {
+ DRM_DEBUG_KMS("attempting to set mode from"
+ " userspace\n");
+ drm_mode_debug_printmodeline(set->mode);
+ }
+
+ if (!intel_set_mode(set->crtc, set->mode,
+ set->x, set->y, set->fb)) {
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else if (config->fb_changed) {
+ ret = intel_pipe_set_base(set->crtc,
+ set->x, set->y, set->fb);
+ }
+
+ intel_set_config_free(config);
+
+ return 0;
+
+fail:
+ intel_set_config_restore_state(dev, config);
+
+ /* Try to restore the config */
+ if (config->mode_changed &&
+ !intel_set_mode(save_set.crtc, save_set.mode,
+ save_set.x, save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+
+out_config:
+ intel_set_config_free(config);
+ return ret;
+}
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .reset = intel_crtc_reset,
.cursor_set = intel_crtc_cursor_set,
.cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
};
@@ -6655,24 +7448,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
- intel_crtc_reset(&intel_crtc->base);
- intel_crtc->active = true; /* force the pipe off on setup_init_config */
intel_crtc->bpp = 24; /* default for pre-Ironlake */
- if (HAS_PCH_SPLIT(dev)) {
- intel_helper_funcs.prepare = ironlake_crtc_prepare;
- intel_helper_funcs.commit = ironlake_crtc_commit;
- } else {
- intel_helper_funcs.prepare = i9xx_crtc_prepare;
- intel_helper_funcs.commit = i9xx_crtc_commit;
- }
-
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
- intel_crtc->busy = false;
-
- setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
- (unsigned long)intel_crtc);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -6699,15 +7477,23 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
return 0;
}
-static int intel_encoder_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct intel_encoder *encoder)
{
- struct intel_encoder *encoder;
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_encoder *source_encoder;
int index_mask = 0;
int entry = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
- if (type_mask & encoder->clone_mask)
+ list_for_each_entry(source_encoder,
+ &dev->mode_config.encoder_list, base.head) {
+
+ if (encoder == source_encoder)
index_mask |= (1 << entry);
+
+ /* Intel hw has only one MUX where encoders could be cloned. */
+ if (encoder->cloneable && source_encoder->cloneable)
+ index_mask |= (1 << entry);
+
entry++;
}
@@ -6748,10 +7534,10 @@ static void intel_setup_outputs(struct drm_device *dev)
dpd_is_edp = intel_dpd_is_edp(dev);
if (has_edp_a(dev))
- intel_dp_init(dev, DP_A);
+ intel_dp_init(dev, DP_A, PORT_A);
if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_D);
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
}
intel_crt_init(dev);
@@ -6782,22 +7568,22 @@ static void intel_setup_outputs(struct drm_device *dev)
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB, true);
if (!found)
- intel_hdmi_init(dev, HDMIB);
+ intel_hdmi_init(dev, HDMIB, PORT_B);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_B);
+ intel_dp_init(dev, PCH_DP_B, PORT_B);
}
if (I915_READ(HDMIC) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMIC);
+ intel_hdmi_init(dev, HDMIC, PORT_C);
if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMID);
+ intel_hdmi_init(dev, HDMID, PORT_D);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
- intel_dp_init(dev, PCH_DP_C);
+ intel_dp_init(dev, PCH_DP_C, PORT_C);
if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_D);
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
int found;
@@ -6805,17 +7591,17 @@ static void intel_setup_outputs(struct drm_device *dev)
/* SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, SDVOB, true);
if (!found)
- intel_hdmi_init(dev, SDVOB);
+ intel_hdmi_init(dev, SDVOB, PORT_B);
if (!found && (I915_READ(DP_B) & DP_DETECTED))
- intel_dp_init(dev, DP_B);
+ intel_dp_init(dev, DP_B, PORT_B);
}
if (I915_READ(SDVOC) & PORT_DETECTED)
- intel_hdmi_init(dev, SDVOC);
+ intel_hdmi_init(dev, SDVOC, PORT_C);
/* Shares lanes with HDMI on SDVOC */
if (I915_READ(DP_C) & DP_DETECTED)
- intel_dp_init(dev, DP_C);
+ intel_dp_init(dev, DP_C, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -6824,12 +7610,12 @@ static void intel_setup_outputs(struct drm_device *dev)
found = intel_sdvo_init(dev, SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
- intel_hdmi_init(dev, SDVOB);
+ intel_hdmi_init(dev, SDVOB, PORT_B);
}
if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_B\n");
- intel_dp_init(dev, DP_B);
+ intel_dp_init(dev, DP_B, PORT_B);
}
}
@@ -6844,18 +7630,18 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
- intel_hdmi_init(dev, SDVOC);
+ intel_hdmi_init(dev, SDVOC, PORT_C);
}
if (SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_C\n");
- intel_dp_init(dev, DP_C);
+ intel_dp_init(dev, DP_C, PORT_C);
}
}
if (SUPPORTS_INTEGRATED_DP(dev) &&
(I915_READ(DP_D) & DP_DETECTED)) {
DRM_DEBUG_KMS("probing DP_D\n");
- intel_dp_init(dev, DP_D);
+ intel_dp_init(dev, DP_D, PORT_D);
}
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
@@ -6866,12 +7652,9 @@ static void intel_setup_outputs(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
- intel_encoder_clones(dev, encoder->clone_mask);
+ intel_encoder_clones(encoder);
}
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ironlake_init_pch_refclk(dev);
}
@@ -6973,13 +7756,15 @@ static void intel_init_display(struct drm_device *dev)
/* We always want a DPMS function */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.crtc_enable = ironlake_crtc_enable;
+ dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else {
- dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
}
@@ -7023,7 +7808,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.write_eld = haswell_write_eld;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_G4X(dev)) {
@@ -7101,21 +7886,16 @@ static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
- /* Thinkpad R31 needs pipe A force quirk */
- { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
- /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
- { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
- /* ThinkPad X40 needs pipe A force quirk */
-
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
/* 855 & before need to leave pipe A & dpll A up */
{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
@@ -7231,10 +8011,251 @@ void intel_modeset_init(struct drm_device *dev)
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
+}
+
+static void
+intel_connector_break_all_links(struct intel_connector *connector)
+{
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ connector->encoder->connectors_active = false;
+ connector->encoder->base.crtc = NULL;
+}
+
+static void intel_enable_pipe_a(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ struct drm_connector *crt = NULL;
+ struct intel_load_detect_pipe load_detect_temp;
+
+ /* We can't just switch on pipe A; we need to set things up with a
+ * proper mode and output configuration. As a gross hack, enable pipe A
+ * by enabling the load detect pipe once. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
+ crt = &connector->base;
+ break;
+ }
+ }
+
+ if (!crt)
+ return;
+
+ if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
+ intel_release_load_detect_pipe(crt, &load_detect_temp);
+
- INIT_WORK(&dev_priv->idle_work, intel_idle_update);
- setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
- (unsigned long)dev);
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
+
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ reg = PIPECONF(crtc->pipe);
+ I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+
+ /* We need to sanitize the plane -> pipe mapping first because this will
+ * disable the crtc (and hence change the state) if it is wrong. */
+ if (!HAS_PCH_SPLIT(dev)) {
+ struct intel_connector *connector;
+ bool plane;
+
+ reg = DSPCNTR(crtc->plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
+ (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+ goto ok;
+
+ DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
+ crtc->base.base.id);
+
+ /* Pipe has the wrong plane attached and the plane is active.
+ * Temporarily change the plane mapping and disable everything
+ * ... */
+ plane = crtc->plane;
+ crtc->plane = !plane;
+ dev_priv->display.crtc_disable(&crtc->base);
+ crtc->plane = plane;
+
+ /* ... and break all links. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->base.crtc != &crtc->base)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+
+ WARN_ON(crtc->active);
+ crtc->base.enabled = false;
+ }
+ok:
+
+ if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+ crtc->pipe == PIPE_A && !crtc->active) {
+ /* BIOS forgot to enable pipe A, this mostly happens after
+ * resume. Force-enable the pipe to fix this; the update_dpms
+ * call below will restore the pipe to the right state, but leave
+ * the required bits on. */
+ intel_enable_pipe_a(dev);
+ }
+
+ /* Adjust the state of the output pipe according to whether we
+ * have active connectors/encoders. */
+ intel_crtc_update_dpms(&crtc->base);
+
+ if (crtc->active != crtc->base.enabled) {
+ struct intel_encoder *encoder;
+
+ /* This can happen either due to bugs in the get_hw_state
+ * functions or because the pipe is force-enabled due to the
+ * pipe A quirk. */
+ DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
+ crtc->base.base.id,
+ crtc->base.enabled ? "enabled" : "disabled",
+ crtc->active ? "enabled" : "disabled");
+
+ crtc->base.enabled = crtc->active;
+
+ /* Because we only establish the connector -> encoder ->
+ * crtc links if something is active, this means the
+ * crtc is now deactivated. Break the links. connector
+ * -> encoder links are only established when things are
+ * actually up, hence no need to break them. */
+ WARN_ON(crtc->active);
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+ WARN_ON(encoder->connectors_active);
+ encoder->base.crtc = NULL;
+ }
+ }
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+ struct intel_connector *connector;
+ struct drm_device *dev = encoder->base.dev;
+
+ /* We need to check both for a crtc link (meaning that the
+ * encoder is active and trying to read from a pipe) and the
+ * pipe itself being active. */
+ bool has_active_crtc = encoder->base.crtc &&
+ to_intel_crtc(encoder->base.crtc)->active;
+
+ if (encoder->connectors_active && !has_active_crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ /* Connector is active, but has no active pipe. This is
+ * fallout from our resume register restoring. Disable
+ * the encoder manually again. */
+ if (encoder->base.crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+ encoder->disable(encoder);
+ }
+
+ /* Inconsistent output/port/pipe state happens presumably due to
+ * a bug in one of the get_hw_state functions. Or someplace else
+ * in our code, like the register restore mess on resume. Clamp
+ * things to off as a safer default. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+ }
+ /* Enabled encoders without active connectors will be fixed in
+ * the crtc fixup. */
+}
+
+/* Scans out the current hw modeset state, sanitizes it and maps it into the drm
+ * and i915 state tracking structures. */
+void intel_modeset_setup_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ u32 tmp;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ tmp = I915_READ(PIPECONF(pipe));
+ if (tmp & PIPECONF_ENABLE)
+ crtc->active = true;
+ else
+ crtc->active = false;
+
+ crtc->base.enabled = crtc->active;
+
+ DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
+ crtc->base.base.id,
+ crtc->active ? "enabled" : "disabled");
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ pipe = 0;
+
+ if (encoder->get_hw_state(encoder, &pipe)) {
+ encoder->base.crtc =
+ dev_priv->pipe_to_crtc_mapping[pipe];
+ } else {
+ encoder->base.crtc = NULL;
+ }
+
+ encoder->connectors_active = false;
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ encoder->base.crtc ? "enabled" : "disabled",
+ pipe);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->get_hw_state(connector)) {
+ connector->base.dpms = DRM_MODE_DPMS_ON;
+ connector->encoder->connectors_active = true;
+ connector->base.encoder = &connector->encoder->base;
+ } else {
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ connector->base.encoder ? "enabled" : "disabled");
+ }
+
+ /* HW state is read out, now we need to sanitize this mess. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ intel_sanitize_encoder(encoder);
+ }
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_sanitize_crtc(crtc);
+ }
+
+ intel_modeset_update_staged_output_state(dev);
+
+ intel_modeset_check_state(dev);
}
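Taken together, intel_modeset_setup_hw_state() runs in two passes: first trust the hardware and copy its state into the software tracking, then sanitize any combination that cannot be expressed consistently. A minimal userspace model of that flow; the types and the broken-BIOS scenario in main() are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct crtc { bool active; bool enabled; };
struct encoder { struct crtc *crtc; bool connectors_active; };
struct connector { struct encoder *encoder; int dpms; };

enum { DPMS_ON, DPMS_OFF };

/* Pass 1: record what the hardware says, no questions asked. */
static void read_out(struct crtc *c, struct encoder *e, struct connector *n,
		     bool pipe_on, bool port_on, bool link_up)
{
	c->active = pipe_on;
	c->enabled = c->active;
	e->crtc = port_on ? c : NULL;
	n->encoder = link_up ? e : NULL;
	n->dpms = link_up ? DPMS_ON : DPMS_OFF;
	e->connectors_active = link_up;
}

/* Pass 2: clamp impossible combinations to "off", mirroring
 * intel_sanitize_encoder(). */
static void sanitize(struct crtc *c, struct encoder *e, struct connector *n)
{
	bool has_active_crtc = e->crtc && e->crtc->active;

	if (e->connectors_active && !has_active_crtc) {
		printf("encoder active without active pipe, clamping off\n");
		n->dpms = DPMS_OFF;
		n->encoder = NULL;
		e->connectors_active = false;
		e->crtc = NULL;
	}
}

int main(void)
{
	struct crtc c; struct encoder e; struct connector n;

	/* BIOS left the port on but the pipe off. */
	read_out(&c, &e, &n, false, true, true);
	sanitize(&c, &e, &n);
	printf("crtc %s, connector dpms %s\n",
	       c.active ? "active" : "off", n.dpms == DPMS_ON ? "on" : "off");
	return 0;
}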
void intel_modeset_gem_init(struct drm_device *dev)
@@ -7242,6 +8263,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
+
+ intel_modeset_setup_hw_state(dev);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -7280,19 +8303,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
* enqueue unpin/hotplug work. */
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
- cancel_work_sync(&dev_priv->rps_work);
+ cancel_work_sync(&dev_priv->rps.work);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
- /* Shut off idle work before the crtcs get freed. */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
- del_timer_sync(&intel_crtc->idle_timer);
- }
- del_timer_sync(&dev_priv->idle_timer);
- cancel_work_sync(&dev_priv->idle_work);
-
drm_mode_config_cleanup(dev);
}
@@ -7338,7 +8353,7 @@ struct intel_display_error_state {
u32 position;
u32 base;
u32 size;
- } cursor[2];
+ } cursor[I915_MAX_PIPES];
struct intel_pipe_error_state {
u32 conf;
@@ -7350,7 +8365,7 @@ struct intel_display_error_state {
u32 vtotal;
u32 vblank;
u32 vsync;
- } pipe[2];
+ } pipe[I915_MAX_PIPES];
struct intel_plane_error_state {
u32 control;
@@ -7360,7 +8375,7 @@ struct intel_display_error_state {
u32 addr;
u32 surface;
u32 tile_offset;
- } plane[2];
+ } plane[I915_MAX_PIPES];
};
struct intel_display_error_state *
@@ -7374,7 +8389,7 @@ intel_display_capture_error_state(struct drm_device *dev)
if (error == NULL)
return NULL;
- for (i = 0; i < 2; i++) {
+ for_each_pipe(i) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
@@ -7407,9 +8422,11 @@ intel_display_print_error_state(struct seq_file *m,
struct drm_device *dev,
struct intel_display_error_state *error)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
int i;
- for (i = 0; i < 2; i++) {
+ seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
+ for_each_pipe(i) {
seq_printf(m, "Pipe [%d]:\n", i);
seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f1bd4f4cd667b7b6e3e98082f7f59bfbae567f67..6c8746c030c720ac4792c80362f273f9c64084c9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -35,42 +35,10 @@
#include "intel_drv.h"
#include
#include "i915_drv.h"
-#include
-#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
-#define DP_LINK_CONFIGURATION_SIZE 9
-
-struct intel_dp {
- struct intel_encoder base;
- uint32_t output_reg;
- uint32_t DP;
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
- bool has_audio;
- enum hdmi_force_audio force_audio;
- uint32_t color_range;
- int dpms_mode;
- uint8_t link_bw;
- uint8_t lane_count;
- uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
- struct i2c_adapter adapter;
- struct i2c_algo_dp_aux_data algo;
- bool is_pch_edp;
- uint8_t train_set[4];
- int panel_power_up_delay;
- int panel_power_down_delay;
- int panel_power_cycle_delay;
- int backlight_on_delay;
- int backlight_off_delay;
- struct drm_display_mode *panel_fixed_mode; /* for eDP */
- struct delayed_work panel_vdd_work;
- bool want_panel_vdd;
- struct edid *edid; /* cached EDID for eDP */
- int edid_mode_count;
-};
-
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -839,9 +807,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
}
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder);
-static void ironlake_edp_pll_off(struct drm_encoder *encoder);
-
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -852,14 +817,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* Turn on the eDP PLL if needed */
- if (is_edp(intel_dp)) {
- if (!is_pch_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
- else
- ironlake_edp_pll_off(encoder);
- }
-
/*
* There are four kinds of DP registers:
*
@@ -881,10 +838,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
* supposed to be read-only.
*/
intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
- intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
/* Handle DP bits in common between all three register formats */
-
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
switch (intel_dp->lane_count) {
@@ -931,7 +886,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= intel_crtc->pipe << 29;
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -953,7 +907,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -1224,27 +1177,49 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
msleep(intel_dp->backlight_off_delay);
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
+ WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We don't adjust intel_dp->DP while tearing down the link, to
+ * facilitate link retraining (e.g. after hotplug). Hence clear all
+ * enable bits here to ensure that we don't enable too much. */
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(200);
}
-static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
dpa_ctl = I915_READ(DP_A);
+ WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+ "dp pll off, should be on\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We can't rely on the value tracked for the DP register in
+ * intel_dp->DP because link_down must not change that (otherwise link
+ * re-training will fail). */
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
@@ -1281,10 +1256,57 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
}
}
-static void intel_dp_prepare(struct drm_encoder *encoder)
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(intel_dp->output_reg);
+ if (!(tmp & DP_PORT_EN))
+ return false;
+
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ *pipe = PORT_TO_PIPE(tmp);
+ } else {
+ u32 trans_sel;
+ u32 trans_dp;
+ int i;
+
+ switch (intel_dp->output_reg) {
+ case PCH_DP_B:
+ trans_sel = TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ trans_sel = TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ trans_sel = TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ return true;
+ }
+
+ for_each_pipe(i) {
+ trans_dp = I915_READ(TRANS_DP_CTL(i));
+ if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
+ *pipe = i;
+ return true;
+ }
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+
+ return true;
+}
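intel_dp_get_hw_state() has to recover the pipe from three different register layouts; on CPT the port-to-pipe mapping lives in the per-transcoder DP control registers, so the code scans them for a matching port-select field. A self-contained model of that branch, with the register values and bit masks as simplified placeholders rather than the real definitions:

#include <stdio.h>

#define NPIPES		3
#define PORT_SEL_MASK	0x3u
#define PORT_SEL_B	0x0u
#define PORT_SEL_C	0x1u
#define PORT_SEL_D	0x2u

/* Fake TRANS_DP_CTL registers, one per pipe. */
static unsigned trans_dp_ctl[NPIPES] = { PORT_SEL_C, PORT_SEL_B, PORT_SEL_D };

/* Return the pipe driving the given port select value, or -1. */
static int pipe_for_port(unsigned sel)
{
	int i;

	for (i = 0; i < NPIPES; i++)
		if ((trans_dp_ctl[i] & PORT_SEL_MASK) == sel)
			return i;
	return -1;
}

int main(void)
{
	printf("PCH DP B is on pipe %d\n", pipe_for_port(PORT_SEL_B));
	printf("PCH DP D is on pipe %d\n", pipe_for_port(PORT_SEL_D));
	return 0;
}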
+
+static void intel_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
@@ -1292,14 +1314,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
ironlake_edp_panel_off(intel_dp);
- intel_dp_link_down(intel_dp);
+
+ /* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
+ if (!is_cpu_edp(intel_dp))
+ intel_dp_link_down(intel_dp);
}
-static void intel_dp_commit(struct drm_encoder *encoder)
+static void intel_post_disable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (is_cpu_edp(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_pll_off(intel_dp);
+ }
+}
+
+static void intel_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (WARN_ON(dp_reg & DP_PORT_EN))
+ return;
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -1308,47 +1347,14 @@ static void intel_dp_commit(struct drm_encoder *encoder)
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
-
- intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
-
- if (HAS_PCH_CPT(dev))
- intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
-static void
-intel_dp_dpms(struct drm_encoder *encoder, int mode)
+static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- if (mode != DRM_MODE_DPMS_ON) {
- /* Switching the panel off requires vdd. */
- ironlake_edp_panel_vdd_on(intel_dp);
- ironlake_edp_backlight_off(intel_dp);
- intel_dp_sink_dpms(intel_dp, mode);
- ironlake_edp_panel_off(intel_dp);
- intel_dp_link_down(intel_dp);
-
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_off(encoder);
- } else {
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
-
- ironlake_edp_panel_vdd_on(intel_dp);
- intel_dp_sink_dpms(intel_dp, mode);
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_start_link_train(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
- intel_dp_complete_link_train(intel_dp);
- } else
- ironlake_edp_panel_vdd_off(intel_dp, false);
- ironlake_edp_backlight_on(intel_dp);
- }
- intel_dp->dpms_mode = mode;
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_on(intel_dp);
}
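The single dpms callback is thus split into four stage hooks, and the eDP PLL work lands in pre_enable/post_disable precisely because the PLL may only be touched while the pipe is off (hence the assert_pipe_disabled() checks above). A sketch of the call ordering the reworked crtc enable/disable paths are expected to follow; only the hook names match the patch, the driving functions are illustrative:

#include <stdio.h>

struct encoder_hooks {
	void (*pre_enable)(void);   /* pipe still off: eDP PLL on */
	void (*enable)(void);       /* pipe running: port on, link training */
	void (*disable)(void);      /* pipe still running: panel/backlight off */
	void (*post_disable)(void); /* pipe off again: link down, eDP PLL off */
};

static void pll_on(void)   { printf("pre_enable:   eDP PLL on (pipe off)\n"); }
static void port_on(void)  { printf("enable:       port on + link train\n"); }
static void port_off(void) { printf("disable:      panel/backlight off\n"); }
static void pll_off(void)  { printf("post_disable: link down, PLL off\n"); }

static void crtc_enable(const struct encoder_hooks *h)
{
	h->pre_enable();
	printf("              <pipe/plane enabled>\n");
	h->enable();
}

static void crtc_disable(const struct encoder_hooks *h)
{
	h->disable();
	printf("              <pipe/plane disabled>\n");
	h->post_disable();
}

int main(void)
{
	const struct encoder_hooks dp = { pll_on, port_on, port_off, pll_off };

	crtc_enable(&dp);
	crtc_disable(&dp);
	return 0;
}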
/*
@@ -1667,6 +1673,45 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ }
+
+ } else {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ }
+ }
+
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
@@ -1674,12 +1719,15 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
- ret = intel_dp_aux_native_write(intel_dp,
- DP_TRAINING_LANE0_SET,
- intel_dp->train_set,
- intel_dp->lane_count);
- if (ret != intel_dp->lane_count)
- return false;
+ if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+ DP_TRAINING_PATTERN_DISABLE) {
+ ret = intel_dp_aux_native_write(intel_dp,
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+ if (ret != intel_dp->lane_count)
+ return false;
+ }
return true;
}
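Folding the platform-specific LINK_TRAIN bits into intel_dp_set_link_train() replaces the callers' open-coded selection with a single clear-and-set of the training field, chosen per platform. A standalone model of that mapping; the bit values and the field placement are placeholders, only the structure mirrors the patch:

#include <stdio.h>

enum pat { PAT_DISABLE, PAT_1, PAT_2, PAT_3 };

#define TRAIN_MASK	0x3u
#define TRAIN_OFF	0x0u
#define TRAIN_PAT_1	0x1u
#define TRAIN_PAT_2	0x2u

/* Clear the old training field and insert the new one. */
static unsigned set_train_bits(unsigned reg, enum pat p, int cpt)
{
	static const unsigned tbl[] = { TRAIN_OFF, TRAIN_PAT_1, TRAIN_PAT_2,
					TRAIN_PAT_2 /* pattern 3 unsupported */ };
	unsigned shift = cpt ? 8 : 0; /* pretend CPT keeps the field elsewhere */

	reg &= ~(TRAIN_MASK << shift);
	return reg | (tbl[p] << shift);
}

int main(void)
{
	unsigned reg = 0xffffffffu;

	reg = set_train_bits(reg, PAT_1, 1);
	printf("CPT, pattern 1: %08x\n", reg);
	reg = set_train_bits(reg, PAT_DISABLE, 1);
	printf("CPT, off:       %08x\n", reg);
	return 0;
}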
@@ -1689,26 +1737,12 @@ static void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
- u32 reg;
uint32_t DP = intel_dp->DP;
- /*
- * On CPT we have to enable the port in training pattern 1, which
- * will happen below in intel_dp_set_link_train. Otherwise, enable
- * the port and wait for it to become active.
- */
- if (!HAS_PCH_CPT(dev)) {
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
-
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
@@ -1716,10 +1750,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- DP &= ~DP_LINK_TRAIN_MASK_CPT;
- else
- DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
voltage_tries = 0;
@@ -1743,12 +1773,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
- else
- reg = DP | DP_LINK_TRAIN_PAT_1;
-
- if (!intel_dp_set_link_train(intel_dp, reg,
+ if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE))
break;
@@ -1803,10 +1828,8 @@ static void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
bool channel_eq = false;
int tries, cr_tries;
- u32 reg;
uint32_t DP = intel_dp->DP;
/* channel equalization */
@@ -1835,13 +1858,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
- else
- reg = DP | DP_LINK_TRAIN_PAT_2;
-
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_dp, reg,
+ if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_2 |
DP_LINK_SCRAMBLING_DISABLE))
break;
@@ -1876,15 +1894,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_OFF_CPT;
- else
- reg = DP | DP_LINK_TRAIN_OFF;
-
- I915_WRITE(intel_dp->output_reg, reg);
- POSTING_READ(intel_dp->output_reg);
- intel_dp_aux_native_write_1(intel_dp,
- DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+ intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
static void
@@ -1894,18 +1904,11 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
- if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
DRM_DEBUG_KMS("\n");
- if (is_edp(intel_dp)) {
- DP &= ~DP_PLL_ENABLE;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
- udelay(100);
- }
-
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
@@ -1917,13 +1920,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
- if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- DP |= DP_LINK_TRAIN_OFF_CPT;
- else
- DP |= DP_LINK_TRAIN_OFF;
- }
-
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
@@ -2032,10 +2028,10 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
+ if (!intel_dp->base.connectors_active)
return;
- if (!intel_dp->base.base.crtc)
+ if (WARN_ON(!intel_dp->base.base.crtc))
return;
/* Try to read receiver status if the link appears to be up */
@@ -2159,7 +2155,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
ret = drm_add_edid_modes(connector, intel_dp->edid);
drm_edid_to_eld(connector,
intel_dp->edid);
- connector->display_info.raw_edid = NULL;
return intel_dp->edid_mode_count;
}
@@ -2205,7 +2200,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
}
@@ -2270,8 +2264,6 @@ intel_dp_detect_audio(struct drm_connector *connector)
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
-
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -2325,9 +2317,8 @@ intel_dp_set_property(struct drm_connector *connector,
done:
if (intel_dp->base.base.crtc) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y,
- crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -2361,15 +2352,13 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
- .dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
- .prepare = intel_dp_prepare,
.mode_set = intel_dp_mode_set,
- .commit = intel_dp_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
@@ -2440,7 +2429,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
}
void
-intel_dp_init(struct drm_device *dev, int output_reg)
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
@@ -2455,7 +2444,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
return;
intel_dp->output_reg = output_reg;
- intel_dp->dpms_mode = -1;
+ intel_dp->port = port;
+ /* Preserve the current hw state. */
+ intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
@@ -2482,18 +2473,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
connector->polled = DRM_CONNECTOR_POLL_HPD;
- if (output_reg == DP_B || output_reg == PCH_DP_B)
- intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
- else if (output_reg == DP_C || output_reg == PCH_DP_C)
- intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
- else if (output_reg == DP_D || output_reg == PCH_DP_D)
- intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+ intel_encoder->cloneable = false;
- if (is_edp(intel_dp)) {
- intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- ironlake_panel_vdd_work);
- }
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+ ironlake_panel_vdd_work);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -2507,29 +2490,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
+ intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
/* Set up the DDC bus. */
- switch (output_reg) {
- case DP_A:
- name = "DPDDC-A";
- break;
- case DP_B:
- case PCH_DP_B:
- dev_priv->hotplug_supported_mask |=
- DPB_HOTPLUG_INT_STATUS;
- name = "DPDDC-B";
- break;
- case DP_C:
- case PCH_DP_C:
- dev_priv->hotplug_supported_mask |=
- DPC_HOTPLUG_INT_STATUS;
- name = "DPDDC-C";
- break;
- case DP_D:
- case PCH_DP_D:
- dev_priv->hotplug_supported_mask |=
- DPD_HOTPLUG_INT_STATUS;
- name = "DPDDC-D";
- break;
+ switch (port) {
+ case PORT_A:
+ name = "DPDDC-A";
+ break;
+ case PORT_B:
+ dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
+ name = "DPDDC-B";
+ break;
+ case PORT_C:
+ dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
+ name = "DPDDC-C";
+ break;
+ case PORT_D:
+ dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
+ name = "DPDDC-D";
+ break;
+ default:
+ WARN(1, "Invalid port %c\n", port_name(port));
+ break;
}
/* Cache some DPCD data in the eDP case */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7db849052a98626e5784589bd854d129fe850407..05cc7c372fc5150863b7ad1c739637c29cad65b6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -31,6 +31,7 @@
#include
#include
#include
+#include
#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
@@ -40,7 +41,11 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && drm_can_sleep()) msleep(W); \
+ if (W && drm_can_sleep()) { \
+ msleep(W); \
+ } else { \
+ cpu_relax(); \
+ } \
} \
ret__; \
})
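The _wait_for() change makes the busy-wait case explicit: sleep between polls only when a wait interval is given and sleeping is legal, otherwise spin with cpu_relax(). A userspace approximation, using nanosleep in place of msleep and a compiler barrier standing in for cpu_relax():

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll cond() for up to timeout_ms; sleep wait_ms between polls if allowed. */
static int wait_for(bool (*cond)(void), int timeout_ms, int wait_ms,
		    bool can_sleep)
{
	struct timespec t0, now, w = { 0, wait_ms * 1000000L };

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (;;) {
		if (cond())
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - t0.tv_sec) * 1000 +
		    (now.tv_nsec - t0.tv_nsec) / 1000000L > timeout_ms)
			return -1; /* -ETIMEDOUT in the kernel version */
		if (wait_ms && can_sleep)
			nanosleep(&w, NULL);
		else
			__asm__ __volatile__("" ::: "memory"); /* cpu_relax() stand-in */
	}
}

static int calls;
static bool ready(void) { return ++calls > 3; }

int main(void)
{
	int ret = wait_for(ready, 100, 1, true);

	printf("wait_for -> %d after %d polls\n", ret, calls);
	return 0;
}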
@@ -90,25 +95,6 @@
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
-/* Intel Pipe Clone Bit */
-#define INTEL_HDMIB_CLONE_BIT 1
-#define INTEL_HDMIC_CLONE_BIT 2
-#define INTEL_HDMID_CLONE_BIT 3
-#define INTEL_HDMIE_CLONE_BIT 4
-#define INTEL_HDMIF_CLONE_BIT 5
-#define INTEL_SDVO_NON_TV_CLONE_BIT 6
-#define INTEL_SDVO_TV_CLONE_BIT 7
-#define INTEL_SDVO_LVDS_CLONE_BIT 8
-#define INTEL_ANALOG_CLONE_BIT 9
-#define INTEL_TV_CLONE_BIT 10
-#define INTEL_DP_B_CLONE_BIT 11
-#define INTEL_DP_C_CLONE_BIT 12
-#define INTEL_DP_D_CLONE_BIT 13
-#define INTEL_LVDS_CLONE_BIT 14
-#define INTEL_DVO_TMDS_CLONE_BIT 15
-#define INTEL_DVO_LVDS_CLONE_BIT 16
-#define INTEL_EDP_CLONE_BIT 17
-
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
@@ -151,16 +137,48 @@ struct intel_fbdev {
struct intel_encoder {
struct drm_encoder base;
+ /*
+ * The new crtc this encoder will be driven from. Only differs from
+ * base->crtc while a modeset is in progress.
+ */
+ struct intel_crtc *new_crtc;
+
int type;
bool needs_tv_clock;
+ /*
+ * Intel hw has only one MUX where encoders could be clone, hence a
+ * simple flag is enough to compute the possible_clones mask.
+ */
+ bool cloneable;
+ bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
+ void (*pre_enable)(struct intel_encoder *);
+ void (*enable)(struct intel_encoder *);
+ void (*disable)(struct intel_encoder *);
+ void (*post_disable)(struct intel_encoder *);
+ /* Read out the current hw state of this encoder, returning true if
+ * the encoder is active. If the encoder is enabled it also sets the pipe
+ * it is connected to in the pipe parameter. */
+ bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
int crtc_mask;
- int clone_mask;
};
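The new_crtc pointer (and the matching new_encoder below) implements a stage-then-commit scheme: the modeset code computes the new links first, and only once the hardware sequence succeeds do they become the live state. A toy version of that two-phase update, with the stage/commit helper names invented for illustration:

#include <stdio.h>

struct crtc { int id; };

struct encoder {
	struct crtc *crtc;     /* current link, mirrors the hardware */
	struct crtc *new_crtc; /* staged link for the pending modeset */
};

static void stage(struct encoder *e, struct crtc *c) { e->new_crtc = c; }
static void commit(struct encoder *e)                { e->crtc = e->new_crtc; }

int main(void)
{
	struct crtc a = { 0 }, b = { 1 };
	struct encoder e = { &a, &a };

	stage(&e, &b); /* compute the new config; hw still drives crtc 0 */
	printf("staged: crtc %d, live: crtc %d\n", e.new_crtc->id, e.crtc->id);
	commit(&e);    /* hw sequence succeeded: make it the live state */
	printf("after commit, live: crtc %d\n", e.crtc->id);
	return 0;
}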
struct intel_connector {
struct drm_connector base;
+ /*
+ * The fixed encoder this connector is connected to.
+ */
struct intel_encoder *encoder;
+
+ /*
+ * The new encoder this connector will be driven by. Only differs from
+ * encoder while a modeset is in progress.
+ */
+ struct intel_encoder *new_encoder;
+
+ /* Reads out the current hw, returning true if the connector is enabled
+ * and active (i.e. dpms ON state). */
+ bool (*get_hw_state)(struct intel_connector *);
};
struct intel_crtc {
@@ -168,11 +186,13 @@ struct intel_crtc {
enum pipe pipe;
enum plane plane;
u8 lut_r[256], lut_g[256], lut_b[256];
- int dpms_mode;
- bool active; /* is the crtc on? independent of the dpms mode */
+ /*
+ * Whether the crtc and the connected output pipeline are active. Implies
+ * that crtc->enabled is set, i.e. the current mode configuration has
+ * some outputs connected to this crtc.
+ */
+ bool active;
bool primary_disabled; /* is the crtc obscured by a plane? */
- bool busy; /* is scanout buffer being updated frequently? */
- struct timer_list idle_timer;
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
@@ -311,6 +331,37 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
+#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+struct intel_dp {
+ struct intel_encoder base;
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ enum port port;
+ uint32_t color_range;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+ bool is_pch_edp;
+ uint8_t train_set[4];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct drm_display_mode *panel_fixed_mode; /* for eDP */
+ struct delayed_work panel_vdd_work;
+ bool want_panel_vdd;
+ struct edid *edid; /* cached EDID for eDP */
+ int edid_mode_count;
+};
+
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@@ -350,17 +401,21 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+extern void intel_hdmi_init(struct drm_device *dev,
+ int sdvox_reg, enum port port);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev,
- struct drm_i915_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_idle(struct drm_device *dev);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
+extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+extern void intel_dp_init(struct drm_device *dev, int output_reg,
+ enum port port);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
@@ -373,8 +428,6 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
-void intel_sanitize_pm(struct drm_device *dev);
-
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
@@ -391,10 +444,27 @@ extern void intel_panel_disable_backlight(struct drm_device *dev);
extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+struct intel_set_config {
+ struct drm_encoder **save_connector_encoders;
+ struct drm_crtc **save_encoder_crtcs;
+
+ bool fb_changed;
+ bool mode_changed;
+};
+
+extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
+extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_encoder_prepare(struct drm_encoder *encoder);
-extern void intel_encoder_commit(struct drm_encoder *encoder);
+extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
+extern void intel_encoder_noop(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
+extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
+extern void intel_connector_dpms(struct drm_connector *, int mode);
+extern bool intel_connector_get_hw_state(struct intel_connector *connector);
+extern void intel_modeset_check_state(struct drm_device *dev);
+
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
{
@@ -417,12 +487,10 @@ struct intel_load_detect_pipe {
bool load_detect_temp;
int dpms_mode;
};
-extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+extern void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
extern void intelfb_restore(void);
@@ -503,7 +571,10 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
-extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
+extern void intel_enable_ddi(struct intel_encoder *encoder);
+extern void intel_disable_ddi(struct intel_encoder *encoder);
+extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe);
extern void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ac9f2dd5648a6f1d9eb295ca03171c8b452007dd..15da99533e5b627eb493c69745b399a8b2439e36 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -36,6 +36,7 @@
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
+#define NS2501_ADDR 0x38
static const struct intel_dvo_device intel_dvo_devices[] = {
{
@@ -73,7 +74,14 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.slave_addr = 0x75,
.gpio = GMBUS_PORT_DPB,
.dev_ops = &ch7017_ops,
- }
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ns2501",
+ .dvo_reg = DVOC,
+ .slave_addr = NS2501_ADDR,
+ .dev_ops = &ns2501_ops,
+ }
};
struct intel_dvo {
@@ -96,22 +104,91 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
struct intel_dvo, base);
}
-static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+
+ return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_disable_dvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+ I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+ I915_READ(dvo_reg);
+}
+
+static void intel_enable_dvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
+ I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+ I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+}
+
+static void intel_dvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_crtc *crtc;
+
+ /* dvo supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_dvo->base.base.crtc;
+ if (!crtc) {
+ intel_dvo->base.connectors_active = false;
+ return;
+ }
+
if (mode == DRM_MODE_DPMS_ON) {
- I915_WRITE(dvo_reg, temp | DVO_ENABLE);
- I915_READ(dvo_reg);
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
+ intel_dvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} else {
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
- I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
- I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+
+ intel_dvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
}
+
+ intel_modeset_check_state(connector->dev);
}
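DPMS now lives on the connector: the callback records the requested state, flags the encoder, and lets the crtc-level helper reconcile the pipe. A compact model of intel_dvo_dpms()'s decision tree, with the crtc update and the slave dpms call stubbed out:

#include <stdbool.h>
#include <stdio.h>

enum { DPMS_ON, DPMS_OFF };

struct encoder { bool connectors_active; bool has_crtc; };
struct connector { int dpms; struct encoder *enc; };

static void crtc_update_dpms(void) { printf("  crtc dpms reconciled\n"); }

static void connector_dpms(struct connector *c, int mode)
{
	if (mode != DPMS_ON)      /* only two states supported */
		mode = DPMS_OFF;
	if (mode == c->dpms)
		return;
	c->dpms = mode;

	if (!c->enc->has_crtc) {  /* nothing driving us: sw state only */
		c->enc->connectors_active = false;
		return;
	}

	c->enc->connectors_active = (mode == DPMS_ON);
	crtc_update_dpms();       /* turns the pipe on/off as needed */
}

int main(void)
{
	struct encoder e = { false, true };
	struct connector c = { DPMS_OFF, &e };

	printf("dpms on:\n");  connector_dpms(&c, DPMS_ON);
	printf("dpms off:\n"); connector_dpms(&c, DPMS_OFF);
	return 0;
}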
static int intel_dvo_mode_valid(struct drm_connector *connector,
@@ -266,15 +343,13 @@ static void intel_dvo_destroy(struct drm_connector *connector)
}
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
- .dpms = intel_dvo_dpms,
.mode_fixup = intel_dvo_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_dvo_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_dvo_dpms,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -363,6 +438,11 @@ void intel_dvo_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type);
+ intel_encoder->disable = intel_disable_dvo;
+ intel_encoder->enable = intel_enable_dvo;
+ intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+ intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
@@ -395,17 +475,14 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
- intel_encoder->clone_mask =
- (1 << INTEL_DVO_TMDS_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
+ intel_encoder->cloneable = true;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
- intel_encoder->clone_mask =
- (1 << INTEL_DVO_LVDS_CLONE_BIT);
+ intel_encoder->cloneable = false;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 025be7dd2a27a9647ac7e596eac1ea16acb14b78..9ba0aaed7ee851b177b05948f7291c17897df13e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -150,6 +150,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VIDEO_DIP_DATA, 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
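Every write_infoframe variant in this file gets the same fix: after the payload, keep writing zeros until the full DIP buffer size so the hardware computes its ECC over deterministic data instead of stale buffer contents. A host-side model of the padded write; the buffer size, payload, and the capture array emulating the write-only data port are all arbitrary:

#include <stdio.h>
#include <string.h>

#define DIP_DATA_SIZE 32 /* stand-in for VIDEO_DIP_DATA_SIZE */

/* Emulate the write-only DIP data port with a capture buffer. */
static unsigned char dip[DIP_DATA_SIZE];
static int wptr;

static void dip_write32(unsigned w)
{
	memcpy(&dip[wptr], &w, 4);
	wptr += 4;
}

/* Payload is assumed 4-byte aligned, as in the register interface. */
static void write_infoframe(const unsigned char *data, int len)
{
	unsigned w;
	int i;

	for (i = 0; i < len; i += 4) {    /* payload */
		memcpy(&w, data + i, 4);
		dip_write32(w);
	}
	for (; i < DIP_DATA_SIZE; i += 4) /* zero-pad for stable ECC */
		dip_write32(0);
}

int main(void)
{
	unsigned char frame[8] = { 0x82, 0x02, 0x0d, 0x32, 1, 2, 3, 4 };

	write_infoframe(frame, sizeof(frame));
	printf("wrote %d bytes, last byte %d\n", wptr, dip[DIP_DATA_SIZE - 1]);
	return 0;
}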
@@ -185,6 +188,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -223,6 +229,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -258,6 +267,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -291,6 +303,9 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(data_reg + i, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(data_reg + i, 0);
mmiowb();
val |= hsw_infoframe_enable(frame);
@@ -376,6 +391,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
port = VIDEO_DIP_PORT_C;
break;
default:
+ BUG();
return;
}
@@ -434,6 +450,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
port = VIDEO_DIP_PORT_D;
break;
default:
+ BUG();
return;
}
@@ -600,15 +617,36 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
-static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_hdmi->sdvox_reg);
+
+ if (!(tmp & SDVO_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_enable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
u32 enable_bits = SDVO_ENABLE;
- if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON)
+ if (intel_hdmi->has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->sdvox_reg);
@@ -616,31 +654,12 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround for IBX, we need to move the port to transcoder A
* before disabling it. */
if (HAS_PCH_IBX(dev)) {
- struct drm_crtc *crtc = encoder->crtc;
+ struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
- if (mode != DRM_MODE_DPMS_ON) {
- if (temp & SDVO_PIPE_B_SELECT) {
- temp &= ~SDVO_PIPE_B_SELECT;
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
-
- /* Again we need to write this twice. */
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
-
- /* Transcoder selection bits only update
- * effectively on vblank. */
- if (crtc)
- intel_wait_for_vblank(dev, pipe);
- else
- msleep(50);
- }
- } else {
- /* Restore the transcoder select bit. */
- if (pipe == PIPE_B)
- enable_bits |= SDVO_PIPE_B_SELECT;
- }
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ enable_bits |= SDVO_PIPE_B_SELECT;
}
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
@@ -651,12 +670,64 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
POSTING_READ(intel_hdmi->sdvox_reg);
}
- if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~enable_bits;
- } else {
- temp |= enable_bits;
+ temp |= enable_bits;
+
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* HW workaround, need to write this twice for an issue that may result
+ * in the first write getting masked.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+ }
+}
+
+static void intel_disable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->sdvox_reg);
+
+ /* HW workaround for IBX, we need to move the port to transcoder A
+ * before disabling it. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(dev, pipe);
+ else
+ msleep(50);
+ }
+ }
+
+ /* HW workaround, need to toggle enable bit off and on for 12bpc, but
+ * we do this anyway, which has proven more stable in testing.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->sdvox_reg);
}
+ temp &= ~enable_bits;
+
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
@@ -736,7 +807,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -777,8 +847,6 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
-
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -832,9 +900,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
done:
if (intel_hdmi->base.base.crtc) {
struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y,
- crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -848,23 +915,19 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
- .dpms = intel_ddi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_ddi_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
- .dpms = intel_hdmi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_hdmi_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
@@ -888,7 +951,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_broadcast_rgb_property(connector);
}
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
@@ -922,48 +985,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- /* Set up the DDC bus. */
- if (sdvox_reg == SDVOB) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == SDVOC) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMIB) {
- intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMIC) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMID) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+ intel_encoder->cloneable = false;
+
+ intel_hdmi->ddi_port = port;
+ switch (port) {
+ case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- intel_hdmi->ddi_port = PORT_B;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+ break;
+ case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- intel_hdmi->ddi_port = PORT_C;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+ break;
+ case PORT_D:
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- intel_hdmi->ddi_port = PORT_D;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
- } else {
- /* If we got an unknown sdvox_reg, things are pretty much broken
- * in a way that we should let the kernel know about it */
+ break;
+ case PORT_A:
+ /* Internal port only for eDP. */
+ default:
BUG();
}
@@ -986,10 +1026,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
- if (IS_HASWELL(dev))
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
- else
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+ if (IS_HASWELL(dev)) {
+ intel_encoder->enable = intel_enable_ddi;
+ intel_encoder->disable = intel_disable_ddi;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_hdmi_helper_funcs_hsw);
+ } else {
+ intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->disable = intel_disable_hdmi;
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_hdmi_helper_funcs);
+ }
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_hdmi_add_properties(intel_hdmi, connector);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8552be9f5db1fdf109d8075329891840c8a9c5d5..e3166df55daad03f4f5c144a5c0558aaa017fec6 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -64,13 +64,40 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
struct intel_lvds, base);
}
+static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lvds_reg, tmp;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ lvds_reg = PCH_LVDS;
+ } else {
+ lvds_reg = LVDS;
+ }
+
+ tmp = I915_READ(lvds_reg);
+
+ if (!(tmp & LVDS_PORT_EN))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
/**
* Sets the power state for the panel.
*/
-static void intel_lvds_enable(struct intel_lvds *intel_lvds)
+static void intel_enable_lvds(struct intel_encoder *encoder)
{
- struct drm_device *dev = intel_lvds->base.base.dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc);
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -110,9 +137,10 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
intel_panel_enable_backlight(dev, intel_crtc->pipe);
}
-static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+static void intel_disable_lvds(struct intel_encoder *encoder)
{
- struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -141,18 +169,6 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
POSTING_READ(lvds_reg);
}
-static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_enable(intel_lvds);
- else
- intel_lvds_disable(intel_lvds);
-
- /* XXX: We never power down the LVDS pairs. */
-}
-
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -233,9 +249,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- struct intel_encoder *tmp_encoder;
+ struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
@@ -245,14 +260,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
- /* Should never happen!! */
- for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) {
- if (&tmp_encoder->base != encoder) {
- DRM_ERROR("Can't enable LVDS and another "
- "encoder on the same pipe\n");
- return false;
- }
- }
+ if (intel_encoder_check_is_cloned(&intel_lvds->base))
+ return false;
/*
* We have timings from the BIOS for the panel, put them in
@@ -404,23 +413,6 @@ out:
return true;
}
-static void intel_lvds_prepare(struct drm_encoder *encoder)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- intel_lvds_disable(intel_lvds);
-}
-
-static void intel_lvds_commit(struct drm_encoder *encoder)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- /* Always do a full power on as we do not know what state
- * we were left in.
- */
- intel_lvds_enable(intel_lvds);
-}
-
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -534,7 +526,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
dev_priv->modeset_on_lid = 0;
mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
+ intel_modeset_check_state(dev);
mutex_unlock(&dev->mode_config.mutex);
return NOTIFY_OK;
@@ -586,8 +578,8 @@ static int intel_lvds_set_property(struct drm_connector *connector,
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
}
@@ -595,11 +587,9 @@ static int intel_lvds_set_property(struct drm_connector *connector,
}
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
- .dpms = intel_lvds_dpms,
.mode_fixup = intel_lvds_mode_fixup,
- .prepare = intel_lvds_prepare,
.mode_set = intel_lvds_mode_set,
- .commit = intel_lvds_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -609,7 +599,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@@ -971,10 +961,15 @@ bool intel_lvds_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
+ intel_encoder->enable = intel_enable_lvds;
+ intel_encoder->disable = intel_disable_lvds;
+ intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
- intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+ intel_encoder->cloneable = false;
if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
else if (IS_GEN4(dev))
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index cc71fd9aaed51d0d79f261ef9d2c47c434859877..cabd84bf66ebd576e1f75cdc8e4ef8a7f928e073 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
- connector->display_info.raw_edid = NULL;
kfree(edid);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5cc624eb6133e6cfe0e3e88ed4e56e28c9d3046e..5530413213d8ed0d4cc86ff62c54cfd55588d6c1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -427,6 +427,25 @@ blind_set:
goto end;
}
+static void intel_setup_cadls(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ int i = 0;
+ u32 disp_id;
+
+ /* Initialize the CADL field by duplicating the DIDL values.
+ * Technically, this is not always correct as display outputs may exist
+ * but not be active. This initialization is necessary for some Clevo
+ * laptops that check this field before processing the brightness and
+ * display switching hotkeys. Just like DIDL, CADL is NULL-terminated
+ * if there are fewer than eight devices. */
+ do {
+ disp_id = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(disp_id, &opregion->acpi->cadl[i]);
+ } while (++i < 8 && disp_id != 0);
+}
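+
+The loop above runs over memory-mapped OpRegion fields via ioread32() and
+iowrite32(). Stripped of the I/O accessors, the termination logic reduces
+to the following self-contained model (illustrative only):
+
+	/* Copy DIDL entries into CADL until the table is full or the
+	 * 0 terminator has itself been copied. */
+	static void setup_cadls_model(const u32 didl[8], u32 cadl[8])
+	{
+		int i = 0;
+		u32 id;
+
+		do {
+			id = didl[i];
+			cadl[i] = id;
+		} while (++i < 8 && id != 0);
+	}
+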
+
void intel_opregion_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,8 +455,10 @@ void intel_opregion_init(struct drm_device *dev)
return;
if (opregion->acpi) {
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_didl_outputs(dev);
+ intel_setup_cadls(dev);
+ }
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 10510221d76358670717c3376971b559538c93fe..ebff850a9ab6ff8f7b35c17fced205be45d84205 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -234,54 +234,6 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
return 0;
}
-/* Workaround for i830 bug where pipe a must be enable to change control regs */
-static int
-i830_activate_pipe_a(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
- struct drm_crtc_helper_funcs *crtc_funcs;
- struct drm_display_mode vesa_640x480 = {
- DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
- }, *mode;
-
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
- if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
- return 0;
-
- /* most i8xx have pipe a forced on, so don't trust dpms mode */
- if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
- return 0;
-
- crtc_funcs = crtc->base.helper_private;
- if (crtc_funcs->dpms == NULL)
- return 0;
-
- DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
-
- mode = drm_mode_duplicate(dev, &vesa_640x480);
-
- if (!drm_crtc_helper_set_mode(&crtc->base, mode,
- crtc->base.x, crtc->base.y,
- crtc->base.fb))
- return 0;
-
- crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
- return 1;
-}
-
-static void
-i830_deactivate_pipe_a(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
/* overlay needs to be disable in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
@@ -289,17 +241,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
- int pipe_a_quirk = 0;
int ret;
BUG_ON(overlay->active);
overlay->active = 1;
- if (IS_I830(dev)) {
- pipe_a_quirk = i830_activate_pipe_a(dev);
- if (pipe_a_quirk < 0)
- return pipe_a_quirk;
- }
+ WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL) {
@@ -321,9 +268,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
- if (pipe_a_quirk)
- i830_deactivate_pipe_a(dev);
-
return ret;
}
@@ -1438,7 +1382,7 @@ void intel_setup_overlay(struct drm_device *dev)
}
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ba8a27b1757ad97e774fe8266432161477a3561a..d69f8f49beb5bf2a67f87f9b708a5a99af1616ff 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,8 @@
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
* during in-memory transfers and, therefore, reduce the power packet.
@@ -593,7 +595,7 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
break;
}
- dev_priv->r_t = dev_priv->mem_freq;
+ dev_priv->ips.r_t = dev_priv->mem_freq;
switch (csipll & 0x3ff) {
case 0x00c:
@@ -625,11 +627,11 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
}
if (dev_priv->fsb_freq == 3200) {
- dev_priv->c_m = 0;
+ dev_priv->ips.c_m = 0;
} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->c_m = 1;
+ dev_priv->ips.c_m = 1;
} else {
- dev_priv->c_m = 2;
+ dev_priv->ips.c_m = 2;
}
}
@@ -2138,7 +2140,7 @@ intel_alloc_context_page(struct drm_device *dev)
return NULL;
}
- ret = i915_gem_object_pin(ctx, 4096, true);
+ ret = i915_gem_object_pin(ctx, 4096, true, false);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -2160,11 +2162,22 @@ err_unref:
return NULL;
}
+/**
+ * Lock protecting IPS related data structures
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/* Global for IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
+
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
+ assert_spin_locked(&mchdev_lock);
+
rgvswctl = I915_READ16(MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
@@ -2188,6 +2201,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
+ spin_lock_irq(&mchdev_lock);
+
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -2211,12 +2226,12 @@ static void ironlake_enable_drps(struct drm_device *dev)
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
- dev_priv->fmax = fmax; /* IPS callback will increase this */
- dev_priv->fstart = fstart;
+ dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+ dev_priv->ips.fstart = fstart;
- dev_priv->max_delay = fstart;
- dev_priv->min_delay = fmin;
- dev_priv->cur_delay = fstart;
+ dev_priv->ips.max_delay = fstart;
+ dev_priv->ips.min_delay = fmin;
+ dev_priv->ips.cur_delay = fstart;
DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
fmax, fmin, fstart);
@@ -2233,23 +2248,29 @@ static void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
- msleep(1);
+ mdelay(1);
ironlake_set_drps(dev, fstart);
- dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
I915_READ(0x112e0);
- dev_priv->last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->last_count2 = I915_READ(0x112f4);
- getrawmonotonic(&dev_priv->last_time2);
+ dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->ips.last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->ips.last_time2);
+
+ spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvswctl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvswctl = I915_READ16(MEMSWCTL);
/* Ack interrupts, disable EFC interrupt */
I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -2259,31 +2280,54 @@ static void ironlake_disable_drps(struct drm_device *dev)
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
- ironlake_set_drps(dev, dev_priv->fstart);
- msleep(1);
+ ironlake_set_drps(dev, dev_priv->ips.fstart);
+ mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
- msleep(1);
+ mdelay(1);
+ spin_unlock_irq(&mchdev_lock);
}
-void gen6_set_rps(struct drm_device *dev, u8 val)
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing a rmw cycle (which might result in us clearing
+ * all limits and leaving the gpu stuck at whatever frequency it currently runs at).
+ */
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 limits;
limits = 0;
- if (val >= dev_priv->max_delay)
- val = dev_priv->max_delay;
- else
- limits |= dev_priv->max_delay << 24;
- if (val <= dev_priv->min_delay)
- val = dev_priv->min_delay;
- else
- limits |= dev_priv->min_delay << 16;
+ if (*val >= dev_priv->rps.max_delay)
+ *val = dev_priv->rps.max_delay;
+ limits |= dev_priv->rps.max_delay << 24;
+
+ /* Only set the down limit when we've reached the lowest level to avoid
+ * getting more interrupts, otherwise leave this clear. This prevents a
+ * race in the hw when coming out of rc6: There's a tiny window where
+ * the hw runs at the minimal clock before selecting the desired
+ * frequency; if the down threshold expires in that window we will not
+ * receive a down interrupt. */
+ if (*val <= dev_priv->rps.min_delay) {
+ *val = dev_priv->rps.min_delay;
+ limits |= dev_priv->rps.min_delay << 16;
+ }
- if (val == dev_priv->cur_delay)
+ return limits;
+}
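+
+gen6_rps_limits() both clamps the requested frequency step and rebuilds the
+full GEN6_RP_INTERRUPT_LIMITS value, since the register cannot be read back
+reliably. A self-contained model of that computation, mirroring the hunk
+above:
+
+	static u32 rps_limits_model(u8 min_delay, u8 max_delay, u8 *val)
+	{
+		u32 limits = 0;
+
+		/* Up limit lives in bits 31:24 and is always written. */
+		if (*val >= max_delay)
+			*val = max_delay;
+		limits |= (u32)max_delay << 24;
+
+		/* Down limit (bits 23:16) is only set at the floor, so the
+		 * down-threshold interrupt stays armed everywhere else. */
+		if (*val <= min_delay) {
+			*val = min_delay;
+			limits |= (u32)min_delay << 16;
+		}
+
+		return limits;
+	}
+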
+
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 limits = gen6_rps_limits(dev_priv, &val);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(val > dev_priv->rps.max_delay);
+ WARN_ON(val < dev_priv->rps.min_delay);
+
+ if (val == dev_priv->rps.cur_delay)
return;
I915_WRITE(GEN6_RPNSWREQ,
@@ -2296,7 +2340,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
- dev_priv->cur_delay = val;
+ POSTING_READ(GEN6_RPNSWREQ);
+
+ dev_priv->rps.cur_delay = val;
+
+ trace_intel_gpu_freq_change(val * 50);
}
static void gen6_disable_rps(struct drm_device *dev)
@@ -2312,40 +2360,40 @@ static void gen6_disable_rps(struct drm_device *dev)
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
- spin_lock_irq(&dev_priv->rps_lock);
- dev_priv->pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_lock_irq(&dev_priv->rps.lock);
+ dev_priv->rps.pm_iir = 0;
+ spin_unlock_irq(&dev_priv->rps.lock);
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
int intel_enable_rc6(const struct drm_device *dev)
{
- /*
- * Respect the kernel parameter if it is set
- */
+ /* Respect the kernel parameter if it is set */
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
- /*
- * Disable RC6 on Ironlake
- */
- if (INTEL_INFO(dev)->gen == 5)
- return 0;
+ if (INTEL_INFO(dev)->gen == 5) {
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable rc6 on ilk if VT-d is on. */
+ if (intel_iommu_gfx_mapped)
+ return false;
+#endif
+ DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
+ return INTEL_RC6_ENABLE;
+ }
- /* On Haswell, only RC6 is available. So let's enable it by default to
- * provide better testing and coverage since the beginning.
- */
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev)) {
+ DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
return INTEL_RC6_ENABLE;
+ }
- /*
- * Disable rc6 on Sandybridge
- */
+ /* snb/ivb have more than one rc6 state. */
if (INTEL_INFO(dev)->gen == 6) {
DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
return INTEL_RC6_ENABLE;
}
+
DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
@@ -2383,9 +2431,9 @@ static void gen6_enable_rps(struct drm_device *dev)
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
/* In units of 100MHz */
- dev_priv->max_delay = rp_state_cap & 0xff;
- dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
- dev_priv->cur_delay = 0;
+ dev_priv->rps.max_delay = rp_state_cap & 0xff;
+ dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->rps.cur_delay = 0;
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2438,8 +2486,8 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- dev_priv->max_delay << 24 |
- dev_priv->min_delay << 16);
+ dev_priv->rps.max_delay << 24 |
+ dev_priv->rps.min_delay << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -2477,7 +2525,7 @@ static void gen6_enable_rps(struct drm_device *dev)
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
- dev_priv->max_delay = pcu_mbox & 0xff;
+ dev_priv->rps.max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
@@ -2485,10 +2533,10 @@ static void gen6_enable_rps(struct drm_device *dev)
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
- spin_lock_irq(&dev_priv->rps_lock);
- WARN_ON(dev_priv->pm_iir != 0);
+ spin_lock_irq(&dev_priv->rps.lock);
+ WARN_ON(dev_priv->rps.pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_unlock_irq(&dev_priv->rps.lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
@@ -2520,9 +2568,9 @@ static void gen6_update_ring_freq(struct drm_device *dev)
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
gpu_freq--) {
- int diff = dev_priv->max_delay - gpu_freq;
+ int diff = dev_priv->rps.max_delay - gpu_freq;
/*
* For GPU frequencies less than 750MHz, just use the lowest
@@ -2686,14 +2734,16 @@ static const struct cparams {
{ 0, 800, 231, 23784 },
};
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
u64 total_count, diff, ret;
u32 count1, count2, count3, m = 0, c = 0;
unsigned long now = jiffies_to_msecs(jiffies), diff1;
int i;
- diff1 = now - dev_priv->last_time1;
+ assert_spin_locked(&mchdev_lock);
+
+ diff1 = now - dev_priv->ips.last_time1;
/* Prevent division-by-zero if we are asking too fast.
* Also, we don't get interesting results if we are polling
@@ -2701,7 +2751,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
* in such cases.
*/
if (diff1 <= 10)
- return dev_priv->chipset_power;
+ return dev_priv->ips.chipset_power;
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
@@ -2710,16 +2760,16 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
total_count = count1 + count2 + count3;
/* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->last_count1) {
- diff = ~0UL - dev_priv->last_count1;
+ if (total_count < dev_priv->ips.last_count1) {
+ diff = ~0UL - dev_priv->ips.last_count1;
diff += total_count;
} else {
- diff = total_count - dev_priv->last_count1;
+ diff = total_count - dev_priv->ips.last_count1;
}
for (i = 0; i < ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->c_m &&
- cparams[i].t == dev_priv->r_t) {
+ if (cparams[i].i == dev_priv->ips.c_m &&
+ cparams[i].t == dev_priv->ips.r_t) {
m = cparams[i].m;
c = cparams[i].c;
break;
@@ -2730,14 +2780,30 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
ret = ((m * diff) + c);
ret = div_u64(ret, 10);
- dev_priv->last_count1 = total_count;
- dev_priv->last_time1 = now;
+ dev_priv->ips.last_count1 = total_count;
+ dev_priv->ips.last_time1 = now;
- dev_priv->chipset_power = ret;
+ dev_priv->ips.chipset_power = ret;
return ret;
}
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_chipset_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
+
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
unsigned long m, x, b;
@@ -2894,18 +2960,17 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
return v_table[pxvid].vd;
}
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
struct timespec now, diff1;
u64 diff;
unsigned long diffms;
u32 count;
- if (dev_priv->info->gen != 5)
- return;
+ assert_spin_locked(&mchdev_lock);
getrawmonotonic(&now);
- diff1 = timespec_sub(now, dev_priv->last_time2);
+ diff1 = timespec_sub(now, dev_priv->ips.last_time2);
/* Don't divide by 0 */
diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
@@ -2914,28 +2979,42 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
count = I915_READ(GFXEC);
- if (count < dev_priv->last_count2) {
- diff = ~0UL - dev_priv->last_count2;
+ if (count < dev_priv->ips.last_count2) {
+ diff = ~0UL - dev_priv->ips.last_count2;
diff += count;
} else {
- diff = count - dev_priv->last_count2;
+ diff = count - dev_priv->ips.last_count2;
}
- dev_priv->last_count2 = count;
- dev_priv->last_time2 = now;
+ dev_priv->ips.last_count2 = count;
+ dev_priv->ips.last_time2 = now;
/* More magic constants... */
diff = diff * 1181;
diff = div_u64(diff, diffms * 10);
- dev_priv->gfx_power = diff;
+ dev_priv->ips.gfx_power = diff;
}
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->info->gen != 5)
+ return;
+
+ spin_lock_irq(&mchdev_lock);
+
+ __i915_update_gfx_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long t, corr, state1, corr2, state2;
u32 pxvid, ext_v;
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ assert_spin_locked(&mchdev_lock);
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
@@ -2955,27 +3034,31 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
corr = corr * ((150142 * state1) / 10000 - 78642);
corr /= 100000;
- corr2 = (corr * dev_priv->corr);
+ corr2 = (corr * dev_priv->ips.corr);
state2 = (corr2 * state1) / 10000;
state2 /= 100; /* convert to mW */
- i915_update_gfx_val(dev_priv);
+ __i915_update_gfx_val(dev_priv);
- return dev_priv->gfx_power + state2;
+ return dev_priv->ips.gfx_power + state2;
}
-/* Global for IPS driver to get at the current i915 device */
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- * - i915_mch_dev
- * - dev_priv->max_delay
- * - dev_priv->min_delay
- * - dev_priv->fmax
- * - dev_priv->gpu_busy
- */
-static DEFINE_SPINLOCK(mchdev_lock);
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_gfx_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
/**
* i915_read_mch_val - return value for IPS use
@@ -2988,18 +3071,18 @@ unsigned long i915_read_mch_val(void)
struct drm_i915_private *dev_priv;
unsigned long chipset_val, graphics_val, ret = 0;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
- chipset_val = i915_chipset_val(dev_priv);
- graphics_val = i915_gfx_val(dev_priv);
+ chipset_val = __i915_chipset_val(dev_priv);
+ graphics_val = __i915_gfx_val(dev_priv);
ret = chipset_val + graphics_val;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3015,18 +3098,18 @@ bool i915_gpu_raise(void)
struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
- if (dev_priv->max_delay > dev_priv->fmax)
- dev_priv->max_delay--;
+ if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
+ dev_priv->ips.max_delay--;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3043,18 +3126,18 @@ bool i915_gpu_lower(void)
struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
- if (dev_priv->max_delay < dev_priv->min_delay)
- dev_priv->max_delay++;
+ if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
+ dev_priv->ips.max_delay++;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3068,17 +3151,20 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
+ struct intel_ring_buffer *ring;
bool ret = false;
+ int i;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
- ret = dev_priv->busy;
+ for_each_ring(ring, dev_priv, i)
+ ret |= !list_empty(&ring->request_list);
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3095,20 +3181,20 @@ bool i915_gpu_turbo_disable(void)
struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
- dev_priv->max_delay = dev_priv->fstart;
+ dev_priv->ips.max_delay = dev_priv->ips.fstart;
- if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
ret = false;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3136,19 +3222,20 @@ ips_ping_for_i915_load(void)
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
- spin_lock(&mchdev_lock);
+ /* We only register the i915 ips part with intel-ips once everything is
+ * set up, to avoid intel-ips sneaking in and reading bogus values. */
+ spin_lock_irq(&mchdev_lock);
i915_mch_dev = dev_priv;
- dev_priv->mchdev_lock = &mchdev_lock;
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
i915_mch_dev = NULL;
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
}
static void intel_init_emon(struct drm_device *dev)
{
@@ -3218,7 +3305,7 @@ static void intel_init_emon(struct drm_device *dev)
lcfuse = I915_READ(LCFUSE02);
- dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+ dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
void intel_disable_gt_powersave(struct drm_device *dev)
@@ -3731,42 +3818,6 @@ void intel_init_clock_gating(struct drm_device *dev)
dev_priv->display.init_pch_clock_gating(dev);
}
-static void gen6_sanitize_pm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 limits, delay, old;
-
- gen6_gt_force_wake_get(dev_priv);
-
- old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
- /* Make sure we continue to get interrupts
- * until we hit the minimum or maximum frequencies.
- */
- limits &= ~(0x3f << 16 | 0x3f << 24);
- delay = dev_priv->cur_delay;
- if (delay < dev_priv->max_delay)
- limits |= (dev_priv->max_delay & 0x3f) << 24;
- if (delay > dev_priv->min_delay)
- limits |= (dev_priv->min_delay & 0x3f) << 16;
-
- if (old != limits) {
- /* Note that the known failure case is to read back 0. */
- DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
- "expected %08x, was %08x\n", limits, old);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
- }
-
- gen6_gt_force_wake_put(dev_priv);
-}
-
-void intel_sanitize_pm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.sanitize_pm)
- dev_priv->display.sanitize_pm(dev);
-}
-
/* Starting with Haswell, we have different power wells for
* different parts of the GPU. This attempts to enable them all.
*/
@@ -3852,7 +3903,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) {
@@ -3864,7 +3914,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else if (IS_HASWELL(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
@@ -3876,7 +3925,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_VALLEYVIEW(dev)) {
@@ -3955,14 +4003,16 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
else
forcewake_ack = FORCEWAKE_ACK;
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE, 1);
- POSTING_READ(FORCEWAKE);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@@ -3976,14 +4026,16 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
else
forcewake_ack = FORCEWAKE_MT_ACK;
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
- POSTING_READ(FORCEWAKE_MT);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@@ -4016,14 +4068,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
- POSTING_READ(FORCEWAKE);
+ /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
- POSTING_READ(FORCEWAKE_MT);
+ /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
@@ -4062,24 +4114,24 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
- /* Already awake? */
- if ((I915_READ(0x130094) & 0xa1) == 0xa1)
- return;
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
- POSTING_READ(FORCEWAKE_VLV);
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
- if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
- /* FIXME: confirm VLV behavior with Punit folks */
- POSTING_READ(FORCEWAKE_VLV);
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
}
void intel_gt_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1aef516cc6fac6da853bcbd6f1fbcdf9228db9c3..ecbc5c5dbbbcc6db5d910836c9f765f5c429f905 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -261,6 +261,83 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
+static int
+gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen7_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed.
+ *
+ * Workaround: 4th PIPE_CONTROL command (except the ones with only
+ * read-cache invalidate bits set) must have the CS_STALL bit set. We
+ * don't try to be clever and just set it unconditionally.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ /* Workaround: we must issue a pipe_control with CS-stall bit
+ * set before a pipe_control command that has the state cache
+ * invalidate bit set. */
+ gen7_render_ring_cs_stall_wa(ring);
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
@@ -381,12 +458,12 @@ init_pipe_control(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true);
+ ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret)
goto err_unref;
pc->gtt_offset = obj->gtt_offset;
- pc->cpu_page = kmap(obj->pages[0]);
+ pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL)
goto err_unpin;
@@ -413,7 +490,8 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
return;
obj = pc->obj;
- kunmap(obj->pages[0]);
+
+ kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -461,7 +539,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (IS_IVYBRIDGE(dev))
+ if (HAS_L3_GPU_CACHE(dev))
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
return ret;
@@ -627,26 +705,24 @@ pc_render_add_request(struct intel_ring_buffer *ring,
}
static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
- struct drm_device *dev = ring->dev;
-
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page. */
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (!lazy_coherency)
intel_ring_get_active_head(ring);
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static u32
-ring_get_seqno(struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring)
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
struct pipe_control *pc = ring->private;
return pc->cpu_page[0];
@@ -851,7 +927,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
GEN6_RENDER_L3_PARITY_ERROR));
else
@@ -874,7 +950,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
else
I915_WRITE_IMR(ring, ~0);
@@ -950,7 +1026,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
if (obj == NULL)
return;
- kunmap(obj->pages[0]);
+ kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
@@ -971,13 +1047,13 @@ static int init_status_page(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true);
+ ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
ring->status_page.gfx_addr = obj->gtt_offset;
- ring->status_page.page_addr = kmap(obj->pages[0]);
+ ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
goto err_unpin;
@@ -1009,7 +1085,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = 32 * PAGE_SIZE;
init_waitqueue_head(&ring->irq_queue);
@@ -1029,7 +1104,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
@@ -1378,7 +1453,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
- ring->flush = gen6_render_ring_flush;
+ ring->flush = gen7_render_ring_flush;
+ if (INTEL_INFO(dev)->gen == 6)
+ ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
@@ -1480,7 +1557,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = size;
ring->effective_size = ring->size;
@@ -1573,3 +1649,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ if (!ring->gpu_caches_dirty)
+ return 0;
+
+ ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+ uint32_t flush_domains;
+ int ret;
+
+ flush_domains = 0;
+ if (ring->gpu_caches_dirty)
+ flush_domains = I915_GEM_GPU_DOMAINS;
+
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ if (ret)
+ return ret;
+
+ trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
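+
+intel_ring_flush_all_caches() and intel_ring_invalidate_all_caches()
+centralize the gpu_caches_dirty tracking. A sketch of the call pattern they
+are designed for, inferred from their semantics rather than shown in this
+patch:
+
+	/* Before the GPU samples newly written buffers: */
+	ret = intel_ring_invalidate_all_caches(ring);
+	if (ret)
+		return ret;
+
+	/* ... emit commands that dirty the render caches ... */
+	ring->gpu_caches_dirty = true;
+
+	/* Before signalling completion, make the writes visible: */
+	ret = intel_ring_flush_all_caches(ring);
+	if (ret)
+		return ret;
+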
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1d3c81fdad92087d2caf58c3d1b79753f5c75319..2ea7a311a1f0ed8753d800cc48a48e6873bd336d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -72,7 +72,14 @@ struct intel_ring_buffer {
u32 flush_domains);
int (*add_request)(struct intel_ring_buffer *ring,
u32 *seqno);
- u32 (*get_seqno)(struct intel_ring_buffer *ring);
+ /* Some chipsets are not quite as coherent as advertised and need
+ * an expensive kick to force a true read of the up-to-date seqno.
+ * However, the up-to-date seqno is not always required and the last
+ * seen value is good enough. Note that the seqno will always be
+ * monotonic, even if not coherent.
+ */
+ u32 (*get_seqno)(struct intel_ring_buffer *ring,
+ bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
@@ -100,15 +107,6 @@ struct intel_ring_buffer {
*/
struct list_head request_list;
- /**
- * List of objects currently pending a GPU write flush.
- *
- * All elements on this list will belong to either the
- * active_list or flushing_list, last_rendering_seqno can
- * be used to differentiate between the two elements.
- */
- struct list_head gpu_write_list;
-
/**
* Do we have some not yet emitted requests outstanding?
*/
@@ -204,6 +202,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
void intel_ring_advance(struct intel_ring_buffer *ring);
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
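
The lazy_coherency parameter added to ->get_seqno() lets a caller decide per
read whether a possibly stale seqno is acceptable. An illustrative (assumed)
two-step caller built on the driver's existing i915_seqno_passed()
comparison:

	/* Cheap, possibly stale read first; a stale seqno can only delay
	 * completion detection, never report a future one. */
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return true;

	/* Pay for the coherent read only before actually sleeping. */
	return i915_seqno_passed(ring->get_seqno(ring, false), seqno);
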
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d251d9d7a06c058093582a1c7f6af2f0d1c98fbf..0007a4d9bf6e5248effc509fd25f442b6d4e3164 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -96,7 +96,7 @@ struct intel_sdvo {
/*
* Hotplug activation bits for this device
*/
- uint8_t hotplug_active[2];
+ uint16_t hotplug_active;
/**
* This is used to select the color range of RBG outputs in HDMI mode.
@@ -627,6 +627,14 @@ static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
&outputs, sizeof(outputs));
}
+static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 *outputs)
+{
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ACTIVE_OUTPUTS,
+ outputs, sizeof(*outputs));
+}
+
static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
int mode)
{
@@ -1141,51 +1149,132 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
-static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_device *dev = encoder->dev;
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(&connector->base);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+ u16 active_outputs;
+
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+ if (active_outputs & intel_sdvo_connector->output_flag)
+ return true;
+ else
+ return false;
+}
+
+static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_sdvo->sdvo_reg);
+
+ if (!(tmp & SDVO_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_disable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u32 temp;
+
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_OFF);
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) != 0) {
+ intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+ }
+}
+
+static void intel_enable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
+ bool input1, input2;
+ int i;
+ u8 status;
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) == 0)
+ intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ for (i = 0; i < 2; i++)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+ /* Warn if the device reported failure to sync.
+ * A lot of SDVO devices fail to notify of sync, but it's a given
+ * that if the status is a success, we succeeded.
+ */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
+ }
+
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_ON);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+}
+
+static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_crtc *crtc;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+ /* dvo supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_sdvo->base.base.crtc;
+ if (!crtc) {
+ intel_sdvo->base.connectors_active = false;
+ return;
+ }
if (mode != DRM_MODE_DPMS_ON) {
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
- if (mode == DRM_MODE_DPMS_OFF) {
- temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) != 0) {
- intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
- }
- }
+ intel_sdvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
} else {
- bool input1, input2;
- int i;
- u8 status;
-
- temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) == 0)
- intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
- for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
- /* Warn if the device reported failure to sync.
- * A lot of SDVO devices fail to notify of sync, but it's
- * a given it the status is a success, we succeeded.
- */
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(intel_sdvo));
- }
+ intel_sdvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
- return;
+
+ intel_modeset_check_state(connector->dev);
}
static int intel_sdvo_mode_valid(struct drm_connector *connector,
@@ -1250,25 +1339,29 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
-static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
+static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
struct drm_device *dev = intel_sdvo->base.base.dev;
- u8 response[2];
+ uint16_t hotplug;
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
if (IS_I945G(dev) || IS_I945GM(dev))
- return false;
+ return 0;
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
- &response, 2) && response[0];
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &hotplug, sizeof(hotplug)))
+ return 0;
+
+ return hotplug;
}
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2);
}
static bool
@@ -1344,7 +1437,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
}
} else
status = connector_status_disconnected;
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1418,7 +1510,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
else
ret = connector_status_disconnected;
- connector->display_info.raw_edid = NULL;
kfree(edid);
} else
ret = connector_status_connected;
@@ -1464,7 +1555,6 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
drm_add_edid_modes(connector, edid);
}
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
}
@@ -1836,8 +1926,8 @@ set_value:
done:
if (intel_sdvo->base.base.crtc) {
struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -1845,15 +1935,13 @@ done:
}
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
- .dpms = intel_sdvo_dpms,
.mode_fixup = intel_sdvo_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_sdvo_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_sdvo_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
@@ -2025,6 +2113,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
connector->base.base.interlace_allowed = 1;
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
intel_connector_attach_encoder(&connector->base, &encoder->base);
drm_sysfs_connector_add(&connector->base.base);
@@ -2063,17 +2152,18 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
- if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+ if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+ intel_sdvo_connector->output_flag) {
connector->polled = DRM_CONNECTOR_POLL_HPD;
- intel_sdvo->hotplug_active[0] |= 1 << device;
+ intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
/* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
- }
- else
+ } else {
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ }
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -2081,8 +2171,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
+ intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
@@ -2113,7 +2202,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ intel_sdvo->base.cloneable = false;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
@@ -2156,8 +2245,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
+ intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector,
intel_sdvo);
@@ -2189,8 +2277,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+ /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
+ * as opposed to native LVDS, where we upscale with the panel-fitter
+ * (and hence only the native LVDS resolution could be cloned). */
+ intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
@@ -2575,6 +2665,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+ intel_encoder->disable = intel_disable_sdvo;
+ intel_encoder->enable = intel_enable_sdvo;
+ intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+
/* In default case sdvo lvds is false */
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
@@ -2589,7 +2683,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
/* Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
- if (intel_sdvo->hotplug_active[0])
+ if (intel_sdvo->hotplug_active)
dev_priv->hotplug_supported_mask |= hotplug_mask;
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
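
With hotplug_active collapsed into a single u16 of SDVO output flags,
support queries and enables become plain mask operations. A hypothetical
sketch using SDVO_OUTPUT_TMDS0, one of the spec-defined output-flag bits:

	uint16_t hotplug = intel_sdvo_get_hotplug_support(intel_sdvo);

	if (hotplug & SDVO_OUTPUT_TMDS0) {
		intel_sdvo->hotplug_active |= SDVO_OUTPUT_TMDS0;
		intel_sdvo_enable_hotplug(intel_encoder);
	}
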
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ccfb2ff4c31d7ab3695eda1b481fce25bd446199..62bb048c135e510020c619d2fdf310392d129db8 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -835,22 +835,37 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
base);
}
+static bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(TV_CTL);
+
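+ /* the encoder is active only when TV_ENC_ENABLE is set; the pipe
+ * feeding it is then recovered from the control register's
+ * pipe-select field via PORT_TO_PIPE()
+ */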
+ if (!(tmp & TV_ENC_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
static void
-intel_tv_dpms(struct drm_encoder *encoder, int mode)
+intel_enable_tv(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
- break;
- }
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+}
+
+static void
+intel_disable_tv(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
}
static const struct tv_mode *
@@ -894,17 +909,14 @@ intel_tv_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
- struct intel_encoder *other_encoder;
if (!tv_mode)
return false;
- for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder)
- if (&other_encoder->base != encoder)
- return false;
+ if (intel_encoder_check_is_cloned(&intel_tv->base))
+ return false;
adjusted_mode->clock = tv_mode->clock;
return true;
@@ -1302,12 +1314,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
if (force) {
struct intel_load_detect_pipe tmp;
- if (intel_get_load_detect_pipe(&intel_tv->base, connector,
- &mode, &tmp)) {
+ if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(&intel_tv->base,
- connector,
- &tmp);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
return connector_status_unknown;
} else
@@ -1473,22 +1482,20 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
}
if (changed && crtc)
- drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
out:
return ret;
}
static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
- .dpms = intel_tv_dpms,
.mode_fixup = intel_tv_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_tv_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_tv_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
@@ -1618,10 +1625,15 @@ intel_tv_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
+ intel_encoder->enable = intel_enable_tv;
+ intel_encoder->disable = intel_disable_tv;
+ intel_encoder->get_hw_state = intel_tv_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+ intel_encoder->cloneable = false;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 73868d0c25ae2c00ed30bc9bf0d2c3115fe80e73..5ea5033eae0a4109d9230dd663508273da0bfd7c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -195,7 +195,6 @@ struct mga_device {
struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
- atomic_t validate_sequence;
} ttm;
u32 reg_1e24; /* SE model number */
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 3d429de0771a7f6f5bd9ddf74ae090a324064974..d3d99a28ddef770d78525cd2e32786f40f047084 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1398,7 +1398,6 @@ static int mga_vga_get_modes(struct drm_connector *connector)
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
return ret;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 97a81260485a15fb360a4fa6dfc37ac910fca303..8a55beeb8bdc210acec214ee07542f20f63f17df 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -17,6 +17,34 @@ config DRM_NOUVEAU
help
Choose this option for open-source nVidia support.
+config NOUVEAU_DEBUG
+ int "Maximum debug level"
+ depends on DRM_NOUVEAU
+ range 0 7
+ default 5
+ help
+ Selects the maximum debug level to compile support for.
+
+ 0 - fatal
+ 1 - error
+ 2 - warning
+ 3 - info
+ 4 - debug
+ 5 - trace (recommended)
+ 6 - paranoia
+ 7 - spam
+
+ The paranoia and spam levels add many extra checks that may
+ slow down driver operation.
+
+config NOUVEAU_DEBUG_DEFAULT
+ int "Default debug level"
+ depends on DRM_NOUVEAU
+ range 0 7
+ default 3
+ help
+ Selects the default debug level.
+
config DRM_NOUVEAU_BACKLIGHT
bool "Support for backlight control"
depends on DRM_NOUVEAU
@@ -25,14 +53,6 @@ config DRM_NOUVEAU_BACKLIGHT
Say Y here if you want to control the backlight of your display
(e.g. a laptop panel).
-config DRM_NOUVEAU_DEBUG
- bool "Build in Nouveau's debugfs support"
- depends on DRM_NOUVEAU && DEBUG_FS
- default y
- help
- Say Y here if you want Nouveau to output debugging information
- via debugfs.
-
menu "I2C encoder or helper chips"
depends on DRM && DRM_KMS_HELPER && I2C
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1cece6a78f397d02b092590656c2b38bde29a8f0..a990df4d6c049d263087d3567ab4d24a24e01aef 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -3,49 +3,190 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
- nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \
- nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
- nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
- nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
- nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
- nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
- nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
- nouveau_abi16.o \
- nv04_timer.o \
- nv04_mc.o nv40_mc.o nv50_mc.o \
- nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
- nv50_fb.o nvc0_fb.o \
- nv04_fifo.o nv10_fifo.o nv17_fifo.o nv40_fifo.o nv50_fifo.o \
- nv84_fifo.o nvc0_fifo.o nve0_fifo.o \
- nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \
- nv04_software.o nv50_software.o nvc0_software.o \
- nv04_graph.o nv10_graph.o nv20_graph.o \
- nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \
- nv40_grctx.o nv50_grctx.o nvc0_grctx.o nve0_grctx.o \
- nv84_crypt.o nv98_crypt.o \
- nva3_copy.o nvc0_copy.o \
- nv31_mpeg.o nv50_mpeg.o \
- nv84_bsp.o \
- nv84_vp.o \
- nv98_ppp.o \
- nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
- nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
- nv04_crtc.o nv04_display.o nv04_cursor.o \
- nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
- nv50_cursor.o nv50_display.o \
- nvd0_display.o \
- nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
- nv10_gpio.o nv50_gpio.o \
- nv50_calc.o \
- nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
- nv50_vram.o nvc0_vram.o \
- nv50_vm.o nvc0_vm.o nouveau_prime.o
-
-nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
+ccflags-y += -I$(src)/core/include
+ccflags-y += -I$(src)/core
+ccflags-y += -I$(src)
+
+nouveau-y := core/core/client.o
+nouveau-y += core/core/engctx.o
+nouveau-y += core/core/engine.o
+nouveau-y += core/core/enum.o
+nouveau-y += core/core/gpuobj.o
+nouveau-y += core/core/handle.o
+nouveau-y += core/core/mm.o
+nouveau-y += core/core/namedb.o
+nouveau-y += core/core/object.o
+nouveau-y += core/core/option.o
+nouveau-y += core/core/parent.o
+nouveau-y += core/core/printk.o
+nouveau-y += core/core/ramht.o
+nouveau-y += core/core/subdev.o
+
+nouveau-y += core/subdev/bar/base.o
+nouveau-y += core/subdev/bar/nv50.o
+nouveau-y += core/subdev/bar/nvc0.o
+nouveau-y += core/subdev/bios/base.o
+nouveau-y += core/subdev/bios/bit.o
+nouveau-y += core/subdev/bios/conn.o
+nouveau-y += core/subdev/bios/dcb.o
+nouveau-y += core/subdev/bios/dp.o
+nouveau-y += core/subdev/bios/extdev.o
+nouveau-y += core/subdev/bios/gpio.o
+nouveau-y += core/subdev/bios/i2c.o
+nouveau-y += core/subdev/bios/init.o
+nouveau-y += core/subdev/bios/mxm.o
+nouveau-y += core/subdev/bios/perf.o
+nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/clock/nv04.o
+nouveau-y += core/subdev/clock/nv40.o
+nouveau-y += core/subdev/clock/nv50.o
+nouveau-y += core/subdev/clock/nva3.o
+nouveau-y += core/subdev/clock/nvc0.o
+nouveau-y += core/subdev/clock/pllnv04.o
+nouveau-y += core/subdev/clock/pllnva3.o
+nouveau-y += core/subdev/device/base.o
+nouveau-y += core/subdev/device/nv04.o
+nouveau-y += core/subdev/device/nv10.o
+nouveau-y += core/subdev/device/nv20.o
+nouveau-y += core/subdev/device/nv30.o
+nouveau-y += core/subdev/device/nv40.o
+nouveau-y += core/subdev/device/nv50.o
+nouveau-y += core/subdev/device/nvc0.o
+nouveau-y += core/subdev/device/nve0.o
+nouveau-y += core/subdev/devinit/base.o
+nouveau-y += core/subdev/devinit/nv04.o
+nouveau-y += core/subdev/devinit/nv05.o
+nouveau-y += core/subdev/devinit/nv10.o
+nouveau-y += core/subdev/devinit/nv1a.o
+nouveau-y += core/subdev/devinit/nv20.o
+nouveau-y += core/subdev/devinit/nv50.o
+nouveau-y += core/subdev/fb/base.o
+nouveau-y += core/subdev/fb/nv04.o
+nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv50.o
+nouveau-y += core/subdev/fb/nvc0.o
+nouveau-y += core/subdev/gpio/base.o
+nouveau-y += core/subdev/gpio/nv10.o
+nouveau-y += core/subdev/gpio/nv50.o
+nouveau-y += core/subdev/gpio/nvd0.o
+nouveau-y += core/subdev/i2c/base.o
+nouveau-y += core/subdev/i2c/aux.o
+nouveau-y += core/subdev/i2c/bit.o
+nouveau-y += core/subdev/ibus/nvc0.o
+nouveau-y += core/subdev/ibus/nve0.o
+nouveau-y += core/subdev/instmem/base.o
+nouveau-y += core/subdev/instmem/nv04.o
+nouveau-y += core/subdev/instmem/nv40.o
+nouveau-y += core/subdev/instmem/nv50.o
+nouveau-y += core/subdev/ltcg/nvc0.o
+nouveau-y += core/subdev/mc/base.o
+nouveau-y += core/subdev/mc/nv04.o
+nouveau-y += core/subdev/mc/nv44.o
+nouveau-y += core/subdev/mc/nv50.o
+nouveau-y += core/subdev/mc/nv98.o
+nouveau-y += core/subdev/mc/nvc0.o
+nouveau-y += core/subdev/mxm/base.o
+nouveau-y += core/subdev/mxm/mxms.o
+nouveau-y += core/subdev/mxm/nv50.o
+nouveau-y += core/subdev/therm/base.o
+nouveau-y += core/subdev/therm/fan.o
+nouveau-y += core/subdev/therm/ic.o
+nouveau-y += core/subdev/therm/nv40.o
+nouveau-y += core/subdev/therm/nv50.o
+nouveau-y += core/subdev/therm/temp.o
+nouveau-y += core/subdev/timer/base.o
+nouveau-y += core/subdev/timer/nv04.o
+nouveau-y += core/subdev/vm/base.o
+nouveau-y += core/subdev/vm/nv04.o
+nouveau-y += core/subdev/vm/nv41.o
+nouveau-y += core/subdev/vm/nv44.o
+nouveau-y += core/subdev/vm/nv50.o
+nouveau-y += core/subdev/vm/nvc0.o
+
+nouveau-y += core/engine/dmaobj/base.o
+nouveau-y += core/engine/dmaobj/nv04.o
+nouveau-y += core/engine/dmaobj/nv50.o
+nouveau-y += core/engine/dmaobj/nvc0.o
+nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/copy/nva3.o
+nouveau-y += core/engine/copy/nvc0.o
+nouveau-y += core/engine/copy/nve0.o
+nouveau-y += core/engine/crypt/nv84.o
+nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/disp/nv04.o
+nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nvd0.o
+nouveau-y += core/engine/disp/vga.o
+nouveau-y += core/engine/fifo/base.o
+nouveau-y += core/engine/fifo/nv04.o
+nouveau-y += core/engine/fifo/nv10.o
+nouveau-y += core/engine/fifo/nv17.o
+nouveau-y += core/engine/fifo/nv40.o
+nouveau-y += core/engine/fifo/nv50.o
+nouveau-y += core/engine/fifo/nv84.o
+nouveau-y += core/engine/fifo/nvc0.o
+nouveau-y += core/engine/fifo/nve0.o
+nouveau-y += core/engine/graph/ctxnv40.o
+nouveau-y += core/engine/graph/ctxnv50.o
+nouveau-y += core/engine/graph/ctxnvc0.o
+nouveau-y += core/engine/graph/ctxnve0.o
+nouveau-y += core/engine/graph/nv04.o
+nouveau-y += core/engine/graph/nv10.o
+nouveau-y += core/engine/graph/nv20.o
+nouveau-y += core/engine/graph/nv25.o
+nouveau-y += core/engine/graph/nv2a.o
+nouveau-y += core/engine/graph/nv30.o
+nouveau-y += core/engine/graph/nv34.o
+nouveau-y += core/engine/graph/nv35.o
+nouveau-y += core/engine/graph/nv40.o
+nouveau-y += core/engine/graph/nv50.o
+nouveau-y += core/engine/graph/nvc0.o
+nouveau-y += core/engine/graph/nve0.o
+nouveau-y += core/engine/mpeg/nv31.o
+nouveau-y += core/engine/mpeg/nv40.o
+nouveau-y += core/engine/mpeg/nv50.o
+nouveau-y += core/engine/mpeg/nv84.o
+nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/software/nv04.o
+nouveau-y += core/engine/software/nv10.o
+nouveau-y += core/engine/software/nv50.o
+nouveau-y += core/engine/software/nvc0.o
+nouveau-y += core/engine/vp/nv84.o
+
+# drm/core
+nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
+nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o
+nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
+nouveau-y += nouveau_prime.o nouveau_abi16.o
+nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
+
+# drm/kms
+nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
+nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
+nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
+
+# drm/kms/nv04:nv50
+nouveau-y += nouveau_hw.o nouveau_calc.o
+nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
+nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
+
+# drm/kms/nv50-
+nouveau-y += nv50_display.o nvd0_display.o
+nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
+nouveau-y += nv50_evo.o
+
+# drm/pm
+nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
+nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
+nouveau-y += nouveau_mem.o
+
+# other random bits
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
-nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
new file mode 100644
index 0000000000000000000000000000000000000000..c617f0480071ae4c07e9d524513149cfb1ac0452
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/option.h>
+
+#include <subdev/device.h>
+
+static void
+nouveau_client_dtor(struct nouveau_object *object)
+{
+ struct nouveau_client *client = (void *)object;
+ nouveau_object_ref(NULL, &client->device);
+ nouveau_handle_destroy(client->root);
+ nouveau_namedb_destroy(&client->base);
+}
+
+static struct nouveau_oclass
+nouveau_client_oclass = {
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .dtor = nouveau_client_dtor,
+ },
+};
+
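+/* create a client object: looks up the named device, creates the
+ * client's namedb and installs the client's root handle
+ */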
+int
+nouveau_client_create_(const char *name, u64 devname, const char *cfg,
+ const char *dbg, int length, void **pobject)
+{
+ struct nouveau_object *device;
+ struct nouveau_client *client;
+ int ret;
+
+ device = (void *)nouveau_device_find(devname);
+ if (!device)
+ return -ENODEV;
+
+ ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
+ NV_CLIENT_CLASS, nouveau_device_sclass,
+ 0, length, pobject);
+ client = *pobject;
+ if (ret)
+ return ret;
+
+ ret = nouveau_handle_create(nv_object(client), ~0, ~0,
+ nv_object(client), &client->root);
+ if (ret) {
+ nouveau_namedb_destroy(&client->base);
+ return ret;
+ }
+
+ /* prevent init/fini from being called; the OS is in charge of this */
+ atomic_set(&nv_object(client)->usecount, 2);
+
+ nouveau_object_ref(device, &client->device);
+ snprintf(client->name, sizeof(client->name), "%s", name);
+ client->debug = nouveau_dbgopt(dbg, "CLIENT");
+ return 0;
+}
+
+int
+nouveau_client_init(struct nouveau_client *client)
+{
+ int ret;
+ nv_debug(client, "init running\n");
+ ret = nouveau_handle_init(client->root);
+ nv_debug(client, "init completed with %d\n", ret);
+ return ret;
+}
+
+int
+nouveau_client_fini(struct nouveau_client *client, bool suspend)
+{
+ const char *name[2] = { "fini", "suspend" };
+ int ret;
+
+ nv_debug(client, "%s running\n", name[suspend]);
+ ret = nouveau_handle_fini(client->root, suspend);
+ nv_debug(client, "%s completed with %d\n", name[suspend], ret);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
new file mode 100644
index 0000000000000000000000000000000000000000..e41b10d5eb595d62da2c0d7a302992a720213edf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/client.h>
+#include <core/engctx.h>
+
+#include <subdev/vm.h>
+
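+/* scan the engine's context list for a context already owned by 'parent';
+ * when found, take a reference and return it (caller holds engine->lock)
+ */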
+static inline int
+nouveau_engctx_exists(struct nouveau_object *parent,
+ struct nouveau_engine *engine, void **pobject)
+{
+ struct nouveau_engctx *engctx;
+ struct nouveau_object *parctx;
+
+ list_for_each_entry(engctx, &engine->contexts, head) {
+ parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
+ if (parctx == parent) {
+ atomic_inc(&nv_object(engctx)->refcount);
+ *pobject = engctx;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int
+nouveau_engctx_create_(struct nouveau_object *parent,
+ struct nouveau_object *engobj,
+ struct nouveau_oclass *oclass,
+ struct nouveau_object *pargpu,
+ u32 size, u32 align, u32 flags,
+ int length, void **pobject)
+{
+ struct nouveau_client *client = nouveau_client(parent);
+ struct nouveau_engine *engine = nv_engine(engobj);
+ struct nouveau_object *engctx;
+ unsigned long save;
+ int ret;
+
+ /* check if this engine already has a context for the parent object,
+ * and reference it instead of creating a new one
+ */
+ spin_lock_irqsave(&engine->lock, save);
+ ret = nouveau_engctx_exists(parent, engine, pobject);
+ spin_unlock_irqrestore(&engine->lock, save);
+ if (ret)
+ return ret;
+
+ /* create the new context, supports creating both raw objects and
+ * objects backed by instance memory
+ */
+ if (size) {
+ ret = nouveau_gpuobj_create_(parent, engobj, oclass,
+ NV_ENGCTX_CLASS,
+ pargpu, size, align, flags,
+ length, pobject);
+ } else {
+ ret = nouveau_object_create_(parent, engobj, oclass,
+ NV_ENGCTX_CLASS, length, pobject);
+ }
+
+ engctx = *pobject;
+ if (ret)
+ return ret;
+
+ /* must take the lock again and re-check a context doesn't already
+ * exist (in case of a race) - the lock had to be dropped before as
+ * it's not possible to allocate the object with it held.
+ */
+ spin_lock_irqsave(&engine->lock, save);
+ ret = nouveau_engctx_exists(parent, engine, pobject);
+ if (ret) {
+ spin_unlock_irqrestore(&engine->lock, save);
+ nouveau_object_ref(NULL, &engctx);
+ return ret;
+ }
+
+ if (client->vm)
+ atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
+ list_add(&nv_engctx(engctx)->head, &engine->contexts);
+ nv_engctx(engctx)->addr = ~0ULL;
+ spin_unlock_irqrestore(&engine->lock, save);
+ return 0;
+}
+
+void
+nouveau_engctx_destroy(struct nouveau_engctx *engctx)
+{
+ struct nouveau_object *engobj = nv_object(engctx)->engine;
+ struct nouveau_engine *engine = nv_engine(engobj);
+ struct nouveau_client *client = nouveau_client(engctx);
+ unsigned long save;
+
+ nouveau_gpuobj_unmap(&engctx->vma);
+ spin_lock_irqsave(&engine->lock, save);
+ list_del(&engctx->head);
+ spin_unlock_irqrestore(&engine->lock, save);
+
+ if (client->vm)
+ atomic_dec(&client->vm->engref[nv_engidx(engobj)]);
+
+ if (engctx->base.size)
+ nouveau_gpuobj_destroy(&engctx->base);
+ else
+ nouveau_object_destroy(&engctx->base.base);
+}
+
+int
+nouveau_engctx_init(struct nouveau_engctx *engctx)
+{
+ struct nouveau_object *object = nv_object(engctx);
+ struct nouveau_subdev *subdev = nv_subdev(object->engine);
+ struct nouveau_object *parent;
+ struct nouveau_subdev *pardev;
+ int ret;
+
+ ret = nouveau_gpuobj_init(&engctx->base);
+ if (ret)
+ return ret;
+
+ parent = nv_pclass(object->parent, NV_PARENT_CLASS);
+ pardev = nv_subdev(parent->engine);
+ if (nv_parent(parent)->context_attach) {
+ mutex_lock(&pardev->mutex);
+ ret = nv_parent(parent)->context_attach(parent, object);
+ mutex_unlock(&pardev->mutex);
+ }
+
+ if (ret) {
+ nv_error(parent, "failed to attach %s context, %d\n",
+ subdev->name, ret);
+ return ret;
+ }
+
+ nv_debug(parent, "attached %s context\n", subdev->name);
+ return 0;
+}
+
+int
+nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
+{
+ struct nouveau_object *object = nv_object(engctx);
+ struct nouveau_subdev *subdev = nv_subdev(object->engine);
+ struct nouveau_object *parent;
+ struct nouveau_subdev *pardev;
+ int ret = 0;
+
+ parent = nv_pclass(object->parent, NV_PARENT_CLASS);
+ pardev = nv_subdev(parent->engine);
+ if (nv_parent(parent)->context_detach) {
+ mutex_lock(&pardev->mutex);
+ ret = nv_parent(parent)->context_detach(parent, suspend, object);
+ mutex_unlock(&pardev->mutex);
+ }
+
+ if (ret) {
+ nv_error(parent, "failed to detach %s context, %d\n",
+ subdev->name, ret);
+ return ret;
+ }
+
+ nv_debug(parent, "detached %s context\n", subdev->name);
+ return nouveau_gpuobj_fini(&engctx->base, suspend);
+}
+
+void
+_nouveau_engctx_dtor(struct nouveau_object *object)
+{
+ nouveau_engctx_destroy(nv_engctx(object));
+}
+
+int
+_nouveau_engctx_init(struct nouveau_object *object)
+{
+ return nouveau_engctx_init(nv_engctx(object));
+}
+
+
+int
+_nouveau_engctx_fini(struct nouveau_object *object, bool suspend)
+{
+ return nouveau_engctx_fini(nv_engctx(object), suspend);
+}
+
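+/* note: on success engine->lock is left held (the saved irq flags are
+ * stashed in engctx->save) and must be dropped again with
+ * nouveau_engctx_put() when the caller is done with the context
+ */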
+struct nouveau_object *
+nouveau_engctx_get(struct nouveau_engine *engine, u64 addr)
+{
+ struct nouveau_engctx *engctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->lock, flags);
+ list_for_each_entry(engctx, &engine->contexts, head) {
+ if (engctx->addr == addr) {
+ engctx->save = flags;
+ return nv_object(engctx);
+ }
+ }
+ spin_unlock_irqrestore(&engine->lock, flags);
+ return NULL;
+}
+
+void
+nouveau_engctx_put(struct nouveau_object *object)
+{
+ if (object) {
+ struct nouveau_engine *engine = nv_engine(object->engine);
+ struct nouveau_engctx *engctx = nv_engctx(object);
+ spin_unlock_irqrestore(&engine->lock, engctx->save);
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c
new file mode 100644
index 0000000000000000000000000000000000000000..09b3bd502fd05daccaf3292d1cd872e5e741e312
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/engine.h>
+#include <core/option.h>
+
+int
+nouveau_engine_create_(struct nouveau_object *parent,
+ struct nouveau_object *engobj,
+ struct nouveau_oclass *oclass, bool enable,
+ const char *iname, const char *fname,
+ int length, void **pobject)
+{
+ struct nouveau_device *device = nv_device(parent);
+ struct nouveau_engine *engine;
+ int ret;
+
+ ret = nouveau_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
+ iname, fname, length, pobject);
+ engine = *pobject;
+ if (ret)
+ return ret;
+
+ if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
+ if (!enable)
+ nv_warn(engine, "disabled, %s=1 to enable\n", iname);
+ return -ENODEV;
+ }
+
+ INIT_LIST_HEAD(&engine->contexts);
+ spin_lock_init(&engine->lock);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/core/core/enum.c
similarity index 86%
rename from drivers/gpu/drm/nouveau/nouveau_util.c
rename to drivers/gpu/drm/nouveau/core/core/enum.c
index e51b51503baa0c9ee7ee417116b9e7145c55f2b7..7cc7133d82dee933203effbba2e0560143ad4514 100644
--- a/drivers/gpu/drm/nouveau/nouveau_util.c
+++ b/drivers/gpu/drm/nouveau/core/core/enum.c
@@ -25,27 +25,8 @@
*
*/
-#include <linux/ratelimit.h>
-
-#include "nouveau_util.h"
-
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-void
-nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
-{
- while (bf->name) {
- if (value & bf->mask) {
- printk(" %s", bf->name);
- value &= ~bf->mask;
- }
-
- bf++;
- }
-
- if (value)
- printk(" (unknown bits 0x%08x)", value);
-}
+#include <core/os.h>
+#include <core/enum.h>
const struct nouveau_enum *
nouveau_enum_find(const struct nouveau_enum *en, u32 value)
@@ -63,16 +44,24 @@ void
nouveau_enum_print(const struct nouveau_enum *en, u32 value)
{
en = nouveau_enum_find(en, value);
- if (en) {
+ if (en)
printk("%s", en->name);
- return;
- }
-
- printk("(unknown enum 0x%08x)", value);
+ else
+ printk("(unknown enum 0x%08x)", value);
}
-int
-nouveau_ratelimit(void)
+void
+nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
{
- return __ratelimit(&nouveau_ratelimit_state);
+ while (bf->name) {
+ if (value & bf->mask) {
+ printk(" %s", bf->name);
+ value &= ~bf->mask;
+ }
+
+ bf++;
+ }
+
+ if (value)
+ printk(" (unknown bits 0x%08x)", value);
}
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
new file mode 100644
index 0000000000000000000000000000000000000000..1f34549aff18748758d4d72feed0eba5800734ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/gpuobj.h>
+
+#include <subdev/instmem.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
+
+void
+nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
+{
+ int i;
+
+ if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0x00000000);
+ }
+
+ if (gpuobj->heap.block_size)
+ nouveau_mm_fini(&gpuobj->heap);
+
+ nouveau_object_destroy(&gpuobj->base);
+}
+
+int
+nouveau_gpuobj_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pclass,
+ struct nouveau_object *pargpu,
+ u32 size, u32 align, u32 flags,
+ int length, void **pobject)
+{
+ struct nouveau_instmem *imem = nouveau_instmem(parent);
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nouveau_gpuobj *gpuobj;
+ struct nouveau_mm *heap = NULL;
+ int ret, i;
+ u64 addr;
+
+ *pobject = NULL;
+
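+ /* when a parent gpuobj is supplied, suballocate from its heap;
+ * otherwise allocate fresh backing storage from instmem, mapping it
+ * through the BAR when one is available
+ */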
+ if (pargpu) {
+ while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
+ if (nv_gpuobj(pargpu)->heap.block_size)
+ break;
+ pargpu = pargpu->parent;
+ }
+
+ if (unlikely(pargpu == NULL)) {
+ nv_error(parent, "no gpuobj heap\n");
+ return -EINVAL;
+ }
+
+ addr = nv_gpuobj(pargpu)->addr;
+ heap = &nv_gpuobj(pargpu)->heap;
+ atomic_inc(&parent->refcount);
+ } else {
+ ret = imem->alloc(imem, parent, size, align, &parent);
+ pargpu = parent;
+ if (ret)
+ return ret;
+
+ addr = nv_memobj(pargpu)->addr;
+ size = nv_memobj(pargpu)->size;
+
+ if (bar && bar->alloc) {
+ struct nouveau_instobj *iobj = (void *)parent;
+ struct nouveau_mem **mem = (void *)(iobj + 1);
+ struct nouveau_mem *node = *mem;
+ if (!bar->alloc(bar, parent, node, &pargpu)) {
+ nouveau_object_ref(NULL, &parent);
+ parent = pargpu;
+ }
+ }
+ }
+
+ ret = nouveau_object_create_(parent, engine, oclass, pclass |
+ NV_GPUOBJ_CLASS, length, pobject);
+ nouveau_object_ref(NULL, &parent);
+ gpuobj = *pobject;
+ if (ret)
+ return ret;
+
+ gpuobj->parent = pargpu;
+ gpuobj->flags = flags;
+ gpuobj->addr = addr;
+ gpuobj->size = size;
+
+ if (heap) {
+ ret = nouveau_mm_head(heap, 1, size, size,
+ max(align, (u32)1), &gpuobj->node);
+ if (ret)
+ return ret;
+
+ gpuobj->addr += gpuobj->node->offset;
+ }
+
+ if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
+ ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+ if (ret)
+ return ret;
+ }
+
+ if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0x00000000);
+ }
+
+ return ret;
+}
+
+struct nouveau_gpuobj_class {
+ struct nouveau_object *pargpu;
+ u64 size;
+ u32 align;
+ u32 flags;
+};
+
+static int
+_nouveau_gpuobj_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_gpuobj_class *args = data;
+ struct nouveau_gpuobj *object;
+ int ret;
+
+ ret = nouveau_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
+ args->size, args->align, args->flags,
+ &object);
+ *pobject = nv_object(object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+_nouveau_gpuobj_dtor(struct nouveau_object *object)
+{
+ nouveau_gpuobj_destroy(nv_gpuobj(object));
+}
+
+int
+_nouveau_gpuobj_init(struct nouveau_object *object)
+{
+ return nouveau_gpuobj_init(nv_gpuobj(object));
+}
+
+int
+_nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
+{
+ return nouveau_gpuobj_fini(nv_gpuobj(object), suspend);
+}
+
+u32
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
+{
+ struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
+ struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
+ if (gpuobj->node)
+ addr += gpuobj->node->offset;
+ return pfuncs->rd32(gpuobj->parent, addr);
+}
+
+void
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+ struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
+ struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
+ if (gpuobj->node)
+ addr += gpuobj->node->offset;
+ pfuncs->wr32(gpuobj->parent, addr, data);
+}
+
+static struct nouveau_oclass
+_nouveau_gpuobj_oclass = {
+ .handle = 0x00000000,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_gpuobj_ctor,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+ },
+};
+
+int
+nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
+ u32 size, u32 align, u32 flags,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ struct nouveau_object *engine = parent;
+ struct nouveau_gpuobj_class args = {
+ .pargpu = pargpu,
+ .size = size,
+ .align = align,
+ .flags = flags,
+ };
+
+ if (!nv_iclass(engine, NV_SUBDEV_CLASS))
+ engine = engine->engine;
+ BUG_ON(engine == NULL);
+
+ return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass,
+ &args, sizeof(args),
+ (struct nouveau_object **)pgpuobj);
+}
+
+int
+nouveau_gpuobj_map(struct nouveau_gpuobj *gpuobj, u32 access,
+ struct nouveau_vma *vma)
+{
+ struct nouveau_bar *bar = nouveau_bar(gpuobj);
+ int ret = -EINVAL;
+
+ if (bar && bar->umap) {
+ struct nouveau_instobj *iobj = (void *)
+ nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
+ struct nouveau_mem **mem = (void *)(iobj + 1);
+ ret = bar->umap(bar, *mem, access, vma);
+ }
+
+ return ret;
+}
+
+int
+nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
+ u32 access, struct nouveau_vma *vma)
+{
+ struct nouveau_instobj *iobj = (void *)
+ nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
+ struct nouveau_mem **mem = (void *)(iobj + 1);
+ int ret;
+
+ ret = nouveau_vm_get(vm, gpuobj->size, 12, access, vma);
+ if (ret)
+ return ret;
+
+ nouveau_vm_map(vma, *mem);
+ return 0;
+}
+
+void
+nouveau_gpuobj_unmap(struct nouveau_vma *vma)
+{
+ if (vma->node) {
+ nouveau_vm_unmap(vma);
+ nouveau_vm_put(vma);
+ }
+}
+
+/* the below is basically only here to support sharing the paged dma object
+ * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
+ * anywhere else.
+ */
+
+static void
+nouveau_gpudup_dtor(struct nouveau_object *object)
+{
+ struct nouveau_gpuobj *gpuobj = (void *)object;
+ nouveau_object_ref(NULL, &gpuobj->parent);
+ nouveau_object_destroy(&gpuobj->base);
+}
+
+static struct nouveau_oclass
+nouveau_gpudup_oclass = {
+ .handle = NV_GPUOBJ_CLASS,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .dtor = nouveau_gpudup_dtor,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+ },
+};
+
+int
+nouveau_gpuobj_dup(struct nouveau_object *parent, struct nouveau_gpuobj *base,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ struct nouveau_gpuobj *gpuobj;
+ int ret;
+
+ ret = nouveau_object_create(parent, parent->engine,
+ &nouveau_gpudup_oclass, 0, &gpuobj);
+ *pgpuobj = gpuobj;
+ if (ret)
+ return ret;
+
+ nouveau_object_ref(nv_object(base), &gpuobj->parent);
+ gpuobj->addr = base->addr;
+ gpuobj->size = base->size;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
new file mode 100644
index 0000000000000000000000000000000000000000..b8d2cbf8a7a7bb9b0f5a2c8fabfb65de9e93142a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/handle.h>
+#include <core/client.h>
+
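+/* logging helper: prefixes messages with the names of the handle and its
+ * parent so operations on the object tree can be traced
+ */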
+#define hprintk(h,l,f,a...) do { \
+ struct nouveau_client *c = nouveau_client((h)->object); \
+ struct nouveau_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
+ nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a); \
+} while(0)
+
+int
+nouveau_handle_init(struct nouveau_handle *handle)
+{
+ struct nouveau_handle *item;
+ int ret;
+
+ hprintk(handle, TRACE, "init running\n");
+ ret = nouveau_object_inc(handle->object);
+ if (ret)
+ return ret;
+
+ hprintk(handle, TRACE, "init children\n");
+ list_for_each_entry(item, &handle->tree, head) {
+ ret = nouveau_handle_init(item);
+ if (ret)
+ goto fail;
+ }
+
+ hprintk(handle, TRACE, "init completed\n");
+ return 0;
+fail:
+ hprintk(handle, ERROR, "init failed with %d\n", ret);
+ list_for_each_entry_continue_reverse(item, &handle->tree, head) {
+ nouveau_handle_fini(item, false);
+ }
+
+ nouveau_object_dec(handle->object, false);
+ return ret;
+}
+
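+/* fini (or suspend) a handle and its children; if a suspend fails
+ * part-way, the already-suspended children are re-initialised to roll
+ * back to a running state
+ */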
+int
+nouveau_handle_fini(struct nouveau_handle *handle, bool suspend)
+{
+ static char *name[2] = { "fini", "suspend" };
+ struct nouveau_handle *item;
+ int ret;
+
+ hprintk(handle, TRACE, "%s children\n", name[suspend]);
+ list_for_each_entry(item, &handle->tree, head) {
+ ret = nouveau_handle_fini(item, suspend);
+ if (ret && suspend)
+ goto fail;
+ }
+
+ hprintk(handle, TRACE, "%s running\n", name[suspend]);
+ if (handle->object) {
+ ret = nouveau_object_dec(handle->object, suspend);
+ if (ret && suspend)
+ goto fail;
+ }
+
+ hprintk(handle, TRACE, "%s completed\n", name[suspend]);
+ return 0;
+fail:
+ hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
+ list_for_each_entry_continue_reverse(item, &handle->tree, head) {
+ int rret = nouveau_handle_init(item);
+ if (rret)
+ hprintk(handle, FATAL, "failed to restart, %d\n", rret);
+ }
+
+ return ret;
+}
+
+int
+nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
+ struct nouveau_object *object,
+ struct nouveau_handle **phandle)
+{
+ struct nouveau_object *namedb;
+ struct nouveau_handle *handle;
+ int ret;
+
+ namedb = parent;
+ while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
+ namedb = namedb->parent;
+
+ handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&handle->head);
+ INIT_LIST_HEAD(&handle->tree);
+ handle->name = _handle;
+ handle->priv = ~0;
+
+ ret = nouveau_namedb_insert(nv_namedb(namedb), _handle, object, handle);
+ if (ret) {
+ kfree(handle);
+ return ret;
+ }
+
+ if (nv_parent(parent)->object_attach) {
+ ret = nv_parent(parent)->object_attach(parent, object, _handle);
+ if (ret < 0) {
+ nouveau_handle_destroy(handle);
+ return ret;
+ }
+
+ handle->priv = ret;
+ }
+
+ if (object != namedb) {
+ while (!nv_iclass(namedb, NV_CLIENT_CLASS))
+ namedb = namedb->parent;
+
+ handle->parent = nouveau_namedb_get(nv_namedb(namedb), _parent);
+ if (handle->parent) {
+ list_add(&handle->head, &handle->parent->tree);
+ nouveau_namedb_put(handle->parent);
+ }
+ }
+
+ hprintk(handle, TRACE, "created\n");
+ return 0;
+}
+
+void
+nouveau_handle_destroy(struct nouveau_handle *handle)
+{
+ struct nouveau_handle *item, *temp;
+
+ hprintk(handle, TRACE, "destroy running\n");
+ list_for_each_entry_safe(item, temp, &handle->tree, head) {
+ nouveau_handle_destroy(item);
+ }
+ list_del(&handle->head);
+
+ if (handle->priv != ~0) {
+ struct nouveau_object *parent = handle->parent->object;
+ nv_parent(parent)->object_detach(parent, handle->priv);
+ }
+
+ hprintk(handle, TRACE, "destroy completed\n");
+ nouveau_namedb_remove(handle);
+ kfree(handle);
+}
+
+struct nouveau_object *
+nouveau_handle_ref(struct nouveau_object *parent, u32 name)
+{
+ struct nouveau_object *object = NULL;
+ struct nouveau_handle *handle;
+
+ while (!nv_iclass(parent, NV_NAMEDB_CLASS))
+ parent = parent->parent;
+
+ handle = nouveau_namedb_get(nv_namedb(parent), name);
+ if (handle) {
+ nouveau_object_ref(handle->object, &object);
+ nouveau_namedb_put(handle);
+ }
+
+ return object;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_class(struct nouveau_object *engctx, u16 oclass)
+{
+ struct nouveau_namedb *namedb;
+ if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+ return nouveau_namedb_get_class(namedb, oclass);
+ return NULL;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_vinst(struct nouveau_object *engctx, u64 vinst)
+{
+ struct nouveau_namedb *namedb;
+ if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+ return nouveau_namedb_get_vinst(namedb, vinst);
+ return NULL;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_cinst(struct nouveau_object *engctx, u32 cinst)
+{
+ struct nouveau_namedb *namedb;
+ if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+ return nouveau_namedb_get_cinst(namedb, cinst);
+ return NULL;
+}
+
+void
+nouveau_handle_put(struct nouveau_handle *handle)
+{
+ if (handle)
+ nouveau_namedb_put(handle);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
similarity index 53%
rename from drivers/gpu/drm/nouveau/nouveau_mm.c
rename to drivers/gpu/drm/nouveau/core/core/mm.c
index 3e98806dd76ff2f1e3a08da2b56dce58dc451a4b..bfddf87926dd8857a73df69cb26757836bd02f1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,20 +22,52 @@
* Authors: Ben Skeggs
*/
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
+#include "core/os.h"
+#include "core/mm.h"
-static inline void
-region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
+ list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
+
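+/* return a node to the allocator, coalescing it with any adjacent free
+ * regions before (re-)inserting it into the sorted free list
+ */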
+void
+nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)
{
- list_del(&a->nl_entry);
- list_del(&a->fl_entry);
- kfree(a);
+ struct nouveau_mm_node *this = *pthis;
+
+ if (this) {
+ struct nouveau_mm_node *prev = node(this, prev);
+ struct nouveau_mm_node *next = node(this, next);
+
+ if (prev && prev->type == 0) {
+ prev->length += this->length;
+ list_del(&this->nl_entry);
+ kfree(this); this = prev;
+ }
+
+ if (next && next->type == 0) {
+ next->offset = this->offset;
+ next->length += this->length;
+ if (this->type == 0)
+ list_del(&this->fl_entry);
+ list_del(&this->nl_entry);
+ kfree(this); this = NULL;
+ }
+
+ if (this && this->type != 0) {
+ list_for_each_entry(prev, &mm->free, fl_entry) {
+ if (this->offset < prev->offset)
+ break;
+ }
+
+ list_add_tail(&this->fl_entry, &prev->fl_entry);
+ this->type = 0;
+ }
+ }
+
+ *pthis = NULL;
}
static struct nouveau_mm_node *
-region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
{
struct nouveau_mm_node *b;
@@ -57,38 +89,12 @@ region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
return b;
}
-#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
- list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
-
-void
-nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
-{
- struct nouveau_mm_node *prev = node(this, prev);
- struct nouveau_mm_node *next = node(this, next);
-
- list_add(&this->fl_entry, &mm->free);
- this->type = 0;
-
- if (prev && prev->type == 0) {
- prev->length += this->length;
- region_put(mm, this);
- this = prev;
- }
-
- if (next && next->type == 0) {
- next->offset = this->offset;
- next->length += this->length;
- region_put(mm, this);
- }
-}
-
int
-nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
- u32 align, struct nouveau_mm_node **pnode)
+nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
+ u32 align, struct nouveau_mm_node **pnode)
{
struct nouveau_mm_node *prev, *this, *next;
- u32 min = size_nc ? size_nc : size;
- u32 align_mask = align - 1;
+ u32 mask = align - 1;
u32 splitoff;
u32 s, e;
@@ -104,16 +110,86 @@ nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
if (next && next->type != type)
e = rounddown(e, mm->block_size);
- s = (s + align_mask) & ~align_mask;
- e &= ~align_mask;
- if (s > e || e - s < min)
+ s = (s + mask) & ~mask;
+ e &= ~mask;
+ if (s > e || e - s < size_min)
continue;
splitoff = s - this->offset;
- if (splitoff && !region_split(mm, this, splitoff))
+ if (splitoff && !region_head(mm, this, splitoff))
+ return -ENOMEM;
+
+ this = region_head(mm, this, min(size_max, e - s));
+ if (!this)
+ return -ENOMEM;
+
+ this->type = type;
+ list_del(&this->fl_entry);
+ *pnode = this;
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
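+/* split 'size' bytes off the end of region 'a' (the mirror image of
+ * region_head(), which splits from the start), returning the new node
+ */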
+static struct nouveau_mm_node *
+region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+{
+ struct nouveau_mm_node *b;
+
+ if (a->length == size)
+ return a;
+
+ b = kmalloc(sizeof(*b), GFP_KERNEL);
+ if (unlikely(b == NULL))
+ return NULL;
+
+ a->length -= size;
+ b->offset = a->offset + a->length;
+ b->length = size;
+ b->type = a->type;
+
+ list_add(&b->nl_entry, &a->nl_entry);
+ if (b->type == 0)
+ list_add(&b->fl_entry, &a->fl_entry);
+ return b;
+}
+
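+/* allocate from the highest-addressed suitable free region, scanning the
+ * free list in reverse; the top-down counterpart to nouveau_mm_head()
+ */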
+int
+nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
+ u32 align, struct nouveau_mm_node **pnode)
+{
+ struct nouveau_mm_node *prev, *this, *next;
+ u32 mask = align - 1;
+
+ list_for_each_entry_reverse(this, &mm->free, fl_entry) {
+ u32 e = this->offset + this->length;
+ u32 s = this->offset;
+ u32 c = 0, a;
+
+ prev = node(this, prev);
+ if (prev && prev->type != type)
+ s = roundup(s, mm->block_size);
+
+ next = node(this, next);
+ if (next && next->type != type) {
+ e = rounddown(e, mm->block_size);
+ c = next->offset - e;
+ }
+
+ s = (s + mask) & ~mask;
+ a = e - s;
+ if (s > e || a < size_min)
+ continue;
+
+ a = min(a, size_max);
+ s = (e - a) & ~mask;
+ c += (e - s) - a;
+
+ if (c && !region_tail(mm, this, c))
return -ENOMEM;
- this = region_split(mm, this, min(size, e - s));
+ this = region_tail(mm, this, a);
if (!this)
return -ENOMEM;
@@ -148,6 +224,7 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
list_add_tail(&node->nl_entry, &mm->nodes);
list_add_tail(&node->fl_entry, &mm->free);
mm->heap_nodes++;
+ mm->heap_size += length;
return 0;
}
@@ -159,15 +236,8 @@ nouveau_mm_fini(struct nouveau_mm *mm)
int nodes = 0;
list_for_each_entry(node, &mm->nodes, nl_entry) {
- if (nodes++ == mm->heap_nodes) {
- printk(KERN_ERR "nouveau_mm in use at destroy time!\n");
- list_for_each_entry(node, &mm->nodes, nl_entry) {
- printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
- node->type, node->offset, node->length);
- }
- WARN_ON(1);
+ if (nodes++ == mm->heap_nodes)
return -EBUSY;
- }
}
kfree(heap);
diff --git a/drivers/gpu/drm/nouveau/core/core/namedb.c b/drivers/gpu/drm/nouveau/core/core/namedb.c
new file mode 100644
index 0000000000000000000000000000000000000000..1ce95a8709df7763ad24f93f9cb21b61e44e34aa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/namedb.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/gpuobj.h>
+
+static struct nouveau_handle *
+nouveau_namedb_lookup(struct nouveau_namedb *namedb, u32 name)
+{
+ struct nouveau_handle *handle;
+
+ list_for_each_entry(handle, &namedb->list, node) {
+ if (handle->name == name)
+ return handle;
+ }
+
+ return NULL;
+}
+
+static struct nouveau_handle *
+nouveau_namedb_lookup_class(struct nouveau_namedb *namedb, u16 oclass)
+{
+ struct nouveau_handle *handle;
+
+ list_for_each_entry(handle, &namedb->list, node) {
+ if (nv_mclass(handle->object) == oclass)
+ return handle;
+ }
+
+ return NULL;
+}
+
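+/* look up a handle by the absolute instance address of its backing gpuobj */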
+static struct nouveau_handle *
+nouveau_namedb_lookup_vinst(struct nouveau_namedb *namedb, u64 vinst)
+{
+ struct nouveau_handle *handle;
+
+ list_for_each_entry(handle, &namedb->list, node) {
+ if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
+ if (nv_gpuobj(handle->object)->addr == vinst)
+ return handle;
+ }
+ }
+
+ return NULL;
+}
+
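+/* look up a handle by its gpuobj's offset within the parent's heap */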
+static struct nouveau_handle *
+nouveau_namedb_lookup_cinst(struct nouveau_namedb *namedb, u32 cinst)
+{
+ struct nouveau_handle *handle;
+
+ list_for_each_entry(handle, &namedb->list, node) {
+ if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
+ if (nv_gpuobj(handle->object)->node &&
+ nv_gpuobj(handle->object)->node->offset == cinst)
+ return handle;
+ }
+ }
+
+ return NULL;
+}
+
+int
+nouveau_namedb_insert(struct nouveau_namedb *namedb, u32 name,
+ struct nouveau_object *object,
+ struct nouveau_handle *handle)
+{
+ int ret = -EEXIST;
+ write_lock_irq(&namedb->lock);
+ if (!nouveau_namedb_lookup(namedb, name)) {
+ nouveau_object_ref(object, &handle->object);
+ handle->namedb = namedb;
+ list_add(&handle->node, &namedb->list);
+ ret = 0;
+ }
+ write_unlock_irq(&namedb->lock);
+ return ret;
+}
+
+void
+nouveau_namedb_remove(struct nouveau_handle *handle)
+{
+ struct nouveau_namedb *namedb = handle->namedb;
+ struct nouveau_object *object = handle->object;
+ write_lock_irq(&namedb->lock);
+ list_del(&handle->node);
+ write_unlock_irq(&namedb->lock);
+ nouveau_object_ref(NULL, &object);
+}
+
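+/* the namedb_get*() lookups below return with namedb->lock read-held on
+ * success; the caller must release it with nouveau_namedb_put() once
+ * finished with the handle
+ */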
+struct nouveau_handle *
+nouveau_namedb_get(struct nouveau_namedb *namedb, u32 name)
+{
+ struct nouveau_handle *handle;
+ read_lock(&namedb->lock);
+ handle = nouveau_namedb_lookup(namedb, name);
+ if (handle == NULL)
+ read_unlock(&namedb->lock);
+ return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_class(struct nouveau_namedb *namedb, u16 oclass)
+{
+ struct nouveau_handle *handle;
+ read_lock(&namedb->lock);
+ handle = nouveau_namedb_lookup_class(namedb, oclass);
+ if (handle == NULL)
+ read_unlock(&namedb->lock);
+ return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_vinst(struct nouveau_namedb *namedb, u64 vinst)
+{
+ struct nouveau_handle *handle;
+ read_lock(&namedb->lock);
+ handle = nouveau_namedb_lookup_vinst(namedb, vinst);
+ if (handle == NULL)
+ read_unlock(&namedb->lock);
+ return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_cinst(struct nouveau_namedb *namedb, u32 cinst)
+{
+ struct nouveau_handle *handle;
+ read_lock(&namedb->lock);
+ handle = nouveau_namedb_lookup_cinst(namedb, cinst);
+ if (handle == NULL)
+ read_unlock(&namedb->lock);
+ return handle;
+}
+
+void
+nouveau_namedb_put(struct nouveau_handle *handle)
+{
+ if (handle)
+ read_unlock(&handle->namedb->lock);
+}
+
+int
+nouveau_namedb_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pclass,
+ struct nouveau_oclass *sclass, u32 engcls,
+ int length, void **pobject)
+{
+ struct nouveau_namedb *namedb;
+ int ret;
+
+ ret = nouveau_parent_create_(parent, engine, oclass, pclass |
+ NV_NAMEDB_CLASS, sclass, engcls,
+ length, pobject);
+ namedb = *pobject;
+ if (ret)
+ return ret;
+
+ rwlock_init(&namedb->lock);
+ INIT_LIST_HEAD(&namedb->list);
+ return 0;
+}
+
+int
+_nouveau_namedb_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_namedb *object;
+ int ret;
+
+ ret = nouveau_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
+ *pobject = nv_object(object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
new file mode 100644
index 0000000000000000000000000000000000000000..0daab62ea14c68a13f9fa1e58b681f4d39d760e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/engine.h>
+
+#ifdef NOUVEAU_OBJECT_MAGIC
+static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
+static DEFINE_SPINLOCK(_objlist_lock);
+#endif
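+
+/* When built with NOUVEAU_OBJECT_MAGIC, every object is tracked on a global
+ * list so that nouveau_object_debug() can report objects leaked at unload.
+ */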
+
+int
+nouveau_object_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pclass,
+ int size, void **pobject)
+{
+ struct nouveau_object *object;
+
+ object = *pobject = kzalloc(size, GFP_KERNEL);
+ if (!object)
+ return -ENOMEM;
+
+ nouveau_object_ref(parent, &object->parent);
+ nouveau_object_ref(engine, &object->engine);
+ object->oclass = oclass;
+ object->oclass->handle |= pclass;
+ atomic_set(&object->refcount, 1);
+ atomic_set(&object->usecount, 0);
+
+#ifdef NOUVEAU_OBJECT_MAGIC
+ object->_magic = NOUVEAU_OBJECT_MAGIC;
+ spin_lock(&_objlist_lock);
+ list_add(&object->list, &_objlist);
+ spin_unlock(&_objlist_lock);
+#endif
+ return 0;
+}
+
+static int
+_nouveau_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_object *object;
+ int ret;
+
+ ret = nouveau_object_create(parent, engine, oclass, 0, &object);
+ *pobject = nv_object(object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+nouveau_object_destroy(struct nouveau_object *object)
+{
+#ifdef NOUVEAU_OBJECT_MAGIC
+ spin_lock(&_objlist_lock);
+ list_del(&object->list);
+ spin_unlock(&_objlist_lock);
+#endif
+ nouveau_object_ref(NULL, &object->engine);
+ nouveau_object_ref(NULL, &object->parent);
+ kfree(object);
+}
+
+static void
+_nouveau_object_dtor(struct nouveau_object *object)
+{
+ nouveau_object_destroy(object);
+}
+
+int
+nouveau_object_init(struct nouveau_object *object)
+{
+ return 0;
+}
+
+static int
+_nouveau_object_init(struct nouveau_object *object)
+{
+ return nouveau_object_init(object);
+}
+
+int
+nouveau_object_fini(struct nouveau_object *object, bool suspend)
+{
+ return 0;
+}
+
+static int
+_nouveau_object_fini(struct nouveau_object *object, bool suspend)
+{
+ return nouveau_object_fini(object, suspend);
+}
+
+struct nouveau_ofuncs
+nouveau_object_ofuncs = {
+ .ctor = _nouveau_object_ctor,
+ .dtor = _nouveau_object_dtor,
+ .init = _nouveau_object_init,
+ .fini = _nouveau_object_fini,
+};
+
+int
+nouveau_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_ofuncs *ofuncs = oclass->ofuncs;
+ int ret;
+
+ *pobject = NULL;
+
+ ret = ofuncs->ctor(parent, engine, oclass, data, size, pobject);
+ if (ret < 0) {
+ if (ret != -ENODEV) {
+ nv_error(parent, "failed to create 0x%08x, %d\n",
+ oclass->handle, ret);
+ }
+
+ if (*pobject) {
+ ofuncs->dtor(*pobject);
+ *pobject = NULL;
+ }
+
+ return ret;
+ }
+
+ nv_debug(*pobject, "created\n");
+ return 0;
+}
+
+static void
+nouveau_object_dtor(struct nouveau_object *object)
+{
+ nv_debug(object, "destroying\n");
+ nv_ofuncs(object)->dtor(object);
+}
+
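+/* Point *ref at obj, adjusting reference counts: the new object (if any)
+ * gains a reference and the old one loses it, being destroyed when its
+ * count reaches zero. nouveau_object_ref(NULL, &ptr) thus drops a reference.
+ */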
+void
+nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref)
+{
+ if (obj) {
+ atomic_inc(&obj->refcount);
+ nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount));
+ }
+
+ if (*ref) {
+ int dead = atomic_dec_and_test(&(*ref)->refcount);
+ nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount));
+ if (dead)
+ nouveau_object_dtor(*ref);
+ }
+
+ *ref = obj;
+}
+
+int
+nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle,
+ u16 _oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_object *parent = NULL;
+ struct nouveau_object *engctx = NULL;
+ struct nouveau_object *object = NULL;
+ struct nouveau_object *engine;
+ struct nouveau_oclass *oclass;
+ struct nouveau_handle *handle;
+ int ret;
+
+ /* lookup parent object and ensure it *is* a parent */
+ parent = nouveau_handle_ref(client, _parent);
+ if (!parent) {
+ nv_error(client, "parent 0x%08x not found\n", _parent);
+ return -ENOENT;
+ }
+
+ if (!nv_iclass(parent, NV_PARENT_CLASS)) {
+ nv_error(parent, "cannot have children\n");
+ ret = -EINVAL;
+ goto fail_class;
+ }
+
+ /* check that parent supports the requested subclass */
+ ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
+ if (ret) {
+ nv_debug(parent, "illegal class 0x%04x\n", _oclass);
+ goto fail_class;
+ }
+
+ /* make sure engine init has been completed *before* any objects
+ * it controls are created - the constructors may depend on
+ * state calculated at init (i.e. default context construction)
+ */
+ if (engine) {
+ ret = nouveau_object_inc(engine);
+ if (ret)
+ goto fail_class;
+ }
+
+ /* if engine requires it, create a context object to insert
+ * between the parent and its children (eg. PGRAPH context)
+ */
+ if (engine && nv_engine(engine)->cclass) {
+ ret = nouveau_object_ctor(parent, engine,
+ nv_engine(engine)->cclass,
+ data, size, &engctx);
+ if (ret)
+ goto fail_engctx;
+ } else {
+ nouveau_object_ref(parent, &engctx);
+ }
+
+ /* finally, create new object and bind it to its handle */
+ ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
+ *pobject = object;
+ if (ret)
+ goto fail_ctor;
+
+ ret = nouveau_object_inc(object);
+ if (ret)
+ goto fail_init;
+
+ ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
+ if (ret)
+ goto fail_handle;
+
+ ret = nouveau_handle_init(handle);
+ if (ret)
+ nouveau_handle_destroy(handle);
+
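+/* NOTE: the success path also falls through the unwind below; the labels
+ * only release the temporary references taken earlier in this function.
+ */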
+fail_handle:
+ nouveau_object_dec(object, false);
+fail_init:
+ nouveau_object_ref(NULL, &object);
+fail_ctor:
+ nouveau_object_ref(NULL, &engctx);
+fail_engctx:
+ if (engine)
+ nouveau_object_dec(engine, false);
+fail_class:
+ nouveau_object_ref(NULL, &parent);
+ return ret;
+}
+
+int
+nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
+{
+ struct nouveau_object *parent = NULL;
+ struct nouveau_object *namedb = NULL;
+ struct nouveau_handle *handle = NULL;
+ int ret = -EINVAL;
+
+ parent = nouveau_handle_ref(client, _parent);
+ if (!parent)
+ return -ENOENT;
+
+ namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
+ if (namedb) {
+ handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
+ if (handle) {
+ nouveau_namedb_put(handle);
+ nouveau_handle_fini(handle, false);
+ nouveau_handle_destroy(handle);
+ ret = 0;
+ }
+ }
+
+ nouveau_object_ref(NULL, &parent);
+ return ret;
+}
+
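+/* usecount tracks active users rather than references: the first increment
+ * initialises the object (and its parent/engine chain), and the matching
+ * last decrement tears it back down via fini.
+ */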
+int
+nouveau_object_inc(struct nouveau_object *object)
+{
+ int ref = atomic_add_return(1, &object->usecount);
+ int ret;
+
+ nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount));
+ if (ref != 1)
+ return 0;
+
+ nv_trace(object, "initialising...\n");
+ if (object->parent) {
+ ret = nouveau_object_inc(object->parent);
+ if (ret) {
+ nv_error(object, "parent failed, %d\n", ret);
+ goto fail_parent;
+ }
+ }
+
+ if (object->engine) {
+ mutex_lock(&nv_subdev(object->engine)->mutex);
+ ret = nouveau_object_inc(object->engine);
+ mutex_unlock(&nv_subdev(object->engine)->mutex);
+ if (ret) {
+ nv_error(object, "engine failed, %d\n", ret);
+ goto fail_engine;
+ }
+ }
+
+ ret = nv_ofuncs(object)->init(object);
+ if (ret) {
+ nv_error(object, "init failed, %d\n", ret);
+ goto fail_self;
+ }
+
+ nv_debug(object, "initialised\n");
+ return 0;
+
+fail_self:
+ if (object->engine) {
+ mutex_lock(&nv_subdev(object->engine)->mutex);
+ nouveau_object_dec(object->engine, false);
+ mutex_unlock(&nv_subdev(object->engine)->mutex);
+ }
+fail_engine:
+ if (object->parent)
+ nouveau_object_dec(object->parent, false);
+fail_parent:
+ atomic_dec(&object->usecount);
+ return ret;
+}
+
+static int
+nouveau_object_decf(struct nouveau_object *object)
+{
+ int ret;
+
+ nv_trace(object, "stopping...\n");
+
+ ret = nv_ofuncs(object)->fini(object, false);
+ if (ret)
+ nv_warn(object, "failed fini, %d\n", ret);
+
+ if (object->engine) {
+ mutex_lock(&nv_subdev(object->engine)->mutex);
+ nouveau_object_dec(object->engine, false);
+ mutex_unlock(&nv_subdev(object->engine)->mutex);
+ }
+
+ if (object->parent)
+ nouveau_object_dec(object->parent, false);
+
+ nv_debug(object, "stopped\n");
+ return 0;
+}
+
+static int
+nouveau_object_decs(struct nouveau_object *object)
+{
+ int ret, rret;
+
+ nv_trace(object, "suspending...\n");
+
+ ret = nv_ofuncs(object)->fini(object, true);
+ if (ret) {
+ nv_error(object, "failed suspend, %d\n", ret);
+ return ret;
+ }
+
+ if (object->engine) {
+ mutex_lock(&nv_subdev(object->engine)->mutex);
+ ret = nouveau_object_dec(object->engine, true);
+ mutex_unlock(&nv_subdev(object->engine)->mutex);
+ if (ret) {
+ nv_warn(object, "engine failed suspend, %d\n", ret);
+ goto fail_engine;
+ }
+ }
+
+ if (object->parent) {
+ ret = nouveau_object_dec(object->parent, true);
+ if (ret) {
+ nv_warn(object, "parent failed suspend, %d\n", ret);
+ goto fail_parent;
+ }
+ }
+
+ nv_debug(object, "suspended\n");
+ return 0;
+
+fail_parent:
+ if (object->engine) {
+ mutex_lock(&nv_subdev(object->engine)->mutex);
+ rret = nouveau_object_inc(object->engine);
+ mutex_unlock(&nv_subdev(object->engine)->mutex);
+ if (rret)
+ nv_fatal(object, "engine failed to reinit, %d\n", rret);
+ }
+
+fail_engine:
+ rret = nv_ofuncs(object)->init(object);
+ if (rret)
+ nv_fatal(object, "failed to reinit, %d\n", rret);
+
+ return ret;
+}
+
+int
+nouveau_object_dec(struct nouveau_object *object, bool suspend)
+{
+ int ref = atomic_add_return(-1, &object->usecount);
+ int ret;
+
+ nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount));
+
+ if (ref == 0) {
+ if (suspend)
+ ret = nouveau_object_decs(object);
+ else
+ ret = nouveau_object_decf(object);
+
+ if (ret) {
+ atomic_inc(&object->usecount);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void
+nouveau_object_debug(void)
+{
+#ifdef NOUVEAU_OBJECT_MAGIC
+ struct nouveau_object *object;
+ if (!list_empty(&_objlist)) {
+ nv_fatal(NULL, "*******************************************\n");
+ nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
+ nv_fatal(NULL, "*******************************************\n");
+ list_for_each_entry(object, &_objlist, list) {
+ nv_fatal(object, "%p/%p/%d/%d\n",
+ object->parent, object->engine,
+ atomic_read(&object->refcount),
+ atomic_read(&object->usecount));
+ }
+ }
+#endif
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/core/core/option.c
new file mode 100644
index 0000000000000000000000000000000000000000..62a432ea39e5b2d6b8cadfddee26cb39e7bc94e1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/option.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+#include <core/debug.h>
+
+/* compares unterminated string 'str' with zero-terminated string 'cmp' */
+static inline int
+strncasecmpz(const char *str, const char *cmp, size_t len)
+{
+ if (strlen(cmp) != len)
+ return len;
+ return strncasecmp(str, cmp, len);
+}
+
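+/* Look up 'opt' in an option string of comma-separated, case-insensitive
+ * "name=value" pairs, returning a pointer to the (unterminated) value and
+ * its length in *arglen, or NULL if the option is absent or has no value.
+ */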
+const char *
+nouveau_stropt(const char *optstr, const char *opt, int *arglen)
+{
+ while (optstr && *optstr != '\0') {
+ int len = strcspn(optstr, ",=");
+ switch (optstr[len]) {
+ case '=':
+ if (!strncasecmpz(optstr, opt, len)) {
+ optstr += len + 1;
+ *arglen = strcspn(optstr, ",=");
+ return *arglen ? optstr : NULL;
+ }
+ optstr++;
+ break;
+ case ',':
+ optstr++;
+ break;
+ default:
+ break;
+ }
+ optstr += len;
+ }
+
+ return NULL;
+}
+
+bool
+nouveau_boolopt(const char *optstr, const char *opt, bool value)
+{
+ int arglen;
+
+ optstr = nouveau_stropt(optstr, opt, &arglen);
+ if (optstr) {
+ if (!strncasecmpz(optstr, "0", arglen) ||
+ !strncasecmpz(optstr, "no", arglen) ||
+ !strncasecmpz(optstr, "off", arglen) ||
+ !strncasecmpz(optstr, "false", arglen))
+ value = false;
+ else
+ if (!strncasecmpz(optstr, "1", arglen) ||
+ !strncasecmpz(optstr, "yes", arglen) ||
+ !strncasecmpz(optstr, "on", arglen) ||
+ !strncasecmpz(optstr, "true", arglen))
+ value = true;
+ }
+
+ return value;
+}
+
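+/* Parse a debug option string such as "info,PBUS=debug": a bare level name
+ * applies to every subdev, while "subdev=level" entries take effect only
+ * when 'sub' matches. Returns the selected NV_DBG_* level, or the default.
+ */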
+int
+nouveau_dbgopt(const char *optstr, const char *sub)
+{
+ int mode = 1, level = CONFIG_NOUVEAU_DEBUG_DEFAULT;
+
+ while (optstr) {
+ int len = strcspn(optstr, ",=");
+ switch (optstr[len]) {
+ case '=':
+ if (strncasecmpz(optstr, sub, len))
+ mode = 0;
+ optstr++;
+ break;
+ default:
+ if (mode) {
+ if (!strncasecmpz(optstr, "fatal", len))
+ level = NV_DBG_FATAL;
+ else if (!strncasecmpz(optstr, "error", len))
+ level = NV_DBG_ERROR;
+ else if (!strncasecmpz(optstr, "warn", len))
+ level = NV_DBG_WARN;
+ else if (!strncasecmpz(optstr, "info", len))
+ level = NV_DBG_INFO;
+ else if (!strncasecmpz(optstr, "debug", len))
+ level = NV_DBG_DEBUG;
+ else if (!strncasecmpz(optstr, "trace", len))
+ level = NV_DBG_TRACE;
+ else if (!strncasecmpz(optstr, "paranoia", len))
+ level = NV_DBG_PARANOIA;
+ else if (!strncasecmpz(optstr, "spam", len))
+ level = NV_DBG_SPAM;
+ }
+
+ if (optstr[len] != '\0') {
+ optstr++;
+ mode = 1;
+ break;
+ }
+
+ return level;
+ }
+ optstr += len;
+ }
+
+ return level;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
new file mode 100644
index 0000000000000000000000000000000000000000..a1ea034611d5cb03c08f72a7149e97b02b94dadf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+
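+/* Resolve a class handle to an (engine, oclass) pair: first search the
+ * classes the parent itself exposes, then walk the parent's engine mask
+ * and search each engine's class list.
+ */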
+int
+nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
+ struct nouveau_object **pengine,
+ struct nouveau_oclass **poclass)
+{
+ struct nouveau_sclass *sclass;
+ struct nouveau_engine *engine;
+ struct nouveau_oclass *oclass;
+ u64 mask;
+
+ sclass = nv_parent(parent)->sclass;
+ while (sclass) {
+ if ((sclass->oclass->handle & 0xffff) == handle) {
+ *pengine = parent->engine;
+ *poclass = sclass->oclass;
+ return 0;
+ }
+
+ sclass = sclass->sclass;
+ }
+
+ mask = nv_parent(parent)->engine;
+ while (mask) {
+ int i = ffsll(mask) - 1;
+
+ if ((engine = nouveau_engine(parent, i))) {
+ oclass = engine->sclass;
+ while (oclass->ofuncs) {
+ if ((oclass->handle & 0xffff) == handle) {
+ *pengine = nv_object(engine);
+ *poclass = oclass;
+ return 0;
+ }
+ oclass++;
+ }
+ }
+
+ mask &= ~(1ULL << i);
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_parent_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pclass,
+ struct nouveau_oclass *sclass, u64 engcls,
+ int size, void **pobject)
+{
+ struct nouveau_parent *object;
+ struct nouveau_sclass *nclass;
+ int ret;
+
+ ret = nouveau_object_create_(parent, engine, oclass, pclass |
+ NV_PARENT_CLASS, size, pobject);
+ object = *pobject;
+ if (ret)
+ return ret;
+
+ while (sclass && sclass->ofuncs) {
+ nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
+ if (!nclass)
+ return -ENOMEM;
+
+ nclass->sclass = object->sclass;
+ object->sclass = nclass;
+ nclass->engine = engine ? nv_engine(engine) : NULL;
+ nclass->oclass = sclass;
+ sclass++;
+ }
+
+ object->engine = engcls;
+ return 0;
+}
+
+int
+_nouveau_parent_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_parent *object;
+ int ret;
+
+ ret = nouveau_parent_create(parent, engine, oclass, 0, NULL, 0, &object);
+ *pobject = nv_object(object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+nouveau_parent_destroy(struct nouveau_parent *parent)
+{
+ struct nouveau_sclass *sclass;
+
+ while ((sclass = parent->sclass)) {
+ parent->sclass = sclass->sclass;
+ kfree(sclass);
+ }
+
+ nouveau_object_destroy(&parent->base);
+}
+
+void
+_nouveau_parent_dtor(struct nouveau_object *object)
+{
+ nouveau_parent_destroy(nv_parent(object));
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
new file mode 100644
index 0000000000000000000000000000000000000000..6161eaf5447c222399e062ea5c138d1cee2ca28f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/subdev.h>
+#include <core/printk.h>
+
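+/* Central message formatter: prefixes each message with a level tag
+ * (name[] is indexed by NV_DBG_* level) plus subdev/device or client
+ * names, and drops messages above the relevant debug threshold.
+ */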
+void
+nv_printk_(struct nouveau_object *object, const char *pfx, int level,
+ const char *fmt, ...)
+{
+ static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
+ char mfmt[256];
+ va_list args;
+
+ if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
+ struct nouveau_object *device = object;
+ struct nouveau_object *subdev = object;
+ char obuf[64], *ofmt = "";
+
+ if (object->engine) {
+ snprintf(obuf, sizeof(obuf), "[0x%08x][%p]",
+ nv_hclass(object), object);
+ ofmt = obuf;
+ subdev = object->engine;
+ device = object->engine;
+ }
+
+ if (subdev->parent)
+ device = subdev->parent;
+
+ if (level > nv_subdev(subdev)->debug)
+ return;
+
+ snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx,
+ name[level], nv_subdev(subdev)->name,
+ nv_device(device)->name, ofmt, fmt);
+ } else
+ if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
+ if (level > nv_client(object)->debug)
+ return;
+
+ snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx,
+ name[level], nv_client(object)->name, fmt);
+ } else {
+ snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
+ }
+
+ va_start(args, fmt);
+ vprintk(mfmt, args);
+ va_end(args);
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c
new file mode 100644
index 0000000000000000000000000000000000000000..86a64045dd602973a7c78bcef213bc2d3571d662
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/object.h>
+#include <core/ramht.h>
+#include <core/math.h>
+
+#include <subdev/bar.h>
+
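+/* Fold the handle down to the table's index width, mix in the channel id,
+ * and scale by the 8-byte entry size to give a byte offset into RAMHT.
+ */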
+static u32
+nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
+{
+ u32 hash = 0;
+
+ while (handle) {
+ hash ^= (handle & ((1 << ramht->bits) - 1));
+ handle >>= ramht->bits;
+ }
+
+ hash ^= chid << (ramht->bits - 4);
+ hash = hash << 3;
+ return hash;
+}
+
+int
+nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
+ u32 handle, u32 context)
+{
+ struct nouveau_bar *bar = nouveau_bar(ramht);
+ u32 co, ho;
+
+ co = ho = nouveau_ramht_hash(ramht, chid, handle);
+ do {
+ if (!nv_ro32(ramht, co + 4)) {
+ nv_wo32(ramht, co + 0, handle);
+ nv_wo32(ramht, co + 4, context);
+ if (bar)
+ bar->flush(bar);
+ return co;
+ }
+
+ co += 8;
+ if (co >= nv_gpuobj(ramht)->size)
+ co = 0;
+ } while (co != ho);
+
+ return -ENOMEM;
+}
+
+void
+nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
+{
+ struct nouveau_bar *bar = nouveau_bar(ramht);
+ nv_wo32(ramht, cookie + 0, 0x00000000);
+ nv_wo32(ramht, cookie + 4, 0x00000000);
+ if (bar)
+ bar->flush(bar);
+}
+
+static struct nouveau_oclass
+nouveau_ramht_oclass = {
+ .handle = 0x0000abcd,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = NULL,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+ },
+};
+
+int
+nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
+ u32 size, u32 align, struct nouveau_ramht **pramht)
+{
+ struct nouveau_ramht *ramht;
+ int ret;
+
+ ret = nouveau_gpuobj_create(parent, parent->engine ?
+ parent->engine : parent, /* <nv50 want NULL */
+ &nouveau_ramht_oclass, 0, pargpu, size,
+ align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
+ *pramht = ramht;
+ if (ret)
+ return ret;
+
+ ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
new file mode 100644
index 0000000000000000000000000000000000000000..f74c30aa33a0e334eda733ae1a0ea5b9a3d10377
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/subdev.h>
+#include <core/device.h>
+#include <core/option.h>
+
+void
+nouveau_subdev_reset(struct nouveau_object *subdev)
+{
+ nv_trace(subdev, "resetting...\n");
+ nv_ofuncs(subdev)->fini(subdev, false);
+ nv_debug(subdev, "reset\n");
+}
+
+int
+nouveau_subdev_init(struct nouveau_subdev *subdev)
+{
+ int ret = nouveau_object_init(&subdev->base);
+ if (ret)
+ return ret;
+
+ nouveau_subdev_reset(&subdev->base);
+ return 0;
+}
+
+int
+_nouveau_subdev_init(struct nouveau_object *object)
+{
+ return nouveau_subdev_init(nv_subdev(object));
+}
+
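+/* On the way down, pulse the subdev's unit bit(s) in PMC_ENABLE (0x000200)
+ * off and back on to reset the hardware unit before the generic fini.
+ */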
+int
+nouveau_subdev_fini(struct nouveau_subdev *subdev, bool suspend)
+{
+ if (subdev->unit) {
+ nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
+ nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
+ }
+
+ return nouveau_object_fini(&subdev->base, suspend);
+}
+
+int
+_nouveau_subdev_fini(struct nouveau_object *object, bool suspend)
+{
+ return nouveau_subdev_fini(nv_subdev(object), suspend);
+}
+
+void
+nouveau_subdev_destroy(struct nouveau_subdev *subdev)
+{
+ int subidx = nv_hclass(subdev) & 0xff;
+ nv_device(subdev)->subdev[subidx] = NULL;
+ nouveau_object_destroy(&subdev->base);
+}
+
+void
+_nouveau_subdev_dtor(struct nouveau_object *object)
+{
+ nouveau_subdev_destroy(nv_subdev(object));
+}
+
+int
+nouveau_subdev_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pclass,
+ const char *subname, const char *sysname,
+ int size, void **pobject)
+{
+ struct nouveau_subdev *subdev;
+ int ret;
+
+ ret = nouveau_object_create_(parent, engine, oclass, pclass |
+ NV_SUBDEV_CLASS, size, pobject);
+ subdev = *pobject;
+ if (ret)
+ return ret;
+
+ mutex_init(&subdev->mutex);
+ subdev->name = subname;
+
+ if (parent) {
+ struct nouveau_device *device = nv_device(parent);
+ int subidx = nv_hclass(subdev) & 0xff;
+
+ subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
+ subdev->mmio = nv_subdev(device)->mmio;
+ device->subdev[subidx] = *pobject;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
new file mode 100644
index 0000000000000000000000000000000000000000..66f7dfd907ee506035143513135f91345183e4d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/class.h>
+
+#include <engine/bsp.h>
+
+struct nv84_bsp_priv {
+ struct nouveau_bsp base;
+};
+
+struct nv84_bsp_chan {
+ struct nouveau_bsp_chan base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_bsp_sclass[] = {
+ {},
+};
+
+/*******************************************************************************
+ * BSP context
+ ******************************************************************************/
+
+static int
+nv84_bsp_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv84_bsp_chan *priv;
+ int ret;
+
+ ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
+ 0, 0, 0, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+nv84_bsp_context_dtor(struct nouveau_object *object)
+{
+ struct nv84_bsp_chan *priv = (void *)object;
+ nouveau_bsp_context_destroy(&priv->base);
+}
+
+static int
+nv84_bsp_context_init(struct nouveau_object *object)
+{
+ struct nv84_bsp_chan *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bsp_context_init(&priv->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv84_bsp_chan *priv = (void *)object;
+ return nouveau_bsp_context_fini(&priv->base, suspend);
+}
+
+static struct nouveau_oclass
+nv84_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_bsp_context_ctor,
+ .dtor = nv84_bsp_context_dtor,
+ .init = nv84_bsp_context_init,
+ .fini = nv84_bsp_context_fini,
+ .rd32 = _nouveau_bsp_context_rd32,
+ .wr32 = _nouveau_bsp_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * BSP engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv84_bsp_intr(struct nouveau_subdev *subdev)
+{
+}
+
+static int
+nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv84_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_bsp_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x04008000;
+ nv_subdev(priv)->intr = nv84_bsp_intr;
+ nv_engine(priv)->cclass = &nv84_bsp_cclass;
+ nv_engine(priv)->sclass = nv84_bsp_sclass;
+ return 0;
+}
+
+static void
+nv84_bsp_dtor(struct nouveau_object *object)
+{
+ struct nv84_bsp_priv *priv = (void *)object;
+ nouveau_bsp_destroy(&priv->base);
+}
+
+static int
+nv84_bsp_init(struct nouveau_object *object)
+{
+ struct nv84_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bsp_init(&priv->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+nv84_bsp_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv84_bsp_priv *priv = (void *)object;
+ return nouveau_bsp_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv84_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_bsp_ctor,
+ .dtor = nv84_bsp_dtor,
+ .init = nv84_bsp_init,
+ .fini = nv84_bsp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
similarity index 100%
rename from drivers/gpu/drm/nouveau/nva3_copy.fuc
rename to drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
similarity index 99%
rename from drivers/gpu/drm/nouveau/nva3_copy.fuc.h
rename to drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
index 37d6de3c9d618c653e1663f8692de48a6ff7176f..c92520f3ed46f2c11d49de137ae366ddb4e0fc8c 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
@@ -1,4 +1,4 @@
-u32 nva3_pcopy_data[] = {
+static u32 nva3_pcopy_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ u32 nva3_pcopy_data[] = {
0x00000800,
};
-u32 nva3_pcopy_code[] = {
+static u32 nva3_pcopy_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
similarity index 99%
rename from drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
rename to drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
index cd879f31bb38b595a18269b475717d7e70e2cd7b..0d98c6c0958d5f9853a428d242d934ccd3b162ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
@@ -1,4 +1,4 @@
-u32 nvc0_pcopy_data[] = {
+static u32 nvc0_pcopy_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ u32 nvc0_pcopy_data[] = {
0x00000800,
};
-u32 nvc0_pcopy_code[] = {
+static u32 nvc0_pcopy_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
new file mode 100644
index 0000000000000000000000000000000000000000..4df6da0af7402ea616ae15349f237e80334440ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+#include <engine/fifo.h>
+#include <engine/copy.h>
+
+#include "fuc/nva3.fuc.h"
+
+struct nva3_copy_priv {
+ struct nouveau_copy base;
+};
+
+struct nva3_copy_chan {
+ struct nouveau_copy_chan base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nva3_copy_sclass[] = {
+ { 0x85b5, &nouveau_object_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static int
+nva3_copy_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nva3_copy_chan *priv;
+ int ret;
+
+ ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct nouveau_oclass
+nva3_copy_cclass = {
+ .handle = NV_ENGCTX(COPY0, 0xa3),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_copy_context_ctor,
+ .dtor = _nouveau_copy_context_dtor,
+ .init = _nouveau_copy_context_init,
+ .fini = _nouveau_copy_context_fini,
+ .rd32 = _nouveau_copy_context_rd32,
+ .wr32 = _nouveau_copy_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_enum nva3_copy_isr_error_name[] = {
+ { 0x0001, "ILLEGAL_MTHD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "INVALID_BITFIELD" },
+ {}
+};
+
+static void
+nva3_copy_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_object *engctx;
+ struct nva3_copy_priv *priv = (void *)subdev;
+ u32 dispatch = nv_rd32(priv, 0x10401c);
+ u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
+ u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
+ u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
+ u32 addr = nv_rd32(priv, 0x104040) >> 16;
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_rd32(priv, 0x104044);
+ int chid;
+
+ engctx = nouveau_engctx_get(engine, inst);
+ chid = pfifo->chid(pfifo, engctx);
+
+ if (stat & 0x00000040) {
+ nv_error(priv, "DISPATCH_ERROR [");
+ nouveau_enum_print(nva3_copy_isr_error_name, ssta);
+ printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, inst << 12, subc, mthd, data);
+ nv_wr32(priv, 0x104004, 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ nv_error(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x104004, stat);
+ }
+
+ nv50_fb_trap(nouveau_fb(priv), 1);
+ nouveau_engctx_put(engctx);
+}
+
+static int
+nva3_copy_tlb_flush(struct nouveau_engine *engine)
+{
+ nv50_vm_flush_engine(&engine->base, 0x0d);
+ return 0;
+}
+
+static int
+nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ bool enable = (nv_device(parent)->chipset != 0xaf);
+ struct nva3_copy_priv *priv;
+ int ret;
+
+ ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00802000;
+ nv_subdev(priv)->intr = nva3_copy_intr;
+ nv_engine(priv)->cclass = &nva3_copy_cclass;
+ nv_engine(priv)->sclass = nva3_copy_sclass;
+ nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+ return 0;
+}
+
+static int
+nva3_copy_init(struct nouveau_object *object)
+{
+ struct nva3_copy_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_copy_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x104014, 0xffffffff);
+
+ /* upload ucode */
+ nv_wr32(priv, 0x1041c0, 0x01000000);
+ for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
+ nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
+
+ nv_wr32(priv, 0x104180, 0x01000000);
+ for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(priv, 0x104188, i >> 6);
+ nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
+ }
+
+ /* start it running */
+ nv_wr32(priv, 0x10410c, 0x00000000);
+ nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
+ nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
+ return 0;
+}
+
+static int
+nva3_copy_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nva3_copy_priv *priv = (void *)object;
+
+ nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
+ nv_wr32(priv, 0x104014, 0xffffffff);
+
+ return nouveau_copy_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nva3_copy_oclass = {
+ .handle = NV_ENGINE(COPY0, 0xa3),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_copy_ctor,
+ .dtor = _nouveau_copy_dtor,
+ .init = nva3_copy_init,
+ .fini = nva3_copy_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
new file mode 100644
index 0000000000000000000000000000000000000000..06d4a879105565d4f1e336a2e1bcf7d489f78dea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/fifo.h>
+#include <engine/copy.h>
+
+#include "fuc/nvc0.fuc.h"
+
+struct nvc0_copy_priv {
+ struct nouveau_copy base;
+};
+
+struct nvc0_copy_chan {
+ struct nouveau_copy_chan base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_copy0_sclass[] = {
+ { 0x90b5, &nouveau_object_ofuncs },
+ {},
+};
+
+static struct nouveau_oclass
+nvc0_copy1_sclass[] = {
+ { 0x90b8, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static int
+nvc0_copy_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_copy_chan *priv;
+ int ret;
+
+ ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
+ 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nvc0_copy_context_ofuncs = {
+ .ctor = nvc0_copy_context_ctor,
+ .dtor = _nouveau_copy_context_dtor,
+ .init = _nouveau_copy_context_init,
+ .fini = _nouveau_copy_context_fini,
+ .rd32 = _nouveau_copy_context_rd32,
+ .wr32 = _nouveau_copy_context_wr32,
+};
+
+static struct nouveau_oclass
+nvc0_copy0_cclass = {
+ .handle = NV_ENGCTX(COPY0, 0xc0),
+ .ofuncs = &nvc0_copy_context_ofuncs,
+};
+
+static struct nouveau_oclass
+nvc0_copy1_cclass = {
+ .handle = NV_ENGCTX(COPY1, 0xc0),
+ .ofuncs = &nvc0_copy_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
+ { 0x0001, "ILLEGAL_MTHD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "INVALID_BITFIELD" },
+ {}
+};
+
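+/* Both PCOPY engines share this handler; their register blocks are laid
+ * out at a 0x1000 stride, indexed by engine number.
+ */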
+static void
+nvc0_copy_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_object *engctx;
+ int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
+ struct nvc0_copy_priv *priv = (void *)subdev;
+ u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
+ u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
+ u32 stat = intr & disp & ~(disp >> 16);
+ u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
+ u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
+ u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
+ int chid;
+
+ engctx = nouveau_engctx_get(engine, inst);
+ chid = pfifo->chid(pfifo, engctx);
+
+ if (stat & 0x00000040) {
+ nv_error(priv, "DISPATCH_ERROR [");
+ nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
+ printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, subc, mthd, data);
+ nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ nv_error(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
+ }
+
+ nouveau_engctx_put(engctx);
+}
+
+static int
+nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_copy_priv *priv;
+ int ret;
+
+ if (nv_rd32(parent, 0x022500) & 0x00000100)
+ return -ENODEV;
+
+ ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000040;
+ nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_engine(priv)->cclass = &nvc0_copy0_cclass;
+ nv_engine(priv)->sclass = nvc0_copy0_sclass;
+ return 0;
+}
+
+static int
+nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_copy_priv *priv;
+ int ret;
+
+ if (nv_rd32(parent, 0x022500) & 0x00000200)
+ return -ENODEV;
+
+ ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000080;
+ nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_engine(priv)->cclass = &nvc0_copy1_cclass;
+ nv_engine(priv)->sclass = nvc0_copy1_sclass;
+ return 0;
+}
+
+static int
+nvc0_copy_init(struct nouveau_object *object)
+{
+ int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
+ struct nvc0_copy_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_copy_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
+
+ /* upload ucode */
+ nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
+ for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
+ nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
+
+ nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
+ for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
+ nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
+ }
+
+ /* start it running */
+ nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
+ nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
+ nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
+ return 0;
+}
+
+static int
+nvc0_copy_fini(struct nouveau_object *object, bool suspend)
+{
+ int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
+ struct nvc0_copy_priv *priv = (void *)object;
+
+ nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
+ nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
+
+ return nouveau_copy_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nvc0_copy0_oclass = {
+ .handle = NV_ENGINE(COPY0, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_copy0_ctor,
+ .dtor = _nouveau_copy_dtor,
+ .init = nvc0_copy_init,
+ .fini = nvc0_copy_fini,
+ },
+};
+
+struct nouveau_oclass
+nvc0_copy1_oclass = {
+ .handle = NV_ENGINE(COPY1, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_copy1_ctor,
+ .dtor = _nouveau_copy_dtor,
+ .init = nvc0_copy_init,
+ .fini = nvc0_copy_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
new file mode 100644
index 0000000000000000000000000000000000000000..2017c1579ac5b5c51c88b100e74e5754f164eba6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/copy.h>
+
+struct nve0_copy_priv {
+ struct nouveau_copy base;
+};
+
+struct nve0_copy_chan {
+ struct nouveau_copy_chan base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_copy_sclass[] = {
+ { 0xa0b5, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static int
+nve0_copy_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_copy_chan *priv;
+ int ret;
+
+ ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
+ 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nve0_copy_context_ofuncs = {
+ .ctor = nve0_copy_context_ctor,
+ .dtor = _nouveau_copy_context_dtor,
+ .init = _nouveau_copy_context_init,
+ .fini = _nouveau_copy_context_fini,
+ .rd32 = _nouveau_copy_context_rd32,
+ .wr32 = _nouveau_copy_context_wr32,
+};
+
+static struct nouveau_oclass
+nve0_copy_cclass = {
+ .handle = NV_ENGCTX(COPY0, 0xc0),
+ .ofuncs = &nve0_copy_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_copy_priv *priv;
+ int ret;
+
+ if (nv_rd32(parent, 0x022500) & 0x00000100)
+ return -ENODEV;
+
+ ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000040;
+ nv_engine(priv)->cclass = &nve0_copy_cclass;
+ nv_engine(priv)->sclass = nve0_copy_sclass;
+ return 0;
+}
+
+static int
+nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_copy_priv *priv;
+ int ret;
+
+ if (nv_rd32(parent, 0x022500) & 0x00000200)
+ return -ENODEV;
+
+ ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000080;
+ nv_engine(priv)->cclass = &nve0_copy_cclass;
+ nv_engine(priv)->sclass = nve0_copy_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_copy0_oclass = {
+ .handle = NV_ENGINE(COPY0, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_copy0_ctor,
+ .dtor = _nouveau_copy_dtor,
+ .init = _nouveau_copy_init,
+ .fini = _nouveau_copy_fini,
+ },
+};
+
+struct nouveau_oclass
+nve0_copy1_oclass = {
+ .handle = NV_ENGINE(COPY1, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_copy1_ctor,
+ .dtor = _nouveau_copy_dtor,
+ .init = _nouveau_copy_init,
+ .fini = _nouveau_copy_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
similarity index 99%
rename from drivers/gpu/drm/nouveau/nv98_crypt.fuc
rename to drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
index 7393813044de42c09cdbcb26cca2be5b9c24e7f6..629da02dc352ab091cdaa15c388fa137e4b8b629 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
@@ -238,7 +238,7 @@ ih:
cmpu b32 $r4 0x60+#dma_count
bra nc #illegal_mthd
shl b32 $r5 $r4 2
- add b32 $r5 (#ctx_dma - 0x60 * 4) & 0xffff
+ add b32 $r5 ((#ctx_dma - 0x60 * 4) & 0xffff)
bset $r3 0x1e
st b32 D[$r5] $r3
add b32 $r4 0x180 - 0x60
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
similarity index 99%
rename from drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
rename to drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
index 38676c74e6e04873e071f34a8e4863366e42bdf8..09962e4210e9e617033aceedad79542528f52d9f 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
@@ -1,4 +1,4 @@
-uint32_t nv98_pcrypt_data[] = {
+static uint32_t nv98_pcrypt_data[] = {
/* 0x0000: ctx_dma */
/* 0x0000: ctx_dma_query */
0x00000000,
@@ -150,7 +150,7 @@ uint32_t nv98_pcrypt_data[] = {
0x00000000,
};
-uint32_t nv98_pcrypt_code[] = {
+static uint32_t nv98_pcrypt_code[] = {
0x17f004bd,
0x0010fe35,
0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
new file mode 100644
index 0000000000000000000000000000000000000000..1d85e5b66ca0041be9e514771f24e992679196e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+#include <engine/crypt.h>
+
+struct nv84_crypt_priv {
+ struct nouveau_crypt base;
+};
+
+struct nv84_crypt_chan {
+ struct nouveau_crypt_chan base;
+};
+
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
+
+static int
+nv84_crypt_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_gpuobj *obj;
+ int ret;
+
+ ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+ 16, 16, 0, &obj);
+ *pobject = nv_object(obj);
+ if (ret)
+ return ret;
+
+ nv_wo32(obj, 0x00, nv_mclass(obj));
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nv84_crypt_ofuncs = {
+ .ctor = nv84_crypt_object_ctor,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv84_crypt_sclass[] = {
+ { 0x74c1, &nv84_crypt_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
+
+static int
+nv84_crypt_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv84_crypt_chan *priv;
+ int ret;
+
+ ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
+ 0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct nouveau_oclass
+nv84_crypt_cclass = {
+ .handle = NV_ENGCTX(CRYPT, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_crypt_context_ctor,
+ .dtor = _nouveau_crypt_context_dtor,
+ .init = _nouveau_crypt_context_init,
+ .fini = _nouveau_crypt_context_fini,
+ .rd32 = _nouveau_crypt_context_rd32,
+ .wr32 = _nouveau_crypt_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_bitfield nv84_crypt_intr_mask[] = {
+ { 0x00000001, "INVALID_STATE" },
+ { 0x00000002, "ILLEGAL_MTHD" },
+ { 0x00000004, "ILLEGAL_CLASS" },
+ { 0x00000080, "QUERY" },
+ { 0x00000100, "FAULT" },
+ {}
+};
+
+static void
+nv84_crypt_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_object *engctx;
+ struct nv84_crypt_priv *priv = (void *)subdev;
+ u32 stat = nv_rd32(priv, 0x102130);
+ u32 mthd = nv_rd32(priv, 0x102190);
+ u32 data = nv_rd32(priv, 0x102194);
+ u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
+ int chid;
+
+ engctx = nouveau_engctx_get(engine, inst);
+ chid = pfifo->chid(pfifo, engctx);
+
+ if (stat) {
+ nv_error(priv, "");
+ nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
+ printk(" ch %d [0x%010llx] mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, mthd, data);
+ }
+
+ nv_wr32(priv, 0x102130, stat);
+ nv_wr32(priv, 0x10200c, 0x10);
+
+ nv50_fb_trap(nouveau_fb(priv), 1);
+ nouveau_engctx_put(engctx);
+}
+
+static int
+nv84_crypt_tlb_flush(struct nouveau_engine *engine)
+{
+ nv50_vm_flush_engine(&engine->base, 0x0a);
+ return 0;
+}
+
+static int
+nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv84_crypt_priv *priv;
+ int ret;
+
+ ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00004000;
+ nv_subdev(priv)->intr = nv84_crypt_intr;
+ nv_engine(priv)->cclass = &nv84_crypt_cclass;
+ nv_engine(priv)->sclass = nv84_crypt_sclass;
+ nv_engine(priv)->tlb_flush = nv84_crypt_tlb_flush;
+ return 0;
+}
+
+static int
+nv84_crypt_init(struct nouveau_object *object)
+{
+ struct nv84_crypt_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_crypt_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x102130, 0xffffffff);
+ nv_wr32(priv, 0x102140, 0xffffffbf);
+ nv_wr32(priv, 0x10200c, 0x00000010);
+ return 0;
+}
+
+struct nouveau_oclass
+nv84_crypt_oclass = {
+ .handle = NV_ENGINE(CRYPT, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_crypt_ctor,
+ .dtor = _nouveau_crypt_dtor,
+ .init = nv84_crypt_init,
+ .fini = _nouveau_crypt_fini,
+ },
+};
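
One convention worth noting in nv84_crypt_object_ctor() and the other constructors above: *pobject is assigned before the return code is checked, so even on failure the caller holds whatever was partially built and can run the matching destructor on it. A user-space model of that contract, with all names invented:

#include <stdlib.h>

struct object { int dummy; };

static int object_ctor(struct object **pobject)
{
	struct object *obj = calloc(1, sizeof(*obj));

	*pobject = obj;          /* publish before checking for failure */
	if (!obj)
		return -1;       /* caller still sees *pobject (here NULL) */
	return 0;
}

int main(void)
{
	struct object *obj;
	int ret = object_ctor(&obj);

	free(obj);               /* one teardown path regardless of ret */
	return ret ? 1 : 0;
}
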
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
new file mode 100644
index 0000000000000000000000000000000000000000..9e3876c89b9666e8700c3631b2298238115da1f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include "fuc/nv98.fuc.h"
+
+struct nv98_crypt_priv {
+ struct nouveau_crypt base;
+};
+
+struct nv98_crypt_chan {
+ struct nouveau_crypt_chan base;
+};
+
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_crypt_sclass[] = {
+ { 0x88b4, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
+
+static int
+nv98_crypt_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv98_crypt_chan *priv;
+ int ret;
+
+ ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
+ 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct nouveau_oclass
+nv98_crypt_cclass = {
+ .handle = NV_ENGCTX(CRYPT, 0x98),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv98_crypt_context_ctor,
+ .dtor = _nouveau_crypt_context_dtor,
+ .init = _nouveau_crypt_context_init,
+ .fini = _nouveau_crypt_context_fini,
+ .rd32 = _nouveau_crypt_context_rd32,
+ .wr32 = _nouveau_crypt_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_enum nv98_crypt_isr_error_name[] = {
+ { 0x0000, "ILLEGAL_MTHD" },
+ { 0x0001, "INVALID_BITFIELD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "QUERY" },
+ {}
+};
+
+static void
+nv98_crypt_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_object *engctx;
+ struct nv98_crypt_priv *priv = (void *)subdev;
+ u32 disp = nv_rd32(priv, 0x08701c);
+ u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
+ u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
+ u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
+ u32 addr = nv_rd32(priv, 0x087040) >> 16;
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_rd32(priv, 0x087044);
+ int chid;
+
+ engctx = nouveau_engctx_get(engine, inst);
+ chid = pfifo->chid(pfifo, engctx);
+
+ if (stat & 0x00000040) {
+ nv_error(priv, "DISPATCH_ERROR [");
+ nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
+ printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, subc, mthd, data);
+ nv_wr32(priv, 0x087004, 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ nv_error(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x087004, stat);
+ }
+
+ nv50_fb_trap(nouveau_fb(priv), 1);
+ nouveau_engctx_put(engctx);
+}
+
+static int
+nv98_crypt_tlb_flush(struct nouveau_engine *engine)
+{
+ nv50_vm_flush_engine(&engine->base, 0x0a);
+ return 0;
+}
+
+static int
+nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv98_crypt_priv *priv;
+ int ret;
+
+ ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00004000;
+ nv_subdev(priv)->intr = nv98_crypt_intr;
+ nv_engine(priv)->cclass = &nv98_crypt_cclass;
+ nv_engine(priv)->sclass = nv98_crypt_sclass;
+ nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
+ return 0;
+}
+
+static int
+nv98_crypt_init(struct nouveau_object *object)
+{
+ struct nv98_crypt_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_crypt_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* wait for exit interrupt to signal */
+ nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x087004, 0x00000010);
+
+ /* upload microcode code and data segments */
+ nv_wr32(priv, 0x087ff8, 0x00100000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
+ nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
+
+ nv_wr32(priv, 0x087ff8, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
+ nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
+
+ /* start it running */
+ nv_wr32(priv, 0x08710c, 0x00000000);
+ nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
+ nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
+ return 0;
+}
+
+struct nouveau_oclass
+nv98_crypt_oclass = {
+ .handle = NV_ENGINE(CRYPT, 0x98),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv98_crypt_ctor,
+ .dtor = _nouveau_crypt_dtor,
+ .init = nv98_crypt_init,
+ .fini = _nouveau_crypt_fini,
+ },
+};
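
nv98_crypt_init() uploads the fuc image by writing a segment selector to 0x087ff8 and then streaming words through the auto-incrementing port at 0x087ff4, once for code (selector 0x00100000) and once for data (selector 0). A stand-alone model of that write pattern, with plain arrays standing in for the falcon's memory; the selector-to-segment mapping here is an assumption for illustration:

#include <stdint.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static uint32_t falcon_code[0x800], falcon_data[0x800];
static uint32_t *port;                   /* auto-incrementing data port */

static void select_segment(uint32_t sel)
{
	port = (sel & 0x00100000) ? falcon_code : falcon_data;
}

static void write_port(uint32_t v)
{
	*port++ = v;                     /* each write advances the port */
}

int main(void)
{
	static const uint32_t code[] = { 0x17f004bd, 0x0010fe35 };
	static const uint32_t data[] = { 0x00000000 };
	size_t i;

	select_segment(0x00100000);      /* code segment, as in the driver */
	for (i = 0; i < ARRAY_SIZE(code); i++)
		write_port(code[i]);

	select_segment(0x00000000);      /* data segment */
	for (i = 0; i < ARRAY_SIZE(data); i++)
		write_port(data[i]);
	return 0;
}
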
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
new file mode 100644
index 0000000000000000000000000000000000000000..1c919f2af89f818757c75be470bf3e617df472ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include
+
+struct nv04_disp_priv {
+ struct nouveau_disp base;
+};
+
+static struct nouveau_oclass
+nv04_disp_sclass[] = {
+ {},
+};
+
+static void
+nv04_disp_intr_vblank(struct nv04_disp_priv *priv, int crtc)
+{
+ struct nouveau_disp *disp = &priv->base;
+ if (disp->vblank.notify)
+ disp->vblank.notify(disp->vblank.data, crtc);
+}
+
+static void
+nv04_disp_intr(struct nouveau_subdev *subdev)
+{
+ struct nv04_disp_priv *priv = (void *)subdev;
+ u32 crtc0 = nv_rd32(priv, 0x600100);
+ u32 crtc1 = nv_rd32(priv, 0x602100);
+
+ if (crtc0 & 0x00000001) {
+ nv04_disp_intr_vblank(priv, 0);
+ nv_wr32(priv, 0x600100, 0x00000001);
+ }
+
+ if (crtc1 & 0x00000001) {
+ nv04_disp_intr_vblank(priv, 1);
+ nv_wr32(priv, 0x602100, 0x00000001);
+ }
+}
+
+static int
+nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "DISPLAY",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv04_disp_sclass;
+ nv_subdev(priv)->intr = nv04_disp_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nv04_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
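
The interrupt path above reduces a vblank to one thing: check and fire the disp->vblank.notify hook, leaving the meaning of the event to whoever registered it. The same indirection reduced to user-space C, names illustrative:

#include <stdio.h>

struct disp {
	void (*notify)(void *data, int crtc);
	void *data;
};

static void on_vblank(void *data, int crtc)
{
	printf("%s: vblank on crtc %d\n", (const char *)data, crtc);
}

int main(void)
{
	struct disp disp = { .notify = on_vblank, .data = "consumer" };

	if (disp.notify)         /* same guard as nv04_disp_intr_vblank() */
		disp.notify(disp.data, 0);
	return 0;
}
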
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
new file mode 100644
index 0000000000000000000000000000000000000000..16a9afb1060b72372ce4cd879870ec362dc9c0e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include
+#include
+
+struct nv50_disp_priv {
+ struct nouveau_disp base;
+};
+
+static struct nouveau_oclass
+nv50_disp_sclass[] = {
+ {},
+};
+
+static void
+nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
+{
+ struct nouveau_disp *disp = &priv->base;
+ struct nouveau_software_chan *chan, *temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&disp->vblank.lock, flags);
+ list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
+ if (chan->vblank.crtc != crtc)
+ continue;
+
+ nv_wr32(priv, 0x001704, chan->vblank.channel);
+ nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+
+ if (nv_device(priv)->chipset == 0x50) {
+ nv_wr32(priv, 0x001570, chan->vblank.offset);
+ nv_wr32(priv, 0x001574, chan->vblank.value);
+ } else {
+ if (nv_device(priv)->chipset >= 0xc0) {
+ nv_wr32(priv, 0x06000c,
+ upper_32_bits(chan->vblank.offset));
+ }
+ nv_wr32(priv, 0x060010, chan->vblank.offset);
+ nv_wr32(priv, 0x060014, chan->vblank.value);
+ }
+
+ list_del(&chan->vblank.head);
+ if (disp->vblank.put)
+ disp->vblank.put(disp->vblank.data, crtc);
+ }
+ spin_unlock_irqrestore(&disp->vblank.lock, flags);
+
+ if (disp->vblank.notify)
+ disp->vblank.notify(disp->vblank.data, crtc);
+}
+
+static void
+nv50_disp_intr(struct nouveau_subdev *subdev)
+{
+ struct nv50_disp_priv *priv = (void *)subdev;
+ u32 stat1 = nv_rd32(priv, 0x610024);
+
+ if (stat1 & 0x00000004) {
+ nv50_disp_intr_vblank(priv, 0);
+ nv_wr32(priv, 0x610024, 0x00000004);
+ stat1 &= ~0x00000004;
+ }
+
+ if (stat1 & 0x00000008) {
+ nv50_disp_intr_vblank(priv, 1);
+ nv_wr32(priv, 0x610024, 0x00000008);
+ stat1 &= ~0x00000008;
+ }
+}
+
+static int
+nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv50_disp_sclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
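
nv50_disp_intr_vblank() walks the waiter list with list_for_each_entry_safe because matching entries are unlinked mid-iteration. A hand-rolled user-space rendition of the same delete-while-walking pattern, structure names invented:

#include <stdlib.h>

struct waiter {
	int crtc;
	struct waiter *next;
};

static void signal_crtc(struct waiter **head, int crtc)
{
	struct waiter **pp = head, *w;

	while ((w = *pp) != NULL) {
		if (w->crtc != crtc) {   /* not our head: skip */
			pp = &w->next;
			continue;
		}
		*pp = w->next;           /* unlink first, as the _safe walk */
		free(w);                 /* does, so freeing is harmless    */
	}
}

int main(void)
{
	struct waiter *head = calloc(1, sizeof(*head));

	head->crtc = 0;
	signal_crtc(&head, 0);           /* unlinks and frees the waiter */
	return head != NULL;             /* 0: list is empty again */
}
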
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
new file mode 100644
index 0000000000000000000000000000000000000000..d93efbcf75b8a182302499ce32e658e52d6fe202
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include
+
+#include
+#include
+
+struct nvd0_disp_priv {
+ struct nouveau_disp base;
+};
+
+static struct nouveau_oclass
+nvd0_disp_sclass[] = {
+ {},
+};
+
+static void
+nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
+{
+ struct nouveau_bar *bar = nouveau_bar(priv);
+ struct nouveau_disp *disp = &priv->base;
+ struct nouveau_software_chan *chan, *temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&disp->vblank.lock, flags);
+ list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
+ if (chan->vblank.crtc != crtc)
+ continue;
+
+ nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
+ bar->flush(bar);
+ nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060014, chan->vblank.value);
+
+ list_del(&chan->vblank.head);
+ if (disp->vblank.put)
+ disp->vblank.put(disp->vblank.data, crtc);
+ }
+ spin_unlock_irqrestore(&disp->vblank.lock, flags);
+
+ if (disp->vblank.notify)
+ disp->vblank.notify(disp->vblank.data, crtc);
+}
+
+static void
+nvd0_disp_intr(struct nouveau_subdev *subdev)
+{
+ struct nvd0_disp_priv *priv = (void *)subdev;
+ u32 intr = nv_rd32(priv, 0x610088);
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ u32 mask = 0x01000000 << i;
+ if (mask & intr) {
+ u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
+ if (stat & 0x00000001)
+ nvd0_disp_intr_vblank(priv, i);
+ nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
+ nv_rd32(priv, 0x6100c0 + (i * 0x800));
+ }
+ }
+}
+
+static int
+nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvd0_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nvd0_disp_sclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nvd0_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
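
nvd0_disp_intr() fans one status word out to up to four heads by shifting a per-head mask; the same demultiplexing loop in isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t intr = 0x05000000;      /* pretend heads 0 and 2 fired */
	int i;

	for (i = 0; i < 4; i++) {
		uint32_t mask = 0x01000000u << i;
		if (intr & mask)
			printf("head %d interrupt\n", i);
	}
	return 0;
}
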
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
new file mode 100644
index 0000000000000000000000000000000000000000..5a1c68474597cbc979eb216735941714b1ce4901
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include
+#include
+#include
+
+u8
+nv_rdport(void *obj, int head, u16 port)
+{
+ struct nouveau_device *device = nv_device(obj);
+
+ if (device->card_type >= NV_50)
+ return nv_rd08(obj, 0x601000 + port);
+
+ if (port == 0x03c0 || port == 0x03c1 || /* AR */
+ port == 0x03c2 || port == 0x03da || /* INP0 */
+ port == 0x03d4 || port == 0x03d5) /* CR */
+ return nv_rd08(obj, 0x601000 + (head * 0x2000) + port);
+
+ if (port == 0x03c2 || port == 0x03cc || /* MISC */
+ port == 0x03c4 || port == 0x03c5 || /* SR */
+ port == 0x03ce || port == 0x03cf) { /* GR */
+ if (device->card_type < NV_40)
+ head = 0; /* CR44 selects head */
+ return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port);
+ }
+
+ nv_error(obj, "unknown vga port 0x%04x\n", port);
+ return 0x00;
+}
+
+void
+nv_wrport(void *obj, int head, u16 port, u8 data)
+{
+ struct nouveau_device *device = nv_device(obj);
+
+ if (device->card_type >= NV_50)
+ nv_wr08(obj, 0x601000 + port, data);
+ else
+ if (port == 0x03c0 || port == 0x03c1 || /* AR */
+ port == 0x03c2 || port == 0x03da || /* INP0 */
+ port == 0x03d4 || port == 0x03d5) /* CR */
+ nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data);
+ else
+ if (port == 0x03c2 || port == 0x03cc || /* MISC */
+ port == 0x03c4 || port == 0x03c5 || /* SR */
+ port == 0x03ce || port == 0x03cf) { /* GR */
+ if (device->card_type < NV_40)
+ head = 0; /* CR44 selects head */
+ nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data);
+ } else
+ nv_error(obj, "unknown vga port 0x%04x\n", port);
+}
+
+u8
+nv_rdvgas(void *obj, int head, u8 index)
+{
+ nv_wrport(obj, head, 0x03c4, index);
+ return nv_rdport(obj, head, 0x03c5);
+}
+
+void
+nv_wrvgas(void *obj, int head, u8 index, u8 value)
+{
+ nv_wrport(obj, head, 0x03c4, index);
+ nv_wrport(obj, head, 0x03c5, value);
+}
+
+u8
+nv_rdvgag(void *obj, int head, u8 index)
+{
+ nv_wrport(obj, head, 0x03ce, index);
+ return nv_rdport(obj, head, 0x03cf);
+}
+
+void
+nv_wrvgag(void *obj, int head, u8 index, u8 value)
+{
+ nv_wrport(obj, head, 0x03ce, index);
+ nv_wrport(obj, head, 0x03cf, value);
+}
+
+u8
+nv_rdvgac(void *obj, int head, u8 index)
+{
+ nv_wrport(obj, head, 0x03d4, index);
+ return nv_rdport(obj, head, 0x03d5);
+}
+
+void
+nv_wrvgac(void *obj, int head, u8 index, u8 value)
+{
+ nv_wrport(obj, head, 0x03d4, index);
+ nv_wrport(obj, head, 0x03d5, value);
+}
+
+u8
+nv_rdvgai(void *obj, int head, u16 port, u8 index)
+{
+ if (port == 0x03c4) return nv_rdvgas(obj, head, index);
+ if (port == 0x03ce) return nv_rdvgag(obj, head, index);
+ if (port == 0x03d4) return nv_rdvgac(obj, head, index);
+ nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+ return 0x00;
+}
+
+void
+nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
+{
+ if (port == 0x03c4) nv_wrvgas(obj, head, index, value);
+ else if (port == 0x03ce) nv_wrvgag(obj, head, index, value);
+ else if (port == 0x03d4) nv_wrvgac(obj, head, index, value);
+ else nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+}
+
+bool
+nv_lockvgac(void *obj, bool lock)
+{
+ bool locked = !nv_rdvgac(obj, 0, 0x1f);
+ u8 data = lock ? 0x99 : 0x57;
+ nv_wrvgac(obj, 0, 0x1f, data);
+ if (nv_device(obj)->chipset == 0x11) {
+ if (!(nv_rd32(obj, 0x001084) & 0x10000000))
+ nv_wrvgac(obj, 1, 0x1f, data);
+ }
+ return locked;
+}
+
+/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied).
+ * It affects only the 8-bit VGA io regs, which we access using mmio at
+ * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*.
+ * In general the set value of CR44 does not matter: reg access works as
+ * expected, and values can be set for the appropriate head by using a
+ * 0x2000 offset as required.  However:
+ * a) pre-NV40, the head B range of PRMVIO regs at 0xc23c* was not exposed,
+ *    and CR44 must be set to 0 or 3 to access values on the correct head
+ *    through the common 0xc03c* addresses
+ * b) in tied mode (4), head B is programmed with the values set on head A,
+ *    and access through the head B addresses can have strange results,
+ *    ergo we leave tied mode in init once we know what CR44 should be
+ *    restored to on exit
+ *
+ * The owner parameter is slightly abused:
+ * 0 and 1 are treated as head values, so the set value is (owner * 3);
+ * other values are treated as literal values to set.
+ */
+u8
+nv_rdvgaowner(void *obj)
+{
+ if (nv_device(obj)->card_type < NV_50) {
+ if (nv_device(obj)->chipset == 0x11) {
+ u32 tied = nv_rd32(obj, 0x001084) & 0x10000000;
+ if (tied == 0) {
+ u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80;
+ u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01;
+ u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80;
+ u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01;
+ if (slA && !tvA) return 0x00;
+ if (slB && !tvB) return 0x03;
+ if (slA) return 0x00;
+ if (slB) return 0x03;
+ return 0x00;
+ }
+ return 0x04;
+ }
+
+ return nv_rdvgac(obj, 0, 0x44);
+ }
+
+ nv_error(obj, "rdvgaowner after nv4x\n");
+ return 0x00;
+}
+
+void
+nv_wrvgaowner(void *obj, u8 select)
+{
+ if (nv_device(obj)->card_type < NV_50) {
+ u8 owner = (select == 1) ? 3 : select;
+ if (nv_device(obj)->chipset == 0x11) {
+ /* workaround hw lockup bug */
+ nv_rdvgac(obj, 0, 0x1f);
+ nv_rdvgac(obj, 1, 0x1f);
+ }
+
+ nv_wrvgac(obj, 0, 0x44, owner);
+
+ if (nv_device(obj)->chipset == 0x11) {
+ nv_wrvgac(obj, 0, 0x2e, owner);
+ nv_wrvgac(obj, 0, 0x2e, owner);
+ }
+ } else
+ nv_error(obj, "wrvgaowner after nv4x\n");
+}
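
The owner encoding described in the comment before nv_rdvgaowner() is just nv_wrvgaowner()'s first line in disguise. As a pure function with a few checks:

#include <assert.h>

static unsigned char cr44_owner(unsigned char select)
{
	return (select == 1) ? 3 : select;
}

int main(void)
{
	assert(cr44_owner(0) == 0);      /* head A */
	assert(cr44_owner(1) == 3);      /* head B */
	assert(cr44_owner(4) == 4);      /* tied: literal value passes through */
	return 0;
}
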
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
new file mode 100644
index 0000000000000000000000000000000000000000..e1f013d39768c30c70c481b97d408e707fb7dd67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include
+#include
+
+#include
+#include
+
+int
+nouveau_dmaobj_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ void *data, u32 size, int len, void **pobject)
+{
+ struct nv_dma_class *args = data;
+ struct nouveau_dmaobj *object;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
+ object = *pobject;
+ if (ret)
+ return ret;
+
+ switch (args->flags & NV_DMA_TARGET_MASK) {
+ case NV_DMA_TARGET_VM:
+ object->target = NV_MEM_TARGET_VM;
+ break;
+ case NV_DMA_TARGET_VRAM:
+ object->target = NV_MEM_TARGET_VRAM;
+ break;
+ case NV_DMA_TARGET_PCI:
+ object->target = NV_MEM_TARGET_PCI;
+ break;
+ case NV_DMA_TARGET_PCI_US:
+ case NV_DMA_TARGET_AGP:
+ object->target = NV_MEM_TARGET_PCI_NOSNOOP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (args->flags & NV_DMA_ACCESS_MASK) {
+ case NV_DMA_ACCESS_VM:
+ object->access = NV_MEM_ACCESS_VM;
+ break;
+ case NV_DMA_ACCESS_RD:
+ object->access = NV_MEM_ACCESS_RO;
+ break;
+ case NV_DMA_ACCESS_WR:
+ object->access = NV_MEM_ACCESS_WO;
+ break;
+ case NV_DMA_ACCESS_RDWR:
+ object->access = NV_MEM_ACCESS_RW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ object->start = args->start;
+ object->limit = args->limit;
+ return 0;
+}
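
nouveau_dmaobj_create_() is essentially two table lookups: the class flags decode into a memory target and an access mode, and anything unrecognised is rejected with -EINVAL. A compressed user-space rendition of the target half; the flag values below are stand-ins, not the kernel's NV_DMA_* definitions:

#include <stdio.h>
#include <stdint.h>

enum target { T_VM, T_VRAM, T_PCI, T_PCI_NOSNOOP, T_BAD };

#define DMA_TARGET_MASK 0x7u             /* assumed layout, illustration only */

static enum target decode_target(uint32_t flags)
{
	switch (flags & DMA_TARGET_MASK) {
	case 0: return T_VM;
	case 1: return T_VRAM;
	case 2: return T_PCI;
	case 3: return T_PCI_NOSNOOP;    /* PCI_US and AGP collapse here too */
	default: return T_BAD;           /* mirrors the -EINVAL path */
	}
}

int main(void)
{
	printf("%d\n", decode_target(1));        /* T_VRAM */
	return 0;
}
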
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
new file mode 100644
index 0000000000000000000000000000000000000000..9f4cc2f319945b57e7673416738b57492c509b0f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include
+#include
+
+#include
+#include
+
+#include
+
+struct nv04_dmaeng_priv {
+ struct nouveau_dmaeng base;
+};
+
+struct nv04_dmaobj_priv {
+ struct nouveau_dmaobj base;
+};
+
+static int
+nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng);
+ struct nouveau_gpuobj *gpuobj;
+ u32 flags0 = nv_mclass(dmaobj);
+ u32 flags2 = 0x00000000;
+ u64 offset = dmaobj->start & 0xfffff000;
+ u64 adjust = dmaobj->start & 0x00000fff;
+ u32 length = dmaobj->limit - dmaobj->start;
+ int ret;
+
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
+ struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
+ if (!dmaobj->start)
+ return nouveau_gpuobj_dup(parent, pgt, pgpuobj);
+ offset = nv_ro32(pgt, 8 + (offset >> 10));
+ offset &= 0xfffff000;
+ }
+
+ dmaobj->target = NV_MEM_TARGET_PCI;
+ dmaobj->access = NV_MEM_ACCESS_RW;
+ }
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00003000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00023000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00033000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dmaobj->access) {
+ case NV_MEM_ACCESS_RO:
+ flags0 |= 0x00004000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ flags0 |= 0x00008000;
+ case NV_MEM_ACCESS_RW:
+ flags2 |= 0x00000002;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
+ *pgpuobj = gpuobj;
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20));
+ nv_wo32(*pgpuobj, 0x04, length);
+ nv_wo32(*pgpuobj, 0x08, flags2 | offset);
+ nv_wo32(*pgpuobj, 0x0c, flags2 | offset);
+ }
+
+ return ret;
+}
+
+static int
+nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nv04_dmaobj_priv *dmaobj;
+ struct nouveau_gpuobj *gpuobj;
+ int ret;
+
+ ret = nouveau_dmaobj_create(parent, engine, oclass,
+ data, size, &dmaobj);
+ *pobject = nv_object(dmaobj);
+ if (ret)
+ return ret;
+
+ switch (nv_mclass(parent)) {
+ case NV_DEVICE_CLASS:
+ break;
+ case NV03_CHANNEL_DMA_CLASS:
+ case NV10_CHANNEL_DMA_CLASS:
+ case NV17_CHANNEL_DMA_CLASS:
+ case NV40_CHANNEL_DMA_CLASS:
+ ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
+ nouveau_object_ref(NULL, pobject);
+ *pobject = nv_object(gpuobj);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct nouveau_ofuncs
+nv04_dmaobj_ofuncs = {
+ .ctor = nv04_dmaobj_ctor,
+ .dtor = _nouveau_dmaobj_dtor,
+ .init = _nouveau_dmaobj_init,
+ .fini = _nouveau_dmaobj_fini,
+};
+
+static struct nouveau_oclass
+nv04_dmaobj_sclass[] = {
+ { 0x0002, &nv04_dmaobj_ofuncs },
+ { 0x0003, &nv04_dmaobj_ofuncs },
+ { 0x003d, &nv04_dmaobj_ofuncs },
+ {}
+};
+
+static int
+nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_dmaeng_priv *priv;
+ int ret;
+
+ ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.sclass = nv04_dmaobj_sclass;
+ priv->base.bind = nv04_dmaobj_bind;
+ return 0;
+}
+
+struct nouveau_oclass
+nv04_dmaeng_oclass = {
+ .handle = NV_ENGINE(DMAOBJ, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_dmaeng_ctor,
+ .dtor = _nouveau_dmaeng_dtor,
+ .init = _nouveau_dmaeng_init,
+ .fini = _nouveau_dmaeng_fini,
+ },
+};
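
Two details of nv04_dmaobj_bind() deserve a closer look. The flags0 word packs the object class, target, access mode and the sub-page adjust into one 32-bit value, and the missing break after NV_MEM_ACCESS_WO reads like a bug but appears deliberate: write-only objects still want the flags2 bit that the RW case sets. The packing replayed stand-alone, with made-up input values:

#include <stdio.h>
#include <stdint.h>

enum access { RO, WO, RW };

int main(void)
{
	uint32_t mclass = 0x003d, flags0 = mclass, flags2 = 0;
	uint64_t start = 0x12345678;
	uint32_t adjust = start & 0xfff; /* byte offset within the page */
	enum access access = WO;

	switch (access) {
	case RO:
		flags0 |= 0x00004000;
		break;
	case WO:
		flags0 |= 0x00008000;
		/* deliberate fall-through: WO also wants the flags2 bit */
	case RW:
		flags2 |= 0x00000002;
		break;
	}

	printf("word0 = 0x%08x, word2 bits = 0x%08x\n",
	       (unsigned)(flags0 | (adjust << 20)), (unsigned)flags2);
	return 0;
}
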
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
new file mode 100644
index 0000000000000000000000000000000000000000..045d2565e28941cf8cf1162ed4e04183c14aba8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include
+#include
+
+#include
+#include
+
+struct nv50_dmaeng_priv {
+ struct nouveau_dmaeng base;
+};
+
+struct nv50_dmaobj_priv {
+ struct nouveau_dmaobj base;
+};
+
+static int
+nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ u32 flags = nv_mclass(dmaobj);
+ int ret;
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VM:
+ flags |= 0x00000000;
+ flags |= 0x60000000; /* COMPRESSION_USEVM */
+ flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
+ break;
+ case NV_MEM_TARGET_VRAM:
+ flags |= 0x00010000;
+ flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ break;
+ case NV_MEM_TARGET_PCI:
+ flags |= 0x00020000;
+ flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags |= 0x00030000;
+ flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dmaobj->access) {
+ case NV_MEM_ACCESS_VM:
+ break;
+ case NV_MEM_ACCESS_RO:
+ flags |= 0x00040000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ case NV_MEM_ACCESS_RW:
+ flags |= 0x00080000;
+ break;
+ }
+
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags);
+ nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+ nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+ upper_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, 0x00000000);
+ }
+
+ return ret;
+}
+
+static int
+nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nv50_dmaobj_priv *dmaobj;
+ struct nouveau_gpuobj *gpuobj;
+ int ret;
+
+ ret = nouveau_dmaobj_create(parent, engine, oclass,
+ data, size, &dmaobj);
+ *pobject = nv_object(dmaobj);
+ if (ret)
+ return ret;
+
+ switch (nv_mclass(parent)) {
+ case NV_DEVICE_CLASS:
+ break;
+ case NV50_CHANNEL_DMA_CLASS:
+ case NV84_CHANNEL_DMA_CLASS:
+ case NV50_CHANNEL_IND_CLASS:
+ case NV84_CHANNEL_IND_CLASS:
+ ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
+ nouveau_object_ref(NULL, pobject);
+ *pobject = nv_object(gpuobj);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct nouveau_ofuncs
+nv50_dmaobj_ofuncs = {
+ .ctor = nv50_dmaobj_ctor,
+ .dtor = _nouveau_dmaobj_dtor,
+ .init = _nouveau_dmaobj_init,
+ .fini = _nouveau_dmaobj_fini,
+};
+
+static struct nouveau_oclass
+nv50_dmaobj_sclass[] = {
+ { 0x0002, &nv50_dmaobj_ofuncs },
+ { 0x0003, &nv50_dmaobj_ofuncs },
+ { 0x003d, &nv50_dmaobj_ofuncs },
+ {}
+};
+
+static int
+nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_dmaeng_priv *priv;
+ int ret;
+
+ ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.sclass = nv50_dmaobj_sclass;
+ priv->base.bind = nv50_dmaobj_bind;
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_dmaeng_oclass = {
+ .handle = NV_ENGINE(DMAOBJ, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_dmaeng_ctor,
+ .dtor = _nouveau_dmaeng_dtor,
+ .init = _nouveau_dmaeng_init,
+ .fini = _nouveau_dmaeng_fini,
+ },
+};
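
nv50_dmaobj_bind() stores the 40-bit start and limit addresses as two low 32-bit words plus a third word carrying both sets of high bits: the limit's in bits 24-31 and the start's in bits 0-7. A round-trip check that the packing is lossless for 40-bit addresses:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 0x0112345678ULL, limit = 0x01ffffffffULL;
	uint32_t w04 = (uint32_t)limit;
	uint32_t w08 = (uint32_t)start;
	uint32_t w0c = (uint32_t)(limit >> 32) << 24 | (uint32_t)(start >> 32);

	/* reassemble and verify nothing was lost */
	uint64_t start2 = ((uint64_t)(w0c & 0xff) << 32) | w08;
	uint64_t limit2 = ((uint64_t)(w0c >> 24) << 32) | w04;

	assert(start2 == start && limit2 == limit);
	return 0;
}
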
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
new file mode 100644
index 0000000000000000000000000000000000000000..5baa086955354a96fb7dc6305568a670f1d10dcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include
+
+#include
+#include
+
+struct nvc0_dmaeng_priv {
+ struct nouveau_dmaeng base;
+};
+
+struct nvc0_dmaobj_priv {
+ struct nouveau_dmaobj base;
+};
+
+static int
+nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_dmaobj_priv *dmaobj;
+ int ret;
+
+ ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
+ *pobject = nv_object(dmaobj);
+ if (ret)
+ return ret;
+
+ if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nvc0_dmaobj_ofuncs = {
+ .ctor = nvc0_dmaobj_ctor,
+ .dtor = _nouveau_dmaobj_dtor,
+ .init = _nouveau_dmaobj_init,
+ .fini = _nouveau_dmaobj_fini,
+};
+
+static struct nouveau_oclass
+nvc0_dmaobj_sclass[] = {
+ { 0x0002, &nvc0_dmaobj_ofuncs },
+ { 0x0003, &nvc0_dmaobj_ofuncs },
+ { 0x003d, &nvc0_dmaobj_ofuncs },
+ {}
+};
+
+static int
+nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_dmaeng_priv *priv;
+ int ret;
+
+ ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.sclass = nvc0_dmaobj_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_dmaeng_oclass = {
+ .handle = NV_ENGINE(DMAOBJ, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_dmaeng_ctor,
+ .dtor = _nouveau_dmaeng_dtor,
+ .init = _nouveau_dmaeng_init,
+ .fini = _nouveau_dmaeng_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
new file mode 100644
index 0000000000000000000000000000000000000000..bbb43c67c2ae2ce083f300761aff808b250b8209
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include
+#include
+
+#include
+#include
+
+int
+nouveau_fifo_channel_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ int bar, u32 addr, u32 size, u32 pushbuf,
+ u32 engmask, int len, void **ptr)
+{
+ struct nouveau_device *device = nv_device(engine);
+ struct nouveau_fifo *priv = (void *)engine;
+ struct nouveau_fifo_chan *chan;
+ struct nouveau_dmaeng *dmaeng;
+ unsigned long flags;
+ int ret;
+
+ /* create base object class */
+ ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+ engmask, len, ptr);
+ chan = *ptr;
+ if (ret)
+ return ret;
+
+ /* validate dma object representing push buffer */
+ chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+ if (!chan->pushdma)
+ return -ENOENT;
+
+ dmaeng = (void *)chan->pushdma->base.engine;
+ switch (chan->pushdma->base.oclass->handle) {
+ case 0x0002:
+ case 0x003d:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dmaeng->bind) {
+ ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+ if (ret)
+ return ret;
+ }
+
+ /* find a free fifo channel */
+ spin_lock_irqsave(&priv->lock, flags);
+ for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
+ if (!priv->channel[chan->chid]) {
+ priv->channel[chan->chid] = nv_object(chan);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (chan->chid == priv->max) {
+ nv_error(priv, "no free channels\n");
+ return -ENOSPC;
+ }
+
+ /* map fifo control registers */
+ chan->user = ioremap(pci_resource_start(device->pdev, bar) + addr +
+ (chan->chid * size), size);
+ if (!chan->user)
+ return -EFAULT;
+
+ chan->size = size;
+ return 0;
+}
+
+void
+nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
+{
+ struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
+ unsigned long flags;
+
+ iounmap(chan->user);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->channel[chan->chid] = NULL;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ nouveau_gpuobj_ref(NULL, &chan->pushgpu);
+ nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma);
+ nouveau_namedb_destroy(&chan->base);
+}
+
+void
+_nouveau_fifo_channel_dtor(struct nouveau_object *object)
+{
+ struct nouveau_fifo_chan *chan = (void *)object;
+ nouveau_fifo_channel_destroy(chan);
+}
+
+u32
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
+{
+ struct nouveau_fifo_chan *chan = (void *)object;
+ return ioread32_native(chan->user + addr);
+}
+
+void
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+ struct nouveau_fifo_chan *chan = (void *)object;
+ iowrite32_native(data, chan->user + addr);
+}
+
+static int
+nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
+{
+ int engidx = nv_hclass(priv) & 0xff;
+
+ while (object && object->parent) {
+ if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
+ (nv_hclass(object->parent) & 0xff) == engidx)
+ return nouveau_fifo_chan(object)->chid;
+ object = object->parent;
+ }
+
+ return -1;
+}
+
+void
+nouveau_fifo_destroy(struct nouveau_fifo *priv)
+{
+ kfree(priv->channel);
+ nouveau_engine_destroy(&priv->base);
+}
+
+int
+nouveau_fifo_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ int min, int max, int length, void **pobject)
+{
+ struct nouveau_fifo *priv;
+ int ret;
+
+ ret = nouveau_engine_create_(parent, engine, oclass, true, "PFIFO",
+ "fifo", length, pobject);
+ priv = *pobject;
+ if (ret)
+ return ret;
+
+ priv->min = min;
+ priv->max = max;
+ priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
+ if (!priv->channel)
+ return -ENOMEM;
+
+ priv->chid = nouveau_fifo_chid;
+ spin_lock_init(&priv->lock);
+ return 0;
+}
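
Channel-id allocation in nouveau_fifo_channel_create_() is a linear scan for the first NULL slot between min and max, claimed while the lock is still held so two channels can never race to the same id. A pthread model of the same allocator, sizes and names invented:

#include <pthread.h>
#include <stddef.h>

#define MAX_CHAN 128

static void *channel[MAX_CHAN];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int chan_alloc(int min, int max, void *owner)
{
	int chid;

	pthread_mutex_lock(&lock);
	for (chid = min; chid < max; chid++) {
		if (!channel[chid]) {
			channel[chid] = owner;   /* claim while locked */
			break;
		}
	}
	pthread_mutex_unlock(&lock);

	return chid < max ? chid : -1;           /* -1 mirrors -ENOSPC */
}

int main(void)
{
	int dummy;

	return chan_alloc(0, MAX_CHAN, &dummy) == 0 ? 0 : 1;
}
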
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
new file mode 100644
index 0000000000000000000000000000000000000000..ea76e3e8c9c2db45a39c34f230289d556c24da24
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -0,0 +1,630 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv04_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+int
+nv04_fifo_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 handle)
+{
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ struct nv04_fifo_chan *chan = (void *)parent;
+ u32 context, chid = chan->base.chid;
+ int ret;
+
+ if (nv_iclass(object, NV_GPUOBJ_CLASS))
+ context = nv_gpuobj(object)->addr >> 4;
+ else
+ context = 0x00000004; /* just non-zero */
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_DMAOBJ:
+ case NVDEV_ENGINE_SW:
+ context |= 0x00000000;
+ break;
+ case NVDEV_ENGINE_GR:
+ context |= 0x00010000;
+ break;
+ case NVDEV_ENGINE_MPEG:
+ context |= 0x00020000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ context |= 0x80000000; /* valid */
+ context |= chid << 24;
+
+ mutex_lock(&nv_subdev(priv)->mutex);
+ ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
+ mutex_unlock(&nv_subdev(priv)->mutex);
+ return ret;
+}
+
+void
+nv04_fifo_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ mutex_lock(&nv_subdev(priv)->mutex);
+ nouveau_ramht_remove(priv->ramht, cookie);
+ mutex_unlock(&nv_subdev(priv)->mutex);
+}
+
+int
+nv04_fifo_context_attach(struct nouveau_object *parent,
+ struct nouveau_object *object)
+{
+ nv_engctx(object)->addr = nouveau_fifo_chan(parent)->chid;
+ return 0;
+}
+
+static int
+nv04_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_fifo_priv *priv = (void *)engine;
+ struct nv04_fifo_chan *chan;
+ struct nv03_channel_dma_class *args = data;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+ nv_parent(chan)->context_attach = nv04_fifo_context_attach;
+ chan->ramfc = chan->base.chid * 32;
+
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x10,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ return 0;
+}
+
+void
+nv04_fifo_chan_dtor(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object->engine;
+ struct nv04_fifo_chan *chan = (void *)object;
+ struct ramfc_desc *c = priv->ramfc_desc;
+
+ do {
+ nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+ } while ((++c)->bits);
+
+ nouveau_fifo_channel_destroy(&chan->base);
+}
+
+int
+nv04_fifo_chan_init(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object->engine;
+ struct nv04_fifo_chan *chan = (void *)object;
+ u32 mask = 1 << chan->base.chid;
+ unsigned long flags;
+ int ret;
+
+ ret = nouveau_fifo_channel_init(&chan->base);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return 0;
+}
+
+int
+nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv04_fifo_priv *priv = (void *)object->engine;
+ struct nv04_fifo_chan *chan = (void *)object;
+ struct nouveau_gpuobj *fctx = priv->ramfc;
+ struct ramfc_desc *c;
+ unsigned long flags;
+ u32 data = chan->ramfc;
+ u32 chid;
+
+ /* prevent fifo context switches */
+ spin_lock_irqsave(&priv->base.lock, flags);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+
+ /* if this channel is active, replace it with a null context */
+ chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
+ if (chid == chan->base.chid) {
+ nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
+ c = priv->ramfc_desc;
+ do {
+ u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+ u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+ u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
+ u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
+ nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+ } while ((++c)->bits);
+
+ c = priv->ramfc_desc;
+ do {
+ nv_wr32(priv, c->regp, 0x00000000);
+ } while ((++c)->bits);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+	/* restore normal operation after disabling dma mode */
+ nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+
+ return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
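
The save loop in nv04_fifo_chan_fini() above walks the ramfc_desc table, pulling each live field out of its PFIFO register and merging it into the channel's RAMFC image. One descriptor step lifted out and unit-tested in user space; the struct here is a simplification of ramfc_desc (which also carries the register address and context offset), and the 16-bit test field is made up:

#include <assert.h>
#include <stdint.h>

struct desc { int bits, regs, ctxs; };

static uint32_t save_field(const struct desc *c, uint32_t regv, uint32_t ctxv)
{
	uint32_t rm = ((1ULL << c->bits) - 1) << c->regs; /* register mask */
	uint32_t cm = ((1ULL << c->bits) - 1) << c->ctxs; /* context mask */
	uint32_t rv = (regv & rm) >> c->regs;

	return (ctxv & ~cm) | (rv << c->ctxs);
}

int main(void)
{
	/* 16-bit field at register bit 16, stored at context bit 16 */
	struct desc c = { 16, 16, 16 };

	assert(save_field(&c, 0xabcd0000, 0x00001234) == 0xabcd1234);
	return 0;
}
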
+
+static struct nouveau_ofuncs
+nv04_fifo_ofuncs = {
+ .ctor = nv04_fifo_chan_ctor,
+ .dtor = nv04_fifo_chan_dtor,
+ .init = nv04_fifo_chan_init,
+ .fini = nv04_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv04_fifo_sclass[] = {
+ { NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+int
+nv04_fifo_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_fifo_base *base;
+ int ret;
+
+ ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+ 0x1000, NVOBJ_FLAG_HEAP, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct nouveau_oclass
+nv04_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_context_ctor,
+ .dtor = _nouveau_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
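+/* pause()/start() bracket direct pokes at per-channel PFIFO state:
+ * pause disables cache reassignment and the puller and waits for the
+ * puller to go idle, start re-enables both.  The saved IRQ flags are
+ * handed back through pflags, so callers hold the lock in between.
+ */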
+void
+nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags)
+__acquires(priv->base.lock)
+{
+ struct nv04_fifo_priv *priv = (void *)pfifo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ *pflags = flags;
+
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
+ nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+
+ /* in some cases the puller may be left in an inconsistent state
+ * if you try to stop it while it's busy translating handles.
+ * sometimes you get a CACHE_ERROR, sometimes it just fails
+ * silently, sending incorrect instance offsets to PGRAPH after
+ * it's started up again.
+ *
+ * to avoid this, we invalidate the most recently calculated
+ * instance.
+ */
+ if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
+ NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
+ nv_warn(priv, "timeout idling puller\n");
+
+ if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
+ NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+ nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+}
+
+void
+nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags)
+__releases(priv->base.lock)
+{
+ struct nv04_fifo_priv *priv = (void *)pfifo;
+ unsigned long flags = *pflags;
+
+ nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
+
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+}
+
+static const char *
+nv_dma_state_err(u32 state)
+{
+ static const char * const desc[] = {
+ "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+ "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+ };
+ return desc[(state >> 29) & 0x7];
+}
+
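+/* Try to execute a method in software: a bind (method 0x0000) of an
+ * object owned by the SW engine routes that subchannel to software,
+ * and subsequent methods on it are handed to the object's method
+ * handler on the CPU.  Returns true if the method was consumed.
+ */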
+static bool
+nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
+{
+ struct nv04_fifo_chan *chan = NULL;
+ struct nouveau_handle *bind;
+ const int subc = (addr >> 13) & 0x7;
+ const int mthd = addr & 0x1ffc;
+ bool handled = false;
+ unsigned long flags;
+ u32 engine;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ if (likely(chid >= priv->base.min && chid <= priv->base.max))
+ chan = (void *)priv->base.channel[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ switch (mthd) {
+ case 0x0000:
+ bind = nouveau_namedb_get(nv_namedb(chan), data);
+ if (unlikely(!bind))
+ break;
+
+ if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
+ engine = 0x0000000f << (subc * 4);
+ chan->subc[subc] = data;
+ handled = true;
+
+ nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
+ }
+
+ nouveau_namedb_put(bind);
+ break;
+ default:
+ engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
+ if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+ break;
+
+ bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]);
+ if (likely(bind)) {
+ if (!nv_call(bind->object, mthd, data))
+ handled = true;
+ nouveau_namedb_put(bind);
+ }
+ break;
+ }
+
+out:
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return handled;
+}
+
+void
+nv04_fifo_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_device *device = nv_device(subdev);
+ struct nv04_fifo_priv *priv = (void *)subdev;
+ uint32_t status, reassign;
+ int cnt = 0;
+
+ reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
+ while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+ uint32_t chid, get;
+
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+
+ chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
+ get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
+
+ if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+ uint32_t mthd, data;
+ int ptr;
+
+ /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+ * wrapping on my G80 chips, but CACHE1 isn't big
+ * enough for this much data.  Tests show that it
+ * wraps around to the start at GET=0x800; no clue
+ * as to why.
+ */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (device->card_type < NV_40) {
+ mthd = nv_rd32(priv,
+ NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(priv,
+ NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nv_rd32(priv,
+ NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(priv,
+ NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
+ nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+ "Mthd 0x%04x Data 0x%08x\n",
+ chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+ data);
+ }
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(priv, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+
+ status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ }
+
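+ /* the pusher choked on a command; log the error, then skip the
+ * stalled data (GET -> PUT, or the NV50 IB pointers) so the
+ * channel can make progress again
+ */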
+ if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+ u32 dma_get = nv_rd32(priv, 0x003244);
+ u32 dma_put = nv_rd32(priv, 0x003240);
+ u32 push = nv_rd32(priv, 0x003220);
+ u32 state = nv_rd32(priv, 0x003228);
+
+ if (device->card_type == NV_50) {
+ u32 ho_get = nv_rd32(priv, 0x003328);
+ u32 ho_put = nv_rd32(priv, 0x003320);
+ u32 ib_get = nv_rd32(priv, 0x003334);
+ u32 ib_put = nv_rd32(priv, 0x003330);
+
+ nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+ "State 0x%08x (err: %s) Push 0x%08x\n",
+ chid, ho_get, dma_get, ho_put,
+ dma_put, ib_get, ib_put, state,
+ nv_dma_state_err(state),
+ push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nv_wr32(priv, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nv_wr32(priv, 0x003244, dma_put);
+ nv_wr32(priv, 0x003328, ho_put);
+ } else if (ib_get != ib_put) {
+ nv_wr32(priv, 0x003334, ib_put);
+ }
+ } else {
+ nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
+ "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
+ chid, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
+
+ if (dma_get != dma_put)
+ nv_wr32(priv, 0x003244, dma_put);
+ }
+
+ nv_wr32(priv, 0x003228, 0x00000000);
+ nv_wr32(priv, 0x003220, 0x00000001);
+ nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ }
+
+ if (status & NV_PFIFO_INTR_SEMAPHORE) {
+ uint32_t sem;
+
+ status &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nv_wr32(priv, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ if (device->card_type == NV_50) {
+ if (status & 0x00000010) {
+ nv50_fb_trap(nouveau_fb(priv), 1);
+ status &= ~0x00000010;
+ nv_wr32(priv, 0x002100, 0x00000010);
+ }
+ }
+
+ if (status) {
+ nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+ status, chid);
+ nv_wr32(priv, NV03_PFIFO_INTR_0, status);
+ status = 0;
+ }
+
+ nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
+ }
+
+ if (status) {
+ nv_info(priv, "still angry after %d spins, halt\n", cnt);
+ nv_wr32(priv, 0x002140, 0);
+ nv_wr32(priv, 0x000140, 0);
+ }
+
+ nv_wr32(priv, 0x000100, 0x00000100);
+}
+
+static int
+nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nouveau_ramht_ref(imem->ramht, &priv->ramht);
+ nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+ nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv04_fifo_cclass;
+ nv_engine(priv)->sclass = nv04_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
+ priv->ramfc_desc = nv04_ramfc;
+ return 0;
+}
+
+void
+nv04_fifo_dtor(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object;
+ nouveau_gpuobj_ref(NULL, &priv->ramfc);
+ nouveau_gpuobj_ref(NULL, &priv->ramro);
+ nouveau_ramht_ref(NULL, &priv->ramht);
+ nouveau_fifo_destroy(&priv->base);
+}
+
+int
+nv04_fifo_init(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((priv->ramht->bits - 9) << 16) |
+ (priv->ramht->base.addr >> 8));
+ nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+ nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+
+ nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+ return 0;
+}
+
+struct nouveau_oclass
+nv04_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_ctor,
+ .dtor = nv04_fifo_dtor,
+ .init = nv04_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
new file mode 100644
index 0000000000000000000000000000000000000000..496a4b4fdfafa07e4df261d2d23326aa2d817541
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
@@ -0,0 +1,178 @@
+#ifndef __NV04_FIFO_H__
+#define __NV04_FIFO_H__
+
+#include <engine/fifo.h>
+
+#define NV04_PFIFO_DELAY_0 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
+#define NV03_PFIFO_INTR_0 0x00002100
+#define NV03_PFIFO_INTR_EN_0 0x00002140
+# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
+# define NV_PFIFO_INTR_RUNOUT (1<<4)
+# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
+# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
+# define NV_PFIFO_INTR_DMA_PT (1<<16)
+# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
+# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
+#define NV03_PFIFO_RAMHT 0x00002210
+#define NV03_PFIFO_RAMFC 0x00002214
+#define NV03_PFIFO_RAMRO 0x00002218
+#define NV40_PFIFO_RAMFC 0x00002220
+#define NV03_PFIFO_CACHES 0x00002500
+#define NV04_PFIFO_MODE 0x00002504
+#define NV04_PFIFO_DMA 0x00002508
+#define NV04_PFIFO_SIZE 0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE 128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
+#define NV03_PFIFO_CACHE0_PULL0 0x00003040
+#define NV04_PFIFO_CACHE0_PULL0 0x00003050
+#define NV04_PFIFO_CACHE0_PULL1 0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
+#define NV03_PFIFO_CACHE1_PUT 0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
+# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
+# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
+# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0 0x00003240
+#define NV04_PFIFO_CACHE1_PULL0 0x00003250
+# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
+# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
+#define NV03_PFIFO_CACHE1_PULL1 0x00003250
+#define NV04_PFIFO_CACHE1_PULL1 0x00003254
+#define NV04_PFIFO_CACHE1_HASH 0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
+#define NV03_PFIFO_CACHE1_GET 0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
+#define NV40_PFIFO_UNK32E4 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
+
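+/* One RAMFC field: 'bits' wide, at bit 'ctxs' of the 32-bit context
+ * word at byte offset 'ctxp' within a channel's RAMFC entry, mirrored
+ * at bit 'regs' of PFIFO register 'regp'.  chan_fini() walks a
+ * chipset's table of these to save live PFIFO state back to RAMFC,
+ * and chan_dtor() to clear it.
+ */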
+struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+};
+
+struct nv04_fifo_priv {
+ struct nouveau_fifo base;
+ struct ramfc_desc *ramfc_desc;
+ struct nouveau_ramht *ramht;
+ struct nouveau_gpuobj *ramro;
+ struct nouveau_gpuobj *ramfc;
+};
+
+struct nv04_fifo_base {
+ struct nouveau_fifo_base base;
+};
+
+struct nv04_fifo_chan {
+ struct nouveau_fifo_chan base;
+ u32 subc[8];
+ u32 ramfc;
+};
+
+int nv04_fifo_object_attach(struct nouveau_object *,
+ struct nouveau_object *, u32);
+void nv04_fifo_object_detach(struct nouveau_object *, int);
+
+void nv04_fifo_chan_dtor(struct nouveau_object *);
+int nv04_fifo_chan_init(struct nouveau_object *);
+int nv04_fifo_chan_fini(struct nouveau_object *, bool suspend);
+
+int nv04_fifo_context_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+
+void nv04_fifo_dtor(struct nouveau_object *);
+int nv04_fifo_init(struct nouveau_object *);
+void nv04_fifo_pause(struct nouveau_fifo *, unsigned long *);
+void nv04_fifo_start(struct nouveau_fifo *, unsigned long *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
new file mode 100644
index 0000000000000000000000000000000000000000..4ba75422b89dd406be7bdd3e1b8f4eb0c638e939
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv10_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv10_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_fifo_priv *priv = (void *)engine;
+ struct nv04_fifo_chan *chan;
+ struct nv03_channel_dma_class *args = data;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+ nv_parent(chan)->context_attach = nv04_fifo_context_attach;
+ chan->ramfc = chan->base.chid * 32;
+
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x14,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nv10_fifo_ofuncs = {
+ .ctor = nv10_fifo_chan_ctor,
+ .dtor = nv04_fifo_chan_dtor,
+ .init = nv04_fifo_chan_init,
+ .fini = nv04_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv10_fifo_sclass[] = {
+ { NV10_CHANNEL_DMA_CLASS, &nv10_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv10_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x10),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_context_ctor,
+ .dtor = _nouveau_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nouveau_ramht_ref(imem->ramht, &priv->ramht);
+ nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+ nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv10_fifo_cclass;
+ nv_engine(priv)->sclass = nv10_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
+ priv->ramfc_desc = nv10_ramfc;
+ return 0;
+}
+
+struct nouveau_oclass
+nv10_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x10),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv10_fifo_ctor,
+ .dtor = nv04_fifo_dtor,
+ .init = nv04_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
new file mode 100644
index 0000000000000000000000000000000000000000..b96e6b0ae2b18088640ef0dbb09ee4bd827d632c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv17_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv17_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_fifo_priv *priv = (void *)engine;
+ struct nv04_fifo_chan *chan;
+ struct nv03_channel_dma_class *args = data;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+ &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+ nv_parent(chan)->context_attach = nv04_fifo_context_attach;
+ chan->ramfc = chan->base.chid * 64;
+
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x14,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nv17_fifo_ofuncs = {
+ .ctor = nv17_fifo_chan_ctor,
+ .dtor = nv04_fifo_chan_dtor,
+ .init = nv04_fifo_chan_init,
+ .fini = nv04_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv17_fifo_sclass[] = {
+ { NV17_CHANNEL_DMA_CLASS, &nv17_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv17_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x17),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_context_ctor,
+ .dtor = _nouveau_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv17_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nouveau_ramht_ref(imem->ramht, &priv->ramht);
+ nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+ nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv17_fifo_cclass;
+ nv_engine(priv)->sclass = nv17_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
+ priv->ramfc_desc = nv17_ramfc;
+ return 0;
+}
+
+static int
+nv17_fifo_init(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((priv->ramht->bits - 9) << 16) |
+ (priv->ramht->base.addr >> 8));
+ nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+ nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+
+ nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+ return 0;
+}
+
+struct nouveau_oclass
+nv17_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x17),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv17_fifo_ctor,
+ .dtor = nv04_fifo_dtor,
+ .init = nv17_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
new file mode 100644
index 0000000000000000000000000000000000000000..559c3b4e1b867cf691384657f0eb30839f39edbb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv40_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 2, 28, 0x18, 28, 0x002058 },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
+ { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
+ { 32, 0, 0x40, 0, 0x0032e4 },
+ { 32, 0, 0x44, 0, 0x0032e8 },
+ { 32, 0, 0x4c, 0, 0x002088 },
+ { 32, 0, 0x50, 0, 0x003300 },
+ { 32, 0, 0x54, 0, 0x00330c },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
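+/* The RAMHT context word packs the object's instance address (or a
+ * dummy non-zero value for software objects) with an engine selector
+ * and the owning channel id at bit 23.
+ */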
+static int
+nv40_fifo_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 handle)
+{
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ struct nv04_fifo_chan *chan = (void *)parent;
+ u32 context, chid = chan->base.chid;
+ int ret;
+
+ if (nv_iclass(object, NV_GPUOBJ_CLASS))
+ context = nv_gpuobj(object)->addr >> 4;
+ else
+ context = 0x00000004; /* just non-zero */
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_DMAOBJ:
+ case NVDEV_ENGINE_SW:
+ context |= 0x00000000;
+ break;
+ case NVDEV_ENGINE_GR:
+ context |= 0x00100000;
+ break;
+ case NVDEV_ENGINE_MPEG:
+ context |= 0x00200000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ context |= chid << 23;
+
+ mutex_lock(&nv_subdev(priv)->mutex);
+ ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
+ mutex_unlock(&nv_subdev(priv)->mutex);
+ return ret;
+}
+
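+/* NV40 keeps engine context pointers both in live PFIFO registers
+ * (for the currently resident channel) and shadowed in RAMFC, so
+ * attach/detach update whichever copy applies, with context switches
+ * held off via 0x002500 while they do.
+ */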
+static int
+nv40_fifo_context_attach(struct nouveau_object *parent,
+ struct nouveau_object *engctx)
+{
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ struct nv04_fifo_chan *chan = (void *)parent;
+ unsigned long flags;
+ u32 reg, ctx;
+
+ switch (nv_engidx(engctx->engine)) {
+ case NVDEV_ENGINE_SW:
+ return 0;
+ case NVDEV_ENGINE_GR:
+ reg = 0x32e0;
+ ctx = 0x38;
+ break;
+ case NVDEV_ENGINE_MPEG:
+ reg = 0x330c;
+ ctx = 0x54;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
+ nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
+
+ if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
+ nv_wr32(priv, reg, nv_engctx(engctx)->addr);
+ nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
+
+ nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return 0;
+}
+
+static int
+nv40_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+ struct nouveau_object *engctx)
+{
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ struct nv04_fifo_chan *chan = (void *)parent;
+ unsigned long flags;
+ u32 reg, ctx;
+
+ switch (nv_engidx(engctx->engine)) {
+ case NVDEV_ENGINE_SW:
+ return 0;
+ case NVDEV_ENGINE_GR:
+ reg = 0x32e0;
+ ctx = 0x38;
+ break;
+ case NVDEV_ENGINE_MPEG:
+ reg = 0x330c;
+ ctx = 0x54;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
+
+ if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
+ nv_wr32(priv, reg, 0x00000000);
+ nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
+
+ nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return 0;
+}
+
+static int
+nv40_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_fifo_priv *priv = (void *)engine;
+ struct nv04_fifo_chan *chan;
+ struct nv03_channel_dma_class *args = data;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x1000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_MPEG), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->context_attach = nv40_fifo_context_attach;
+ nv_parent(chan)->context_detach = nv40_fifo_context_detach;
+ nv_parent(chan)->object_attach = nv40_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+ chan->ramfc = chan->base.chid * 128;
+
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nv40_fifo_ofuncs = {
+ .ctor = nv40_fifo_chan_ctor,
+ .dtor = nv04_fifo_chan_dtor,
+ .init = nv04_fifo_chan_init,
+ .fini = nv04_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv40_fifo_sclass[] = {
+ { NV40_CHANNEL_DMA_CLASS, &nv40_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv40_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x40),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_context_ctor,
+ .dtor = _nouveau_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv40_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nouveau_ramht_ref(imem->ramht, &priv->ramht);
+ nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+ nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv40_fifo_cclass;
+ nv_engine(priv)->sclass = nv40_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
+ priv->ramfc_desc = nv40_ramfc;
+ return 0;
+}
+
+static int
+nv40_fifo_init(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object;
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ int ret;
+
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x002040, 0x000000ff);
+ nv_wr32(priv, 0x002044, 0x2101ffff);
+ nv_wr32(priv, 0x002058, 0x00000001);
+
+ nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((priv->ramht->bits - 9) << 16) |
+ (priv->ramht->base.addr >> 8));
+ nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+
+ switch (nv_device(priv)->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(priv, 0x002230, 0x00000001);
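+ /* fall through */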
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x45:
+ case 0x48:
+ nv_wr32(priv, 0x002220, 0x00030002);
+ break;
+ default:
+ nv_wr32(priv, 0x002230, 0x00000000);
+ nv_wr32(priv, 0x002220, ((pfb->ram.size - 512 * 1024 +
+ priv->ramfc->addr) >> 16) |
+ 0x00030000);
+ break;
+ }
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+
+ nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+ return 0;
+}
+
+struct nouveau_oclass
+nv40_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x40),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv40_fifo_ctor,
+ .dtor = nv04_fifo_dtor,
+ .init = nv40_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
new file mode 100644
index 0000000000000000000000000000000000000000..536e7634a00d26fdb4145479164653cd71961307
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+#include <core/class.h>
+#include <core/math.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
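+/* The runlist is double-buffered: rebuild the inactive copy from all
+ * channels whose 0x002600 context-table entry is marked enabled, then
+ * point PFIFO at it and kick a reschedule.
+ */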
+void
+nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+{
+ struct nouveau_bar *bar = nouveau_bar(priv);
+ struct nouveau_gpuobj *cur;
+ int i, p;
+
+ cur = priv->playlist[priv->cur_playlist];
+ priv->cur_playlist = !priv->cur_playlist;
+
+ for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
+ if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
+ nv_wo32(cur, p++ * 4, i);
+ }
+
+ bar->flush(bar);
+
+ nv_wr32(priv, 0x0032f4, cur->addr >> 12);
+ nv_wr32(priv, 0x0032ec, p);
+ nv_wr32(priv, 0x002500, 0x00000101);
+}
+
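+/* Engine state descriptors live in the per-channel 'eng' object; the
+ * slot offset selects the engine (0x00 GR, 0x60 MPEG) and holds the
+ * context's flags, limit and start addresses for PFIFO to load on
+ * channel switch.
+ */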
+static int
+nv50_fifo_context_attach(struct nouveau_object *parent,
+ struct nouveau_object *object)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_base *base = (void *)parent->parent;
+ struct nouveau_gpuobj *ectx = (void *)object;
+ u64 limit = ectx->addr + ectx->size - 1;
+ u64 start = ectx->addr;
+ u32 addr;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0000; break;
+ case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+ nv_wo32(base->eng, addr + 0x00, 0x00190000);
+ nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
+ nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
+ nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
+ upper_32_bits(start));
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
+ return 0;
+}
+
+static int
+nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+ struct nouveau_object *object)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_priv *priv = (void *)parent->engine;
+ struct nv50_fifo_base *base = (void *)parent->parent;
+ struct nv50_fifo_chan *chan = (void *)parent;
+ u32 addr, me;
+ int ret = 0;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0000; break;
+ case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
+
+ /* HW bug workaround:
+ *
+ * PFIFO will hang forever if the connected engines don't report
+ * that they've processed the context switch request.
+ *
+ * In order for the kickoff to work, we need to ensure all the
+ * connected engines are in a state where they can answer.
+ *
+ * Newer chipsets don't seem to suffer from this issue, and there's
+ * also an "ignore these engines" bitmask reg we can use if we hit
+ * the issue there.
+ */
+ me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
+
+ /* do the kickoff... */
+ nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
+ if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+ if (suspend)
+ ret = -EBUSY;
+ }
+
+ nv_wr32(priv, 0x00b860, me);
+ return ret;
+}
+
+static int
+nv50_fifo_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 handle)
+{
+ struct nv50_fifo_chan *chan = (void *)parent;
+ u32 context;
+
+ if (nv_iclass(object, NV_GPUOBJ_CLASS))
+ context = nv_gpuobj(object)->node->offset >> 4;
+ else
+ context = 0x00000004; /* just non-zero */
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_DMAOBJ:
+ case NVDEV_ENGINE_SW : context |= 0x00000000; break;
+ case NVDEV_ENGINE_GR : context |= 0x00100000; break;
+ case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
+ default:
+ return -EINVAL;
+ }
+
+ return nouveau_ramht_insert(chan->ramht, 0, handle, context);
+}
+
+void
+nv50_fifo_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_fifo_chan *chan = (void *)parent;
+ nouveau_ramht_remove(chan->ramht, cookie);
+}
+
+static int
+nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_base *base = (void *)parent;
+ struct nv50_fifo_chan *chan;
+ struct nv03_channel_dma_class *args = data;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x2000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_MPEG), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->context_attach = nv50_fifo_context_attach;
+ nv_parent(chan)->context_detach = nv50_fifo_context_detach;
+ nv_parent(chan)->object_attach = nv50_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+ ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+ if (ret)
+ return ret;
+
+ nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x3c, 0x003f6078);
+ nv_wo32(base->ramfc, 0x44, 0x01003fff);
+ nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+ nv_wo32(base->ramfc, 0x4c, 0xffffffff);
+ nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+ nv_wo32(base->ramfc, 0x78, 0x00000000);
+ nv_wo32(base->ramfc, 0x7c, 0x30000001);
+ nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->base.node->offset >> 4));
+ bar->flush(bar);
+ return 0;
+}
+
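+/* "IND" (GPFIFO) channels replace the single DMA pushbuf with a ring
+ * of indirect-buffer entries: ioffset is the ring's base address and
+ * ilength is stored as log2 of the entry count (8 bytes per entry).
+ */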
+static int
+nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_channel_ind_class *args = data;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_base *base = (void *)parent;
+ struct nv50_fifo_chan *chan;
+ u64 ioffset, ilength;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x2000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_MPEG), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->context_attach = nv50_fifo_context_attach;
+ nv_parent(chan)->context_detach = nv50_fifo_context_detach;
+ nv_parent(chan)->object_attach = nv50_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+ ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+ if (ret)
+ return ret;
+
+ ioffset = args->ioffset;
+ ilength = log2i(args->ilength / 8);
+
+ nv_wo32(base->ramfc, 0x3c, 0x403f6078);
+ nv_wo32(base->ramfc, 0x44, 0x01003fff);
+ nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+ nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
+ nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+ nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+ nv_wo32(base->ramfc, 0x78, 0x00000000);
+ nv_wo32(base->ramfc, 0x7c, 0x30000001);
+ nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->base.node->offset >> 4));
+ bar->flush(bar);
+ return 0;
+}
+
+void
+nv50_fifo_chan_dtor(struct nouveau_object *object)
+{
+ struct nv50_fifo_chan *chan = (void *)object;
+ nouveau_ramht_ref(NULL, &chan->ramht);
+ nouveau_fifo_channel_destroy(&chan->base);
+}
+
+static int
+nv50_fifo_chan_init(struct nouveau_object *object)
+{
+ struct nv50_fifo_priv *priv = (void *)object->engine;
+ struct nv50_fifo_base *base = (void *)object->parent;
+ struct nv50_fifo_chan *chan = (void *)object;
+ struct nouveau_gpuobj *ramfc = base->ramfc;
+ u32 chid = chan->base.chid;
+ int ret;
+
+ ret = nouveau_fifo_channel_init(&chan->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
+ nv50_fifo_playlist_update(priv);
+ return 0;
+}
+
+int
+nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_fifo_priv *priv = (void *)object->engine;
+ struct nv50_fifo_chan *chan = (void *)object;
+ u32 chid = chan->base.chid;
+
+ /* remove channel from playlist, fifo will unload context */
+ nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
+ nv50_fifo_playlist_update(priv);
+ nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
+
+ return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nv50_fifo_ofuncs_dma = {
+ .ctor = nv50_fifo_chan_ctor_dma,
+ .dtor = nv50_fifo_chan_dtor,
+ .init = nv50_fifo_chan_init,
+ .fini = nv50_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_ofuncs
+nv50_fifo_ofuncs_ind = {
+ .ctor = nv50_fifo_chan_ctor_ind,
+ .dtor = nv50_fifo_chan_dtor,
+ .init = nv50_fifo_chan_init,
+ .fini = nv50_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv50_fifo_sclass[] = {
+ { NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma },
+ { NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static int
+nv50_fifo_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_fifo_base *base;
+ int ret;
+
+ ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
+ 0x1000, NVOBJ_FLAG_HEAP, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1200, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 0,
+ &base->pgd);
+ if (ret)
+ return ret;
+
+ ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+nv50_fifo_context_dtor(struct nouveau_object *object)
+{
+ struct nv50_fifo_base *base = (void *)object;
+ nouveau_vm_ref(NULL, &base->vm, base->pgd);
+ nouveau_gpuobj_ref(NULL, &base->pgd);
+ nouveau_gpuobj_ref(NULL, &base->eng);
+ nouveau_gpuobj_ref(NULL, &base->ramfc);
+ nouveau_gpuobj_ref(NULL, &base->cache);
+ nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nv50_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fifo_context_ctor,
+ .dtor = nv50_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[0]);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[1]);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv50_fifo_cclass;
+ nv_engine(priv)->sclass = nv50_fifo_sclass;
+ return 0;
+}
+
+void
+nv50_fifo_dtor(struct nouveau_object *object)
+{
+ struct nv50_fifo_priv *priv = (void *)object;
+
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+ nouveau_fifo_destroy(&priv->base);
+}
+
+int
+nv50_fifo_init(struct nouveau_object *object)
+{
+ struct nv50_fifo_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(priv, 0x00250c, 0x6f3cfc34);
+ nv_wr32(priv, 0x002044, 0x01003fff);
+
+ nv_wr32(priv, 0x002100, 0xffffffff);
+ nv_wr32(priv, 0x002140, 0xffffffff);
+
+ for (i = 0; i < 128; i++)
+ nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
+ nv50_fifo_playlist_update(priv);
+
+ nv_wr32(priv, 0x003200, 0x00000001);
+ nv_wr32(priv, 0x003250, 0x00000001);
+ nv_wr32(priv, 0x002500, 0x00000001);
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fifo_ctor,
+ .dtor = nv50_fifo_dtor,
+ .init = nv50_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a9ceb315c20fbebebfa3308e97ae33a6a78ac66
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
@@ -0,0 +1,36 @@
+#ifndef __NV50_FIFO_H__
+#define __NV50_FIFO_H__
+
+struct nv50_fifo_priv {
+ struct nouveau_fifo base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nv50_fifo_base {
+ struct nouveau_fifo_base base;
+ struct nouveau_gpuobj *ramfc;
+ struct nouveau_gpuobj *cache;
+ struct nouveau_gpuobj *eng;
+ struct nouveau_gpuobj *pgd;
+ struct nouveau_vm *vm;
+};
+
+struct nv50_fifo_chan {
+ struct nouveau_fifo_chan base;
+ u32 subc[8];
+ struct nouveau_ramht *ramht;
+};
+
+void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
+
+void nv50_fifo_object_detach(struct nouveau_object *, int);
+void nv50_fifo_chan_dtor(struct nouveau_object *);
+int nv50_fifo_chan_fini(struct nouveau_object *, bool);
+
+void nv50_fifo_context_dtor(struct nouveau_object *);
+
+void nv50_fifo_dtor(struct nouveau_object *);
+int nv50_fifo_init(struct nouveau_object *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
new file mode 100644
index 0000000000000000000000000000000000000000..b4fd26d8f166926849c10c4ad9251f2e868dff4f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/client.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+#include <core/class.h>
+#include <core/math.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
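+/* Point the channel's per-engine context slot (in base->eng) at the engine
+ * context's backing object.  The per-engine slot offsets and the 0x00190000
+ * flags word are reverse-engineered; each entry spans [start, limit] of the
+ * context.
+ */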
+static int
+nv84_fifo_context_attach(struct nouveau_object *parent,
+ struct nouveau_object *object)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_base *base = (void *)parent->parent;
+ struct nouveau_gpuobj *ectx = (void *)object;
+ u64 limit = ectx->addr + ectx->size - 1;
+ u64 start = ectx->addr;
+ u32 addr;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0020; break;
+ case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+ case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
+ case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+ nv_wo32(base->eng, addr + 0x00, 0x00190000);
+ nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
+ nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
+ nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
+ upper_32_bits(start));
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
+ return 0;
+}
+
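+/* Clear the engine slot, then kick the channel through 0x0032fc so any
+ * context the hardware still holds is unloaded.  An unload timeout only
+ * fails the operation on suspend, where the state must not be lost.
+ */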
+static int
+nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+ struct nouveau_object *object)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_priv *priv = (void *)parent->engine;
+ struct nv50_fifo_base *base = (void *)parent->parent;
+ struct nv50_fifo_chan *chan = (void *)parent;
+ u32 addr, save, engn;
+ bool done;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break;
+ case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
+ case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break;
+ case NVDEV_ENGINE_COPY0: engn = 2; addr = 0x00c0; break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
+
+ save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
+ nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
+ done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
+ nv_wr32(priv, 0x002520, save);
+ if (!done) {
+ nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+ if (suspend)
+ return -EBUSY;
+ }
+ return 0;
+}
+
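+/* Objects become visible to the channel via RAMHT.  The context value
+ * packs the object's instance offset with an engine routing tag in bits
+ * 20-22, matching the engine the object belongs to.
+ */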
+static int
+nv84_fifo_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 handle)
+{
+ struct nv50_fifo_chan *chan = (void *)parent;
+ u32 context;
+
+ if (nv_iclass(object, NV_GPUOBJ_CLASS))
+ context = nv_gpuobj(object)->node->offset >> 4;
+ else
+ context = 0x00000004; /* just non-zero */
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_DMAOBJ:
+ case NVDEV_ENGINE_SW : context |= 0x00000000; break;
+ case NVDEV_ENGINE_GR : context |= 0x00100000; break;
+ case NVDEV_ENGINE_MPEG :
+ case NVDEV_ENGINE_PPP : context |= 0x00200000; break;
+ case NVDEV_ENGINE_ME :
+ case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
+ case NVDEV_ENGINE_VP : context |= 0x00400000; break;
+ case NVDEV_ENGINE_CRYPT :
+ case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break;
+ case NVDEV_ENGINE_BSP : context |= 0x00600000; break;
+ default:
+ return -EINVAL;
+ }
+
+ return nouveau_ramht_insert(chan->ramht, 0, handle, context);
+}
+
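+/* DMA channel: allocate a per-channel hash table, then seed RAMFC with
+ * initial state - what appear to be PUT/GET at the requested offset, the
+ * pushbuf object, RAMHT size/location, and the channel's instance address.
+ * The remaining literal values are reverse-engineered defaults.
+ */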
+static int
+nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_base *base = (void *)parent;
+ struct nv50_fifo_chan *chan;
+ struct nv03_channel_dma_class *args = data;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x2000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_MPEG) |
+ (1 << NVDEV_ENGINE_ME) |
+ (1 << NVDEV_ENGINE_VP) |
+ (1 << NVDEV_ENGINE_CRYPT) |
+ (1 << NVDEV_ENGINE_BSP) |
+ (1 << NVDEV_ENGINE_PPP) |
+ (1 << NVDEV_ENGINE_COPY0) |
+ (1 << NVDEV_ENGINE_UNK1C1), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->context_attach = nv84_fifo_context_attach;
+ nv_parent(chan)->context_detach = nv84_fifo_context_detach;
+ nv_parent(chan)->object_attach = nv84_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+ nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x3c, 0x003f6078);
+ nv_wo32(base->ramfc, 0x44, 0x01003fff);
+ nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+ nv_wo32(base->ramfc, 0x4c, 0xffffffff);
+ nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+ nv_wo32(base->ramfc, 0x78, 0x00000000);
+ nv_wo32(base->ramfc, 0x7c, 0x30000001);
+ nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->base.node->offset >> 4));
+ nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
+ nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
+ bar->flush(bar);
+ return 0;
+}
+
+static int
+nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_base *base = (void *)parent;
+ struct nv50_fifo_chan *chan;
+ struct nv50_channel_ind_class *args = data;
+ u64 ioffset, ilength;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x2000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_MPEG) |
+ (1 << NVDEV_ENGINE_ME) |
+ (1 << NVDEV_ENGINE_VP) |
+ (1 << NVDEV_ENGINE_CRYPT) |
+ (1 << NVDEV_ENGINE_BSP) |
+ (1 << NVDEV_ENGINE_PPP) |
+ (1 << NVDEV_ENGINE_COPY0) |
+ (1 << NVDEV_ENGINE_UNK1C1), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->context_attach = nv84_fifo_context_attach;
+ nv_parent(chan)->context_detach = nv84_fifo_context_detach;
+ nv_parent(chan)->object_attach = nv84_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+ ioffset = args->ioffset;
+ ilength = log2i(args->ilength / 8);
+
+ nv_wo32(base->ramfc, 0x3c, 0x403f6078);
+ nv_wo32(base->ramfc, 0x44, 0x01003fff);
+ nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+ nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
+ nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+ nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+ nv_wo32(base->ramfc, 0x78, 0x00000000);
+ nv_wo32(base->ramfc, 0x7c, 0x30000001);
+ nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->base.node->offset >> 4));
+ nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
+ nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
+ bar->flush(bar);
+ return 0;
+}
+
+static int
+nv84_fifo_chan_init(struct nouveau_object *object)
+{
+ struct nv50_fifo_priv *priv = (void *)object->engine;
+ struct nv50_fifo_base *base = (void *)object->parent;
+ struct nv50_fifo_chan *chan = (void *)object;
+ struct nouveau_gpuobj *ramfc = base->ramfc;
+ u32 chid = chan->base.chid;
+ int ret;
+
+ ret = nouveau_fifo_channel_init(&chan->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
+ nv50_fifo_playlist_update(priv);
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nv84_fifo_ofuncs_dma = {
+ .ctor = nv84_fifo_chan_ctor_dma,
+ .dtor = nv50_fifo_chan_dtor,
+ .init = nv84_fifo_chan_init,
+ .fini = nv50_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_ofuncs
+nv84_fifo_ofuncs_ind = {
+ .ctor = nv84_fifo_chan_ctor_ind,
+ .dtor = nv50_fifo_chan_dtor,
+ .init = nv84_fifo_chan_init,
+ .fini = nv50_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv84_fifo_sclass[] = {
+ { NV84_CHANNEL_DMA_CLASS, &nv84_fifo_ofuncs_dma },
+ { NV84_CHANNEL_IND_CLASS, &nv84_fifo_ofuncs_ind },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static int
+nv84_fifo_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_fifo_base *base;
+ int ret;
+
+ ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
+ 0x1000, NVOBJ_FLAG_HEAP, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0,
+ 0, &base->pgd);
+ if (ret)
+ return ret;
+
+ ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1000, 0x400,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0100, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct nouveau_oclass
+nv84_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_fifo_context_ctor,
+ .dtor = nv50_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[0]);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[1]);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv84_fifo_cclass;
+ nv_engine(priv)->sclass = nv84_fifo_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nv84_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_fifo_ctor,
+ .dtor = nv50_fifo_dtor,
+ .init = nv50_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
new file mode 100644
index 0000000000000000000000000000000000000000..6f21be60055788bdb6ea90c81e76d47326dfb9f2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -0,0 +1,647 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engctx.h>
+#include <core/class.h>
+#include <core/math.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+struct nvc0_fifo_priv {
+ struct nouveau_fifo base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+ struct {
+ struct nouveau_gpuobj *mem;
+ struct nouveau_vma bar;
+ } user;
+ int spoon_nr;
+};
+
+struct nvc0_fifo_base {
+ struct nouveau_fifo_base base;
+ struct nouveau_gpuobj *pgd;
+ struct nouveau_vm *vm;
+};
+
+struct nvc0_fifo_chan {
+ struct nouveau_fifo_chan base;
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
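+/* Rebuild the playlist: scan all 128 channel control words for the enable
+ * bit, write each active channel ID into the other of two ping-ponged
+ * playlist buffers, then point PFIFO at it and wait for the update to be
+ * consumed.
+ */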
+static void
+nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
+{
+ struct nouveau_bar *bar = nouveau_bar(priv);
+ struct nouveau_gpuobj *cur;
+ int i, p;
+
+ cur = priv->playlist[priv->cur_playlist];
+ priv->cur_playlist = !priv->cur_playlist;
+
+ for (i = 0, p = 0; i < 128; i++) {
+ if (!(nv_rd32(priv, 0x003004 + (i * 8)) & 1))
+ continue;
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000004);
+ p += 8;
+ }
+ bar->flush(bar);
+
+ nv_wr32(priv, 0x002270, cur->addr >> 12);
+ nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
+ if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
+ nv_error(priv, "playlist update failed\n");
+}
+
+static int
+nvc0_fifo_context_attach(struct nouveau_object *parent,
+ struct nouveau_object *object)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvc0_fifo_base *base = (void *)parent->parent;
+ struct nouveau_engctx *ectx = (void *)object;
+ u32 addr;
+ int ret;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0210; break;
+ case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
+ case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!ectx->vma.node) {
+ ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+ NV_MEM_ACCESS_RW, &ectx->vma);
+ if (ret)
+ return ret;
+
+ nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+ }
+
+ nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+ nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+ bar->flush(bar);
+ return 0;
+}
+
+static int
+nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+ struct nouveau_object *object)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvc0_fifo_priv *priv = (void *)parent->engine;
+ struct nvc0_fifo_base *base = (void *)parent->parent;
+ struct nvc0_fifo_chan *chan = (void *)parent;
+ u32 addr;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0210; break;
+ case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
+ case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
+
+ nv_wr32(priv, 0x002634, chan->base.chid);
+ if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+ nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+ if (suspend)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
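+/* Each channel owns one 4KiB page of the shared USER area (indexed by
+ * channel ID), and its instance block is written directly with the
+ * indirect buffer address/size plus reverse-engineered initial state.
+ */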
+static int
+nvc0_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvc0_fifo_priv *priv = (void *)engine;
+ struct nvc0_fifo_base *base = (void *)parent;
+ struct nvc0_fifo_chan *chan;
+ struct nv50_channel_ind_class *args = data;
+ u64 usermem, ioffset, ilength;
+ int ret, i;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
+ priv->user.bar.offset, 0x1000,
+ args->pushbuf,
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_COPY0) |
+ (1 << NVDEV_ENGINE_COPY1), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
+ nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
+
+ usermem = chan->base.chid * 0x1000;
+ ioffset = args->ioffset;
+ ilength = log2i(args->ilength / 8);
+
+ for (i = 0; i < 0x1000; i += 4)
+ nv_wo32(priv->user.mem, usermem + i, 0x00000000);
+
+ nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
+ nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
+ nv_wo32(base, 0x10, 0x0000face);
+ nv_wo32(base, 0x30, 0xfffff902);
+ nv_wo32(base, 0x48, lower_32_bits(ioffset));
+ nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+ nv_wo32(base, 0x54, 0x00000002);
+ nv_wo32(base, 0x84, 0x20400000);
+ nv_wo32(base, 0x94, 0x30000001);
+ nv_wo32(base, 0x9c, 0x00000100);
+ nv_wo32(base, 0xa4, 0x1f1f1f1f);
+ nv_wo32(base, 0xa8, 0x1f1f1f1f);
+ nv_wo32(base, 0xac, 0x0000001f);
+ nv_wo32(base, 0xb8, 0xf8000000);
+ nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+ bar->flush(bar);
+ return 0;
+}
+
+static int
+nvc0_fifo_chan_init(struct nouveau_object *object)
+{
+ struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
+ struct nvc0_fifo_priv *priv = (void *)object->engine;
+ struct nvc0_fifo_chan *chan = (void *)object;
+ u32 chid = chan->base.chid;
+ int ret;
+
+ ret = nouveau_fifo_channel_init(&chan->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
+ nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
+ nvc0_fifo_playlist_update(priv);
+ return 0;
+}
+
+static int
+nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvc0_fifo_priv *priv = (void *)object->engine;
+ struct nvc0_fifo_chan *chan = (void *)object;
+ u32 chid = chan->base.chid;
+
+ nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
+ nvc0_fifo_playlist_update(priv);
+ nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
+
+ return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nvc0_fifo_ofuncs = {
+ .ctor = nvc0_fifo_chan_ctor,
+ .dtor = _nouveau_fifo_channel_dtor,
+ .init = nvc0_fifo_chan_init,
+ .fini = nvc0_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nvc0_fifo_sclass[] = {
+ { NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - instmem heap and vm setup
+ ******************************************************************************/
+
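+/* The channel instance block also anchors the channel's page tables: the
+ * PGD address and what appears to be a 40-bit address limit are written
+ * at 0x0200 before binding the client's VM to the new page directory.
+ */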
+static int
+nvc0_fifo_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_fifo_base *base;
+ int ret;
+
+ ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_HEAP, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+ if (ret)
+ return ret;
+
+ nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
+ nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
+ nv_wo32(base, 0x0208, 0xffffffff);
+ nv_wo32(base, 0x020c, 0x000000ff);
+
+ ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+nvc0_fifo_context_dtor(struct nouveau_object *object)
+{
+ struct nvc0_fifo_base *base = (void *)object;
+ nouveau_vm_ref(NULL, &base->vm, base->pgd);
+ nouveau_gpuobj_ref(NULL, &base->pgd);
+ nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nvc0_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_fifo_context_ctor,
+ .dtor = nvc0_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
+ { 0x00, "PGRAPH" },
+ { 0x03, "PEEPHOLE" },
+ { 0x04, "BAR1" },
+ { 0x05, "BAR3" },
+ { 0x07, "PFIFO" },
+ { 0x10, "PBSP" },
+ { 0x11, "PPPP" },
+ { 0x13, "PCOUNTER" },
+ { 0x14, "PVP" },
+ { 0x15, "PCOPY0" },
+ { 0x16, "PCOPY1" },
+ { 0x17, "PDAEMON" },
+ {}
+};
+
+static const struct nouveau_enum nvc0_fifo_fault_reason[] = {
+ { 0x00, "PT_NOT_PRESENT" },
+ { 0x01, "PT_TOO_SHORT" },
+ { 0x02, "PAGE_NOT_PRESENT" },
+ { 0x03, "VM_LIMIT_EXCEEDED" },
+ { 0x04, "NO_CHANNEL" },
+ { 0x05, "PAGE_SYSTEM_ONLY" },
+ { 0x06, "PAGE_READ_ONLY" },
+ { 0x0a, "COMPRESSED_SYSRAM" },
+ { 0x0c, "INVALID_STORAGE_TYPE" },
+ {}
+};
+
+static const struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
+ { 0x01, "PCOPY0" },
+ { 0x02, "PCOPY1" },
+ { 0x04, "DISPATCH" },
+ { 0x05, "CTXCTL" },
+ { 0x06, "PFIFO" },
+ { 0x07, "BAR_READ" },
+ { 0x08, "BAR_WRITE" },
+ { 0x0b, "PVP" },
+ { 0x0c, "PPPP" },
+ { 0x0d, "PBSP" },
+ { 0x11, "PCOUNTER" },
+ { 0x12, "PDAEMON" },
+ { 0x14, "CCACHE" },
+ { 0x15, "CCACHE_POST" },
+ {}
+};
+
+static const struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
+ { 0x01, "TEX" },
+ { 0x0c, "ESETUP" },
+ { 0x0e, "CTXCTL" },
+ { 0x0f, "PROP" },
+ {}
+};
+
+static const struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
+/* { 0x00008000, "" } seen with null ib push */
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
+static void
+nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
+{
+ u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
+ u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
+ u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
+ u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+ u32 client = (stat & 0x00001f00) >> 8;
+
+ switch (unit) {
+ case 3: /* PEEPHOLE */
+ nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
+ break;
+ case 4: /* BAR1 */
+ nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+ break;
+ case 5: /* BAR3 */
+ nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+ break;
+ default:
+ break;
+ }
+
+ nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
+ "write" : "read", (u64)vahi << 32 | valo);
+ nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
+ printk("] from ");
+ nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+ if (stat & 0x00000040) {
+ printk("/");
+ nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
+ } else {
+ printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
+ }
+ printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
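+/* Attempt to execute a method in software: look up the software object
+ * (class 0x906e) bound to the channel and forward the method to it.
+ * Returns -EINVAL if there's no such channel or object, so the caller
+ * can report the interrupt instead.
+ */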
+static int
+nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
+{
+ struct nvc0_fifo_chan *chan = NULL;
+ struct nouveau_handle *bind;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ if (likely(chid >= priv->base.min && chid <= priv->base.max))
+ chan = (void *)priv->base.channel[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+ if (likely(bind)) {
+ if (!mthd || !nv_call(bind->object, mthd, data))
+ ret = 0;
+ nouveau_namedb_put(bind);
+ }
+
+out:
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return ret;
+}
+
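+/* Decode a SUBFIFO fault: ILLEGAL_MTHD on method 0x0054 is retried as
+ * software method 0x0500, and EMPTY_SUBC methods are offered to the
+ * software object; whatever can't be handled that way gets logged.
+ */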
+static void
+nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
+{
+ u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+ u32 show = stat;
+
+ if (stat & 0x00200000) {
+ if (mthd == 0x0054) {
+ if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
+ show &= ~0x00200000;
+ }
+ }
+
+ if (stat & 0x00800000) {
+ if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
+ show &= ~0x00800000;
+ }
+
+ if (show) {
+ nv_error(priv, "SUBFIFO%d:", unit);
+ nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
+ printk("\n");
+ nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
+ "data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+ }
+
+ nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+}
+
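+/* Top-level PFIFO interrupt handler: dispatch VM faults (bit 28) and
+ * SUBFIFO interrupts (bit 29) unit by unit from their status registers,
+ * and mask off anything unrecognised so it can't storm.
+ */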
+static void
+nvc0_fifo_intr(struct nouveau_subdev *subdev)
+{
+ struct nvc0_fifo_priv *priv = (void *)subdev;
+ u32 mask = nv_rd32(priv, 0x002140);
+ u32 stat = nv_rd32(priv, 0x002100) & mask;
+
+ if (stat & 0x00000100) {
+ nv_info(priv, "unknown status 0x00000100\n");
+ nv_wr32(priv, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x10000000) {
+ u32 units = nv_rd32(priv, 0x00259c);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nvc0_fifo_isr_vm_fault(priv, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(priv, 0x00259c, units);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 units = nv_rd32(priv, 0x0025a0);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nvc0_fifo_isr_subfifo_intr(priv, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(priv, 0x0025a0, units);
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ nv_warn(priv, "unknown status 0x40000000\n");
+ nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+ stat &= ~0x40000000;
+ }
+
+ if (stat) {
+ nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+ nv_wr32(priv, 0x002100, stat);
+ nv_wr32(priv, 0x002140, 0);
+ }
+}
+
+static int
+nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 127, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+ &priv->playlist[0]);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+ &priv->playlist[1]);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 128 * 0x1000, 0x1000, 0,
+ &priv->user.mem);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+ &priv->user.bar);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nvc0_fifo_intr;
+ nv_engine(priv)->cclass = &nvc0_fifo_cclass;
+ nv_engine(priv)->sclass = nvc0_fifo_sclass;
+ return 0;
+}
+
+static void
+nvc0_fifo_dtor(struct nouveau_object *object)
+{
+ struct nvc0_fifo_priv *priv = (void *)object;
+
+ nouveau_gpuobj_unmap(&priv->user.bar);
+ nouveau_gpuobj_ref(NULL, &priv->user.mem);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+ nouveau_fifo_destroy(&priv->base);
+}
+
+static int
+nvc0_fifo_init(struct nouveau_object *object)
+{
+ struct nvc0_fifo_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
+
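+	/* enable all available PSUBFIFOs */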
+ nv_wr32(priv, 0x000204, 0xffffffff);
+ nv_wr32(priv, 0x002204, 0xffffffff);
+
+ priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
+ nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
+
+ /* assign engines to subfifos */
+ if (priv->spoon_nr >= 3) {
+ nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
+ nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
+ nv_wr32(priv, 0x002210, ~(1 << 1)); /* PPP */
+ nv_wr32(priv, 0x002214, ~(1 << 1)); /* PBSP */
+ nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
+ nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
+ }
+
+ /* PSUBFIFO[n] */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ }
+
+ nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
+ nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+ nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
+ nv_wr32(priv, 0x002100, 0xffffffff);
+ nv_wr32(priv, 0x002140, 0xbfffffff);
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_fifo_ctor,
+ .dtor = nvc0_fifo_dtor,
+ .init = nvc0_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
new file mode 100644
index 0000000000000000000000000000000000000000..36e81b6fafbce032183f8a4f23b0a7b44f033115
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -0,0 +1,628 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engctx.h>
+#include <core/class.h>
+#include <core/math.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
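+/* Engines that can be scheduled here, one runlist each; an entry's mask is
+ * the set of engines a channel on that runlist may use (the engine itself,
+ * plus SW alongside GR).
+ */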
+#define _(a,b) { (a), ((1 << (a)) | (b)) }
+static const struct {
+ int subdev;
+ u32 mask;
+} fifo_engine[] = {
+ _(NVDEV_ENGINE_GR , (1 << NVDEV_ENGINE_SW)),
+ _(NVDEV_ENGINE_VP , 0),
+ _(NVDEV_ENGINE_PPP , 0),
+ _(NVDEV_ENGINE_BSP , 0),
+ _(NVDEV_ENGINE_COPY0 , 0),
+ _(NVDEV_ENGINE_COPY1 , 0),
+ _(NVDEV_ENGINE_VENC , 0),
+};
+#undef _
+#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
+
+struct nve0_fifo_engn {
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nve0_fifo_priv {
+ struct nouveau_fifo base;
+ struct nve0_fifo_engn engine[FIFO_ENGINE_NR];
+ struct {
+ struct nouveau_gpuobj *mem;
+ struct nouveau_vma bar;
+ } user;
+ int spoon_nr;
+};
+
+struct nve0_fifo_base {
+ struct nouveau_fifo_base base;
+ struct nouveau_gpuobj *pgd;
+ struct nouveau_vm *vm;
+};
+
+struct nve0_fifo_chan {
+ struct nouveau_fifo_chan base;
+ u32 engine;
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
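+/* Unlike NVC0, NVE0 keeps a playlist per engine: channels are matched by
+ * the engine field in their control word, and each engine's pair of
+ * playlist buffers is allocated lazily on first update.
+ */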
+static void
+nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
+{
+ struct nouveau_bar *bar = nouveau_bar(priv);
+ struct nve0_fifo_engn *engn = &priv->engine[engine];
+ struct nouveau_gpuobj *cur;
+ u32 match = (engine << 16) | 0x00000001;
+ int i, p;
+
+ cur = engn->playlist[engn->cur_playlist];
+ if (unlikely(cur == NULL)) {
+ int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL,
+ 0x8000, 0x1000, 0, &cur);
+ if (ret) {
+ nv_error(priv, "playlist alloc failed\n");
+ return;
+ }
+
+ engn->playlist[engn->cur_playlist] = cur;
+ }
+
+ engn->cur_playlist = !engn->cur_playlist;
+
+ for (i = 0, p = 0; i < priv->base.max; i++) {
+ u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
+ if (ctrl != match)
+ continue;
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000000);
+ p += 8;
+ }
+ bar->flush(bar);
+
+ nv_wr32(priv, 0x002270, cur->addr >> 12);
+ nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
+ if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+ nv_error(priv, "playlist %d update timeout\n", engine);
+}
+
+static int
+nve0_fifo_context_attach(struct nouveau_object *parent,
+ struct nouveau_object *object)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nve0_fifo_base *base = (void *)parent->parent;
+ struct nouveau_engctx *ectx = (void *)object;
+ u32 addr;
+ int ret;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR :
+ case NVDEV_ENGINE_COPY0:
+ case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!ectx->vma.node) {
+ ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+ NV_MEM_ACCESS_RW, &ectx->vma);
+ if (ret)
+ return ret;
+
+ nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+ }
+
+ nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+ nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+ bar->flush(bar);
+ return 0;
+}
+
+static int
+nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+ struct nouveau_object *object)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nve0_fifo_priv *priv = (void *)parent->engine;
+ struct nve0_fifo_base *base = (void *)parent->parent;
+ struct nve0_fifo_chan *chan = (void *)parent;
+ u32 addr;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR :
+ case NVDEV_ENGINE_COPY0:
+ case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
+
+ nv_wr32(priv, 0x002634, chan->base.chid);
+ if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+ nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+ if (suspend)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
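+/* A channel binds to a single engine's runlist at creation: the first
+ * engine requested in args->engine that is actually present wins, and
+ * args->engine is rewritten to report the selection back.
+ */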
+static int
+nve0_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nve0_fifo_priv *priv = (void *)engine;
+ struct nve0_fifo_base *base = (void *)parent;
+ struct nve0_fifo_chan *chan;
+ struct nve0_channel_ind_class *args = data;
+ u64 usermem, ioffset, ilength;
+ int ret, i;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ for (i = 0; i < FIFO_ENGINE_NR; i++) {
+ if (args->engine & (1 << i)) {
+ if (nouveau_engine(parent, fifo_engine[i].subdev)) {
+ args->engine = (1 << i);
+ break;
+ }
+ }
+ }
+
+ if (i == FIFO_ENGINE_NR)
+ return -ENODEV;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
+ priv->user.bar.offset, 0x200,
+ args->pushbuf,
+ fifo_engine[i].mask, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->context_attach = nve0_fifo_context_attach;
+ nv_parent(chan)->context_detach = nve0_fifo_context_detach;
+ chan->engine = i;
+
+ usermem = chan->base.chid * 0x200;
+ ioffset = args->ioffset;
+ ilength = log2i(args->ilength / 8);
+
+ for (i = 0; i < 0x200; i += 4)
+ nv_wo32(priv->user.mem, usermem + i, 0x00000000);
+
+ nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
+ nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
+ nv_wo32(base, 0x10, 0x0000face);
+ nv_wo32(base, 0x30, 0xfffff902);
+ nv_wo32(base, 0x48, lower_32_bits(ioffset));
+ nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+ nv_wo32(base, 0x84, 0x20400000);
+ nv_wo32(base, 0x94, 0x30000001);
+ nv_wo32(base, 0x9c, 0x00000100);
+ nv_wo32(base, 0xac, 0x0000001f);
+ nv_wo32(base, 0xe8, chan->base.chid);
+ nv_wo32(base, 0xb8, 0xf8000000);
+ nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+ bar->flush(bar);
+ return 0;
+}
+
+static int
+nve0_fifo_chan_init(struct nouveau_object *object)
+{
+ struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
+ struct nve0_fifo_priv *priv = (void *)object->engine;
+ struct nve0_fifo_chan *chan = (void *)object;
+ u32 chid = chan->base.chid;
+ int ret;
+
+ ret = nouveau_fifo_channel_init(&chan->base);
+ if (ret)
+ return ret;
+
+ nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
+ nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
+ nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+ nve0_fifo_playlist_update(priv, chan->engine);
+ nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+ return 0;
+}
+
+static int
+nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nve0_fifo_priv *priv = (void *)object->engine;
+ struct nve0_fifo_chan *chan = (void *)object;
+ u32 chid = chan->base.chid;
+
+ nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
+ nve0_fifo_playlist_update(priv, chan->engine);
+ nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
+
+ return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nve0_fifo_ofuncs = {
+ .ctor = nve0_fifo_chan_ctor,
+ .dtor = _nouveau_fifo_channel_dtor,
+ .init = nve0_fifo_chan_init,
+ .fini = nve0_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nve0_fifo_sclass[] = {
+ { NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - instmem heap and vm setup
+ ******************************************************************************/
+
+static int
+nve0_fifo_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_fifo_base *base;
+ int ret;
+
+ ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+ if (ret)
+ return ret;
+
+ nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
+ nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
+ nv_wo32(base, 0x0208, 0xffffffff);
+ nv_wo32(base, 0x020c, 0x000000ff);
+
+ ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+nve0_fifo_context_dtor(struct nouveau_object *object)
+{
+ struct nve0_fifo_base *base = (void *)object;
+ nouveau_vm_ref(NULL, &base->vm, base->pgd);
+ nouveau_gpuobj_ref(NULL, &base->pgd);
+ nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nve0_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_fifo_context_ctor,
+ .dtor = nve0_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static const struct nouveau_enum nve0_fifo_fault_unit[] = {
+ {}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_reason[] = {
+ { 0x00, "PT_NOT_PRESENT" },
+ { 0x01, "PT_TOO_SHORT" },
+ { 0x02, "PAGE_NOT_PRESENT" },
+ { 0x03, "VM_LIMIT_EXCEEDED" },
+ { 0x04, "NO_CHANNEL" },
+ { 0x05, "PAGE_SYSTEM_ONLY" },
+ { 0x06, "PAGE_READ_ONLY" },
+ { 0x0a, "COMPRESSED_SYSRAM" },
+ { 0x0c, "INVALID_STORAGE_TYPE" },
+ {}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+ {}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+ {}
+};
+
+static const struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
+static void
+nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
+{
+ u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
+ u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
+ u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
+ u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
+ u32 client = (stat & 0x00001f00) >> 8;
+
+ nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
+ "write" : "read", (u64)vahi << 32 | valo);
+ nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
+ printk("] from ");
+ nouveau_enum_print(nve0_fifo_fault_unit, unit);
+ if (stat & 0x00000040) {
+ printk("/");
+ nouveau_enum_print(nve0_fifo_fault_hubclient, client);
+ } else {
+ printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
+ }
+ printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static int
+nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
+{
+ struct nve0_fifo_chan *chan = NULL;
+ struct nouveau_handle *bind;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ if (likely(chid >= priv->base.min && chid <= priv->base.max))
+ chan = (void *)priv->base.channel[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+ if (likely(bind)) {
+ if (!mthd || !nv_call(bind->object, mthd, data))
+ ret = 0;
+ nouveau_namedb_put(bind);
+ }
+
+out:
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return ret;
+}
+
+static void
+nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
+{
+ u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+ u32 show = stat;
+
+ if (stat & 0x00200000) {
+ if (mthd == 0x0054) {
+ if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
+ show &= ~0x00200000;
+ }
+ }
+
+ if (stat & 0x00800000) {
+ if (!nve0_fifo_swmthd(priv, chid, mthd, data))
+ show &= ~0x00800000;
+ }
+
+ if (show) {
+ nv_error(priv, "SUBFIFO%d:", unit);
+ nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+ printk("\n");
+ nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
+ "data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+ }
+
+ nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nve0_fifo_intr(struct nouveau_subdev *subdev)
+{
+ struct nve0_fifo_priv *priv = (void *)subdev;
+ u32 mask = nv_rd32(priv, 0x002140);
+ u32 stat = nv_rd32(priv, 0x002100) & mask;
+
+ if (stat & 0x00000100) {
+ nv_warn(priv, "unknown status 0x00000100\n");
+ nv_wr32(priv, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x10000000) {
+ u32 units = nv_rd32(priv, 0x00259c);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nve0_fifo_isr_vm_fault(priv, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(priv, 0x00259c, units);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 units = nv_rd32(priv, 0x0025a0);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nve0_fifo_isr_subfifo_intr(priv, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(priv, 0x0025a0, units);
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ nv_warn(priv, "unknown status 0x40000000\n");
+ nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+ stat &= ~0x40000000;
+ }
+
+ if (stat) {
+ nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+ nv_wr32(priv, 0x002100, stat);
+ nv_wr32(priv, 0x002140, 0);
+ }
+}
+
+static int
+nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+ &priv->user.bar);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nve0_fifo_intr;
+ nv_engine(priv)->cclass = &nve0_fifo_cclass;
+ nv_engine(priv)->sclass = nve0_fifo_sclass;
+ return 0;
+}
+
+static void
+nve0_fifo_dtor(struct nouveau_object *object)
+{
+ struct nve0_fifo_priv *priv = (void *)object;
+ int i;
+
+ nouveau_gpuobj_unmap(&priv->user.bar);
+ nouveau_gpuobj_ref(NULL, &priv->user.mem);
+
+ for (i = 0; i < ARRAY_SIZE(priv->engine); i++) {
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+ }
+
+ nouveau_fifo_destroy(&priv->base);
+}
+
+static int
+nve0_fifo_init(struct nouveau_object *object)
+{
+ struct nve0_fifo_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* enable all available PSUBFIFOs */
+ nv_wr32(priv, 0x000204, 0xffffffff);
+ priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
+ nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
+
+ /* PSUBFIFO[n] */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ }
+
+ nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+ nv_wr32(priv, 0x002a00, 0xffffffff);
+ nv_wr32(priv, 0x002100, 0xffffffff);
+ nv_wr32(priv, 0x002140, 0xbfffffff);
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_fifo_ctor,
+ .dtor = nve0_fifo_dtor,
+ .init = nve0_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
similarity index 84%
rename from drivers/gpu/drm/nouveau/nouveau_grctx.h
rename to drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
index b0795ececbdacc5dc58a79e167772149081a371f..e1947013d3bce0b3c1ba84e8f23df328fae9c61d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
@@ -2,7 +2,7 @@
#define __NOUVEAU_GRCTX_H__
struct nouveau_grctx {
- struct drm_device *dev;
+ struct nouveau_device *device;
enum {
NOUVEAU_GRCTX_PROG,
@@ -10,18 +10,18 @@ struct nouveau_grctx {
} mode;
void *data;
- uint32_t ctxprog_max;
- uint32_t ctxprog_len;
- uint32_t ctxprog_reg;
- int ctxprog_label[32];
- uint32_t ctxvals_pos;
- uint32_t ctxvals_base;
+ u32 ctxprog_max;
+ u32 ctxprog_len;
+ u32 ctxprog_reg;
+ int ctxprog_label[32];
+ u32 ctxvals_pos;
+ u32 ctxvals_base;
};
static inline void
-cp_out(struct nouveau_grctx *ctx, uint32_t inst)
+cp_out(struct nouveau_grctx *ctx, u32 inst)
{
- uint32_t *ctxprog = ctx->data;
+ u32 *ctxprog = ctx->data;
if (ctx->mode != NOUVEAU_GRCTX_PROG)
return;
@@ -31,13 +31,13 @@ cp_out(struct nouveau_grctx *ctx, uint32_t inst)
}
static inline void
-cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
+cp_lsr(struct nouveau_grctx *ctx, u32 val)
{
cp_out(ctx, CP_LOAD_SR | val);
}
static inline void
-cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
+cp_ctx(struct nouveau_grctx *ctx, u32 reg, u32 length)
{
ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
@@ -55,7 +55,7 @@ cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
static inline void
cp_name(struct nouveau_grctx *ctx, int name)
{
- uint32_t *ctxprog = ctx->data;
+ u32 *ctxprog = ctx->data;
int i;
if (ctx->mode != NOUVEAU_GRCTX_PROG)
@@ -115,7 +115,7 @@ cp_pos(struct nouveau_grctx *ctx, int offset)
}
static inline void
-gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
+gr_def(struct nouveau_grctx *ctx, u32 reg, u32 val)
{
if (ctx->mode != NOUVEAU_GRCTX_VALS)
return;
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
similarity index 85%
rename from drivers/gpu/drm/nouveau/nv40_grctx.c
rename to drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
index cf115ad4dad18cb88a6df0773a34ae6e641474e3..e45035efb8ca084c37785ffd4a0ca789523d4a2b 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
@@ -22,6 +22,8 @@
* Authors: Ben Skeggs
*/
+#include <core/gpuobj.h>
+
/* NVIDIA context programs handle a number of other conditions which are
* not implemented in our versions. It's not clear why NVIDIA context
* programs have this code, nor whether it's strictly necessary for
@@ -109,20 +111,18 @@
#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_grctx.h"
+#include "nv40.h"
+#include "ctx.h"
/* TODO:
* - get vs count from 0x1540
*/
static int
-nv40_graph_vs_count(struct drm_device *dev)
+nv40_graph_vs_count(struct nouveau_device *device)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@@ -160,7 +160,7 @@ enum cp_label {
static void
nv40_graph_construct_general(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
cp_ctx(ctx, 0x4000a4, 1);
@@ -187,7 +187,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x400724, 1);
gr_def(ctx, 0x400724, 0x02008821);
cp_ctx(ctx, 0x400770, 3);
- if (dev_priv->chipset == 0x40) {
+ if (device->chipset == 0x40) {
cp_ctx(ctx, 0x400814, 4);
cp_ctx(ctx, 0x400828, 5);
cp_ctx(ctx, 0x400840, 5);
@@ -208,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x4009dc, 0x80000000);
} else {
cp_ctx(ctx, 0x400840, 20);
- if (nv44_graph_class(ctx->dev)) {
+ if (nv44_graph_class(ctx->device)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
}
@@ -217,21 +217,21 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400888, 0x00000040);
cp_ctx(ctx, 0x400894, 11);
gr_def(ctx, 0x400894, 0x00000040);
- if (!nv44_graph_class(ctx->dev)) {
+ if (!nv44_graph_class(ctx->device)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
}
cp_ctx(ctx, 0x4008e0, 2);
cp_ctx(ctx, 0x4008f8, 2);
- if (dev_priv->chipset == 0x4c ||
- (dev_priv->chipset & 0xf0) == 0x60)
+ if (device->chipset == 0x4c ||
+ (device->chipset & 0xf0) == 0x60)
cp_ctx(ctx, 0x4009f8, 1);
}
cp_ctx(ctx, 0x400a00, 73);
gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
cp_ctx(ctx, 0x401000, 4);
cp_ctx(ctx, 0x405004, 1);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@@ -240,7 +240,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
break;
default:
cp_ctx(ctx, 0x403440, 1);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x40:
gr_def(ctx, 0x403440, 0x00000010);
break;
@@ -266,19 +266,19 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
- if (dev_priv->chipset == 0x40) {
+ if (device->chipset == 0x40) {
cp_ctx(ctx, 0x401880, 51);
gr_def(ctx, 0x401940, 0x00000100);
} else
- if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
- dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
+ if (device->chipset == 0x46 || device->chipset == 0x47 ||
+ device->chipset == 0x49 || device->chipset == 0x4b) {
cp_ctx(ctx, 0x401880, 32);
for (i = 0; i < 16; i++)
gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
- if (dev_priv->chipset == 0x46)
+ if (device->chipset == 0x46)
cp_ctx(ctx, 0x401900, 16);
cp_ctx(ctx, 0x401940, 3);
}
@@ -289,7 +289,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
gr_def(ctx, 0x401978, 0xffff0000);
gr_def(ctx, 0x40197c, 0x00000001);
gr_def(ctx, 0x401990, 0x46400000);
- if (dev_priv->chipset == 0x40) {
+ if (device->chipset == 0x40) {
cp_ctx(ctx, 0x4019a0, 2);
cp_ctx(ctx, 0x4019ac, 5);
} else {
@@ -297,7 +297,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x4019b4, 3);
}
gr_def(ctx, 0x4019bc, 0xffff0000);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x46:
case 0x47:
case 0x49:
@@ -316,7 +316,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
for (i = 0; i < 16; i++)
gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
gr_def(ctx, 0x401a8c, 0x4b7fffff);
- if (dev_priv->chipset == 0x40) {
+ if (device->chipset == 0x40) {
cp_ctx(ctx, 0x401ab8, 3);
} else {
cp_ctx(ctx, 0x401ab8, 1);
@@ -327,10 +327,10 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
gr_def(ctx, 0x401ad4, 0x70605040);
gr_def(ctx, 0x401ad8, 0xb8a89888);
gr_def(ctx, 0x401adc, 0xf8e8d8c8);
- cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
+ cp_ctx(ctx, 0x401b10, device->chipset == 0x40 ? 2 : 1);
gr_def(ctx, 0x401b10, 0x40100000);
- cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
- gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
+ cp_ctx(ctx, 0x401b18, device->chipset == 0x40 ? 6 : 5);
+ gr_def(ctx, 0x401b28, device->chipset == 0x40 ?
0x00000004 : 0x00000000);
cp_ctx(ctx, 0x401b30, 25);
gr_def(ctx, 0x401b34, 0x0000ffff);
@@ -341,8 +341,8 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
gr_def(ctx, 0x401b84, 0xffffffff);
gr_def(ctx, 0x401b88, 0x00ff7000);
gr_def(ctx, 0x401b8c, 0x0000ffff);
- if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
- dev_priv->chipset != 0x4e)
+ if (device->chipset != 0x44 && device->chipset != 0x4a &&
+ device->chipset != 0x4e)
cp_ctx(ctx, 0x401b94, 1);
cp_ctx(ctx, 0x401b98, 8);
gr_def(ctx, 0x401b9c, 0x00ff0000);
@@ -371,12 +371,12 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
cp_ctx(ctx, 0x402000, 1);
- cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
- switch (dev_priv->chipset) {
+ cp_ctx(ctx, 0x402404, device->chipset == 0x40 ? 1 : 2);
+ switch (device->chipset) {
case 0x40:
gr_def(ctx, 0x402404, 0x00000001);
break;
@@ -393,9 +393,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
default:
gr_def(ctx, 0x402404, 0x00000021);
}
- if (dev_priv->chipset != 0x40)
+ if (device->chipset != 0x40)
gr_def(ctx, 0x402408, 0x030c30c3);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x44:
case 0x46:
case 0x4a:
@@ -408,10 +408,10 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
default:
break;
}
- cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
+ cp_ctx(ctx, 0x402480, device->chipset == 0x40 ? 8 : 9);
gr_def(ctx, 0x402488, 0x3e020200);
gr_def(ctx, 0x40248c, 0x00ffffff);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x40:
gr_def(ctx, 0x402490, 0x60103f00);
break;
@@ -428,16 +428,16 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
gr_def(ctx, 0x402490, 0x0c103f00);
break;
}
- gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
+ gr_def(ctx, 0x40249c, device->chipset <= 0x43 ?
0x00020000 : 0x00040000);
cp_ctx(ctx, 0x402500, 31);
gr_def(ctx, 0x402530, 0x00008100);
- if (dev_priv->chipset == 0x40)
+ if (device->chipset == 0x40)
cp_ctx(ctx, 0x40257c, 6);
cp_ctx(ctx, 0x402594, 16);
cp_ctx(ctx, 0x402800, 17);
gr_def(ctx, 0x402800, 0x00000001);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@@ -445,7 +445,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
gr_def(ctx, 0x402864, 0x00001001);
cp_ctx(ctx, 0x402870, 3);
gr_def(ctx, 0x402878, 0x00000003);
- if (dev_priv->chipset != 0x47) { /* belong at end!! */
+ if (device->chipset != 0x47) { /* belong at end!! */
cp_ctx(ctx, 0x402900, 1);
cp_ctx(ctx, 0x402940, 1);
cp_ctx(ctx, 0x402980, 1);
@@ -470,9 +470,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
}
cp_ctx(ctx, 0x402c00, 4);
- gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
+ gr_def(ctx, 0x402c00, device->chipset == 0x40 ?
0x80800001 : 0x00888001);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@@ -485,30 +485,30 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
break;
default:
cp_ctx(ctx, 0x402c10, 4);
- if (dev_priv->chipset == 0x40)
+ if (device->chipset == 0x40)
cp_ctx(ctx, 0x402c20, 36);
else
- if (dev_priv->chipset <= 0x42)
+ if (device->chipset <= 0x42)
cp_ctx(ctx, 0x402c20, 24);
else
- if (dev_priv->chipset <= 0x4a)
+ if (device->chipset <= 0x4a)
cp_ctx(ctx, 0x402c20, 16);
else
cp_ctx(ctx, 0x402c20, 8);
- cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
+ cp_ctx(ctx, 0x402cb0, device->chipset == 0x40 ? 12 : 13);
gr_def(ctx, 0x402cd4, 0x00000005);
- if (dev_priv->chipset != 0x40)
+ if (device->chipset != 0x40)
gr_def(ctx, 0x402ce0, 0x0000ffff);
break;
}
- cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
- cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
- cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
- for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
+ cp_ctx(ctx, 0x403400, device->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403410, device->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->device));
+ for (i = 0; i < nv40_graph_vs_count(ctx->device); i++)
gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
- if (dev_priv->chipset != 0x40) {
+ if (device->chipset != 0x40) {
cp_ctx(ctx, 0x403600, 1);
gr_def(ctx, 0x403600, 0x00000001);
}
@@ -516,7 +516,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x403c18, 1);
gr_def(ctx, 0x403c18, 0x00000001);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x46:
case 0x47:
case 0x49:
@@ -527,7 +527,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
gr_def(ctx, 0x405c24, 0x000e3000);
break;
}
- if (dev_priv->chipset != 0x4e)
+ if (device->chipset != 0x4e)
cp_ctx(ctx, 0x405800, 11);
cp_ctx(ctx, 0x407000, 1);
}
@@ -535,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
{
- int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
+ int len = nv44_graph_class(ctx->device) ? 0x0084 : 0x0684;
cp_out (ctx, 0x300000);
cp_lsr (ctx, len - 4);
@@ -550,32 +550,31 @@ nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_shader(struct nouveau_grctx *ctx)
{
- struct drm_device *dev = ctx->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_device *device = ctx->device;
struct nouveau_gpuobj *obj = ctx->data;
int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
int offset, i;
- vs_nr = nv40_graph_vs_count(ctx->dev);
+ vs_nr = nv40_graph_vs_count(ctx->device);
vs_nr_b0 = 363;
- vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
- if (dev_priv->chipset == 0x40) {
+ vs_nr_b1 = device->chipset == 0x40 ? 128 : 64;
+ if (device->chipset == 0x40) {
b0_offset = 0x2200/4; /* 33a0 */
b1_offset = 0x55a0/4; /* 1500 */
vs_len = 0x6aa0/4;
} else
- if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
+ if (device->chipset == 0x41 || device->chipset == 0x42) {
b0_offset = 0x2200/4; /* 2200 */
b1_offset = 0x4400/4; /* 0b00 */
vs_len = 0x4f00/4;
} else {
b0_offset = 0x1d40/4; /* 2200 */
b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
- vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
+ vs_len = nv44_graph_class(device) ? 0x4980/4 : 0x4a40/4;
}
cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
- cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
+ cp_out(ctx, nv44_graph_class(device) ? 0x800029 : 0x800041);
offset = ctx->ctxvals_pos;
ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
@@ -661,21 +660,21 @@ nv40_grctx_generate(struct nouveau_grctx *ctx)
}
void
-nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
{
nv40_grctx_generate(&(struct nouveau_grctx) {
- .dev = dev,
+ .device = device,
.mode = NOUVEAU_GRCTX_VALS,
.data = mem,
});
}
void
-nv40_grctx_init(struct drm_device *dev, u32 *size)
+nv40_grctx_init(struct nouveau_device *device, u32 *size)
{
u32 ctxprog[256], i;
struct nouveau_grctx ctx = {
- .dev = dev,
+ .device = device,
.mode = NOUVEAU_GRCTX_PROG,
.data = ctxprog,
.ctxprog_max = ARRAY_SIZE(ctxprog)
@@ -683,8 +682,8 @@ nv40_grctx_init(struct drm_device *dev, u32 *size)
nv40_grctx_generate(&ctx);
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ nv_wr32(device, 0x400324, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
+ nv_wr32(device, 0x400328, ctxprog[i]);
*size = ctx.ctxvals_pos * 4;
}
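
Aside, not part of the patch: after this change the nv40 context builder is
driven by struct nouveau_device rather than struct drm_device, and
nv40_grctx_init() uploads the generated ctxprog itself through the PGRAPH
ucode ports (index 0x400324, data 0x400328) instead of handing the program
back to the caller. A hedged caller sketch -- "device" and "chan_ctx" are
illustrative names, not identifiers taken from this patch:

	u32 size;

	nv40_grctx_init(device, &size);    /* builds and uploads the ctxprog;
					    * reports ctxvals size in bytes */
	/* allocate a gpuobj of at least "size" bytes as chan_ctx, then: */
	nv40_grctx_fill(device, chan_ctx); /* second pass: default values */
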
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
similarity index 91%
rename from drivers/gpu/drm/nouveau/nv50_grctx.c
rename to drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
index 3bb96a029d6661d83cba05a4fd6212b246029534..552fdbd45ebe9e389c9e017d793959a6333a69e4 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
@@ -20,6 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include
+
#define CP_FLAG_CLEAR 0
#define CP_FLAG_SET 1
#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
@@ -105,9 +107,8 @@
#define CP_SEEK_1 0x00c000ff
#define CP_SEEK_2 0x00c800ff
-#include
-#include "nouveau_drv.h"
-#include "nouveau_grctx.h"
+#include "nv50.h"
+#include "ctx.h"
#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
@@ -175,32 +176,6 @@ static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
static int
nv50_grctx_generate(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x84:
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0x98:
- case 0xa0:
- case 0xa3:
- case 0xa5:
- case 0xa8:
- case 0xaa:
- case 0xac:
- case 0xaf:
- break;
- default:
- NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
- "your NV%x card.\n", dev_priv->chipset);
- NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
- "the devs.\n");
- return -ENOSYS;
- }
-
cp_set (ctx, STATE, RUNNING);
cp_set (ctx, XFER_SWITCH, ENABLE);
/* decide whether we're loading/unloading the context */
@@ -278,30 +253,36 @@ nv50_grctx_generate(struct nouveau_grctx *ctx)
}
void
-nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+nv50_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
{
nv50_grctx_generate(&(struct nouveau_grctx) {
- .dev = dev,
+ .device = device,
.mode = NOUVEAU_GRCTX_VALS,
.data = mem,
});
}
int
-nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt)
+nv50_grctx_init(struct nouveau_device *device, u32 *size)
{
+ u32 *ctxprog = kmalloc(512 * 4, GFP_KERNEL), i;
struct nouveau_grctx ctx = {
- .dev = dev,
+ .device = device,
.mode = NOUVEAU_GRCTX_PROG,
- .data = data,
- .ctxprog_max = max
+ .data = ctxprog,
+ .ctxprog_max = 512,
};
- int ret;
- ret = nv50_grctx_generate(&ctx);
- *cnt = ctx.ctxvals_pos * 4;
- *len = ctx.ctxprog_len;
- return ret;
+ if (!ctxprog)
+ return -ENOMEM;
+ nv50_grctx_generate(&ctx);
+
+ nv_wr32(device, 0x400324, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(device, 0x400328, ctxprog[i]);
+ *size = ctx.ctxvals_pos * 4;
+ kfree(ctxprog);
+ return 0;
}
/*
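
A similar aside for nv50, not part of the patch: nv50_grctx_init() now owns
both the scratch buffer and the ucode upload. The chipset whitelist that used
to sit at the top of nv50_grctx_generate() is deleted above, and the
generator's return status is no longer propagated; the only error the new
init can report is -ENOMEM from the 512-dword kmalloc(). A hedged sketch of
the new calling convention, again with "device" and "chan_ctx" as
illustrative names:

	u32 size;
	int ret = nv50_grctx_init(device, &size); /* generate + upload ctxprog */
	if (ret)
		return ret;                       /* only -ENOMEM at this point */
	/* allocate the per-channel context image of "size" bytes, then: */
	nv50_grctx_fill(device, chan_ctx);        /* write default ctxvals */
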
@@ -315,36 +296,36 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
static void
nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i, j;
int offset, base;
- uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+ u32 units = nv_rd32 (ctx->device, 0x1540);
/* 0800: DISPATCH */
cp_ctx(ctx, 0x400808, 7);
gr_def(ctx, 0x400814, 0x00000030);
cp_ctx(ctx, 0x400834, 0x32);
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
gr_def(ctx, 0x400834, 0xff400040);
gr_def(ctx, 0x400838, 0xfff00080);
gr_def(ctx, 0x40083c, 0xfff70090);
gr_def(ctx, 0x400840, 0xffe806a8);
}
gr_def(ctx, 0x400844, 0x00000002);
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
gr_def(ctx, 0x400894, 0x00001000);
gr_def(ctx, 0x4008e8, 0x00000003);
gr_def(ctx, 0x4008ec, 0x00001000);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
cp_ctx(ctx, 0x400908, 0xb);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
cp_ctx(ctx, 0x400908, 0xc);
else
cp_ctx(ctx, 0x400908, 0xe);
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
cp_ctx(ctx, 0x400b00, 0x1);
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
cp_ctx(ctx, 0x400b10, 0x1);
gr_def(ctx, 0x400b10, 0x0001629d);
cp_ctx(ctx, 0x400b20, 0x1);
@@ -358,10 +339,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400c08, 0x0000fe0c);
/* 1000 */
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
cp_ctx(ctx, 0x401008, 0x4);
gr_def(ctx, 0x401014, 0x00001000);
- } else if (!IS_NVA3F(dev_priv->chipset)) {
+ } else if (!IS_NVA3F(device->chipset)) {
cp_ctx(ctx, 0x401008, 0x5);
gr_def(ctx, 0x401018, 0x00001000);
} else {
@@ -372,7 +353,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 1400 */
cp_ctx(ctx, 0x401400, 0x8);
cp_ctx(ctx, 0x401424, 0x3);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, 0x40142c, 0x0001fd87);
else
gr_def(ctx, 0x40142c, 0x00000187);
@@ -382,10 +363,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 1800: STREAMOUT */
cp_ctx(ctx, 0x401814, 0x1);
gr_def(ctx, 0x401814, 0x000000ff);
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
cp_ctx(ctx, 0x40181c, 0xe);
gr_def(ctx, 0x401850, 0x00000004);
- } else if (dev_priv->chipset < 0xa0) {
+ } else if (device->chipset < 0xa0) {
cp_ctx(ctx, 0x40181c, 0xf);
gr_def(ctx, 0x401854, 0x00000004);
} else {
@@ -395,7 +376,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 1C00 */
cp_ctx(ctx, 0x401c00, 0x1);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
gr_def(ctx, 0x401c00, 0x0001005f);
break;
@@ -424,7 +405,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 2400 */
cp_ctx(ctx, 0x402400, 0x1);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
cp_ctx(ctx, 0x402408, 0x1);
else
cp_ctx(ctx, 0x402408, 0x2);
@@ -432,21 +413,21 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 2800: CSCHED */
cp_ctx(ctx, 0x402800, 0x1);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, 0x402800, 0x00000006);
/* 2C00: ZCULL */
cp_ctx(ctx, 0x402c08, 0x6);
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
gr_def(ctx, 0x402c14, 0x01000000);
gr_def(ctx, 0x402c18, 0x000000ff);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
cp_ctx(ctx, 0x402ca0, 0x1);
else
cp_ctx(ctx, 0x402ca0, 0x2);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
gr_def(ctx, 0x402ca0, 0x00000400);
- else if (!IS_NVA3F(dev_priv->chipset))
+ else if (!IS_NVA3F(device->chipset))
gr_def(ctx, 0x402ca0, 0x00000800);
else
gr_def(ctx, 0x402ca0, 0x00000400);
@@ -457,14 +438,14 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, 0x403004, 0x00000001);
/* 3400 */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
cp_ctx(ctx, 0x403404, 0x1);
gr_def(ctx, 0x403404, 0x00000001);
}
/* 5000: CCACHE */
cp_ctx(ctx, 0x405000, 0x1);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
gr_def(ctx, 0x405000, 0x00300080);
break;
@@ -493,22 +474,22 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x40502c, 0x1);
/* 6000? */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
cp_ctx(ctx, 0x4063e0, 0x1);
/* 6800: M2MF */
- if (dev_priv->chipset < 0x90) {
+ if (device->chipset < 0x90) {
cp_ctx(ctx, 0x406814, 0x2b);
gr_def(ctx, 0x406818, 0x00000f80);
gr_def(ctx, 0x406860, 0x007f0080);
gr_def(ctx, 0x40689c, 0x007f0080);
} else {
cp_ctx(ctx, 0x406814, 0x4);
- if (dev_priv->chipset == 0x98)
+ if (device->chipset == 0x98)
gr_def(ctx, 0x406818, 0x00000f80);
else
gr_def(ctx, 0x406818, 0x00001f80);
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
gr_def(ctx, 0x40681c, 0x00000030);
cp_ctx(ctx, 0x406830, 0x3);
}
@@ -517,43 +498,43 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
for (i = 0; i < 8; i++) {
if (units & (1<<(i+16))) {
cp_ctx(ctx, 0x407000 + (i<<8), 3);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
- else if (dev_priv->chipset != 0xa5)
+ else if (device->chipset != 0xa5)
gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
else
gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
cp_ctx(ctx, 0x407010 + (i<<8), 1);
- } else if (dev_priv->chipset < 0xa0) {
+ } else if (device->chipset < 0xa0) {
cp_ctx(ctx, 0x407010 + (i<<8), 2);
gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
} else {
cp_ctx(ctx, 0x407010 + (i<<8), 3);
gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
- if (dev_priv->chipset != 0xa5)
+ if (device->chipset != 0xa5)
gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
else
gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
}
cp_ctx(ctx, 0x407080 + (i<<8), 4);
- if (dev_priv->chipset != 0xa5)
+ if (device->chipset != 0xa5)
gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
else
gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
else
gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
cp_ctx(ctx, 0x407094 + (i<<8), 1);
- else if (!IS_NVA3F(dev_priv->chipset))
+ else if (!IS_NVA3F(device->chipset))
cp_ctx(ctx, 0x407094 + (i<<8), 3);
else {
cp_ctx(ctx, 0x407094 + (i<<8), 4);
@@ -563,30 +544,30 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
}
cp_ctx(ctx, 0x407c00, 0x3);
- if (dev_priv->chipset < 0x90)
+ if (device->chipset < 0x90)
gr_def(ctx, 0x407c00, 0x00010040);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
gr_def(ctx, 0x407c00, 0x00390040);
else
gr_def(ctx, 0x407c00, 0x003d0040);
gr_def(ctx, 0x407c08, 0x00000022);
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
cp_ctx(ctx, 0x407c10, 0x3);
cp_ctx(ctx, 0x407c20, 0x1);
cp_ctx(ctx, 0x407c2c, 0x1);
}
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
cp_ctx(ctx, 0x407d00, 0x9);
} else {
cp_ctx(ctx, 0x407d00, 0x15);
}
- if (dev_priv->chipset == 0x98)
+ if (device->chipset == 0x98)
gr_def(ctx, 0x407d08, 0x00380040);
else {
- if (dev_priv->chipset < 0x90)
+ if (device->chipset < 0x90)
gr_def(ctx, 0x407d08, 0x00010040);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
gr_def(ctx, 0x407d08, 0x00390040);
else
gr_def(ctx, 0x407d08, 0x003d0040);
@@ -596,11 +577,11 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 8000+: per-TP state */
for (i = 0; i < 10; i++) {
if (units & (1<<i)) {
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
base = 0x408000 + (i<<12);
else
base = 0x408000 + (i<<11);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
offset = base + 0xc00;
else
offset = base + 0x80;
@@ -609,9 +590,9 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, offset + 0x08, 1);
/* per-MP state */
- for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
+ for (j = 0; j < (device->chipset < 0xa0 ? 2 : 4); j++) {
if (!(units & (1 << (j+24)))) continue;
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
offset = base + 0x200 + (j<<7);
else
offset = base + 0x100 + (j<<7);
@@ -620,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, offset + 0x04, 0x00160000);
gr_def(ctx, offset + 0x08, 0x01800000);
gr_def(ctx, offset + 0x18, 0x0003ffff);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
gr_def(ctx, offset + 0x1c, 0x00080000);
break;
@@ -651,53 +632,53 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
break;
}
gr_def(ctx, offset + 0x40, 0x00010401);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, offset + 0x48, 0x00000040);
else
gr_def(ctx, offset + 0x48, 0x00000078);
gr_def(ctx, offset + 0x50, 0x000000bf);
gr_def(ctx, offset + 0x58, 0x00001210);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, offset + 0x5c, 0x00000080);
else
gr_def(ctx, offset + 0x5c, 0x08000080);
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
gr_def(ctx, offset + 0x68, 0x0000003e);
}
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
cp_ctx(ctx, base + 0x300, 0x4);
else
cp_ctx(ctx, base + 0x300, 0x5);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, base + 0x304, 0x00007070);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
gr_def(ctx, base + 0x304, 0x00027070);
- else if (!IS_NVA3F(dev_priv->chipset))
+ else if (!IS_NVA3F(device->chipset))
gr_def(ctx, base + 0x304, 0x01127070);
else
gr_def(ctx, base + 0x304, 0x05127070);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
cp_ctx(ctx, base + 0x318, 1);
else
cp_ctx(ctx, base + 0x320, 1);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, base + 0x318, 0x0003ffff);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
gr_def(ctx, base + 0x318, 0x03ffffff);
else
gr_def(ctx, base + 0x320, 0x07ffffff);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
cp_ctx(ctx, base + 0x324, 5);
else
cp_ctx(ctx, base + 0x328, 4);
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
cp_ctx(ctx, base + 0x340, 9);
offset = base + 0x340;
- } else if (!IS_NVA3F(dev_priv->chipset)) {
+ } else if (!IS_NVA3F(device->chipset)) {
cp_ctx(ctx, base + 0x33c, 0xb);
offset = base + 0x344;
} else {
@@ -706,12 +687,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
}
gr_def(ctx, offset + 0x0, 0x00120407);
gr_def(ctx, offset + 0x4, 0x05091507);
- if (dev_priv->chipset == 0x84)
+ if (device->chipset == 0x84)
gr_def(ctx, offset + 0x8, 0x05100202);
else
gr_def(ctx, offset + 0x8, 0x05010202);
gr_def(ctx, offset + 0xc, 0x00030201);
- if (dev_priv->chipset == 0xa3)
+ if (device->chipset == 0xa3)
cp_ctx(ctx, base + 0x36c, 1);
cp_ctx(ctx, base + 0x400, 2);
@@ -720,7 +701,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
gr_def(ctx, base + 0x410, 0x00141210);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
offset = base + 0x800;
else
offset = base + 0x500;
@@ -728,55 +709,55 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, offset + 0x0, 0x000001f0);
gr_def(ctx, offset + 0x4, 0x00000001);
gr_def(ctx, offset + 0x8, 0x00000003);
- if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset))
+ if (device->chipset == 0x50 || IS_NVAAF(device->chipset))
gr_def(ctx, offset + 0xc, 0x00008000);
gr_def(ctx, offset + 0x14, 0x00039e00);
cp_ctx(ctx, offset + 0x1c, 2);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, offset + 0x1c, 0x00000040);
else
gr_def(ctx, offset + 0x1c, 0x00000100);
gr_def(ctx, offset + 0x20, 0x00003800);
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
cp_ctx(ctx, base + 0x54c, 2);
- if (!IS_NVA3F(dev_priv->chipset))
+ if (!IS_NVA3F(device->chipset))
gr_def(ctx, base + 0x54c, 0x003fe006);
else
gr_def(ctx, base + 0x54c, 0x003fe007);
gr_def(ctx, base + 0x550, 0x003fe000);
}
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
offset = base + 0xa00;
else
offset = base + 0x680;
cp_ctx(ctx, offset, 1);
gr_def(ctx, offset, 0x00404040);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
offset = base + 0xe00;
else
offset = base + 0x700;
cp_ctx(ctx, offset, 2);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
gr_def(ctx, offset, 0x0077f005);
- else if (dev_priv->chipset == 0xa5)
+ else if (device->chipset == 0xa5)
gr_def(ctx, offset, 0x6cf7f007);
- else if (dev_priv->chipset == 0xa8)
+ else if (device->chipset == 0xa8)
gr_def(ctx, offset, 0x6cfff007);
- else if (dev_priv->chipset == 0xac)
+ else if (device->chipset == 0xac)
gr_def(ctx, offset, 0x0cfff007);
else
gr_def(ctx, offset, 0x0cf7f007);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, offset + 0x4, 0x00007fff);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
gr_def(ctx, offset + 0x4, 0x003f7fff);
else
gr_def(ctx, offset + 0x4, 0x02bf7fff);
cp_ctx(ctx, offset + 0x2c, 1);
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
cp_ctx(ctx, offset + 0x50, 9);
gr_def(ctx, offset + 0x54, 0x000003ff);
gr_def(ctx, offset + 0x58, 0x00000003);
@@ -785,7 +766,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, offset + 0x64, 0x0000001f);
gr_def(ctx, offset + 0x68, 0x0000000f);
gr_def(ctx, offset + 0x6c, 0x0000000f);
- } else if (dev_priv->chipset < 0xa0) {
+ } else if (device->chipset < 0xa0) {
cp_ctx(ctx, offset + 0x50, 1);
cp_ctx(ctx, offset + 0x70, 1);
} else {
@@ -797,7 +778,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
}
static void
-dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+dd_emit(struct nouveau_grctx *ctx, int num, u32 val) {
int i;
if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
for (i = 0; i < num; i++)
@@ -808,7 +789,7 @@ dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
static void
nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int base, num;
base = ctx->ctxvals_pos;
@@ -822,7 +803,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */
dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */
dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */
- if (dev_priv->chipset >= 0x94)
+ if (device->chipset >= 0x94)
dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */
dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */
dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */
@@ -851,7 +832,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */
dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */
dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */
dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */
dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */
@@ -863,7 +844,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */
/* compat 2d state */
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */
dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */
@@ -923,7 +904,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */
/* more compat 2d state */
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */
dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */
@@ -957,18 +938,18 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */
dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */
dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
dd_emit(ctx, 1, 0); /* ffffffff */
dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
} else {
dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
}
dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */
dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */
dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */
dd_emit(ctx, 1, 0); /* 00000001 */
} else {
@@ -994,7 +975,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */
dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
dd_emit(ctx, 3, 0); /* 1, 1, 1 */
else
dd_emit(ctx, 2, 0); /* 1, 1 */
@@ -1002,15 +983,15 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT*/
dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
dd_emit(ctx, 1, 3); /* 00000003 */
dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */
}
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */
dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */
dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */
dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
@@ -1022,16 +1003,16 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
dd_emit(ctx, 1, 0); /* ffffffff */
dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */
dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE*/
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
dd_emit(ctx, 8, 0); /* 00000001 */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */
dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */
dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */
@@ -1042,20 +1023,20 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */
dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
dd_emit(ctx, 1, 0); /* 00000001 */
dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
dd_emit(ctx, 1, 0); /* 00000003 */
dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */
dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */
dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */
dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */
dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
dd_emit(ctx, 1, 0xe00); /* 7fff */
dd_emit(ctx, 1, 0x1000); /* 7fff */
dd_emit(ctx, 1, 0x1e00); /* 7fff */
@@ -1070,10 +1051,10 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
dd_emit(ctx, 1, 0x200);
dd_emit(ctx, 1, 0); /* 00000001 */
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
dd_emit(ctx, 1, 1); /* 00000001 */
dd_emit(ctx, 1, 0x70); /* 000000ff */
dd_emit(ctx, 1, 0x80); /* 000000ff */
@@ -1120,7 +1101,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
num = ctx->ctxvals_pos - base;
ctx->ctxvals_pos = base;
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
cp_ctx(ctx, 0x404800, num);
else
cp_ctx(ctx, 0x405400, num);
@@ -1169,7 +1150,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
*/
static void
-xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+xf_emit(struct nouveau_grctx *ctx, int num, u32 val) {
int i;
if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
for (i = 0; i < num; i++)
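
One more aside: xf_emit() and dd_emit() are the workhorses of every
construct_* function in this file. In NOUVEAU_GRCTX_VALS mode they store num
copies of val into the context image at the running ctxvals_pos cursor
(zero values are skipped because the gpuobj starts out zeroed); in
NOUVEAU_GRCTX_PROG mode only the cursor advances, since the actual loads are
done by the ctxprog at channel-switch time. The hunk above shows only the
helper's head, so this reconstruction of the body is inferred from the
surrounding code rather than quoted from the patch:

	static void
	xf_emit(struct nouveau_grctx *ctx, int num, u32 val)
	{
		int i;
		/* VALS mode: write default values into the context image */
		if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
			for (i = 0; i < num; i++)
				nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
		/* PROG mode: only the resulting position matters */
		ctx->ctxvals_pos += num;
	}
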
@@ -1201,16 +1182,16 @@ static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
static void
nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
int offset;
int size = 0;
- uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+ u32 units = nv_rd32 (ctx->device, 0x1540);
offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
ctx->ctxvals_base = offset;
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
/* Strand 0 */
ctx->ctxvals_pos = offset;
nv50_graph_construct_gene_dispatch(ctx);
@@ -1280,7 +1261,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
/* Strand 2 */
ctx->ctxvals_pos = offset + 2;
- if (dev_priv->chipset == 0xa0)
+ if (device->chipset == 0xa0)
nv50_graph_construct_gene_unk14xx(ctx);
nv50_graph_construct_gene_unk24xx(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
@@ -1327,7 +1308,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
/* Strand 7 */
ctx->ctxvals_pos = offset + 7;
- if (dev_priv->chipset == 0xa0) {
+ if (device->chipset == 0xa0) {
if (units & (1 << 4))
nv50_graph_construct_xfer_tp(ctx);
if (units & (1 << 5))
@@ -1365,24 +1346,24 @@ static void
nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
{
/* start of strand 0 */
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* SEEK */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 5, 0);
- else if (!IS_NVA3F(dev_priv->chipset))
+ else if (!IS_NVA3F(device->chipset))
xf_emit(ctx, 6, 0);
else
xf_emit(ctx, 4, 0);
/* SEEK */
/* the PGRAPH's internal FIFO */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 8*3, 0);
else
xf_emit(ctx, 0x100*3, 0);
/* and another bonus slot?!? */
xf_emit(ctx, 3, 0);
/* and YET ANOTHER bonus slot? */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 3, 0);
/* SEEK */
/* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
@@ -1394,7 +1375,7 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
/* SEEK */
xf_emit(ctx, 9, 0);
/* SEEK */
- if (dev_priv->chipset < 0x90)
+ if (device->chipset < 0x90)
xf_emit(ctx, 4, 0);
/* SEEK */
xf_emit(ctx, 2, 0);
@@ -1407,9 +1388,9 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
xf_emit(ctx, 6*2, 0);
xf_emit(ctx, 2, 0);
/* SEEK */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 0x1c, 0);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
xf_emit(ctx, 0x1e, 0);
else
xf_emit(ctx, 0x22, 0);
@@ -1421,9 +1402,9 @@ static void
nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
{
/* Strand 0, right after dispatch */
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int smallm2mf = 0;
- if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98)
+ if (device->chipset < 0x92 || device->chipset == 0x98)
smallm2mf = 1;
/* SEEK */
xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
@@ -1472,10 +1453,10 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 2, 0); /* RO */
xf_emit(ctx, 0x800, 0); /* ffffffff */
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
case 0x92:
case 0xa0:
@@ -1540,7 +1521,7 @@ nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
/* end of area 2 on pre-NVA0, area 1 on NVAx */
xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
@@ -1550,14 +1531,14 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0x3ff);
else
xf_emit(ctx, 1, 0x7ff); /* 000007ff */
xf_emit(ctx, 1, 0); /* 111/113 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
for (i = 0; i < 8; i++) {
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
case 0x86:
case 0x98:
@@ -1600,7 +1581,7 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* end of area 2 on pre-NVA0, area 1 on NVAx */
xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */
@@ -1614,9 +1595,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
xf_emit(ctx, 1, 0); /* 00000007 */
xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0x0fac6881);
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 1);
xf_emit(ctx, 3, 0);
}
@@ -1625,9 +1606,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
xf_emit(ctx, 5, 0); /* ffffffff */
xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
xf_emit(ctx, 1, 0); /* 00000001 */
@@ -1643,14 +1624,14 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0); /* 3ff */
xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */
xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */
@@ -1669,7 +1650,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
xf_emit(ctx, 1, 0); /* 0000000f */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
else
xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
@@ -1704,11 +1685,11 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0); /* 00000001 */
xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
xf_emit(ctx, 1, 0); /* ffffffff */
xf_emit(ctx, 1, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* 000003ff */
@@ -1736,7 +1717,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
/* SEEK */
xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
@@ -1774,7 +1755,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
xf_emit(ctx, 1, 0); /* 00000007 */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */
xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
@@ -1789,7 +1770,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */
xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */
}
@@ -1817,7 +1798,7 @@ nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
/* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
/* SEEK */
@@ -1829,7 +1810,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 4, 0); /* RO */
xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
xf_emit(ctx, 1, 0); /* 1ff */
@@ -1860,7 +1841,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
/* SEEK */
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
@@ -1869,7 +1850,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
xf_emit(ctx, 1, 1); /* 00000001 */
/* SEEK */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 2, 4); /* 000000ff */
xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
@@ -1893,20 +1874,20 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */
xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0); /* 000003ff */
}
static void
nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int acnt = 0x10, rep, i;
/* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
acnt = 0x20;
/* SEEK */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */
xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */
}
@@ -1923,9 +1904,9 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0xb, 0); /* RO */
- else if (dev_priv->chipset >= 0xa0)
+ else if (device->chipset >= 0xa0)
xf_emit(ctx, 0x9, 0); /* RO */
else
xf_emit(ctx, 0x8, 0); /* RO */
@@ -1944,11 +1925,11 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */
xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */
else
xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */
- if (dev_priv->chipset == 0xa8)
+ if (device->chipset == 0xa8)
xf_emit(ctx, 1, 0x1e00); /* 7fff */
/* SEEK */
xf_emit(ctx, 0xc, 0); /* RO or close */
@@ -1956,13 +1937,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
- if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
+ if (device->chipset > 0x50 && device->chipset < 0xa0)
xf_emit(ctx, 2, 0); /* ffffffff */
else
xf_emit(ctx, 1, 0); /* ffffffff */
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 0x10, 0); /* 0? */
xf_emit(ctx, 2, 0); /* weird... */
xf_emit(ctx, 2, 0); /* RO */
@@ -1975,7 +1956,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */
xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */
xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */
/* SEEK */
xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
@@ -2013,23 +1994,23 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */
xf_emit(ctx, 3, 0); /* f/1f */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, acnt, 0); /* f */
xf_emit(ctx, 3, 0); /* f/1f */
}
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 2, 0); /* RO */
else
xf_emit(ctx, 5, 0); /* RO */
/* SEEK */
xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */
/* SEEK */
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
xf_emit(ctx, 0x41, 0); /* RO */
/* SEEK */
xf_emit(ctx, 0x11, 0); /* RO */
- } else if (!IS_NVA3F(dev_priv->chipset))
+ } else if (!IS_NVA3F(device->chipset))
xf_emit(ctx, 0x50, 0); /* RO */
else
xf_emit(ctx, 0x58, 0); /* RO */
@@ -2041,7 +2022,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */
xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0x1d, 0); /* RO */
else
xf_emit(ctx, 0x16, 0); /* RO */
@@ -2049,21 +2030,21 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
/* SEEK */
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
xf_emit(ctx, 8, 0); /* RO */
- else if (IS_NVA3F(dev_priv->chipset))
+ else if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0xc, 0); /* RO */
else
xf_emit(ctx, 7, 0); /* RO */
/* SEEK */
xf_emit(ctx, 0xa, 0); /* RO */
- if (dev_priv->chipset == 0xa0)
+ if (device->chipset == 0xa0)
rep = 0xc;
else
rep = 4;
for (i = 0; i < rep; i++) {
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0x20, 0); /* ffffffff */
xf_emit(ctx, 0x200, 0); /* ffffffff */
xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */
@@ -2077,7 +2058,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 7, 0); /* weird... */
else
xf_emit(ctx, 5, 0); /* weird... */
@@ -2086,13 +2067,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
/* SEEK */
xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */
xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */
xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
/* this is useless on everything but the original NV50,
* guess they forgot to nuke it. Or just didn't bother. */
xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */
@@ -2148,7 +2129,7 @@ nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
/* SEEK */
xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
@@ -2173,7 +2154,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
/* SEEK */
xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
case 0x92:
xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
@@ -2247,7 +2228,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
@@ -2277,9 +2258,9 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */
- else if (dev_priv->chipset >= 0xa0)
+ else if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */
xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
@@ -2293,11 +2274,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */
xf_emit(ctx, 1, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
xf_emit(ctx, 1, 0); /* 3ff */
xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */
}
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
@@ -2316,11 +2297,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
xf_emit(ctx, 0x1c, 0); /* RO */
- else if (IS_NVA3F(dev_priv->chipset))
+ else if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0x9, 0);
xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
@@ -2328,13 +2309,13 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
xf_emit(ctx, 1, 0); /* 3ff */
}
/* XXX: the following block could belong either to unk1cxx, or
* to STRMOUT. Rather hard to tell. */
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
xf_emit(ctx, 0x25, 0);
else
xf_emit(ctx, 0x3b, 0);
@@ -2343,18 +2324,18 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
}
xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
else
xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
@@ -2365,7 +2346,7 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */
xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */
xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
}
@@ -2385,12 +2366,12 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
xf_emit(ctx, 1, 0); /* 00000007 */
xf_emit(ctx, 1, 0); /* 000003ff */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
}
@@ -2398,7 +2379,7 @@ nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* SEEK */
xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
@@ -2416,7 +2397,7 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */
xf_emit(ctx, 1, 0); /* ff/3ff */
xf_emit(ctx, 1, 0); /* 00000007 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
}
@@ -2424,11 +2405,11 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int magic2;
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
magic2 = 0x00003e60;
- } else if (!IS_NVA3F(dev_priv->chipset)) {
+ } else if (!IS_NVA3F(device->chipset)) {
magic2 = 0x001ffe67;
} else {
magic2 = 0x00087e67;
@@ -2446,14 +2427,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
+ if (device->chipset >= 0xa0 && !IS_NVAAF(device->chipset))
xf_emit(ctx, 1, 0x15); /* 000000ff */
xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
@@ -2462,14 +2443,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
+ if (device->chipset == 0x86 || device->chipset == 0x92 || device->chipset == 0x98 || device->chipset >= 0xa0) {
xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */
xf_emit(ctx, 1, 4); /* 7 */
xf_emit(ctx, 1, 0x400); /* fffffff */
xf_emit(ctx, 1, 0x300); /* ffff */
xf_emit(ctx, 1, 0x1001); /* 1fff */
- if (dev_priv->chipset != 0xa0) {
- if (IS_NVA3F(dev_priv->chipset))
+ if (device->chipset != 0xa0) {
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */
else
xf_emit(ctx, 1, 0x15); /* ff */
@@ -2547,7 +2528,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 2, 0);
xf_emit(ctx, 1, 0x1001);
xf_emit(ctx, 0xb, 0);
@@ -2564,7 +2545,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
xf_emit(ctx, 1, 0x11); /* 3f/7f */
xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
xf_emit(ctx, 1, 0); /* 000000ff */
}
@@ -2581,7 +2562,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */
xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2600,7 +2581,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* 000003ff */
- } else if (dev_priv->chipset >= 0xa0) {
+ } else if (device->chipset >= 0xa0) {
xf_emit(ctx, 2, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* 00000007 */
xf_emit(ctx, 1, 0); /* 00000003 */
@@ -2614,7 +2595,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */
xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */
xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 2, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* 000003ff */
xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
@@ -2628,9 +2609,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
@@ -2659,9 +2640,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int magic3;
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
magic3 = 0x1000;
break;
@@ -2681,16 +2662,16 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0x1f, 0); /* ffffffff */
- else if (dev_priv->chipset >= 0xa0)
+ else if (device->chipset >= 0xa0)
xf_emit(ctx, 0x0f, 0); /* ffffffff */
else
xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */
xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0x03020100); /* ffffffff */
else
xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */
@@ -2733,11 +2714,11 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
xf_emit(ctx, 1, 0); /* 111/113 */
- if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
+ if (device->chipset == 0x94 || device->chipset == 0x96)
xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
- else if (!IS_NVA3F(dev_priv->chipset))
+ else if (!IS_NVA3F(device->chipset))
xf_emit(ctx, 0x210, 0); /* ffffffff */
else
xf_emit(ctx, 0x410, 0); /* ffffffff */
@@ -2751,12 +2732,12 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int magic1, magic2;
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
magic1 = 0x3ff;
magic2 = 0x00003e60;
- } else if (!IS_NVA3F(dev_priv->chipset)) {
+ } else if (!IS_NVA3F(device->chipset)) {
magic1 = 0x7ff;
magic2 = 0x001ffe67;
} else {
@@ -2766,7 +2747,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */
xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */
xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
@@ -2800,11 +2781,11 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
xf_emit(ctx, 1, 0); /* 00000003 */
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */
- } else if (dev_priv->chipset >= 0xa0) {
+ } else if (device->chipset >= 0xa0) {
xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */
xf_emit(ctx, 1, 0); /* 00000003 */
} else {
@@ -2818,7 +2799,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2846,7 +2827,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */
xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
@@ -2870,9 +2851,9 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0); /* ff */
else
xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
@@ -2907,7 +2888,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
xf_emit(ctx, 1, 0); /* 00000007 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */
xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */
@@ -2945,7 +2926,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
xf_emit(ctx, 1, 0); /* 00000007 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
@@ -2974,7 +2955,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* ffff0ff3 */
xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
@@ -2988,14 +2969,14 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
xf_emit(ctx, 1, 0); /* 7 */
xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
}
xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
xf_emit(ctx, 1, 0); /* ffff0ff3 */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0x0fac6881); /* fffffff */
xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
@@ -3012,12 +2993,12 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */
}
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */
xf_emit(ctx, 1, 0xfac6881); /* fffffff */
xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */
@@ -3027,7 +3008,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 2, 0); /* 7, f */
xf_emit(ctx, 1, 1); /* 1 */
xf_emit(ctx, 1, 0); /* 7/f */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0x9, 0); /* 1 */
else
xf_emit(ctx, 0x8, 0); /* 1 */
@@ -3041,7 +3022,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x11); /* 7f */
xf_emit(ctx, 1, 1); /* 1 */
xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
}
@@ -3051,15 +3032,15 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0); /* 3 */
xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */
xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */
xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */
xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */
else
xf_emit(ctx, 2, 0); /* 3ff, 1 */
@@ -3071,13 +3052,13 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */
xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */
xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */
xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */
xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */
- } else if (!IS_NVAAF(dev_priv->chipset)) {
+ } else if (!IS_NVAAF(device->chipset)) {
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
xf_emit(ctx, 1, 0); /* 00000003 */
xf_emit(ctx, 1, 0); /* 000003ff */
@@ -3097,7 +3078,7 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
@@ -3109,7 +3090,7 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
@@ -3136,8 +3117,8 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- if (dev_priv->chipset < 0xa0) {
+ struct nouveau_device *device = ctx->device;
+ if (device->chipset < 0xa0) {
nv50_graph_construct_xfer_unk84xx(ctx);
nv50_graph_construct_xfer_tprop(ctx);
nv50_graph_construct_xfer_tex(ctx);
@@ -3153,9 +3134,9 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i, mpcnt = 2;
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x98:
case 0xaa:
mpcnt = 1;
@@ -3182,34 +3163,34 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */
xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */
xf_emit(ctx, 1, 0x04000400); /* ffffffff */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */
xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */
xf_emit(ctx, 1, 0); /* ff/3ff */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) {
+ if (device->chipset == 0x86 || device->chipset == 0x98 || device->chipset == 0xa8 || IS_NVAAF(device->chipset)) {
xf_emit(ctx, 1, 0xe00); /* 7fff */
xf_emit(ctx, 1, 0x1e00); /* 7fff */
}
xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */
xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */
xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
- if (IS_NVAAF(dev_priv->chipset))
+ if (IS_NVAAF(device->chipset))
xf_emit(ctx, 0xb, 0); /* RO */
- else if (dev_priv->chipset >= 0xa0)
+ else if (device->chipset >= 0xa0)
xf_emit(ctx, 0xc, 0); /* RO */
else
xf_emit(ctx, 0xa, 0); /* RO */
}
xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
xf_emit(ctx, 1, 0); /* ff/3ff */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */
}
xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */
@@ -3223,7 +3204,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNK0FAC */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* ff/3ff */
xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */
@@ -3238,7 +3219,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000007 */
xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */
xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
@@ -3253,7 +3234,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
@@ -3268,11 +3249,11 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
/* XXX: demagic this part some day */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 0x3a0, 0);
- else if (dev_priv->chipset < 0x94)
+ else if (device->chipset < 0x94)
xf_emit(ctx, 0x3a2, 0);
- else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
+ else if (device->chipset == 0x98 || device->chipset == 0xaa)
xf_emit(ctx, 0x39f, 0);
else
xf_emit(ctx, 0x3a3, 0);
@@ -3285,15 +3266,15 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
- uint32_t offset;
- uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+ u32 offset;
+ u32 units = nv_rd32(ctx->device, 0x1540);
int size = 0;
offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
for (i = 0; i < 8; i++) {
ctx->ctxvals_pos = offset + i;
/* that little bugger belongs to csched. No idea
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
new file mode 100644
index 0000000000000000000000000000000000000000..0b7951a859434dc72f20bbc30b975ee58c22501c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -0,0 +1,3039 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+void
+nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data)
+{
+ nv_wr32(priv, 0x400204, data);
+ nv_wr32(priv, 0x400200, icmd);
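+ /* poll PGRAPH status (0x400700) until the immediate command has been consumed */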
+ while (nv_rd32(priv, 0x400700) & 2) {}
+}
+
+int
+nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+{
+ struct nouveau_bar *bar = nouveau_bar(priv);
+ struct nouveau_object *parent = nv_object(priv);
+ struct nouveau_gpuobj *chan;
+ u32 size = (0x80000 + priv->size + 4095) & ~4095;
+ int ret, i;
+
+ /* allocate memory for a "channel", which we'll use to generate
+ * the default context values
+ */
+ ret = nouveau_gpuobj_new(parent, NULL, size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
+ chan = info->chan;
+ if (ret) {
+ nv_error(priv, "failed to allocate channel memory, %d\n", ret);
+ return ret;
+ }
+
+ /* PGD pointer */
+ nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
+ nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
+ nv_wo32(chan, 0x0208, 0xffffffff);
+ nv_wo32(chan, 0x020c, 0x000000ff);
+
+ /* PGT[0] pointer */
+ nv_wo32(chan, 0x1000, 0x00000000);
+ nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
+
+ /* identity-map the whole "channel" into its own vm */
+ for (i = 0; i < size / 4096; i++) {
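+ /* PTE = (page address >> 8) | 1, with bit 0 marking the entry valid */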
+ u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
+ nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
+ nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
+ }
+
+ /* context pointer (virt) */
+ nv_wo32(chan, 0x0210, 0x00080004);
+ nv_wo32(chan, 0x0214, 0x00000000);
+
+ bar->flush(bar);
+
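+ /* point the VM flush interface at the new page directory and wait for the TLB flush to complete */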
+ nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
+ nv_wr32(priv, 0x100cbc, 0x80000001);
+ nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
+
+ /* setup default state for mmio list construction */
+ info->data = priv->mmio_data;
+ info->mmio = priv->mmio_list;
+ info->addr = 0x2000 + (i * 8);
+ info->priv = priv;
+ info->buffer_nr = 0;
+
+ if (priv->firmware) {
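+ /* external ctxsw firmware is present: ask it to load the template channel instead of driving the HUB scripts by hand */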
+ nv_wr32(priv, 0x409840, 0x00000030);
+ nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
+ nv_wr32(priv, 0x409504, 0x00000003);
+ if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
+ nv_error(priv, "load_ctx timeout\n");
+
+ nv_wo32(chan, 0x8001c, 1);
+ nv_wo32(chan, 0x80020, 0);
+ nv_wo32(chan, 0x80028, 0);
+ nv_wo32(chan, 0x8002c, 0);
+ bar->flush(bar);
+ return 0;
+ }
+
+ /* HUB_FUC(SET_CHAN) */
+ nv_wr32(priv, 0x409840, 0x80000000);
+ nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
+ nv_wr32(priv, 0x409504, 0x00000001);
+ if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
+ nv_error(priv, "HUB_SET_CHAN timeout\n");
+ nvc0_graph_ctxctl_debug(priv);
+ nouveau_gpuobj_ref(NULL, &info->chan);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void
+nvc0_grctx_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
+{
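+ /* round the running address up to the requested alignment, claim 'size' bytes for this buffer, and record its requirements */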
+ info->buffer[info->buffer_nr] = info->addr;
+ info->buffer[info->buffer_nr] += (align - 1);
+ info->buffer[info->buffer_nr] &= ~(align - 1);
+ info->addr = info->buffer[info->buffer_nr++] + size;
+
+ info->data->size = size;
+ info->data->align = align;
+ info->data->access = access;
+ info->data++;
+}
+
+void
+nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf)
+{
+ struct nvc0_graph_priv *priv = info->priv;
+
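+ /* record the register write in the mmio list for later replay on channel creation, then apply it immediately */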
+ info->mmio->addr = addr;
+ info->mmio->data = data;
+ info->mmio->shift = shift;
+ info->mmio->buffer = buf;
+ info->mmio++;
+
+ if (shift)
+ data |= info->buffer[buf] >> shift;
+ nv_wr32(priv, addr, data);
+}
+
+int
+nvc0_grctx_fini(struct nvc0_grctx *info)
+{
+ struct nvc0_graph_priv *priv = info->priv;
+ int i;
+
+ /* trigger a context unload by unsetting the "next channel valid" bit
+ * and faking a context switch interrupt
+ */
+ nv_mask(priv, 0x409b04, 0x80000000, 0x00000000);
+ nv_wr32(priv, 0x409000, 0x00000100);
+ if (!nv_wait(priv, 0x409b00, 0x80000000, 0x00000000)) {
+ nv_error(priv, "grctx template channel unload timeout\n");
+ return -EBUSY;
+ }
+
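+ /* snapshot the generated context image; this becomes the default grctx copied into new channels */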
+ priv->data = kmalloc(priv->size, GFP_KERNEL);
+ if (priv->data) {
+ for (i = 0; i < priv->size; i += 4)
+ priv->data[i / 4] = nv_ro32(info->chan, 0x80000 + i);
+ }
+
+ nouveau_gpuobj_ref(NULL, &info->chan);
+ return priv->data ? 0 : -ENOMEM;
+}
+
+static void
+nvc0_grctx_generate_9097(struct nvc0_graph_priv *priv)
+{
+ u32 fermi = nvc0_graph_class(priv);
+ u32 mthd;
+
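+ /* default method state for the FERMI_A (0x9097) 3D class; the values below were captured from hardware and are mostly undocumented */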
+ nv_mthd(priv, 0x9097, 0x0800, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0840, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0880, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x08c0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0900, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0940, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0980, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x09c0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0804, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0844, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0884, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x08c4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0904, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0944, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0984, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x09c4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0808, 0x00000400);
+ nv_mthd(priv, 0x9097, 0x0848, 0x00000400);
+ nv_mthd(priv, 0x9097, 0x0888, 0x00000400);
+ nv_mthd(priv, 0x9097, 0x08c8, 0x00000400);
+ nv_mthd(priv, 0x9097, 0x0908, 0x00000400);
+ nv_mthd(priv, 0x9097, 0x0948, 0x00000400);
+ nv_mthd(priv, 0x9097, 0x0988, 0x00000400);
+ nv_mthd(priv, 0x9097, 0x09c8, 0x00000400);
+ nv_mthd(priv, 0x9097, 0x080c, 0x00000300);
+ nv_mthd(priv, 0x9097, 0x084c, 0x00000300);
+ nv_mthd(priv, 0x9097, 0x088c, 0x00000300);
+ nv_mthd(priv, 0x9097, 0x08cc, 0x00000300);
+ nv_mthd(priv, 0x9097, 0x090c, 0x00000300);
+ nv_mthd(priv, 0x9097, 0x094c, 0x00000300);
+ nv_mthd(priv, 0x9097, 0x098c, 0x00000300);
+ nv_mthd(priv, 0x9097, 0x09cc, 0x00000300);
+ nv_mthd(priv, 0x9097, 0x0810, 0x000000cf);
+ nv_mthd(priv, 0x9097, 0x0850, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0890, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x08d0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0910, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0950, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0990, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x09d0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0814, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x0854, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x0894, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x08d4, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x0914, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x0954, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x0994, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x09d4, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x0818, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x0858, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x0898, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x08d8, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x0918, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x0958, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x0998, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x09d8, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x081c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x085c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x089c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x08dc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x091c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x095c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x099c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x09dc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0820, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0860, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x08a0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x08e0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0920, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0960, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x09a0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x09e0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2700, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2720, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2740, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2760, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2780, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27a0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27c0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27e0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2704, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2724, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2744, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2764, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2784, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27a4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27c4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27e4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2708, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2728, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2748, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2768, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2788, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27a8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27c8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27e8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x270c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x272c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x274c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x276c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x278c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27ac, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27cc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x27ec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2710, 0x00014000);
+ nv_mthd(priv, 0x9097, 0x2730, 0x00014000);
+ nv_mthd(priv, 0x9097, 0x2750, 0x00014000);
+ nv_mthd(priv, 0x9097, 0x2770, 0x00014000);
+ nv_mthd(priv, 0x9097, 0x2790, 0x00014000);
+ nv_mthd(priv, 0x9097, 0x27b0, 0x00014000);
+ nv_mthd(priv, 0x9097, 0x27d0, 0x00014000);
+ nv_mthd(priv, 0x9097, 0x27f0, 0x00014000);
+ nv_mthd(priv, 0x9097, 0x2714, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x2734, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x2754, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x2774, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x2794, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x27b4, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x27d4, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x27f4, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x1c00, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c10, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c20, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c30, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c40, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c50, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c60, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c70, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c80, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c90, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ca0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cb0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cc0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cd0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ce0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cf0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c04, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c14, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c24, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c34, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c44, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c54, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c64, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c74, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c84, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c94, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ca4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cb4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cc4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cd4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ce4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cf4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c08, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c18, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c28, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c38, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c48, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c58, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c68, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c78, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c88, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c98, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ca8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cb8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cc8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cd8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ce8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cf8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c0c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c1c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c2c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c3c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c4c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c5c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c6c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c7c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c8c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1c9c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cac, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cbc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ccc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cdc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1cfc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d00, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d10, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d20, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d30, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d40, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d50, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d60, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d70, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d80, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d90, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1da0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1db0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1dc0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1dd0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1de0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1df0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d04, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d14, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d24, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d34, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d44, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d54, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d64, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d74, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d84, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d94, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1da4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1db4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1dc4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1dd4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1de4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1df4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d08, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d18, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d28, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d38, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d48, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d58, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d68, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d78, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d88, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d98, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1da8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1db8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1dc8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1dd8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1de8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1df8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d0c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d1c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d2c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d3c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d4c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d5c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d6c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d7c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d8c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1d9c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1dac, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1dbc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1dcc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ddc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1dec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1dfc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f00, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f08, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f10, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f18, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f20, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f28, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f30, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f38, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f40, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f48, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f50, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f58, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f60, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f68, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f70, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f78, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f04, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f0c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f14, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f1c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f24, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f2c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f34, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f3c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f44, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f4c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f54, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f5c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f64, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f6c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f74, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f7c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f80, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f88, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f90, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f98, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fa0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fa8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fb0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fb8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fc0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fc8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fd0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fd8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fe0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fe8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ff0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ff8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f84, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f8c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f94, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1f9c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fa4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fac, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fb4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fbc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fc4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fcc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fd4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fdc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fe4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1fec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ff4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1ffc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2200, 0x00000022);
+ nv_mthd(priv, 0x9097, 0x2210, 0x00000022);
+ nv_mthd(priv, 0x9097, 0x2220, 0x00000022);
+ nv_mthd(priv, 0x9097, 0x2230, 0x00000022);
+ nv_mthd(priv, 0x9097, 0x2240, 0x00000022);
+ nv_mthd(priv, 0x9097, 0x2000, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2040, 0x00000011);
+ nv_mthd(priv, 0x9097, 0x2080, 0x00000020);
+ nv_mthd(priv, 0x9097, 0x20c0, 0x00000030);
+ nv_mthd(priv, 0x9097, 0x2100, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x2140, 0x00000051);
+ nv_mthd(priv, 0x9097, 0x200c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x204c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x208c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x20cc, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x210c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x214c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x2010, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2050, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2090, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x20d0, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x2110, 0x00000003);
+ nv_mthd(priv, 0x9097, 0x2150, 0x00000004);
+ nv_mthd(priv, 0x9097, 0x0380, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03a0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03c0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03e0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0384, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03a4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03c4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03e4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0388, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03a8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03c8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03e8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x038c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03ac, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03cc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x03ec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0700, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0710, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0720, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0730, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0704, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0714, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0724, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0734, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0708, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0718, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0728, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0738, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2800, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2804, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2808, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x280c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2810, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2814, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2818, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x281c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2820, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2824, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2828, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x282c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2830, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2834, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2838, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x283c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2840, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2844, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2848, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x284c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2850, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2854, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2858, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x285c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2860, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2864, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2868, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x286c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2870, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2874, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2878, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x287c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2880, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2884, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2888, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x288c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2890, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2894, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2898, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x289c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28a0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28a4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28a8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28ac, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28b0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28b4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28b8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28bc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28c0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28c4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28c8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28cc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28d0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28d4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28d8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28dc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28e0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28e4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28e8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28ec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28f0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28f4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28f8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x28fc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2900, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2904, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2908, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x290c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2910, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2914, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2918, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x291c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2920, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2924, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2928, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x292c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2930, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2934, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2938, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x293c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2940, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2944, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2948, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x294c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2950, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2954, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2958, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x295c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2960, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2964, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2968, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x296c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2970, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2974, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2978, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x297c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2980, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2984, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2988, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x298c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2990, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2994, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2998, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x299c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29a0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29a4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29a8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29ac, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29b0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29b4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29b8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29bc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29c0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29c4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29c8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29cc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29d0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29d4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29d8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29dc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29e0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29e4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29e8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29ec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29f0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29f4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29f8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x29fc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a00, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a20, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a40, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a60, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a80, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0aa0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ac0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ae0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b00, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b20, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b40, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b60, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b80, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ba0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bc0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0be0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a04, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a24, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a44, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a64, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a84, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0aa4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ac4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ae4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b04, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b24, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b44, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b64, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b84, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ba4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bc4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0be4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a08, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a28, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a48, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a68, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a88, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0aa8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ac8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ae8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b08, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b28, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b48, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b68, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b88, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ba8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bc8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0be8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a0c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a2c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a4c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a6c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a8c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0aac, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0acc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0aec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b0c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b2c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b4c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b6c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b8c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bac, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bcc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a10, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a30, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a50, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a70, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a90, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ab0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ad0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0af0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b10, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b30, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b50, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b70, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b90, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bb0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bd0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bf0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a14, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a34, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a54, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a74, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0a94, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ab4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ad4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0af4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b14, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b34, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b54, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b74, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0b94, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bb4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bd4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0bf4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c00, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c10, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c20, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c30, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c40, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c50, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c60, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c70, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c80, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c90, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ca0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cb0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cc0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cd0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ce0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cf0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c04, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c14, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c24, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c34, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c44, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c54, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c64, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c74, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c84, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c94, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ca4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cb4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cc4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cd4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ce4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cf4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c08, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c18, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c28, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c38, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c48, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c58, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c68, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c78, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c88, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c98, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ca8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cb8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cc8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cd8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ce8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0cf8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0c0c, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0c1c, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0c2c, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0c3c, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0c4c, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0c5c, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0c6c, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0c7c, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0c8c, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0c9c, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0cac, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0cbc, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0ccc, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0cdc, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0cec, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0cfc, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0d00, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d08, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d10, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d18, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d20, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d28, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d30, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d38, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d04, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d0c, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d14, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d1c, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d24, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d2c, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d34, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d3c, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e00, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0e10, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0e20, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0e30, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0e40, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0e50, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0e60, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0e70, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0e80, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0e90, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ea0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0eb0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ec0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ed0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ee0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ef0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0e04, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e14, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e24, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e34, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e44, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e54, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e64, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e74, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e84, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e94, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0ea4, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0eb4, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0ec4, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0ed4, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0ee4, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0ef4, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e08, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e18, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e28, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e38, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e48, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e58, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e68, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e78, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e88, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0e98, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0ea8, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0eb8, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0ec8, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0ed8, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0ee8, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0ef8, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d40, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d48, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d50, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d58, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d44, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d4c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d54, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d5c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1e00, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e20, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e40, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e60, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e80, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1ea0, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1ec0, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1ee0, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e04, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e24, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e44, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e64, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e84, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1ea4, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1ec4, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1ee4, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e08, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1e28, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1e48, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1e68, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1e88, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1ea8, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1ec8, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1ee8, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1e0c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e2c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e4c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e6c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e8c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1eac, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1ecc, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1eec, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e10, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e30, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e50, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e70, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e90, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1eb0, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1ed0, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1ef0, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e14, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1e34, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1e54, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1e74, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1e94, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1eb4, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1ed4, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1ef4, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1e18, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e38, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e58, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e78, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1e98, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1eb8, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1ed8, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1ef8, 0x00000001);
+ if (fermi == 0x9097) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(priv, 0x9097, mthd, 0x00000000);
+ }
+ nv_mthd(priv, 0x9097, 0x030c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1944, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1514, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d68, 0x0000ffff);
+ nv_mthd(priv, 0x9097, 0x121c, 0x0fac6881);
+ nv_mthd(priv, 0x9097, 0x0fac, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1538, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x0fe0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0fe4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0fe8, 0x00000014);
+ nv_mthd(priv, 0x9097, 0x0fec, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x0ff0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x179c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1228, 0x00000400);
+ nv_mthd(priv, 0x9097, 0x122c, 0x00000300);
+ nv_mthd(priv, 0x9097, 0x1230, 0x00010001);
+ nv_mthd(priv, 0x9097, 0x07f8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x15b4, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x15cc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1534, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0fb0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x15d0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x153c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x16b4, 0x00000003);
+ nv_mthd(priv, 0x9097, 0x0fbc, 0x0000ffff);
+ nv_mthd(priv, 0x9097, 0x0fc0, 0x0000ffff);
+ nv_mthd(priv, 0x9097, 0x0fc4, 0x0000ffff);
+ nv_mthd(priv, 0x9097, 0x0fc8, 0x0000ffff);
+ nv_mthd(priv, 0x9097, 0x0df8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0dfc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1948, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1970, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x161c, 0x000009f0);
+ nv_mthd(priv, 0x9097, 0x0dcc, 0x00000010);
+ nv_mthd(priv, 0x9097, 0x163c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x15e4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1160, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1164, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1168, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x116c, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1170, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1174, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1178, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x117c, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1180, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1184, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1188, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x118c, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1190, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1194, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1198, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x119c, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11a0, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11a4, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11a8, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11ac, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11b0, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11b4, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11b8, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11bc, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11c0, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11c4, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11c8, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11cc, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11d0, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11d4, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11d8, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x11dc, 0x25e00040);
+ nv_mthd(priv, 0x9097, 0x1880, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1884, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1888, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x188c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1890, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1894, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1898, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x189c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18a0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18a4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18a8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18ac, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18b0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18b4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18b8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18bc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18c0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18c4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18c8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18cc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18d0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18d4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18d8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18dc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18e0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18e4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18e8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18ec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18f0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18f4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18f8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x18fc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0f84, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0f88, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x17c8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x17cc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x17d0, 0x000000ff);
+ nv_mthd(priv, 0x9097, 0x17d4, 0xffffffff);
+ nv_mthd(priv, 0x9097, 0x17d8, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x17dc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x15f4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x15f8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1434, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1438, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d74, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0dec, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x13a4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1318, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1644, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0748, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0de8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1648, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x12a4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1120, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1124, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1128, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x112c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1118, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x164c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1658, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1910, 0x00000290);
+ nv_mthd(priv, 0x9097, 0x1518, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x165c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1520, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1604, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1570, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x13b0, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x13b4, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x020c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1670, 0x30201000);
+ nv_mthd(priv, 0x9097, 0x1674, 0x70605040);
+ nv_mthd(priv, 0x9097, 0x1678, 0xb8a89888);
+ nv_mthd(priv, 0x9097, 0x167c, 0xf8e8d8c8);
+ nv_mthd(priv, 0x9097, 0x166c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1680, 0x00ffff00);
+ nv_mthd(priv, 0x9097, 0x12d0, 0x00000003);
+ nv_mthd(priv, 0x9097, 0x12d4, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1684, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1688, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0dac, 0x00001b02);
+ nv_mthd(priv, 0x9097, 0x0db0, 0x00001b02);
+ nv_mthd(priv, 0x9097, 0x0db4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x168c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x15bc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x156c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x187c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1110, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x0dc0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0dc4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0dc8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1234, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1690, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x12ac, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x02c4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0790, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0794, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0798, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x079c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x07a0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x077c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1000, 0x00000010);
+ nv_mthd(priv, 0x9097, 0x10fc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1290, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0218, 0x00000010);
+ nv_mthd(priv, 0x9097, 0x12d8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x12dc, 0x00000010);
+ nv_mthd(priv, 0x9097, 0x0d94, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x155c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1560, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1564, 0x00001fff);
+ nv_mthd(priv, 0x9097, 0x1574, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1578, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x157c, 0x003fffff);
+ nv_mthd(priv, 0x9097, 0x1354, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1664, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1610, 0x00000012);
+ nv_mthd(priv, 0x9097, 0x1608, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x160c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x162c, 0x00000003);
+ nv_mthd(priv, 0x9097, 0x0210, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0320, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0324, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0328, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x032c, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0330, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0334, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0338, 0x3f800000);
+ nv_mthd(priv, 0x9097, 0x0750, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0760, 0x39291909);
+ nv_mthd(priv, 0x9097, 0x0764, 0x79695949);
+ nv_mthd(priv, 0x9097, 0x0768, 0xb9a99989);
+ nv_mthd(priv, 0x9097, 0x076c, 0xf9e9d9c9);
+ nv_mthd(priv, 0x9097, 0x0770, 0x30201000);
+ nv_mthd(priv, 0x9097, 0x0774, 0x70605040);
+ nv_mthd(priv, 0x9097, 0x0778, 0x00009080);
+ nv_mthd(priv, 0x9097, 0x0780, 0x39291909);
+ nv_mthd(priv, 0x9097, 0x0784, 0x79695949);
+ nv_mthd(priv, 0x9097, 0x0788, 0xb9a99989);
+ nv_mthd(priv, 0x9097, 0x078c, 0xf9e9d9c9);
+ nv_mthd(priv, 0x9097, 0x07d0, 0x30201000);
+ nv_mthd(priv, 0x9097, 0x07d4, 0x70605040);
+ nv_mthd(priv, 0x9097, 0x07d8, 0x00009080);
+ nv_mthd(priv, 0x9097, 0x037c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x0740, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0744, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x2600, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1918, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x191c, 0x00000900);
+ nv_mthd(priv, 0x9097, 0x1920, 0x00000405);
+ nv_mthd(priv, 0x9097, 0x1308, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1924, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x13ac, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x192c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x193c, 0x00002c1c);
+ nv_mthd(priv, 0x9097, 0x0d7c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0f8c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x02c0, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1510, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1940, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ff4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0ff8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x194c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1950, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1968, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1590, 0x0000003f);
+ nv_mthd(priv, 0x9097, 0x07e8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x07ec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x07f0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x07f4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x196c, 0x00000011);
+ nv_mthd(priv, 0x9097, 0x197c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0fcc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0fd0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x02d8, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x1980, 0x00000080);
+ nv_mthd(priv, 0x9097, 0x1504, 0x00000080);
+ nv_mthd(priv, 0x9097, 0x1984, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0300, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x13a8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x12ec, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1310, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1314, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1380, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1384, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1388, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x138c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1390, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1394, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x139c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1398, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1594, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1598, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x159c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x15a0, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x15a4, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x0f54, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0f58, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0f5c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x19bc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0f9c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0fa0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x12cc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x12e8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x130c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1360, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1364, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1368, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x136c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1370, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1374, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1378, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x137c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x133c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1340, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1344, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1348, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x134c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1350, 0x00000002);
+ nv_mthd(priv, 0x9097, 0x1358, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x12e4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x131c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1320, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1324, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1328, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x19c0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1140, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x19c4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x19c8, 0x00001500);
+ nv_mthd(priv, 0x9097, 0x135c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0f90, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x19e0, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x19e4, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x19e8, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x19ec, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x19f0, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x19f4, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x19f8, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x19fc, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x19cc, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x15b8, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1a00, 0x00001111);
+ nv_mthd(priv, 0x9097, 0x1a04, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1a08, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1a0c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1a10, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1a14, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1a18, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1a1c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d6c, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x0d70, 0xffff0000);
+ nv_mthd(priv, 0x9097, 0x10f8, 0x00001010);
+ nv_mthd(priv, 0x9097, 0x0d80, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d84, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d88, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d8c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0d90, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0da0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1508, 0x80000000);
+ nv_mthd(priv, 0x9097, 0x150c, 0x40000000);
+ nv_mthd(priv, 0x9097, 0x1668, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0318, 0x00000008);
+ nv_mthd(priv, 0x9097, 0x031c, 0x00000008);
+ nv_mthd(priv, 0x9097, 0x0d9c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x07dc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x074c, 0x00000055);
+ nv_mthd(priv, 0x9097, 0x1420, 0x00000003);
+ nv_mthd(priv, 0x9097, 0x17bc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x17c0, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x17c4, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1008, 0x00000008);
+ nv_mthd(priv, 0x9097, 0x100c, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x1010, 0x0000012c);
+ nv_mthd(priv, 0x9097, 0x0d60, 0x00000040);
+ nv_mthd(priv, 0x9097, 0x075c, 0x00000003);
+ nv_mthd(priv, 0x9097, 0x1018, 0x00000020);
+ nv_mthd(priv, 0x9097, 0x101c, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1020, 0x00000020);
+ nv_mthd(priv, 0x9097, 0x1024, 0x00000001);
+ nv_mthd(priv, 0x9097, 0x1444, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x1448, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x144c, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0360, 0x20164010);
+ nv_mthd(priv, 0x9097, 0x0364, 0x00000020);
+ nv_mthd(priv, 0x9097, 0x0368, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0de4, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0204, 0x00000006);
+ nv_mthd(priv, 0x9097, 0x0208, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x02cc, 0x003fffff);
+ nv_mthd(priv, 0x9097, 0x02d0, 0x00000c48);
+ nv_mthd(priv, 0x9097, 0x1220, 0x00000005);
+ nv_mthd(priv, 0x9097, 0x0fdc, 0x00000000);
+ nv_mthd(priv, 0x9097, 0x0f98, 0x00300008);
+ nv_mthd(priv, 0x9097, 0x1284, 0x04000080);
+ nv_mthd(priv, 0x9097, 0x1450, 0x00300008);
+ nv_mthd(priv, 0x9097, 0x1454, 0x04000080);
+ nv_mthd(priv, 0x9097, 0x0214, 0x00000000);
+	/* in the captured mmio trace this method is sent right after the
+	 * 0x90c0 setup, not at this point */
+ nv_mthd(priv, 0x9097, 0x3410, 0x80002006);
+}
+
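+/* 0x9197 only needs the methods whose defaults differ from the 0x9097
+ * state above; the 0x3400-0x35fc block is cleared here rather than in the
+ * 0x9097 function when 0x9197 is the board's native 3D class.
+ */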
+static void
+nvc0_grctx_generate_9197(struct nvc0_graph_priv *priv)
+{
+ u32 fermi = nvc0_graph_class(priv);
+ u32 mthd;
+
+ if (fermi == 0x9197) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(priv, 0x9197, mthd, 0x00000000);
+ }
+ nv_mthd(priv, 0x9197, 0x02e4, 0x0000b001);
+}
+
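+/* likewise for 0x9297: only the deltas from the 0x9097 defaults, plus the
+ * 0x3400-0x35fc block when 0x9297 is the board's native 3D class
+ */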
+static void
+nvc0_grctx_generate_9297(struct nvc0_graph_priv *priv)
+{
+ u32 fermi = nvc0_graph_class(priv);
+ u32 mthd;
+
+ if (fermi == 0x9297) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(priv, 0x9297, mthd, 0x00000000);
+ }
+ nv_mthd(priv, 0x9297, 0x036c, 0x00000000);
+ nv_mthd(priv, 0x9297, 0x0370, 0x00000000);
+ nv_mthd(priv, 0x9297, 0x07a4, 0x00000000);
+ nv_mthd(priv, 0x9297, 0x07a8, 0x00000000);
+ nv_mthd(priv, 0x9297, 0x0374, 0x00000000);
+ nv_mthd(priv, 0x9297, 0x0378, 0x00000020);
+}
+
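+/* default method state for the 2D class (0x902d) */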
+static void
+nvc0_grctx_generate_902d(struct nvc0_graph_priv *priv)
+{
+ nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
+ nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
+ nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
+ nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
+ nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
+ nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
+ nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
+ nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
+ nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
+ nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
+ nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
+ nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
+ nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
+ nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
+ nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
+ nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
+ nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
+}
+
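+/* default method state for the memory-to-memory format class (0x9039) */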
+static void
+nvc0_grctx_generate_9039(struct nvc0_graph_priv *priv)
+{
+ nv_mthd(priv, 0x9039, 0x030c, 0x00000000);
+ nv_mthd(priv, 0x9039, 0x0310, 0x00000000);
+ nv_mthd(priv, 0x9039, 0x0314, 0x00000000);
+ nv_mthd(priv, 0x9039, 0x0320, 0x00000000);
+ nv_mthd(priv, 0x9039, 0x0238, 0x00000000);
+ nv_mthd(priv, 0x9039, 0x023c, 0x00000000);
+ nv_mthd(priv, 0x9039, 0x0318, 0x00000000);
+ nv_mthd(priv, 0x9039, 0x031c, 0x00000000);
+}
+
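+/* default method state for the compute class (0x90c0) */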
+static void
+nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
+{
+ int i;
+
+	if (nv_device(priv)->chipset == 0xd9) {
+		/* only initialised on chipset 0xd9 */
+		for (i = 0; i < 4; i++) {
+			nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
+			nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
+			nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
+			nv_mthd(priv, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
+			nv_mthd(priv, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
+			nv_mthd(priv, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
+		}
+	}
+ nv_mthd(priv, 0x90c0, 0x270c, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x272c, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x274c, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x276c, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x278c, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
+	if (nv_device(priv)->chipset == 0xd9) {
+		for (i = 0; i < 4; i++) {
+			nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
+			nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
+			nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
+			nv_mthd(priv, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
+		}
+	}
+ nv_mthd(priv, 0x90c0, 0x030c, 0x00000001);
+ nv_mthd(priv, 0x90c0, 0x1944, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x0758, 0x00000100);
+ nv_mthd(priv, 0x90c0, 0x02c4, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x0790, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x0794, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x0798, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x079c, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x07a0, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x077c, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x0204, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x0208, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x020c, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x0214, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x024c, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x0d94, 0x00000001);
+ nv_mthd(priv, 0x90c0, 0x1608, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x160c, 0x00000000);
+ nv_mthd(priv, 0x90c0, 0x1664, 0x00000000);
+}
+
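+/* The helpers below initialise the context-switched PGRAPH unit registers
+ * (0x404000 and up) with nv_wr32() rather than per-class method state.
+ */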
+static void
+nvc0_grctx_generate_dispatch(struct nvc0_graph_priv *priv)
+{
+ int i;
+
+ nv_wr32(priv, 0x404004, 0x00000000);
+ nv_wr32(priv, 0x404008, 0x00000000);
+ nv_wr32(priv, 0x40400c, 0x00000000);
+ nv_wr32(priv, 0x404010, 0x00000000);
+ nv_wr32(priv, 0x404014, 0x00000000);
+ nv_wr32(priv, 0x404018, 0x00000000);
+ nv_wr32(priv, 0x40401c, 0x00000000);
+ nv_wr32(priv, 0x404020, 0x00000000);
+ nv_wr32(priv, 0x404024, 0x00000000);
+ nv_wr32(priv, 0x404028, 0x00000000);
+ nv_wr32(priv, 0x40402c, 0x00000000);
+ nv_wr32(priv, 0x404044, 0x00000000);
+ nv_wr32(priv, 0x404094, 0x00000000);
+ nv_wr32(priv, 0x404098, 0x00000000);
+ nv_wr32(priv, 0x40409c, 0x00000000);
+ nv_wr32(priv, 0x4040a0, 0x00000000);
+ nv_wr32(priv, 0x4040a4, 0x00000000);
+ nv_wr32(priv, 0x4040a8, 0x00000000);
+ nv_wr32(priv, 0x4040ac, 0x00000000);
+ nv_wr32(priv, 0x4040b0, 0x00000000);
+ nv_wr32(priv, 0x4040b4, 0x00000000);
+ nv_wr32(priv, 0x4040b8, 0x00000000);
+ nv_wr32(priv, 0x4040bc, 0x00000000);
+ nv_wr32(priv, 0x4040c0, 0x00000000);
+ nv_wr32(priv, 0x4040c4, 0x00000000);
+ nv_wr32(priv, 0x4040c8, 0xf0000087);
+ nv_wr32(priv, 0x4040d4, 0x00000000);
+ nv_wr32(priv, 0x4040d8, 0x00000000);
+ nv_wr32(priv, 0x4040dc, 0x00000000);
+ nv_wr32(priv, 0x4040e0, 0x00000000);
+ nv_wr32(priv, 0x4040e4, 0x00000000);
+ nv_wr32(priv, 0x4040e8, 0x00001000);
+ nv_wr32(priv, 0x4040f8, 0x00000000);
+ nv_wr32(priv, 0x404130, 0x00000000);
+ nv_wr32(priv, 0x404134, 0x00000000);
+ nv_wr32(priv, 0x404138, 0x20000040);
+ nv_wr32(priv, 0x404150, 0x0000002e);
+ nv_wr32(priv, 0x404154, 0x00000400);
+ nv_wr32(priv, 0x404158, 0x00000200);
+ nv_wr32(priv, 0x404164, 0x00000055);
+ nv_wr32(priv, 0x404168, 0x00000000);
+ nv_wr32(priv, 0x404174, 0x00000000);
+ nv_wr32(priv, 0x404178, 0x00000000);
+ nv_wr32(priv, 0x40417c, 0x00000000);
+ for (i = 0; i < 8; i++)
+		nv_wr32(priv, 0x404200 + (i * 4), 0x00000000); /* per-subchannel state */
+}
+
+static void
+nvc0_grctx_generate_macro(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x404404, 0x00000000);
+ nv_wr32(priv, 0x404408, 0x00000000);
+ nv_wr32(priv, 0x40440c, 0x00000000);
+ nv_wr32(priv, 0x404410, 0x00000000);
+ nv_wr32(priv, 0x404414, 0x00000000);
+ nv_wr32(priv, 0x404418, 0x00000000);
+ nv_wr32(priv, 0x40441c, 0x00000000);
+ nv_wr32(priv, 0x404420, 0x00000000);
+ nv_wr32(priv, 0x404424, 0x00000000);
+ nv_wr32(priv, 0x404428, 0x00000000);
+ nv_wr32(priv, 0x40442c, 0x00000000);
+ nv_wr32(priv, 0x404430, 0x00000000);
+ nv_wr32(priv, 0x404434, 0x00000000);
+ nv_wr32(priv, 0x404438, 0x00000000);
+ nv_wr32(priv, 0x404460, 0x00000000);
+ nv_wr32(priv, 0x404464, 0x00000000);
+ nv_wr32(priv, 0x404468, 0x00ffffff);
+ nv_wr32(priv, 0x40446c, 0x00000000);
+ nv_wr32(priv, 0x404480, 0x00000001);
+ nv_wr32(priv, 0x404498, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_m2mf(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x404604, 0x00000015);
+ nv_wr32(priv, 0x404608, 0x00000000);
+ nv_wr32(priv, 0x40460c, 0x00002e00);
+ nv_wr32(priv, 0x404610, 0x00000100);
+ nv_wr32(priv, 0x404618, 0x00000000);
+ nv_wr32(priv, 0x40461c, 0x00000000);
+ nv_wr32(priv, 0x404620, 0x00000000);
+ nv_wr32(priv, 0x404624, 0x00000000);
+ nv_wr32(priv, 0x404628, 0x00000000);
+ nv_wr32(priv, 0x40462c, 0x00000000);
+ nv_wr32(priv, 0x404630, 0x00000000);
+ nv_wr32(priv, 0x404634, 0x00000000);
+ nv_wr32(priv, 0x404638, 0x00000004);
+ nv_wr32(priv, 0x40463c, 0x00000000);
+ nv_wr32(priv, 0x404640, 0x00000000);
+ nv_wr32(priv, 0x404644, 0x00000000);
+ nv_wr32(priv, 0x404648, 0x00000000);
+ nv_wr32(priv, 0x40464c, 0x00000000);
+ nv_wr32(priv, 0x404650, 0x00000000);
+ nv_wr32(priv, 0x404654, 0x00000000);
+ nv_wr32(priv, 0x404658, 0x00000000);
+ nv_wr32(priv, 0x40465c, 0x007f0100);
+ nv_wr32(priv, 0x404660, 0x00000000);
+ nv_wr32(priv, 0x404664, 0x00000000);
+ nv_wr32(priv, 0x404668, 0x00000000);
+ nv_wr32(priv, 0x40466c, 0x00000000);
+ nv_wr32(priv, 0x404670, 0x00000000);
+ nv_wr32(priv, 0x404674, 0x00000000);
+ nv_wr32(priv, 0x404678, 0x00000000);
+ nv_wr32(priv, 0x40467c, 0x00000002);
+ nv_wr32(priv, 0x404680, 0x00000000);
+ nv_wr32(priv, 0x404684, 0x00000000);
+ nv_wr32(priv, 0x404688, 0x00000000);
+ nv_wr32(priv, 0x40468c, 0x00000000);
+ nv_wr32(priv, 0x404690, 0x00000000);
+ nv_wr32(priv, 0x404694, 0x00000000);
+ nv_wr32(priv, 0x404698, 0x00000000);
+ nv_wr32(priv, 0x40469c, 0x00000000);
+ nv_wr32(priv, 0x4046a0, 0x007f0080);
+ nv_wr32(priv, 0x4046a4, 0x00000000);
+ nv_wr32(priv, 0x4046a8, 0x00000000);
+ nv_wr32(priv, 0x4046ac, 0x00000000);
+ nv_wr32(priv, 0x4046b0, 0x00000000);
+ nv_wr32(priv, 0x4046b4, 0x00000000);
+ nv_wr32(priv, 0x4046b8, 0x00000000);
+ nv_wr32(priv, 0x4046bc, 0x00000000);
+ nv_wr32(priv, 0x4046c0, 0x00000000);
+ nv_wr32(priv, 0x4046c4, 0x00000000);
+ nv_wr32(priv, 0x4046c8, 0x00000000);
+ nv_wr32(priv, 0x4046cc, 0x00000000);
+ nv_wr32(priv, 0x4046d0, 0x00000000);
+ nv_wr32(priv, 0x4046d4, 0x00000000);
+ nv_wr32(priv, 0x4046d8, 0x00000000);
+ nv_wr32(priv, 0x4046dc, 0x00000000);
+ nv_wr32(priv, 0x4046e0, 0x00000000);
+ nv_wr32(priv, 0x4046e4, 0x00000000);
+ nv_wr32(priv, 0x4046e8, 0x00000000);
+ nv_wr32(priv, 0x4046f0, 0x00000000);
+ nv_wr32(priv, 0x4046f4, 0x00000000);
+}
+
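+/* units whose purpose isn't understood yet are simply named after their
+ * register ranges (unk47xx, unk60xx, unk64xx)
+ */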
+static void
+nvc0_grctx_generate_unk47xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x404700, 0x00000000);
+ nv_wr32(priv, 0x404704, 0x00000000);
+ nv_wr32(priv, 0x404708, 0x00000000);
+ nv_wr32(priv, 0x40470c, 0x00000000);
+ nv_wr32(priv, 0x404710, 0x00000000);
+ nv_wr32(priv, 0x404714, 0x00000000);
+ nv_wr32(priv, 0x404718, 0x00000000);
+ nv_wr32(priv, 0x40471c, 0x00000000);
+ nv_wr32(priv, 0x404720, 0x00000000);
+ nv_wr32(priv, 0x404724, 0x00000000);
+ nv_wr32(priv, 0x404728, 0x00000000);
+ nv_wr32(priv, 0x40472c, 0x00000000);
+ nv_wr32(priv, 0x404730, 0x00000000);
+ nv_wr32(priv, 0x404734, 0x00000100);
+ nv_wr32(priv, 0x404738, 0x00000000);
+ nv_wr32(priv, 0x40473c, 0x00000000);
+ nv_wr32(priv, 0x404740, 0x00000000);
+ nv_wr32(priv, 0x404744, 0x00000000);
+ nv_wr32(priv, 0x404748, 0x00000000);
+ nv_wr32(priv, 0x40474c, 0x00000000);
+ nv_wr32(priv, 0x404750, 0x00000000);
+ nv_wr32(priv, 0x404754, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
+{
+ if (nv_device(priv)->chipset == 0xd9) {
+ nv_wr32(priv, 0x405800, 0x0f8000bf);
+ nv_wr32(priv, 0x405830, 0x02180218);
+ nv_wr32(priv, 0x405834, 0x08000000);
+ } else
+ if (nv_device(priv)->chipset == 0xc1) {
+ nv_wr32(priv, 0x405800, 0x0f8000bf);
+ nv_wr32(priv, 0x405830, 0x02180218);
+ nv_wr32(priv, 0x405834, 0x00000000);
+ } else {
+ nv_wr32(priv, 0x405800, 0x078000bf);
+ nv_wr32(priv, 0x405830, 0x02180000);
+ nv_wr32(priv, 0x405834, 0x00000000);
+ }
+ nv_wr32(priv, 0x405838, 0x00000000);
+ nv_wr32(priv, 0x405854, 0x00000000);
+ nv_wr32(priv, 0x405870, 0x00000001);
+ nv_wr32(priv, 0x405874, 0x00000001);
+ nv_wr32(priv, 0x405878, 0x00000001);
+ nv_wr32(priv, 0x40587c, 0x00000001);
+ nv_wr32(priv, 0x405a00, 0x00000000);
+ nv_wr32(priv, 0x405a04, 0x00000000);
+ nv_wr32(priv, 0x405a18, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk60xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x406020, 0x000103c1);
+ nv_wr32(priv, 0x406028, 0x00000001);
+ nv_wr32(priv, 0x40602c, 0x00000001);
+ nv_wr32(priv, 0x406030, 0x00000001);
+ nv_wr32(priv, 0x406034, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x4064a8, 0x00000000);
+ nv_wr32(priv, 0x4064ac, 0x00003fff);
+ nv_wr32(priv, 0x4064b4, 0x00000000);
+ nv_wr32(priv, 0x4064b8, 0x00000000);
+ if (nv_device(priv)->chipset == 0xd9)
+ nv_wr32(priv, 0x4064bc, 0x00000000);
+ if (nv_device(priv)->chipset == 0xc1 ||
+ nv_device(priv)->chipset == 0xd9) {
+ nv_wr32(priv, 0x4064c0, 0x80140078);
+ nv_wr32(priv, 0x4064c4, 0x0086ffff);
+ }
+}
+
+static void
+nvc0_grctx_generate_tpbus(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x407804, 0x00000023);
+ nv_wr32(priv, 0x40780c, 0x0a418820);
+ nv_wr32(priv, 0x407810, 0x062080e6);
+ nv_wr32(priv, 0x407814, 0x020398a4);
+ nv_wr32(priv, 0x407818, 0x0e629062);
+ nv_wr32(priv, 0x40781c, 0x0a418820);
+ nv_wr32(priv, 0x407820, 0x000000e6);
+ nv_wr32(priv, 0x4078bc, 0x00000103);
+}
+
+static void
+nvc0_grctx_generate_ccache(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x408000, 0x00000000);
+ nv_wr32(priv, 0x408004, 0x00000000);
+ nv_wr32(priv, 0x408008, 0x00000018);
+ nv_wr32(priv, 0x40800c, 0x00000000);
+ nv_wr32(priv, 0x408010, 0x00000000);
+ nv_wr32(priv, 0x408014, 0x00000069);
+ nv_wr32(priv, 0x408018, 0xe100e100);
+ nv_wr32(priv, 0x408064, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
+{
+ int chipset = nv_device(priv)->chipset;
+
+ /* ROPC_BROADCAST */
+ nv_wr32(priv, 0x408800, 0x02802a3c);
+ nv_wr32(priv, 0x408804, 0x00000040);
+ if (chipset == 0xd9) {
+ nv_wr32(priv, 0x408808, 0x1043e005);
+ nv_wr32(priv, 0x408900, 0x3080b801);
+ nv_wr32(priv, 0x408904, 0x1043e005);
+ nv_wr32(priv, 0x408908, 0x00c8102f);
+ } else
+ if (chipset == 0xc1) {
+ nv_wr32(priv, 0x408808, 0x1003e005);
+ nv_wr32(priv, 0x408900, 0x3080b801);
+ nv_wr32(priv, 0x408904, 0x62000001);
+ nv_wr32(priv, 0x408908, 0x00c80929);
+ } else {
+ nv_wr32(priv, 0x408808, 0x0003e00d);
+ nv_wr32(priv, 0x408900, 0x3080b801);
+ nv_wr32(priv, 0x408904, 0x02000001);
+ nv_wr32(priv, 0x408908, 0x00c80929);
+ }
+ nv_wr32(priv, 0x40890c, 0x00000000);
+ nv_wr32(priv, 0x408980, 0x0000011d);
+}
+
+static void
+nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
+{
+ int chipset = nv_device(priv)->chipset;
+ int i;
+
+ /* GPC_BROADCAST */
+ nv_wr32(priv, 0x418380, 0x00000016);
+ nv_wr32(priv, 0x418400, 0x38004e00);
+ nv_wr32(priv, 0x418404, 0x71e0ffff);
+ nv_wr32(priv, 0x418408, 0x00000000);
+ nv_wr32(priv, 0x41840c, 0x00001008);
+ nv_wr32(priv, 0x418410, 0x0fff0fff);
+ nv_wr32(priv, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
+ nv_wr32(priv, 0x418450, 0x00000000);
+ nv_wr32(priv, 0x418454, 0x00000000);
+ nv_wr32(priv, 0x418458, 0x00000000);
+ nv_wr32(priv, 0x41845c, 0x00000000);
+ nv_wr32(priv, 0x418460, 0x00000000);
+ nv_wr32(priv, 0x418464, 0x00000000);
+ nv_wr32(priv, 0x418468, 0x00000001);
+ nv_wr32(priv, 0x41846c, 0x00000000);
+ nv_wr32(priv, 0x418470, 0x00000000);
+ nv_wr32(priv, 0x418600, 0x0000001f);
+ nv_wr32(priv, 0x418684, 0x0000000f);
+ nv_wr32(priv, 0x418700, 0x00000002);
+ nv_wr32(priv, 0x418704, 0x00000080);
+ nv_wr32(priv, 0x418708, 0x00000000);
+ nv_wr32(priv, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
+ nv_wr32(priv, 0x418710, 0x00000000);
+ nv_wr32(priv, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
+ nv_wr32(priv, 0x418808, 0x00000000);
+ nv_wr32(priv, 0x41880c, 0x00000000);
+ nv_wr32(priv, 0x418810, 0x00000000);
+ nv_wr32(priv, 0x418828, 0x00008442);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(priv, 0x418830, 0x10000001);
+ else
+ nv_wr32(priv, 0x418830, 0x00000001);
+ nv_wr32(priv, 0x4188d8, 0x00000008);
+ nv_wr32(priv, 0x4188e0, 0x01000000);
+ nv_wr32(priv, 0x4188e8, 0x00000000);
+ nv_wr32(priv, 0x4188ec, 0x00000000);
+ nv_wr32(priv, 0x4188f0, 0x00000000);
+ nv_wr32(priv, 0x4188f4, 0x00000000);
+ nv_wr32(priv, 0x4188f8, 0x00000000);
+ if (chipset == 0xd9)
+ nv_wr32(priv, 0x4188fc, 0x20100008);
+ else if (chipset == 0xc1)
+ nv_wr32(priv, 0x4188fc, 0x00100018);
+ else
+ nv_wr32(priv, 0x4188fc, 0x00100000);
+ nv_wr32(priv, 0x41891c, 0x00ff00ff);
+ nv_wr32(priv, 0x418924, 0x00000000);
+ nv_wr32(priv, 0x418928, 0x00ffff00);
+ nv_wr32(priv, 0x41892c, 0x0000ff00);
+ for (i = 0; i < 8; i++) {
+ nv_wr32(priv, 0x418a00 + (i * 0x20), 0x00000000);
+ nv_wr32(priv, 0x418a04 + (i * 0x20), 0x00000000);
+ nv_wr32(priv, 0x418a08 + (i * 0x20), 0x00000000);
+ nv_wr32(priv, 0x418a0c + (i * 0x20), 0x00010000);
+ nv_wr32(priv, 0x418a10 + (i * 0x20), 0x00000000);
+ nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000);
+ nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000);
+ }
+ nv_wr32(priv, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
+ nv_wr32(priv, 0x418b08, 0x0a418820);
+ nv_wr32(priv, 0x418b0c, 0x062080e6);
+ nv_wr32(priv, 0x418b10, 0x020398a4);
+ nv_wr32(priv, 0x418b14, 0x0e629062);
+ nv_wr32(priv, 0x418b18, 0x0a418820);
+ nv_wr32(priv, 0x418b1c, 0x000000e6);
+ nv_wr32(priv, 0x418bb8, 0x00000103);
+ nv_wr32(priv, 0x418c08, 0x00000001);
+ nv_wr32(priv, 0x418c10, 0x00000000);
+ nv_wr32(priv, 0x418c14, 0x00000000);
+ nv_wr32(priv, 0x418c18, 0x00000000);
+ nv_wr32(priv, 0x418c1c, 0x00000000);
+ nv_wr32(priv, 0x418c20, 0x00000000);
+ nv_wr32(priv, 0x418c24, 0x00000000);
+ nv_wr32(priv, 0x418c28, 0x00000000);
+ nv_wr32(priv, 0x418c2c, 0x00000000);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(priv, 0x418c6c, 0x00000001);
+ nv_wr32(priv, 0x418c80, 0x20200004);
+ nv_wr32(priv, 0x418c8c, 0x00000001);
+ nv_wr32(priv, 0x419000, 0x00000780);
+ nv_wr32(priv, 0x419004, 0x00000000);
+ nv_wr32(priv, 0x419008, 0x00000000);
+ nv_wr32(priv, 0x419014, 0x00000004);
+}
+
+static void
+nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
+{
+ int chipset = nv_device(priv)->chipset;
+
+ /* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(priv, 0x419818, 0x00000000);
+ nv_wr32(priv, 0x41983c, 0x00038bc7);
+ nv_wr32(priv, 0x419848, 0x00000000);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(priv, 0x419864, 0x00000129);
+ else
+ nv_wr32(priv, 0x419864, 0x0000012a);
+ nv_wr32(priv, 0x419888, 0x00000000);
+ nv_wr32(priv, 0x419a00, 0x000001f0);
+ nv_wr32(priv, 0x419a04, 0x00000001);
+ nv_wr32(priv, 0x419a08, 0x00000023);
+ nv_wr32(priv, 0x419a0c, 0x00020000);
+ nv_wr32(priv, 0x419a10, 0x00000000);
+ nv_wr32(priv, 0x419a14, 0x00000200);
+ nv_wr32(priv, 0x419a1c, 0x00000000);
+ nv_wr32(priv, 0x419a20, 0x00000800);
+	if (chipset == 0xd9)
+		nv_wr32(priv, 0x419ac4, 0x0017f440);
+	else if (chipset != 0xc0 && chipset != 0xc8)
+		nv_wr32(priv, 0x419ac4, 0x0007f440);
+ nv_wr32(priv, 0x419b00, 0x0a418820);
+ nv_wr32(priv, 0x419b04, 0x062080e6);
+ nv_wr32(priv, 0x419b08, 0x020398a4);
+ nv_wr32(priv, 0x419b0c, 0x0e629062);
+ nv_wr32(priv, 0x419b10, 0x0a418820);
+ nv_wr32(priv, 0x419b14, 0x000000e6);
+ nv_wr32(priv, 0x419bd0, 0x00900103);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(priv, 0x419be0, 0x00400001);
+ else
+ nv_wr32(priv, 0x419be0, 0x00000001);
+ nv_wr32(priv, 0x419be4, 0x00000000);
+ nv_wr32(priv, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
+ nv_wr32(priv, 0x419c04, 0x00000006);
+ nv_wr32(priv, 0x419c08, 0x00000002);
+ nv_wr32(priv, 0x419c20, 0x00000000);
+	if (chipset == 0xd9) {
+ nv_wr32(priv, 0x419c24, 0x00084210);
+ nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
+ nv_wr32(priv, 0x419cb0, 0x00020048);
+ } else
+ if (chipset == 0xce || chipset == 0xcf) {
+ nv_wr32(priv, 0x419cb0, 0x00020048);
+ } else {
+ nv_wr32(priv, 0x419cb0, 0x00060048);
+ }
+ nv_wr32(priv, 0x419ce8, 0x00000000);
+ nv_wr32(priv, 0x419cf4, 0x00000183);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(priv, 0x419d20, 0x12180000);
+ else
+ nv_wr32(priv, 0x419d20, 0x02180000);
+ nv_wr32(priv, 0x419d24, 0x00001fff);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(priv, 0x419d44, 0x02180218);
+ nv_wr32(priv, 0x419e04, 0x00000000);
+ nv_wr32(priv, 0x419e08, 0x00000000);
+ nv_wr32(priv, 0x419e0c, 0x00000000);
+ nv_wr32(priv, 0x419e10, 0x00000002);
+ nv_wr32(priv, 0x419e44, 0x001beff2);
+ nv_wr32(priv, 0x419e48, 0x00000000);
+ nv_wr32(priv, 0x419e4c, 0x0000000f);
+ nv_wr32(priv, 0x419e50, 0x00000000);
+ nv_wr32(priv, 0x419e54, 0x00000000);
+ nv_wr32(priv, 0x419e58, 0x00000000);
+ nv_wr32(priv, 0x419e5c, 0x00000000);
+ nv_wr32(priv, 0x419e60, 0x00000000);
+ nv_wr32(priv, 0x419e64, 0x00000000);
+ nv_wr32(priv, 0x419e68, 0x00000000);
+ nv_wr32(priv, 0x419e6c, 0x00000000);
+ nv_wr32(priv, 0x419e70, 0x00000000);
+ nv_wr32(priv, 0x419e74, 0x00000000);
+ nv_wr32(priv, 0x419e78, 0x00000000);
+ nv_wr32(priv, 0x419e7c, 0x00000000);
+ nv_wr32(priv, 0x419e80, 0x00000000);
+ nv_wr32(priv, 0x419e84, 0x00000000);
+ nv_wr32(priv, 0x419e88, 0x00000000);
+ nv_wr32(priv, 0x419e8c, 0x00000000);
+ nv_wr32(priv, 0x419e90, 0x00000000);
+ nv_wr32(priv, 0x419e98, 0x00000000);
+ if (chipset != 0xc0 && chipset != 0xc8)
+ nv_wr32(priv, 0x419ee0, 0x00011110);
+ nv_wr32(priv, 0x419f50, 0x00000000);
+ nv_wr32(priv, 0x419f54, 0x00000000);
+ if (chipset != 0xc0 && chipset != 0xc8)
+ nv_wr32(priv, 0x419f58, 0x00000000);
+}
+
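+/* Generate the initial ("golden") context image: program every
+ * context-switched register and method default, and build the mmio lists
+ * used to patch per-channel buffer addresses into each new context.
+ */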
+int
+nvc0_grctx_generate(struct nvc0_graph_priv *priv)
+{
+ struct nvc0_grctx info;
+ int ret, i, gpc, tpc, id;
+ u32 fermi = nvc0_graph_class(priv);
+ u32 r000260, tmp;
+
+ ret = nvc0_grctx_init(priv, &info);
+ if (ret)
+ return ret;
+
+ r000260 = nv_rd32(priv, 0x000260);
+ nv_wr32(priv, 0x000260, r000260 & ~1);
+ nv_wr32(priv, 0x400208, 0x00000000);
+
+ nvc0_grctx_generate_dispatch(priv);
+ nvc0_grctx_generate_macro(priv);
+ nvc0_grctx_generate_m2mf(priv);
+ nvc0_grctx_generate_unk47xx(priv);
+ nvc0_grctx_generate_shaders(priv);
+ nvc0_grctx_generate_unk60xx(priv);
+ nvc0_grctx_generate_unk64xx(priv);
+ nvc0_grctx_generate_tpbus(priv);
+ nvc0_grctx_generate_ccache(priv);
+ nvc0_grctx_generate_rop(priv);
+ nvc0_grctx_generate_gpc(priv);
+ nvc0_grctx_generate_tp(priv);
+
+ nv_wr32(priv, 0x404154, 0x00000000);
+
+ /* generate per-context mmio list data */
+ mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+ mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+ mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+ mmio_list(0x408004, 0x00000000, 8, 0);
+ mmio_list(0x408008, 0x80000018, 0, 0);
+ mmio_list(0x40800c, 0x00000000, 8, 1);
+ mmio_list(0x408010, 0x80000000, 0, 0);
+ mmio_list(0x418810, 0x80000000, 12, 2);
+ mmio_list(0x419848, 0x10000000, 12, 2);
+ mmio_list(0x419004, 0x00000000, 8, 1);
+ mmio_list(0x419008, 0x00000000, 0, 0);
+ mmio_list(0x418808, 0x00000000, 8, 0);
+ mmio_list(0x41880c, 0x80000018, 0, 0);
+ if (nv_device(priv)->chipset != 0xc1) {
+ tmp = 0x02180000;
+ mmio_list(0x405830, tmp, 0, 0);
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
+ mmio_list(reg, tmp, 0, 0);
+ tmp += 0x0324;
+ }
+ }
+ } else {
+ tmp = 0x02180000;
+ mmio_list(0x405830, 0x00000218 | tmp, 0, 0);
+ mmio_list(0x4064c4, 0x0086ffff, 0, 0);
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
+ mmio_list(reg, 0x10000000 | tmp, 0, 0);
+ tmp += 0x0324;
+ }
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ u32 reg = TPC_UNIT(gpc, tpc, 0x0544);
+ mmio_list(reg, tmp, 0, 0);
+ tmp += 0x0324;
+ }
+ }
+ }
+
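+	/* assign each TPC a sequential id, walking the GPCs round-robin,
+	 * and program each GPC's TPC count
+	 */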
+ for (tpc = 0, id = 0; tpc < 4; tpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tpc < priv->tpc_nr[gpc]) {
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+ id++;
+ }
+
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+ }
+ }
+
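+	/* pack the per-GPC TPC counts into a single word, 4 bits per GPC */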
+ tmp = 0;
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tpc_nr[i] << (i * 4);
+ nv_wr32(priv, 0x406028, tmp);
+ nv_wr32(priv, 0x405870, tmp);
+
+ nv_wr32(priv, 0x40602c, 0x00000000);
+ nv_wr32(priv, 0x405874, 0x00000000);
+ nv_wr32(priv, 0x406030, 0x00000000);
+ nv_wr32(priv, 0x405878, 0x00000000);
+ nv_wr32(priv, 0x406034, 0x00000000);
+ nv_wr32(priv, 0x40587c, 0x00000000);
+
+ if (1) {
+ u8 tpcnr[GPC_MAX], data[TPC_MAX];
+
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ memset(data, 0x1f, sizeof(data));
+
+ gpc = -1;
+ for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpcnr[gpc]--;
+ data[tpc] = gpc;
+ }
+
+ for (i = 0; i < 4; i++)
+ nv_wr32(priv, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+ }
+
+ if (1) {
+ u32 data[6] = {}, data2[2] = {};
+ u8 tpcnr[GPC_MAX];
+ u8 shift, ntpcv;
+
+ /* calculate first set of magics */
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
+ gpc = -1;
+ for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpcnr[gpc]--;
+
+ data[tpc / 6] |= gpc << ((tpc % 6) * 5);
+ }
+
+ for (; tpc < 32; tpc++)
+ data[tpc / 6] |= 7 << ((tpc % 6) * 5);
+
+ /* and the second... */
+ shift = 0;
+ ntpcv = priv->tpc_total;
+ while (!(ntpcv & (1 << 4))) {
+ ntpcv <<= 1;
+ shift++;
+ }
+
+ data2[0] = (ntpcv << 16);
+ data2[0] |= (shift << 21);
+ data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+ for (i = 1; i < 7; i++)
+ data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+ /* GPC_BROADCAST */
+ nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
+ priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+
+ /* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
+ priv->magic_not_rop_nr |
+ data2[0]);
+ nv_wr32(priv, 0x419be4, data2[1]);
+ for (i = 0; i < 6; i++)
+ nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
+
+ /* UNK78xx */
+ nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
+ priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+ }
+
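+	/* distribute the TPCs across the 32 entries of the 0x406800/0x406c00
+	 * tables, writing each entry's TPC set and its complement
+	 */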
+ if (1) {
+ u32 tpc_mask = 0, tpc_set = 0;
+ u8 tpcnr[GPC_MAX], a, b;
+
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+
+ for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+ a = (i * (priv->tpc_total - 1)) / 32;
+ if (a != b) {
+ b = a;
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ tpc_set |= 1 << ((gpc * 8) + tpc);
+ }
+
+ nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set);
+ nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
+ }
+ }
+
+ nv_wr32(priv, 0x400208, 0x80000000);
+
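+	/* everything from here on is per-context state that is only
+	 * reachable through PGRAPH's indirect command (icmd) interface
+	 */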
+ nv_icmd(priv, 0x00001000, 0x00000004);
+ nv_icmd(priv, 0x000000a9, 0x0000ffff);
+ nv_icmd(priv, 0x00000038, 0x0fac6881);
+ nv_icmd(priv, 0x0000003d, 0x00000001);
+ nv_icmd(priv, 0x000000e8, 0x00000400);
+ nv_icmd(priv, 0x000000e9, 0x00000400);
+ nv_icmd(priv, 0x000000ea, 0x00000400);
+ nv_icmd(priv, 0x000000eb, 0x00000400);
+ nv_icmd(priv, 0x000000ec, 0x00000400);
+ nv_icmd(priv, 0x000000ed, 0x00000400);
+ nv_icmd(priv, 0x000000ee, 0x00000400);
+ nv_icmd(priv, 0x000000ef, 0x00000400);
+ nv_icmd(priv, 0x00000078, 0x00000300);
+ nv_icmd(priv, 0x00000079, 0x00000300);
+ nv_icmd(priv, 0x0000007a, 0x00000300);
+ nv_icmd(priv, 0x0000007b, 0x00000300);
+ nv_icmd(priv, 0x0000007c, 0x00000300);
+ nv_icmd(priv, 0x0000007d, 0x00000300);
+ nv_icmd(priv, 0x0000007e, 0x00000300);
+ nv_icmd(priv, 0x0000007f, 0x00000300);
+ nv_icmd(priv, 0x00000050, 0x00000011);
+ nv_icmd(priv, 0x00000058, 0x00000008);
+ nv_icmd(priv, 0x00000059, 0x00000008);
+ nv_icmd(priv, 0x0000005a, 0x00000008);
+ nv_icmd(priv, 0x0000005b, 0x00000008);
+ nv_icmd(priv, 0x0000005c, 0x00000008);
+ nv_icmd(priv, 0x0000005d, 0x00000008);
+ nv_icmd(priv, 0x0000005e, 0x00000008);
+ nv_icmd(priv, 0x0000005f, 0x00000008);
+ nv_icmd(priv, 0x00000208, 0x00000001);
+ nv_icmd(priv, 0x00000209, 0x00000001);
+ nv_icmd(priv, 0x0000020a, 0x00000001);
+ nv_icmd(priv, 0x0000020b, 0x00000001);
+ nv_icmd(priv, 0x0000020c, 0x00000001);
+ nv_icmd(priv, 0x0000020d, 0x00000001);
+ nv_icmd(priv, 0x0000020e, 0x00000001);
+ nv_icmd(priv, 0x0000020f, 0x00000001);
+ nv_icmd(priv, 0x00000081, 0x00000001);
+ nv_icmd(priv, 0x00000085, 0x00000004);
+ nv_icmd(priv, 0x00000088, 0x00000400);
+ nv_icmd(priv, 0x00000090, 0x00000300);
+ nv_icmd(priv, 0x00000098, 0x00001001);
+ nv_icmd(priv, 0x000000e3, 0x00000001);
+ nv_icmd(priv, 0x000000da, 0x00000001);
+ nv_icmd(priv, 0x000000f8, 0x00000003);
+ nv_icmd(priv, 0x000000fa, 0x00000001);
+ nv_icmd(priv, 0x0000009f, 0x0000ffff);
+ nv_icmd(priv, 0x000000a0, 0x0000ffff);
+ nv_icmd(priv, 0x000000a1, 0x0000ffff);
+ nv_icmd(priv, 0x000000a2, 0x0000ffff);
+ nv_icmd(priv, 0x000000b1, 0x00000001);
+ nv_icmd(priv, 0x000000b2, 0x00000000);
+ nv_icmd(priv, 0x000000b3, 0x00000000);
+ nv_icmd(priv, 0x000000b4, 0x00000000);
+ nv_icmd(priv, 0x000000b5, 0x00000000);
+ nv_icmd(priv, 0x000000b6, 0x00000000);
+ nv_icmd(priv, 0x000000b7, 0x00000000);
+ nv_icmd(priv, 0x000000b8, 0x00000000);
+ nv_icmd(priv, 0x000000b9, 0x00000000);
+ nv_icmd(priv, 0x000000ba, 0x00000000);
+ nv_icmd(priv, 0x000000bb, 0x00000000);
+ nv_icmd(priv, 0x000000bc, 0x00000000);
+ nv_icmd(priv, 0x000000bd, 0x00000000);
+ nv_icmd(priv, 0x000000be, 0x00000000);
+ nv_icmd(priv, 0x000000bf, 0x00000000);
+ nv_icmd(priv, 0x000000c0, 0x00000000);
+ nv_icmd(priv, 0x000000c1, 0x00000000);
+ nv_icmd(priv, 0x000000c2, 0x00000000);
+ nv_icmd(priv, 0x000000c3, 0x00000000);
+ nv_icmd(priv, 0x000000c4, 0x00000000);
+ nv_icmd(priv, 0x000000c5, 0x00000000);
+ nv_icmd(priv, 0x000000c6, 0x00000000);
+ nv_icmd(priv, 0x000000c7, 0x00000000);
+ nv_icmd(priv, 0x000000c8, 0x00000000);
+ nv_icmd(priv, 0x000000c9, 0x00000000);
+ nv_icmd(priv, 0x000000ca, 0x00000000);
+ nv_icmd(priv, 0x000000cb, 0x00000000);
+ nv_icmd(priv, 0x000000cc, 0x00000000);
+ nv_icmd(priv, 0x000000cd, 0x00000000);
+ nv_icmd(priv, 0x000000ce, 0x00000000);
+ nv_icmd(priv, 0x000000cf, 0x00000000);
+ nv_icmd(priv, 0x000000d0, 0x00000000);
+ nv_icmd(priv, 0x000000d1, 0x00000000);
+ nv_icmd(priv, 0x000000d2, 0x00000000);
+ nv_icmd(priv, 0x000000d3, 0x00000000);
+ nv_icmd(priv, 0x000000d4, 0x00000000);
+ nv_icmd(priv, 0x000000d5, 0x00000000);
+ nv_icmd(priv, 0x000000d6, 0x00000000);
+ nv_icmd(priv, 0x000000d7, 0x00000000);
+ nv_icmd(priv, 0x000000d8, 0x00000000);
+ nv_icmd(priv, 0x000000d9, 0x00000000);
+ nv_icmd(priv, 0x00000210, 0x00000040);
+ nv_icmd(priv, 0x00000211, 0x00000040);
+ nv_icmd(priv, 0x00000212, 0x00000040);
+ nv_icmd(priv, 0x00000213, 0x00000040);
+ nv_icmd(priv, 0x00000214, 0x00000040);
+ nv_icmd(priv, 0x00000215, 0x00000040);
+ nv_icmd(priv, 0x00000216, 0x00000040);
+ nv_icmd(priv, 0x00000217, 0x00000040);
+ if (nv_device(priv)->chipset == 0xd9) {
+ for (i = 0x0400; i <= 0x0417; i++)
+ nv_icmd(priv, i, 0x00000040);
+ }
+ nv_icmd(priv, 0x00000218, 0x0000c080);
+ nv_icmd(priv, 0x00000219, 0x0000c080);
+ nv_icmd(priv, 0x0000021a, 0x0000c080);
+ nv_icmd(priv, 0x0000021b, 0x0000c080);
+ nv_icmd(priv, 0x0000021c, 0x0000c080);
+ nv_icmd(priv, 0x0000021d, 0x0000c080);
+ nv_icmd(priv, 0x0000021e, 0x0000c080);
+ nv_icmd(priv, 0x0000021f, 0x0000c080);
+ if (nv_device(priv)->chipset == 0xd9) {
+ for (i = 0x0440; i <= 0x0457; i++)
+ nv_icmd(priv, i, 0x0000c080);
+ }
+ nv_icmd(priv, 0x000000ad, 0x0000013e);
+ nv_icmd(priv, 0x000000e1, 0x00000010);
+ nv_icmd(priv, 0x00000290, 0x00000000);
+ nv_icmd(priv, 0x00000291, 0x00000000);
+ nv_icmd(priv, 0x00000292, 0x00000000);
+ nv_icmd(priv, 0x00000293, 0x00000000);
+ nv_icmd(priv, 0x00000294, 0x00000000);
+ nv_icmd(priv, 0x00000295, 0x00000000);
+ nv_icmd(priv, 0x00000296, 0x00000000);
+ nv_icmd(priv, 0x00000297, 0x00000000);
+ nv_icmd(priv, 0x00000298, 0x00000000);
+ nv_icmd(priv, 0x00000299, 0x00000000);
+ nv_icmd(priv, 0x0000029a, 0x00000000);
+ nv_icmd(priv, 0x0000029b, 0x00000000);
+ nv_icmd(priv, 0x0000029c, 0x00000000);
+ nv_icmd(priv, 0x0000029d, 0x00000000);
+ nv_icmd(priv, 0x0000029e, 0x00000000);
+ nv_icmd(priv, 0x0000029f, 0x00000000);
+ nv_icmd(priv, 0x000003b0, 0x00000000);
+ nv_icmd(priv, 0x000003b1, 0x00000000);
+ nv_icmd(priv, 0x000003b2, 0x00000000);
+ nv_icmd(priv, 0x000003b3, 0x00000000);
+ nv_icmd(priv, 0x000003b4, 0x00000000);
+ nv_icmd(priv, 0x000003b5, 0x00000000);
+ nv_icmd(priv, 0x000003b6, 0x00000000);
+ nv_icmd(priv, 0x000003b7, 0x00000000);
+ nv_icmd(priv, 0x000003b8, 0x00000000);
+ nv_icmd(priv, 0x000003b9, 0x00000000);
+ nv_icmd(priv, 0x000003ba, 0x00000000);
+ nv_icmd(priv, 0x000003bb, 0x00000000);
+ nv_icmd(priv, 0x000003bc, 0x00000000);
+ nv_icmd(priv, 0x000003bd, 0x00000000);
+ nv_icmd(priv, 0x000003be, 0x00000000);
+ nv_icmd(priv, 0x000003bf, 0x00000000);
+ nv_icmd(priv, 0x000002a0, 0x00000000);
+ nv_icmd(priv, 0x000002a1, 0x00000000);
+ nv_icmd(priv, 0x000002a2, 0x00000000);
+ nv_icmd(priv, 0x000002a3, 0x00000000);
+ nv_icmd(priv, 0x000002a4, 0x00000000);
+ nv_icmd(priv, 0x000002a5, 0x00000000);
+ nv_icmd(priv, 0x000002a6, 0x00000000);
+ nv_icmd(priv, 0x000002a7, 0x00000000);
+ nv_icmd(priv, 0x000002a8, 0x00000000);
+ nv_icmd(priv, 0x000002a9, 0x00000000);
+ nv_icmd(priv, 0x000002aa, 0x00000000);
+ nv_icmd(priv, 0x000002ab, 0x00000000);
+ nv_icmd(priv, 0x000002ac, 0x00000000);
+ nv_icmd(priv, 0x000002ad, 0x00000000);
+ nv_icmd(priv, 0x000002ae, 0x00000000);
+ nv_icmd(priv, 0x000002af, 0x00000000);
+ nv_icmd(priv, 0x00000420, 0x00000000);
+ nv_icmd(priv, 0x00000421, 0x00000000);
+ nv_icmd(priv, 0x00000422, 0x00000000);
+ nv_icmd(priv, 0x00000423, 0x00000000);
+ nv_icmd(priv, 0x00000424, 0x00000000);
+ nv_icmd(priv, 0x00000425, 0x00000000);
+ nv_icmd(priv, 0x00000426, 0x00000000);
+ nv_icmd(priv, 0x00000427, 0x00000000);
+ nv_icmd(priv, 0x00000428, 0x00000000);
+ nv_icmd(priv, 0x00000429, 0x00000000);
+ nv_icmd(priv, 0x0000042a, 0x00000000);
+ nv_icmd(priv, 0x0000042b, 0x00000000);
+ nv_icmd(priv, 0x0000042c, 0x00000000);
+ nv_icmd(priv, 0x0000042d, 0x00000000);
+ nv_icmd(priv, 0x0000042e, 0x00000000);
+ nv_icmd(priv, 0x0000042f, 0x00000000);
+ nv_icmd(priv, 0x000002b0, 0x00000000);
+ nv_icmd(priv, 0x000002b1, 0x00000000);
+ nv_icmd(priv, 0x000002b2, 0x00000000);
+ nv_icmd(priv, 0x000002b3, 0x00000000);
+ nv_icmd(priv, 0x000002b4, 0x00000000);
+ nv_icmd(priv, 0x000002b5, 0x00000000);
+ nv_icmd(priv, 0x000002b6, 0x00000000);
+ nv_icmd(priv, 0x000002b7, 0x00000000);
+ nv_icmd(priv, 0x000002b8, 0x00000000);
+ nv_icmd(priv, 0x000002b9, 0x00000000);
+ nv_icmd(priv, 0x000002ba, 0x00000000);
+ nv_icmd(priv, 0x000002bb, 0x00000000);
+ nv_icmd(priv, 0x000002bc, 0x00000000);
+ nv_icmd(priv, 0x000002bd, 0x00000000);
+ nv_icmd(priv, 0x000002be, 0x00000000);
+ nv_icmd(priv, 0x000002bf, 0x00000000);
+ nv_icmd(priv, 0x00000430, 0x00000000);
+ nv_icmd(priv, 0x00000431, 0x00000000);
+ nv_icmd(priv, 0x00000432, 0x00000000);
+ nv_icmd(priv, 0x00000433, 0x00000000);
+ nv_icmd(priv, 0x00000434, 0x00000000);
+ nv_icmd(priv, 0x00000435, 0x00000000);
+ nv_icmd(priv, 0x00000436, 0x00000000);
+ nv_icmd(priv, 0x00000437, 0x00000000);
+ nv_icmd(priv, 0x00000438, 0x00000000);
+ nv_icmd(priv, 0x00000439, 0x00000000);
+ nv_icmd(priv, 0x0000043a, 0x00000000);
+ nv_icmd(priv, 0x0000043b, 0x00000000);
+ nv_icmd(priv, 0x0000043c, 0x00000000);
+ nv_icmd(priv, 0x0000043d, 0x00000000);
+ nv_icmd(priv, 0x0000043e, 0x00000000);
+ nv_icmd(priv, 0x0000043f, 0x00000000);
+ nv_icmd(priv, 0x000002c0, 0x00000000);
+ nv_icmd(priv, 0x000002c1, 0x00000000);
+ nv_icmd(priv, 0x000002c2, 0x00000000);
+ nv_icmd(priv, 0x000002c3, 0x00000000);
+ nv_icmd(priv, 0x000002c4, 0x00000000);
+ nv_icmd(priv, 0x000002c5, 0x00000000);
+ nv_icmd(priv, 0x000002c6, 0x00000000);
+ nv_icmd(priv, 0x000002c7, 0x00000000);
+ nv_icmd(priv, 0x000002c8, 0x00000000);
+ nv_icmd(priv, 0x000002c9, 0x00000000);
+ nv_icmd(priv, 0x000002ca, 0x00000000);
+ nv_icmd(priv, 0x000002cb, 0x00000000);
+ nv_icmd(priv, 0x000002cc, 0x00000000);
+ nv_icmd(priv, 0x000002cd, 0x00000000);
+ nv_icmd(priv, 0x000002ce, 0x00000000);
+ nv_icmd(priv, 0x000002cf, 0x00000000);
+ nv_icmd(priv, 0x000004d0, 0x00000000);
+ nv_icmd(priv, 0x000004d1, 0x00000000);
+ nv_icmd(priv, 0x000004d2, 0x00000000);
+ nv_icmd(priv, 0x000004d3, 0x00000000);
+ nv_icmd(priv, 0x000004d4, 0x00000000);
+ nv_icmd(priv, 0x000004d5, 0x00000000);
+ nv_icmd(priv, 0x000004d6, 0x00000000);
+ nv_icmd(priv, 0x000004d7, 0x00000000);
+ nv_icmd(priv, 0x000004d8, 0x00000000);
+ nv_icmd(priv, 0x000004d9, 0x00000000);
+ nv_icmd(priv, 0x000004da, 0x00000000);
+ nv_icmd(priv, 0x000004db, 0x00000000);
+ nv_icmd(priv, 0x000004dc, 0x00000000);
+ nv_icmd(priv, 0x000004dd, 0x00000000);
+ nv_icmd(priv, 0x000004de, 0x00000000);
+ nv_icmd(priv, 0x000004df, 0x00000000);
+ nv_icmd(priv, 0x00000720, 0x00000000);
+ nv_icmd(priv, 0x00000721, 0x00000000);
+ nv_icmd(priv, 0x00000722, 0x00000000);
+ nv_icmd(priv, 0x00000723, 0x00000000);
+ nv_icmd(priv, 0x00000724, 0x00000000);
+ nv_icmd(priv, 0x00000725, 0x00000000);
+ nv_icmd(priv, 0x00000726, 0x00000000);
+ nv_icmd(priv, 0x00000727, 0x00000000);
+ nv_icmd(priv, 0x00000728, 0x00000000);
+ nv_icmd(priv, 0x00000729, 0x00000000);
+ nv_icmd(priv, 0x0000072a, 0x00000000);
+ nv_icmd(priv, 0x0000072b, 0x00000000);
+ nv_icmd(priv, 0x0000072c, 0x00000000);
+ nv_icmd(priv, 0x0000072d, 0x00000000);
+ nv_icmd(priv, 0x0000072e, 0x00000000);
+ nv_icmd(priv, 0x0000072f, 0x00000000);
+ nv_icmd(priv, 0x000008c0, 0x00000000);
+ nv_icmd(priv, 0x000008c1, 0x00000000);
+ nv_icmd(priv, 0x000008c2, 0x00000000);
+ nv_icmd(priv, 0x000008c3, 0x00000000);
+ nv_icmd(priv, 0x000008c4, 0x00000000);
+ nv_icmd(priv, 0x000008c5, 0x00000000);
+ nv_icmd(priv, 0x000008c6, 0x00000000);
+ nv_icmd(priv, 0x000008c7, 0x00000000);
+ nv_icmd(priv, 0x000008c8, 0x00000000);
+ nv_icmd(priv, 0x000008c9, 0x00000000);
+ nv_icmd(priv, 0x000008ca, 0x00000000);
+ nv_icmd(priv, 0x000008cb, 0x00000000);
+ nv_icmd(priv, 0x000008cc, 0x00000000);
+ nv_icmd(priv, 0x000008cd, 0x00000000);
+ nv_icmd(priv, 0x000008ce, 0x00000000);
+ nv_icmd(priv, 0x000008cf, 0x00000000);
+ nv_icmd(priv, 0x00000890, 0x00000000);
+ nv_icmd(priv, 0x00000891, 0x00000000);
+ nv_icmd(priv, 0x00000892, 0x00000000);
+ nv_icmd(priv, 0x00000893, 0x00000000);
+ nv_icmd(priv, 0x00000894, 0x00000000);
+ nv_icmd(priv, 0x00000895, 0x00000000);
+ nv_icmd(priv, 0x00000896, 0x00000000);
+ nv_icmd(priv, 0x00000897, 0x00000000);
+ nv_icmd(priv, 0x00000898, 0x00000000);
+ nv_icmd(priv, 0x00000899, 0x00000000);
+ nv_icmd(priv, 0x0000089a, 0x00000000);
+ nv_icmd(priv, 0x0000089b, 0x00000000);
+ nv_icmd(priv, 0x0000089c, 0x00000000);
+ nv_icmd(priv, 0x0000089d, 0x00000000);
+ nv_icmd(priv, 0x0000089e, 0x00000000);
+ nv_icmd(priv, 0x0000089f, 0x00000000);
+ nv_icmd(priv, 0x000008e0, 0x00000000);
+ nv_icmd(priv, 0x000008e1, 0x00000000);
+ nv_icmd(priv, 0x000008e2, 0x00000000);
+ nv_icmd(priv, 0x000008e3, 0x00000000);
+ nv_icmd(priv, 0x000008e4, 0x00000000);
+ nv_icmd(priv, 0x000008e5, 0x00000000);
+ nv_icmd(priv, 0x000008e6, 0x00000000);
+ nv_icmd(priv, 0x000008e7, 0x00000000);
+ nv_icmd(priv, 0x000008e8, 0x00000000);
+ nv_icmd(priv, 0x000008e9, 0x00000000);
+ nv_icmd(priv, 0x000008ea, 0x00000000);
+ nv_icmd(priv, 0x000008eb, 0x00000000);
+ nv_icmd(priv, 0x000008ec, 0x00000000);
+ nv_icmd(priv, 0x000008ed, 0x00000000);
+ nv_icmd(priv, 0x000008ee, 0x00000000);
+ nv_icmd(priv, 0x000008ef, 0x00000000);
+ nv_icmd(priv, 0x000008a0, 0x00000000);
+ nv_icmd(priv, 0x000008a1, 0x00000000);
+ nv_icmd(priv, 0x000008a2, 0x00000000);
+ nv_icmd(priv, 0x000008a3, 0x00000000);
+ nv_icmd(priv, 0x000008a4, 0x00000000);
+ nv_icmd(priv, 0x000008a5, 0x00000000);
+ nv_icmd(priv, 0x000008a6, 0x00000000);
+ nv_icmd(priv, 0x000008a7, 0x00000000);
+ nv_icmd(priv, 0x000008a8, 0x00000000);
+ nv_icmd(priv, 0x000008a9, 0x00000000);
+ nv_icmd(priv, 0x000008aa, 0x00000000);
+ nv_icmd(priv, 0x000008ab, 0x00000000);
+ nv_icmd(priv, 0x000008ac, 0x00000000);
+ nv_icmd(priv, 0x000008ad, 0x00000000);
+ nv_icmd(priv, 0x000008ae, 0x00000000);
+ nv_icmd(priv, 0x000008af, 0x00000000);
+ nv_icmd(priv, 0x000008f0, 0x00000000);
+ nv_icmd(priv, 0x000008f1, 0x00000000);
+ nv_icmd(priv, 0x000008f2, 0x00000000);
+ nv_icmd(priv, 0x000008f3, 0x00000000);
+ nv_icmd(priv, 0x000008f4, 0x00000000);
+ nv_icmd(priv, 0x000008f5, 0x00000000);
+ nv_icmd(priv, 0x000008f6, 0x00000000);
+ nv_icmd(priv, 0x000008f7, 0x00000000);
+ nv_icmd(priv, 0x000008f8, 0x00000000);
+ nv_icmd(priv, 0x000008f9, 0x00000000);
+ nv_icmd(priv, 0x000008fa, 0x00000000);
+ nv_icmd(priv, 0x000008fb, 0x00000000);
+ nv_icmd(priv, 0x000008fc, 0x00000000);
+ nv_icmd(priv, 0x000008fd, 0x00000000);
+ nv_icmd(priv, 0x000008fe, 0x00000000);
+ nv_icmd(priv, 0x000008ff, 0x00000000);
+ nv_icmd(priv, 0x0000094c, 0x000000ff);
+ nv_icmd(priv, 0x0000094d, 0xffffffff);
+ nv_icmd(priv, 0x0000094e, 0x00000002);
+ nv_icmd(priv, 0x000002ec, 0x00000001);
+ nv_icmd(priv, 0x00000303, 0x00000001);
+ nv_icmd(priv, 0x000002e6, 0x00000001);
+ nv_icmd(priv, 0x00000466, 0x00000052);
+ nv_icmd(priv, 0x00000301, 0x3f800000);
+ nv_icmd(priv, 0x00000304, 0x30201000);
+ nv_icmd(priv, 0x00000305, 0x70605040);
+ nv_icmd(priv, 0x00000306, 0xb8a89888);
+ nv_icmd(priv, 0x00000307, 0xf8e8d8c8);
+ nv_icmd(priv, 0x0000030a, 0x00ffff00);
+ nv_icmd(priv, 0x0000030b, 0x0000001a);
+ nv_icmd(priv, 0x0000030c, 0x00000001);
+ nv_icmd(priv, 0x00000318, 0x00000001);
+ nv_icmd(priv, 0x00000340, 0x00000000);
+ nv_icmd(priv, 0x00000375, 0x00000001);
+ nv_icmd(priv, 0x00000351, 0x00000100);
+ nv_icmd(priv, 0x0000037d, 0x00000006);
+ nv_icmd(priv, 0x000003a0, 0x00000002);
+ nv_icmd(priv, 0x000003aa, 0x00000001);
+ nv_icmd(priv, 0x000003a9, 0x00000001);
+ nv_icmd(priv, 0x00000380, 0x00000001);
+ nv_icmd(priv, 0x00000360, 0x00000040);
+ nv_icmd(priv, 0x00000366, 0x00000000);
+ nv_icmd(priv, 0x00000367, 0x00000000);
+ nv_icmd(priv, 0x00000368, 0x00001fff);
+ nv_icmd(priv, 0x00000370, 0x00000000);
+ nv_icmd(priv, 0x00000371, 0x00000000);
+ nv_icmd(priv, 0x00000372, 0x003fffff);
+ nv_icmd(priv, 0x0000037a, 0x00000012);
+ nv_icmd(priv, 0x000005e0, 0x00000022);
+ nv_icmd(priv, 0x000005e1, 0x00000022);
+ nv_icmd(priv, 0x000005e2, 0x00000022);
+ nv_icmd(priv, 0x000005e3, 0x00000022);
+ nv_icmd(priv, 0x000005e4, 0x00000022);
+ nv_icmd(priv, 0x00000619, 0x00000003);
+ nv_icmd(priv, 0x00000811, 0x00000003);
+ nv_icmd(priv, 0x00000812, 0x00000004);
+ nv_icmd(priv, 0x00000813, 0x00000006);
+ nv_icmd(priv, 0x00000814, 0x00000008);
+ nv_icmd(priv, 0x00000815, 0x0000000b);
+ nv_icmd(priv, 0x00000800, 0x00000001);
+ nv_icmd(priv, 0x00000801, 0x00000001);
+ nv_icmd(priv, 0x00000802, 0x00000001);
+ nv_icmd(priv, 0x00000803, 0x00000001);
+ nv_icmd(priv, 0x00000804, 0x00000001);
+ nv_icmd(priv, 0x00000805, 0x00000001);
+ nv_icmd(priv, 0x00000632, 0x00000001);
+ nv_icmd(priv, 0x00000633, 0x00000002);
+ nv_icmd(priv, 0x00000634, 0x00000003);
+ nv_icmd(priv, 0x00000635, 0x00000004);
+ nv_icmd(priv, 0x00000654, 0x3f800000);
+ nv_icmd(priv, 0x00000657, 0x3f800000);
+ nv_icmd(priv, 0x00000655, 0x3f800000);
+ nv_icmd(priv, 0x00000656, 0x3f800000);
+ nv_icmd(priv, 0x000006cd, 0x3f800000);
+ nv_icmd(priv, 0x000007f5, 0x3f800000);
+ nv_icmd(priv, 0x000007dc, 0x39291909);
+ nv_icmd(priv, 0x000007dd, 0x79695949);
+ nv_icmd(priv, 0x000007de, 0xb9a99989);
+ nv_icmd(priv, 0x000007df, 0xf9e9d9c9);
+ nv_icmd(priv, 0x000007e8, 0x00003210);
+ nv_icmd(priv, 0x000007e9, 0x00007654);
+ nv_icmd(priv, 0x000007ea, 0x00000098);
+ nv_icmd(priv, 0x000007ec, 0x39291909);
+ nv_icmd(priv, 0x000007ed, 0x79695949);
+ nv_icmd(priv, 0x000007ee, 0xb9a99989);
+ nv_icmd(priv, 0x000007ef, 0xf9e9d9c9);
+ nv_icmd(priv, 0x000007f0, 0x00003210);
+ nv_icmd(priv, 0x000007f1, 0x00007654);
+ nv_icmd(priv, 0x000007f2, 0x00000098);
+ nv_icmd(priv, 0x000005a5, 0x00000001);
+ nv_icmd(priv, 0x00000980, 0x00000000);
+ nv_icmd(priv, 0x00000981, 0x00000000);
+ nv_icmd(priv, 0x00000982, 0x00000000);
+ nv_icmd(priv, 0x00000983, 0x00000000);
+ nv_icmd(priv, 0x00000984, 0x00000000);
+ nv_icmd(priv, 0x00000985, 0x00000000);
+ nv_icmd(priv, 0x00000986, 0x00000000);
+ nv_icmd(priv, 0x00000987, 0x00000000);
+ nv_icmd(priv, 0x00000988, 0x00000000);
+ nv_icmd(priv, 0x00000989, 0x00000000);
+ nv_icmd(priv, 0x0000098a, 0x00000000);
+ nv_icmd(priv, 0x0000098b, 0x00000000);
+ nv_icmd(priv, 0x0000098c, 0x00000000);
+ nv_icmd(priv, 0x0000098d, 0x00000000);
+ nv_icmd(priv, 0x0000098e, 0x00000000);
+ nv_icmd(priv, 0x0000098f, 0x00000000);
+ nv_icmd(priv, 0x00000990, 0x00000000);
+ nv_icmd(priv, 0x00000991, 0x00000000);
+ nv_icmd(priv, 0x00000992, 0x00000000);
+ nv_icmd(priv, 0x00000993, 0x00000000);
+ nv_icmd(priv, 0x00000994, 0x00000000);
+ nv_icmd(priv, 0x00000995, 0x00000000);
+ nv_icmd(priv, 0x00000996, 0x00000000);
+ nv_icmd(priv, 0x00000997, 0x00000000);
+ nv_icmd(priv, 0x00000998, 0x00000000);
+ nv_icmd(priv, 0x00000999, 0x00000000);
+ nv_icmd(priv, 0x0000099a, 0x00000000);
+ nv_icmd(priv, 0x0000099b, 0x00000000);
+ nv_icmd(priv, 0x0000099c, 0x00000000);
+ nv_icmd(priv, 0x0000099d, 0x00000000);
+ nv_icmd(priv, 0x0000099e, 0x00000000);
+ nv_icmd(priv, 0x0000099f, 0x00000000);
+ nv_icmd(priv, 0x000009a0, 0x00000000);
+ nv_icmd(priv, 0x000009a1, 0x00000000);
+ nv_icmd(priv, 0x000009a2, 0x00000000);
+ nv_icmd(priv, 0x000009a3, 0x00000000);
+ nv_icmd(priv, 0x000009a4, 0x00000000);
+ nv_icmd(priv, 0x000009a5, 0x00000000);
+ nv_icmd(priv, 0x000009a6, 0x00000000);
+ nv_icmd(priv, 0x000009a7, 0x00000000);
+ nv_icmd(priv, 0x000009a8, 0x00000000);
+ nv_icmd(priv, 0x000009a9, 0x00000000);
+ nv_icmd(priv, 0x000009aa, 0x00000000);
+ nv_icmd(priv, 0x000009ab, 0x00000000);
+ nv_icmd(priv, 0x000009ac, 0x00000000);
+ nv_icmd(priv, 0x000009ad, 0x00000000);
+ nv_icmd(priv, 0x000009ae, 0x00000000);
+ nv_icmd(priv, 0x000009af, 0x00000000);
+ nv_icmd(priv, 0x000009b0, 0x00000000);
+ nv_icmd(priv, 0x000009b1, 0x00000000);
+ nv_icmd(priv, 0x000009b2, 0x00000000);
+ nv_icmd(priv, 0x000009b3, 0x00000000);
+ nv_icmd(priv, 0x000009b4, 0x00000000);
+ nv_icmd(priv, 0x000009b5, 0x00000000);
+ nv_icmd(priv, 0x000009b6, 0x00000000);
+ nv_icmd(priv, 0x000009b7, 0x00000000);
+ nv_icmd(priv, 0x000009b8, 0x00000000);
+ nv_icmd(priv, 0x000009b9, 0x00000000);
+ nv_icmd(priv, 0x000009ba, 0x00000000);
+ nv_icmd(priv, 0x000009bb, 0x00000000);
+ nv_icmd(priv, 0x000009bc, 0x00000000);
+ nv_icmd(priv, 0x000009bd, 0x00000000);
+ nv_icmd(priv, 0x000009be, 0x00000000);
+ nv_icmd(priv, 0x000009bf, 0x00000000);
+ nv_icmd(priv, 0x000009c0, 0x00000000);
+ nv_icmd(priv, 0x000009c1, 0x00000000);
+ nv_icmd(priv, 0x000009c2, 0x00000000);
+ nv_icmd(priv, 0x000009c3, 0x00000000);
+ nv_icmd(priv, 0x000009c4, 0x00000000);
+ nv_icmd(priv, 0x000009c5, 0x00000000);
+ nv_icmd(priv, 0x000009c6, 0x00000000);
+ nv_icmd(priv, 0x000009c7, 0x00000000);
+ nv_icmd(priv, 0x000009c8, 0x00000000);
+ nv_icmd(priv, 0x000009c9, 0x00000000);
+ nv_icmd(priv, 0x000009ca, 0x00000000);
+ nv_icmd(priv, 0x000009cb, 0x00000000);
+ nv_icmd(priv, 0x000009cc, 0x00000000);
+ nv_icmd(priv, 0x000009cd, 0x00000000);
+ nv_icmd(priv, 0x000009ce, 0x00000000);
+ nv_icmd(priv, 0x000009cf, 0x00000000);
+ nv_icmd(priv, 0x000009d0, 0x00000000);
+ nv_icmd(priv, 0x000009d1, 0x00000000);
+ nv_icmd(priv, 0x000009d2, 0x00000000);
+ nv_icmd(priv, 0x000009d3, 0x00000000);
+ nv_icmd(priv, 0x000009d4, 0x00000000);
+ nv_icmd(priv, 0x000009d5, 0x00000000);
+ nv_icmd(priv, 0x000009d6, 0x00000000);
+ nv_icmd(priv, 0x000009d7, 0x00000000);
+ nv_icmd(priv, 0x000009d8, 0x00000000);
+ nv_icmd(priv, 0x000009d9, 0x00000000);
+ nv_icmd(priv, 0x000009da, 0x00000000);
+ nv_icmd(priv, 0x000009db, 0x00000000);
+ nv_icmd(priv, 0x000009dc, 0x00000000);
+ nv_icmd(priv, 0x000009dd, 0x00000000);
+ nv_icmd(priv, 0x000009de, 0x00000000);
+ nv_icmd(priv, 0x000009df, 0x00000000);
+ nv_icmd(priv, 0x000009e0, 0x00000000);
+ nv_icmd(priv, 0x000009e1, 0x00000000);
+ nv_icmd(priv, 0x000009e2, 0x00000000);
+ nv_icmd(priv, 0x000009e3, 0x00000000);
+ nv_icmd(priv, 0x000009e4, 0x00000000);
+ nv_icmd(priv, 0x000009e5, 0x00000000);
+ nv_icmd(priv, 0x000009e6, 0x00000000);
+ nv_icmd(priv, 0x000009e7, 0x00000000);
+ nv_icmd(priv, 0x000009e8, 0x00000000);
+ nv_icmd(priv, 0x000009e9, 0x00000000);
+ nv_icmd(priv, 0x000009ea, 0x00000000);
+ nv_icmd(priv, 0x000009eb, 0x00000000);
+ nv_icmd(priv, 0x000009ec, 0x00000000);
+ nv_icmd(priv, 0x000009ed, 0x00000000);
+ nv_icmd(priv, 0x000009ee, 0x00000000);
+ nv_icmd(priv, 0x000009ef, 0x00000000);
+ nv_icmd(priv, 0x000009f0, 0x00000000);
+ nv_icmd(priv, 0x000009f1, 0x00000000);
+ nv_icmd(priv, 0x000009f2, 0x00000000);
+ nv_icmd(priv, 0x000009f3, 0x00000000);
+ nv_icmd(priv, 0x000009f4, 0x00000000);
+ nv_icmd(priv, 0x000009f5, 0x00000000);
+ nv_icmd(priv, 0x000009f6, 0x00000000);
+ nv_icmd(priv, 0x000009f7, 0x00000000);
+ nv_icmd(priv, 0x000009f8, 0x00000000);
+ nv_icmd(priv, 0x000009f9, 0x00000000);
+ nv_icmd(priv, 0x000009fa, 0x00000000);
+ nv_icmd(priv, 0x000009fb, 0x00000000);
+ nv_icmd(priv, 0x000009fc, 0x00000000);
+ nv_icmd(priv, 0x000009fd, 0x00000000);
+ nv_icmd(priv, 0x000009fe, 0x00000000);
+ nv_icmd(priv, 0x000009ff, 0x00000000);
+ nv_icmd(priv, 0x00000468, 0x00000004);
+ nv_icmd(priv, 0x0000046c, 0x00000001);
+ nv_icmd(priv, 0x00000470, 0x00000000);
+ nv_icmd(priv, 0x00000471, 0x00000000);
+ nv_icmd(priv, 0x00000472, 0x00000000);
+ nv_icmd(priv, 0x00000473, 0x00000000);
+ nv_icmd(priv, 0x00000474, 0x00000000);
+ nv_icmd(priv, 0x00000475, 0x00000000);
+ nv_icmd(priv, 0x00000476, 0x00000000);
+ nv_icmd(priv, 0x00000477, 0x00000000);
+ nv_icmd(priv, 0x00000478, 0x00000000);
+ nv_icmd(priv, 0x00000479, 0x00000000);
+ nv_icmd(priv, 0x0000047a, 0x00000000);
+ nv_icmd(priv, 0x0000047b, 0x00000000);
+ nv_icmd(priv, 0x0000047c, 0x00000000);
+ nv_icmd(priv, 0x0000047d, 0x00000000);
+ nv_icmd(priv, 0x0000047e, 0x00000000);
+ nv_icmd(priv, 0x0000047f, 0x00000000);
+ nv_icmd(priv, 0x00000480, 0x00000000);
+ nv_icmd(priv, 0x00000481, 0x00000000);
+ nv_icmd(priv, 0x00000482, 0x00000000);
+ nv_icmd(priv, 0x00000483, 0x00000000);
+ nv_icmd(priv, 0x00000484, 0x00000000);
+ nv_icmd(priv, 0x00000485, 0x00000000);
+ nv_icmd(priv, 0x00000486, 0x00000000);
+ nv_icmd(priv, 0x00000487, 0x00000000);
+ nv_icmd(priv, 0x00000488, 0x00000000);
+ nv_icmd(priv, 0x00000489, 0x00000000);
+ nv_icmd(priv, 0x0000048a, 0x00000000);
+ nv_icmd(priv, 0x0000048b, 0x00000000);
+ nv_icmd(priv, 0x0000048c, 0x00000000);
+ nv_icmd(priv, 0x0000048d, 0x00000000);
+ nv_icmd(priv, 0x0000048e, 0x00000000);
+ nv_icmd(priv, 0x0000048f, 0x00000000);
+ nv_icmd(priv, 0x00000490, 0x00000000);
+ nv_icmd(priv, 0x00000491, 0x00000000);
+ nv_icmd(priv, 0x00000492, 0x00000000);
+ nv_icmd(priv, 0x00000493, 0x00000000);
+ nv_icmd(priv, 0x00000494, 0x00000000);
+ nv_icmd(priv, 0x00000495, 0x00000000);
+ nv_icmd(priv, 0x00000496, 0x00000000);
+ nv_icmd(priv, 0x00000497, 0x00000000);
+ nv_icmd(priv, 0x00000498, 0x00000000);
+ nv_icmd(priv, 0x00000499, 0x00000000);
+ nv_icmd(priv, 0x0000049a, 0x00000000);
+ nv_icmd(priv, 0x0000049b, 0x00000000);
+ nv_icmd(priv, 0x0000049c, 0x00000000);
+ nv_icmd(priv, 0x0000049d, 0x00000000);
+ nv_icmd(priv, 0x0000049e, 0x00000000);
+ nv_icmd(priv, 0x0000049f, 0x00000000);
+ nv_icmd(priv, 0x000004a0, 0x00000000);
+ nv_icmd(priv, 0x000004a1, 0x00000000);
+ nv_icmd(priv, 0x000004a2, 0x00000000);
+ nv_icmd(priv, 0x000004a3, 0x00000000);
+ nv_icmd(priv, 0x000004a4, 0x00000000);
+ nv_icmd(priv, 0x000004a5, 0x00000000);
+ nv_icmd(priv, 0x000004a6, 0x00000000);
+ nv_icmd(priv, 0x000004a7, 0x00000000);
+ nv_icmd(priv, 0x000004a8, 0x00000000);
+ nv_icmd(priv, 0x000004a9, 0x00000000);
+ nv_icmd(priv, 0x000004aa, 0x00000000);
+ nv_icmd(priv, 0x000004ab, 0x00000000);
+ nv_icmd(priv, 0x000004ac, 0x00000000);
+ nv_icmd(priv, 0x000004ad, 0x00000000);
+ nv_icmd(priv, 0x000004ae, 0x00000000);
+ nv_icmd(priv, 0x000004af, 0x00000000);
+ nv_icmd(priv, 0x000004b0, 0x00000000);
+ nv_icmd(priv, 0x000004b1, 0x00000000);
+ nv_icmd(priv, 0x000004b2, 0x00000000);
+ nv_icmd(priv, 0x000004b3, 0x00000000);
+ nv_icmd(priv, 0x000004b4, 0x00000000);
+ nv_icmd(priv, 0x000004b5, 0x00000000);
+ nv_icmd(priv, 0x000004b6, 0x00000000);
+ nv_icmd(priv, 0x000004b7, 0x00000000);
+ nv_icmd(priv, 0x000004b8, 0x00000000);
+ nv_icmd(priv, 0x000004b9, 0x00000000);
+ nv_icmd(priv, 0x000004ba, 0x00000000);
+ nv_icmd(priv, 0x000004bb, 0x00000000);
+ nv_icmd(priv, 0x000004bc, 0x00000000);
+ nv_icmd(priv, 0x000004bd, 0x00000000);
+ nv_icmd(priv, 0x000004be, 0x00000000);
+ nv_icmd(priv, 0x000004bf, 0x00000000);
+ nv_icmd(priv, 0x000004c0, 0x00000000);
+ nv_icmd(priv, 0x000004c1, 0x00000000);
+ nv_icmd(priv, 0x000004c2, 0x00000000);
+ nv_icmd(priv, 0x000004c3, 0x00000000);
+ nv_icmd(priv, 0x000004c4, 0x00000000);
+ nv_icmd(priv, 0x000004c5, 0x00000000);
+ nv_icmd(priv, 0x000004c6, 0x00000000);
+ nv_icmd(priv, 0x000004c7, 0x00000000);
+ nv_icmd(priv, 0x000004c8, 0x00000000);
+ nv_icmd(priv, 0x000004c9, 0x00000000);
+ nv_icmd(priv, 0x000004ca, 0x00000000);
+ nv_icmd(priv, 0x000004cb, 0x00000000);
+ nv_icmd(priv, 0x000004cc, 0x00000000);
+ nv_icmd(priv, 0x000004cd, 0x00000000);
+ nv_icmd(priv, 0x000004ce, 0x00000000);
+ nv_icmd(priv, 0x000004cf, 0x00000000);
+ nv_icmd(priv, 0x00000510, 0x3f800000);
+ nv_icmd(priv, 0x00000511, 0x3f800000);
+ nv_icmd(priv, 0x00000512, 0x3f800000);
+ nv_icmd(priv, 0x00000513, 0x3f800000);
+ nv_icmd(priv, 0x00000514, 0x3f800000);
+ nv_icmd(priv, 0x00000515, 0x3f800000);
+ nv_icmd(priv, 0x00000516, 0x3f800000);
+ nv_icmd(priv, 0x00000517, 0x3f800000);
+ nv_icmd(priv, 0x00000518, 0x3f800000);
+ nv_icmd(priv, 0x00000519, 0x3f800000);
+ nv_icmd(priv, 0x0000051a, 0x3f800000);
+ nv_icmd(priv, 0x0000051b, 0x3f800000);
+ nv_icmd(priv, 0x0000051c, 0x3f800000);
+ nv_icmd(priv, 0x0000051d, 0x3f800000);
+ nv_icmd(priv, 0x0000051e, 0x3f800000);
+ nv_icmd(priv, 0x0000051f, 0x3f800000);
+ nv_icmd(priv, 0x00000520, 0x000002b6);
+ nv_icmd(priv, 0x00000529, 0x00000001);
+ nv_icmd(priv, 0x00000530, 0xffff0000);
+ nv_icmd(priv, 0x00000531, 0xffff0000);
+ nv_icmd(priv, 0x00000532, 0xffff0000);
+ nv_icmd(priv, 0x00000533, 0xffff0000);
+ nv_icmd(priv, 0x00000534, 0xffff0000);
+ nv_icmd(priv, 0x00000535, 0xffff0000);
+ nv_icmd(priv, 0x00000536, 0xffff0000);
+ nv_icmd(priv, 0x00000537, 0xffff0000);
+ nv_icmd(priv, 0x00000538, 0xffff0000);
+ nv_icmd(priv, 0x00000539, 0xffff0000);
+ nv_icmd(priv, 0x0000053a, 0xffff0000);
+ nv_icmd(priv, 0x0000053b, 0xffff0000);
+ nv_icmd(priv, 0x0000053c, 0xffff0000);
+ nv_icmd(priv, 0x0000053d, 0xffff0000);
+ nv_icmd(priv, 0x0000053e, 0xffff0000);
+ nv_icmd(priv, 0x0000053f, 0xffff0000);
+ nv_icmd(priv, 0x00000585, 0x0000003f);
+ nv_icmd(priv, 0x00000576, 0x00000003);
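+	/* 0x57b is only given an initial value on nvc1/nvd9 */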
+ if (nv_device(priv)->chipset == 0xc1 ||
+ nv_device(priv)->chipset == 0xd9)
+ nv_icmd(priv, 0x0000057b, 0x00000059);
+ nv_icmd(priv, 0x00000586, 0x00000040);
+ nv_icmd(priv, 0x00000582, 0x00000080);
+ nv_icmd(priv, 0x00000583, 0x00000080);
+ nv_icmd(priv, 0x000005c2, 0x00000001);
+ nv_icmd(priv, 0x00000638, 0x00000001);
+ nv_icmd(priv, 0x00000639, 0x00000001);
+ nv_icmd(priv, 0x0000063a, 0x00000002);
+ nv_icmd(priv, 0x0000063b, 0x00000001);
+ nv_icmd(priv, 0x0000063c, 0x00000001);
+ nv_icmd(priv, 0x0000063d, 0x00000002);
+ nv_icmd(priv, 0x0000063e, 0x00000001);
+ nv_icmd(priv, 0x000008b8, 0x00000001);
+ nv_icmd(priv, 0x000008b9, 0x00000001);
+ nv_icmd(priv, 0x000008ba, 0x00000001);
+ nv_icmd(priv, 0x000008bb, 0x00000001);
+ nv_icmd(priv, 0x000008bc, 0x00000001);
+ nv_icmd(priv, 0x000008bd, 0x00000001);
+ nv_icmd(priv, 0x000008be, 0x00000001);
+ nv_icmd(priv, 0x000008bf, 0x00000001);
+ nv_icmd(priv, 0x00000900, 0x00000001);
+ nv_icmd(priv, 0x00000901, 0x00000001);
+ nv_icmd(priv, 0x00000902, 0x00000001);
+ nv_icmd(priv, 0x00000903, 0x00000001);
+ nv_icmd(priv, 0x00000904, 0x00000001);
+ nv_icmd(priv, 0x00000905, 0x00000001);
+ nv_icmd(priv, 0x00000906, 0x00000001);
+ nv_icmd(priv, 0x00000907, 0x00000001);
+ nv_icmd(priv, 0x00000908, 0x00000002);
+ nv_icmd(priv, 0x00000909, 0x00000002);
+ nv_icmd(priv, 0x0000090a, 0x00000002);
+ nv_icmd(priv, 0x0000090b, 0x00000002);
+ nv_icmd(priv, 0x0000090c, 0x00000002);
+ nv_icmd(priv, 0x0000090d, 0x00000002);
+ nv_icmd(priv, 0x0000090e, 0x00000002);
+ nv_icmd(priv, 0x0000090f, 0x00000002);
+ nv_icmd(priv, 0x00000910, 0x00000001);
+ nv_icmd(priv, 0x00000911, 0x00000001);
+ nv_icmd(priv, 0x00000912, 0x00000001);
+ nv_icmd(priv, 0x00000913, 0x00000001);
+ nv_icmd(priv, 0x00000914, 0x00000001);
+ nv_icmd(priv, 0x00000915, 0x00000001);
+ nv_icmd(priv, 0x00000916, 0x00000001);
+ nv_icmd(priv, 0x00000917, 0x00000001);
+ nv_icmd(priv, 0x00000918, 0x00000001);
+ nv_icmd(priv, 0x00000919, 0x00000001);
+ nv_icmd(priv, 0x0000091a, 0x00000001);
+ nv_icmd(priv, 0x0000091b, 0x00000001);
+ nv_icmd(priv, 0x0000091c, 0x00000001);
+ nv_icmd(priv, 0x0000091d, 0x00000001);
+ nv_icmd(priv, 0x0000091e, 0x00000001);
+ nv_icmd(priv, 0x0000091f, 0x00000001);
+ nv_icmd(priv, 0x00000920, 0x00000002);
+ nv_icmd(priv, 0x00000921, 0x00000002);
+ nv_icmd(priv, 0x00000922, 0x00000002);
+ nv_icmd(priv, 0x00000923, 0x00000002);
+ nv_icmd(priv, 0x00000924, 0x00000002);
+ nv_icmd(priv, 0x00000925, 0x00000002);
+ nv_icmd(priv, 0x00000926, 0x00000002);
+ nv_icmd(priv, 0x00000927, 0x00000002);
+ nv_icmd(priv, 0x00000928, 0x00000001);
+ nv_icmd(priv, 0x00000929, 0x00000001);
+ nv_icmd(priv, 0x0000092a, 0x00000001);
+ nv_icmd(priv, 0x0000092b, 0x00000001);
+ nv_icmd(priv, 0x0000092c, 0x00000001);
+ nv_icmd(priv, 0x0000092d, 0x00000001);
+ nv_icmd(priv, 0x0000092e, 0x00000001);
+ nv_icmd(priv, 0x0000092f, 0x00000001);
+ nv_icmd(priv, 0x00000648, 0x00000001);
+ nv_icmd(priv, 0x00000649, 0x00000001);
+ nv_icmd(priv, 0x0000064a, 0x00000001);
+ nv_icmd(priv, 0x0000064b, 0x00000001);
+ nv_icmd(priv, 0x0000064c, 0x00000001);
+ nv_icmd(priv, 0x0000064d, 0x00000001);
+ nv_icmd(priv, 0x0000064e, 0x00000001);
+ nv_icmd(priv, 0x0000064f, 0x00000001);
+ nv_icmd(priv, 0x00000650, 0x00000001);
+ nv_icmd(priv, 0x00000658, 0x0000000f);
+ nv_icmd(priv, 0x000007ff, 0x0000000a);
+ nv_icmd(priv, 0x0000066a, 0x40000000);
+ nv_icmd(priv, 0x0000066b, 0x10000000);
+ nv_icmd(priv, 0x0000066c, 0xffff0000);
+ nv_icmd(priv, 0x0000066d, 0xffff0000);
+ nv_icmd(priv, 0x000007af, 0x00000008);
+ nv_icmd(priv, 0x000007b0, 0x00000008);
+ nv_icmd(priv, 0x000007f6, 0x00000001);
+ nv_icmd(priv, 0x000006b2, 0x00000055);
+ nv_icmd(priv, 0x000007ad, 0x00000003);
+ nv_icmd(priv, 0x00000937, 0x00000001);
+ nv_icmd(priv, 0x00000971, 0x00000008);
+ nv_icmd(priv, 0x00000972, 0x00000040);
+ nv_icmd(priv, 0x00000973, 0x0000012c);
+ nv_icmd(priv, 0x0000097c, 0x00000040);
+ nv_icmd(priv, 0x00000979, 0x00000003);
+ nv_icmd(priv, 0x00000975, 0x00000020);
+ nv_icmd(priv, 0x00000976, 0x00000001);
+ nv_icmd(priv, 0x00000977, 0x00000020);
+ nv_icmd(priv, 0x00000978, 0x00000001);
+ nv_icmd(priv, 0x00000957, 0x00000003);
+ nv_icmd(priv, 0x0000095e, 0x20164010);
+ nv_icmd(priv, 0x0000095f, 0x00000020);
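+	/* 0x97d is initialized on nvd9 only */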
+ if (nv_device(priv)->chipset == 0xd9)
+ nv_icmd(priv, 0x0000097d, 0x00000020);
+ nv_icmd(priv, 0x00000683, 0x00000006);
+ nv_icmd(priv, 0x00000685, 0x003fffff);
+ nv_icmd(priv, 0x00000687, 0x00000c48);
+ nv_icmd(priv, 0x000006a0, 0x00000005);
+ nv_icmd(priv, 0x00000840, 0x00300008);
+ nv_icmd(priv, 0x00000841, 0x04000080);
+ nv_icmd(priv, 0x00000842, 0x00300008);
+ nv_icmd(priv, 0x00000843, 0x04000080);
+ nv_icmd(priv, 0x00000818, 0x00000000);
+ nv_icmd(priv, 0x00000819, 0x00000000);
+ nv_icmd(priv, 0x0000081a, 0x00000000);
+ nv_icmd(priv, 0x0000081b, 0x00000000);
+ nv_icmd(priv, 0x0000081c, 0x00000000);
+ nv_icmd(priv, 0x0000081d, 0x00000000);
+ nv_icmd(priv, 0x0000081e, 0x00000000);
+ nv_icmd(priv, 0x0000081f, 0x00000000);
+ nv_icmd(priv, 0x00000848, 0x00000000);
+ nv_icmd(priv, 0x00000849, 0x00000000);
+ nv_icmd(priv, 0x0000084a, 0x00000000);
+ nv_icmd(priv, 0x0000084b, 0x00000000);
+ nv_icmd(priv, 0x0000084c, 0x00000000);
+ nv_icmd(priv, 0x0000084d, 0x00000000);
+ nv_icmd(priv, 0x0000084e, 0x00000000);
+ nv_icmd(priv, 0x0000084f, 0x00000000);
+ nv_icmd(priv, 0x00000850, 0x00000000);
+ nv_icmd(priv, 0x00000851, 0x00000000);
+ nv_icmd(priv, 0x00000852, 0x00000000);
+ nv_icmd(priv, 0x00000853, 0x00000000);
+ nv_icmd(priv, 0x00000854, 0x00000000);
+ nv_icmd(priv, 0x00000855, 0x00000000);
+ nv_icmd(priv, 0x00000856, 0x00000000);
+ nv_icmd(priv, 0x00000857, 0x00000000);
+ nv_icmd(priv, 0x00000738, 0x00000000);
+ nv_icmd(priv, 0x000006aa, 0x00000001);
+ nv_icmd(priv, 0x000006ab, 0x00000002);
+ nv_icmd(priv, 0x000006ac, 0x00000080);
+ nv_icmd(priv, 0x000006ad, 0x00000100);
+ nv_icmd(priv, 0x000006ae, 0x00000100);
+ nv_icmd(priv, 0x000006b1, 0x00000011);
+ nv_icmd(priv, 0x000006bb, 0x000000cf);
+ nv_icmd(priv, 0x000006ce, 0x2a712488);
+ nv_icmd(priv, 0x00000739, 0x4085c000);
+ nv_icmd(priv, 0x0000073a, 0x00000080);
+ nv_icmd(priv, 0x00000786, 0x80000100);
+ nv_icmd(priv, 0x0000073c, 0x00010100);
+ nv_icmd(priv, 0x0000073d, 0x02800000);
+ nv_icmd(priv, 0x00000787, 0x000000cf);
+ nv_icmd(priv, 0x0000078c, 0x00000008);
+ nv_icmd(priv, 0x00000792, 0x00000001);
+ nv_icmd(priv, 0x00000794, 0x00000001);
+ nv_icmd(priv, 0x00000795, 0x00000001);
+ nv_icmd(priv, 0x00000796, 0x00000001);
+ nv_icmd(priv, 0x00000797, 0x000000cf);
+ nv_icmd(priv, 0x00000836, 0x00000001);
+ nv_icmd(priv, 0x0000079a, 0x00000002);
+ nv_icmd(priv, 0x00000833, 0x04444480);
+ nv_icmd(priv, 0x000007a1, 0x00000001);
+ nv_icmd(priv, 0x000007a3, 0x00000001);
+ nv_icmd(priv, 0x000007a4, 0x00000001);
+ nv_icmd(priv, 0x000007a5, 0x00000001);
+ nv_icmd(priv, 0x00000831, 0x00000004);
+ nv_icmd(priv, 0x0000080c, 0x00000002);
+ nv_icmd(priv, 0x0000080d, 0x00000100);
+ nv_icmd(priv, 0x0000080e, 0x00000100);
+ nv_icmd(priv, 0x0000080f, 0x00000001);
+ nv_icmd(priv, 0x00000823, 0x00000002);
+ nv_icmd(priv, 0x00000824, 0x00000100);
+ nv_icmd(priv, 0x00000825, 0x00000100);
+ nv_icmd(priv, 0x00000826, 0x00000001);
+ nv_icmd(priv, 0x0000095d, 0x00000001);
+ nv_icmd(priv, 0x0000082b, 0x00000004);
+ nv_icmd(priv, 0x00000942, 0x00010001);
+ nv_icmd(priv, 0x00000943, 0x00000001);
+ nv_icmd(priv, 0x00000944, 0x00000022);
+ nv_icmd(priv, 0x000007c5, 0x00010001);
+ nv_icmd(priv, 0x00000834, 0x00000001);
+ nv_icmd(priv, 0x000007c7, 0x00000001);
+ nv_icmd(priv, 0x0000c1b0, 0x0000000f);
+ nv_icmd(priv, 0x0000c1b1, 0x0000000f);
+ nv_icmd(priv, 0x0000c1b2, 0x0000000f);
+ nv_icmd(priv, 0x0000c1b3, 0x0000000f);
+ nv_icmd(priv, 0x0000c1b4, 0x0000000f);
+ nv_icmd(priv, 0x0000c1b5, 0x0000000f);
+ nv_icmd(priv, 0x0000c1b6, 0x0000000f);
+ nv_icmd(priv, 0x0000c1b7, 0x0000000f);
+ nv_icmd(priv, 0x0000c1b8, 0x0fac6881);
+ nv_icmd(priv, 0x0000c1b9, 0x00fac688);
+ nv_icmd(priv, 0x0001e100, 0x00000001);
+ nv_icmd(priv, 0x00001000, 0x00000002);
+ nv_icmd(priv, 0x000006aa, 0x00000001);
+ nv_icmd(priv, 0x000006ad, 0x00000100);
+ nv_icmd(priv, 0x000006ae, 0x00000100);
+ nv_icmd(priv, 0x000006b1, 0x00000011);
+ nv_icmd(priv, 0x0000078c, 0x00000008);
+ nv_icmd(priv, 0x00000792, 0x00000001);
+ nv_icmd(priv, 0x00000794, 0x00000001);
+ nv_icmd(priv, 0x00000795, 0x00000001);
+ nv_icmd(priv, 0x00000796, 0x00000001);
+ nv_icmd(priv, 0x00000797, 0x000000cf);
+ nv_icmd(priv, 0x0000079a, 0x00000002);
+ nv_icmd(priv, 0x00000833, 0x04444480);
+ nv_icmd(priv, 0x000007a1, 0x00000001);
+ nv_icmd(priv, 0x000007a3, 0x00000001);
+ nv_icmd(priv, 0x000007a4, 0x00000001);
+ nv_icmd(priv, 0x000007a5, 0x00000001);
+ nv_icmd(priv, 0x00000831, 0x00000004);
+ nv_icmd(priv, 0x0001e100, 0x00000001);
+ nv_icmd(priv, 0x00001000, 0x00000014);
+ nv_icmd(priv, 0x00000351, 0x00000100);
+ nv_icmd(priv, 0x00000957, 0x00000003);
+ nv_icmd(priv, 0x0000095d, 0x00000001);
+ nv_icmd(priv, 0x0000082b, 0x00000004);
+ nv_icmd(priv, 0x00000942, 0x00010001);
+ nv_icmd(priv, 0x00000943, 0x00000001);
+ nv_icmd(priv, 0x000007c5, 0x00010001);
+ nv_icmd(priv, 0x00000834, 0x00000001);
+ nv_icmd(priv, 0x000007c7, 0x00000001);
+ nv_icmd(priv, 0x0001e100, 0x00000001);
+ nv_icmd(priv, 0x00001000, 0x00000001);
+ nv_icmd(priv, 0x0000080c, 0x00000002);
+ nv_icmd(priv, 0x0000080d, 0x00000100);
+ nv_icmd(priv, 0x0000080e, 0x00000100);
+ nv_icmd(priv, 0x0000080f, 0x00000001);
+ nv_icmd(priv, 0x00000823, 0x00000002);
+ nv_icmd(priv, 0x00000824, 0x00000100);
+ nv_icmd(priv, 0x00000825, 0x00000100);
+ nv_icmd(priv, 0x00000826, 0x00000001);
+ nv_icmd(priv, 0x0001e100, 0x00000001);
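+	/* ICMD list complete: clear the 0x400208 enable bit that was set when
+	 * the upload began (cf. the nve0 variant below) */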
+ nv_wr32(priv, 0x400208, 0x00000000);
+ nv_wr32(priv, 0x404154, 0x00000400);
+
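+	/* generate default method state for each object class the chipset
+	 * exposes, gated on the newest FERMI 3D class it supports */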
+ nvc0_grctx_generate_9097(priv);
+ if (fermi >= 0x9197)
+ nvc0_grctx_generate_9197(priv);
+ if (fermi >= 0x9297)
+ nvc0_grctx_generate_9297(priv);
+ nvc0_grctx_generate_902d(priv);
+ nvc0_grctx_generate_9039(priv);
+ nvc0_grctx_generate_90c0(priv);
+
+ nv_wr32(priv, 0x000260, r000260);
+
+ return nvc0_grctx_fini(&info);
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
new file mode 100644
index 0000000000000000000000000000000000000000..6d8c63931ee6898758b0fdcd93c86771d3e76dea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
@@ -0,0 +1,2788 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
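+/* Upload the initial ("golden") method state for the NVE0 (Kepler) graphics
+ * classes through the PGRAPH ICMD interface.  As with the NVC0 variant, the
+ * register/value pairs below are opaque state, likely captured from traces
+ * of the binary driver rather than derived from documentation.
+ */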
+static void
+nve0_grctx_generate_icmd(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x400208, 0x80000000);
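+	/* setting bit 31 of 0x400208 appears to arm the ICMD interface for the
+	 * method uploads that follow; it is cleared again once the list ends */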
+ nv_icmd(priv, 0x001000, 0x00000004);
+ nv_icmd(priv, 0x000039, 0x00000000);
+ nv_icmd(priv, 0x00003a, 0x00000000);
+ nv_icmd(priv, 0x00003b, 0x00000000);
+ nv_icmd(priv, 0x0000a9, 0x0000ffff);
+ nv_icmd(priv, 0x000038, 0x0fac6881);
+ nv_icmd(priv, 0x00003d, 0x00000001);
+ nv_icmd(priv, 0x0000e8, 0x00000400);
+ nv_icmd(priv, 0x0000e9, 0x00000400);
+ nv_icmd(priv, 0x0000ea, 0x00000400);
+ nv_icmd(priv, 0x0000eb, 0x00000400);
+ nv_icmd(priv, 0x0000ec, 0x00000400);
+ nv_icmd(priv, 0x0000ed, 0x00000400);
+ nv_icmd(priv, 0x0000ee, 0x00000400);
+ nv_icmd(priv, 0x0000ef, 0x00000400);
+ nv_icmd(priv, 0x000078, 0x00000300);
+ nv_icmd(priv, 0x000079, 0x00000300);
+ nv_icmd(priv, 0x00007a, 0x00000300);
+ nv_icmd(priv, 0x00007b, 0x00000300);
+ nv_icmd(priv, 0x00007c, 0x00000300);
+ nv_icmd(priv, 0x00007d, 0x00000300);
+ nv_icmd(priv, 0x00007e, 0x00000300);
+ nv_icmd(priv, 0x00007f, 0x00000300);
+ nv_icmd(priv, 0x000050, 0x00000011);
+ nv_icmd(priv, 0x000058, 0x00000008);
+ nv_icmd(priv, 0x000059, 0x00000008);
+ nv_icmd(priv, 0x00005a, 0x00000008);
+ nv_icmd(priv, 0x00005b, 0x00000008);
+ nv_icmd(priv, 0x00005c, 0x00000008);
+ nv_icmd(priv, 0x00005d, 0x00000008);
+ nv_icmd(priv, 0x00005e, 0x00000008);
+ nv_icmd(priv, 0x00005f, 0x00000008);
+ nv_icmd(priv, 0x000208, 0x00000001);
+ nv_icmd(priv, 0x000209, 0x00000001);
+ nv_icmd(priv, 0x00020a, 0x00000001);
+ nv_icmd(priv, 0x00020b, 0x00000001);
+ nv_icmd(priv, 0x00020c, 0x00000001);
+ nv_icmd(priv, 0x00020d, 0x00000001);
+ nv_icmd(priv, 0x00020e, 0x00000001);
+ nv_icmd(priv, 0x00020f, 0x00000001);
+ nv_icmd(priv, 0x000081, 0x00000001);
+ nv_icmd(priv, 0x000085, 0x00000004);
+ nv_icmd(priv, 0x000088, 0x00000400);
+ nv_icmd(priv, 0x000090, 0x00000300);
+ nv_icmd(priv, 0x000098, 0x00001001);
+ nv_icmd(priv, 0x0000e3, 0x00000001);
+ nv_icmd(priv, 0x0000da, 0x00000001);
+ nv_icmd(priv, 0x0000f8, 0x00000003);
+ nv_icmd(priv, 0x0000fa, 0x00000001);
+ nv_icmd(priv, 0x00009f, 0x0000ffff);
+ nv_icmd(priv, 0x0000a0, 0x0000ffff);
+ nv_icmd(priv, 0x0000a1, 0x0000ffff);
+ nv_icmd(priv, 0x0000a2, 0x0000ffff);
+ nv_icmd(priv, 0x0000b1, 0x00000001);
+ nv_icmd(priv, 0x0000ad, 0x0000013e);
+ nv_icmd(priv, 0x0000e1, 0x00000010);
+ nv_icmd(priv, 0x000290, 0x00000000);
+ nv_icmd(priv, 0x000291, 0x00000000);
+ nv_icmd(priv, 0x000292, 0x00000000);
+ nv_icmd(priv, 0x000293, 0x00000000);
+ nv_icmd(priv, 0x000294, 0x00000000);
+ nv_icmd(priv, 0x000295, 0x00000000);
+ nv_icmd(priv, 0x000296, 0x00000000);
+ nv_icmd(priv, 0x000297, 0x00000000);
+ nv_icmd(priv, 0x000298, 0x00000000);
+ nv_icmd(priv, 0x000299, 0x00000000);
+ nv_icmd(priv, 0x00029a, 0x00000000);
+ nv_icmd(priv, 0x00029b, 0x00000000);
+ nv_icmd(priv, 0x00029c, 0x00000000);
+ nv_icmd(priv, 0x00029d, 0x00000000);
+ nv_icmd(priv, 0x00029e, 0x00000000);
+ nv_icmd(priv, 0x00029f, 0x00000000);
+ nv_icmd(priv, 0x0003b0, 0x00000000);
+ nv_icmd(priv, 0x0003b1, 0x00000000);
+ nv_icmd(priv, 0x0003b2, 0x00000000);
+ nv_icmd(priv, 0x0003b3, 0x00000000);
+ nv_icmd(priv, 0x0003b4, 0x00000000);
+ nv_icmd(priv, 0x0003b5, 0x00000000);
+ nv_icmd(priv, 0x0003b6, 0x00000000);
+ nv_icmd(priv, 0x0003b7, 0x00000000);
+ nv_icmd(priv, 0x0003b8, 0x00000000);
+ nv_icmd(priv, 0x0003b9, 0x00000000);
+ nv_icmd(priv, 0x0003ba, 0x00000000);
+ nv_icmd(priv, 0x0003bb, 0x00000000);
+ nv_icmd(priv, 0x0003bc, 0x00000000);
+ nv_icmd(priv, 0x0003bd, 0x00000000);
+ nv_icmd(priv, 0x0003be, 0x00000000);
+ nv_icmd(priv, 0x0003bf, 0x00000000);
+ nv_icmd(priv, 0x0002a0, 0x00000000);
+ nv_icmd(priv, 0x0002a1, 0x00000000);
+ nv_icmd(priv, 0x0002a2, 0x00000000);
+ nv_icmd(priv, 0x0002a3, 0x00000000);
+ nv_icmd(priv, 0x0002a4, 0x00000000);
+ nv_icmd(priv, 0x0002a5, 0x00000000);
+ nv_icmd(priv, 0x0002a6, 0x00000000);
+ nv_icmd(priv, 0x0002a7, 0x00000000);
+ nv_icmd(priv, 0x0002a8, 0x00000000);
+ nv_icmd(priv, 0x0002a9, 0x00000000);
+ nv_icmd(priv, 0x0002aa, 0x00000000);
+ nv_icmd(priv, 0x0002ab, 0x00000000);
+ nv_icmd(priv, 0x0002ac, 0x00000000);
+ nv_icmd(priv, 0x0002ad, 0x00000000);
+ nv_icmd(priv, 0x0002ae, 0x00000000);
+ nv_icmd(priv, 0x0002af, 0x00000000);
+ nv_icmd(priv, 0x000420, 0x00000000);
+ nv_icmd(priv, 0x000421, 0x00000000);
+ nv_icmd(priv, 0x000422, 0x00000000);
+ nv_icmd(priv, 0x000423, 0x00000000);
+ nv_icmd(priv, 0x000424, 0x00000000);
+ nv_icmd(priv, 0x000425, 0x00000000);
+ nv_icmd(priv, 0x000426, 0x00000000);
+ nv_icmd(priv, 0x000427, 0x00000000);
+ nv_icmd(priv, 0x000428, 0x00000000);
+ nv_icmd(priv, 0x000429, 0x00000000);
+ nv_icmd(priv, 0x00042a, 0x00000000);
+ nv_icmd(priv, 0x00042b, 0x00000000);
+ nv_icmd(priv, 0x00042c, 0x00000000);
+ nv_icmd(priv, 0x00042d, 0x00000000);
+ nv_icmd(priv, 0x00042e, 0x00000000);
+ nv_icmd(priv, 0x00042f, 0x00000000);
+ nv_icmd(priv, 0x0002b0, 0x00000000);
+ nv_icmd(priv, 0x0002b1, 0x00000000);
+ nv_icmd(priv, 0x0002b2, 0x00000000);
+ nv_icmd(priv, 0x0002b3, 0x00000000);
+ nv_icmd(priv, 0x0002b4, 0x00000000);
+ nv_icmd(priv, 0x0002b5, 0x00000000);
+ nv_icmd(priv, 0x0002b6, 0x00000000);
+ nv_icmd(priv, 0x0002b7, 0x00000000);
+ nv_icmd(priv, 0x0002b8, 0x00000000);
+ nv_icmd(priv, 0x0002b9, 0x00000000);
+ nv_icmd(priv, 0x0002ba, 0x00000000);
+ nv_icmd(priv, 0x0002bb, 0x00000000);
+ nv_icmd(priv, 0x0002bc, 0x00000000);
+ nv_icmd(priv, 0x0002bd, 0x00000000);
+ nv_icmd(priv, 0x0002be, 0x00000000);
+ nv_icmd(priv, 0x0002bf, 0x00000000);
+ nv_icmd(priv, 0x000430, 0x00000000);
+ nv_icmd(priv, 0x000431, 0x00000000);
+ nv_icmd(priv, 0x000432, 0x00000000);
+ nv_icmd(priv, 0x000433, 0x00000000);
+ nv_icmd(priv, 0x000434, 0x00000000);
+ nv_icmd(priv, 0x000435, 0x00000000);
+ nv_icmd(priv, 0x000436, 0x00000000);
+ nv_icmd(priv, 0x000437, 0x00000000);
+ nv_icmd(priv, 0x000438, 0x00000000);
+ nv_icmd(priv, 0x000439, 0x00000000);
+ nv_icmd(priv, 0x00043a, 0x00000000);
+ nv_icmd(priv, 0x00043b, 0x00000000);
+ nv_icmd(priv, 0x00043c, 0x00000000);
+ nv_icmd(priv, 0x00043d, 0x00000000);
+ nv_icmd(priv, 0x00043e, 0x00000000);
+ nv_icmd(priv, 0x00043f, 0x00000000);
+ nv_icmd(priv, 0x0002c0, 0x00000000);
+ nv_icmd(priv, 0x0002c1, 0x00000000);
+ nv_icmd(priv, 0x0002c2, 0x00000000);
+ nv_icmd(priv, 0x0002c3, 0x00000000);
+ nv_icmd(priv, 0x0002c4, 0x00000000);
+ nv_icmd(priv, 0x0002c5, 0x00000000);
+ nv_icmd(priv, 0x0002c6, 0x00000000);
+ nv_icmd(priv, 0x0002c7, 0x00000000);
+ nv_icmd(priv, 0x0002c8, 0x00000000);
+ nv_icmd(priv, 0x0002c9, 0x00000000);
+ nv_icmd(priv, 0x0002ca, 0x00000000);
+ nv_icmd(priv, 0x0002cb, 0x00000000);
+ nv_icmd(priv, 0x0002cc, 0x00000000);
+ nv_icmd(priv, 0x0002cd, 0x00000000);
+ nv_icmd(priv, 0x0002ce, 0x00000000);
+ nv_icmd(priv, 0x0002cf, 0x00000000);
+ nv_icmd(priv, 0x0004d0, 0x00000000);
+ nv_icmd(priv, 0x0004d1, 0x00000000);
+ nv_icmd(priv, 0x0004d2, 0x00000000);
+ nv_icmd(priv, 0x0004d3, 0x00000000);
+ nv_icmd(priv, 0x0004d4, 0x00000000);
+ nv_icmd(priv, 0x0004d5, 0x00000000);
+ nv_icmd(priv, 0x0004d6, 0x00000000);
+ nv_icmd(priv, 0x0004d7, 0x00000000);
+ nv_icmd(priv, 0x0004d8, 0x00000000);
+ nv_icmd(priv, 0x0004d9, 0x00000000);
+ nv_icmd(priv, 0x0004da, 0x00000000);
+ nv_icmd(priv, 0x0004db, 0x00000000);
+ nv_icmd(priv, 0x0004dc, 0x00000000);
+ nv_icmd(priv, 0x0004dd, 0x00000000);
+ nv_icmd(priv, 0x0004de, 0x00000000);
+ nv_icmd(priv, 0x0004df, 0x00000000);
+ nv_icmd(priv, 0x000720, 0x00000000);
+ nv_icmd(priv, 0x000721, 0x00000000);
+ nv_icmd(priv, 0x000722, 0x00000000);
+ nv_icmd(priv, 0x000723, 0x00000000);
+ nv_icmd(priv, 0x000724, 0x00000000);
+ nv_icmd(priv, 0x000725, 0x00000000);
+ nv_icmd(priv, 0x000726, 0x00000000);
+ nv_icmd(priv, 0x000727, 0x00000000);
+ nv_icmd(priv, 0x000728, 0x00000000);
+ nv_icmd(priv, 0x000729, 0x00000000);
+ nv_icmd(priv, 0x00072a, 0x00000000);
+ nv_icmd(priv, 0x00072b, 0x00000000);
+ nv_icmd(priv, 0x00072c, 0x00000000);
+ nv_icmd(priv, 0x00072d, 0x00000000);
+ nv_icmd(priv, 0x00072e, 0x00000000);
+ nv_icmd(priv, 0x00072f, 0x00000000);
+ nv_icmd(priv, 0x0008c0, 0x00000000);
+ nv_icmd(priv, 0x0008c1, 0x00000000);
+ nv_icmd(priv, 0x0008c2, 0x00000000);
+ nv_icmd(priv, 0x0008c3, 0x00000000);
+ nv_icmd(priv, 0x0008c4, 0x00000000);
+ nv_icmd(priv, 0x0008c5, 0x00000000);
+ nv_icmd(priv, 0x0008c6, 0x00000000);
+ nv_icmd(priv, 0x0008c7, 0x00000000);
+ nv_icmd(priv, 0x0008c8, 0x00000000);
+ nv_icmd(priv, 0x0008c9, 0x00000000);
+ nv_icmd(priv, 0x0008ca, 0x00000000);
+ nv_icmd(priv, 0x0008cb, 0x00000000);
+ nv_icmd(priv, 0x0008cc, 0x00000000);
+ nv_icmd(priv, 0x0008cd, 0x00000000);
+ nv_icmd(priv, 0x0008ce, 0x00000000);
+ nv_icmd(priv, 0x0008cf, 0x00000000);
+ nv_icmd(priv, 0x000890, 0x00000000);
+ nv_icmd(priv, 0x000891, 0x00000000);
+ nv_icmd(priv, 0x000892, 0x00000000);
+ nv_icmd(priv, 0x000893, 0x00000000);
+ nv_icmd(priv, 0x000894, 0x00000000);
+ nv_icmd(priv, 0x000895, 0x00000000);
+ nv_icmd(priv, 0x000896, 0x00000000);
+ nv_icmd(priv, 0x000897, 0x00000000);
+ nv_icmd(priv, 0x000898, 0x00000000);
+ nv_icmd(priv, 0x000899, 0x00000000);
+ nv_icmd(priv, 0x00089a, 0x00000000);
+ nv_icmd(priv, 0x00089b, 0x00000000);
+ nv_icmd(priv, 0x00089c, 0x00000000);
+ nv_icmd(priv, 0x00089d, 0x00000000);
+ nv_icmd(priv, 0x00089e, 0x00000000);
+ nv_icmd(priv, 0x00089f, 0x00000000);
+ nv_icmd(priv, 0x0008e0, 0x00000000);
+ nv_icmd(priv, 0x0008e1, 0x00000000);
+ nv_icmd(priv, 0x0008e2, 0x00000000);
+ nv_icmd(priv, 0x0008e3, 0x00000000);
+ nv_icmd(priv, 0x0008e4, 0x00000000);
+ nv_icmd(priv, 0x0008e5, 0x00000000);
+ nv_icmd(priv, 0x0008e6, 0x00000000);
+ nv_icmd(priv, 0x0008e7, 0x00000000);
+ nv_icmd(priv, 0x0008e8, 0x00000000);
+ nv_icmd(priv, 0x0008e9, 0x00000000);
+ nv_icmd(priv, 0x0008ea, 0x00000000);
+ nv_icmd(priv, 0x0008eb, 0x00000000);
+ nv_icmd(priv, 0x0008ec, 0x00000000);
+ nv_icmd(priv, 0x0008ed, 0x00000000);
+ nv_icmd(priv, 0x0008ee, 0x00000000);
+ nv_icmd(priv, 0x0008ef, 0x00000000);
+ nv_icmd(priv, 0x0008a0, 0x00000000);
+ nv_icmd(priv, 0x0008a1, 0x00000000);
+ nv_icmd(priv, 0x0008a2, 0x00000000);
+ nv_icmd(priv, 0x0008a3, 0x00000000);
+ nv_icmd(priv, 0x0008a4, 0x00000000);
+ nv_icmd(priv, 0x0008a5, 0x00000000);
+ nv_icmd(priv, 0x0008a6, 0x00000000);
+ nv_icmd(priv, 0x0008a7, 0x00000000);
+ nv_icmd(priv, 0x0008a8, 0x00000000);
+ nv_icmd(priv, 0x0008a9, 0x00000000);
+ nv_icmd(priv, 0x0008aa, 0x00000000);
+ nv_icmd(priv, 0x0008ab, 0x00000000);
+ nv_icmd(priv, 0x0008ac, 0x00000000);
+ nv_icmd(priv, 0x0008ad, 0x00000000);
+ nv_icmd(priv, 0x0008ae, 0x00000000);
+ nv_icmd(priv, 0x0008af, 0x00000000);
+ nv_icmd(priv, 0x0008f0, 0x00000000);
+ nv_icmd(priv, 0x0008f1, 0x00000000);
+ nv_icmd(priv, 0x0008f2, 0x00000000);
+ nv_icmd(priv, 0x0008f3, 0x00000000);
+ nv_icmd(priv, 0x0008f4, 0x00000000);
+ nv_icmd(priv, 0x0008f5, 0x00000000);
+ nv_icmd(priv, 0x0008f6, 0x00000000);
+ nv_icmd(priv, 0x0008f7, 0x00000000);
+ nv_icmd(priv, 0x0008f8, 0x00000000);
+ nv_icmd(priv, 0x0008f9, 0x00000000);
+ nv_icmd(priv, 0x0008fa, 0x00000000);
+ nv_icmd(priv, 0x0008fb, 0x00000000);
+ nv_icmd(priv, 0x0008fc, 0x00000000);
+ nv_icmd(priv, 0x0008fd, 0x00000000);
+ nv_icmd(priv, 0x0008fe, 0x00000000);
+ nv_icmd(priv, 0x0008ff, 0x00000000);
+ nv_icmd(priv, 0x00094c, 0x000000ff);
+ nv_icmd(priv, 0x00094d, 0xffffffff);
+ nv_icmd(priv, 0x00094e, 0x00000002);
+ nv_icmd(priv, 0x0002ec, 0x00000001);
+ nv_icmd(priv, 0x000303, 0x00000001);
+ nv_icmd(priv, 0x0002e6, 0x00000001);
+ nv_icmd(priv, 0x000466, 0x00000052);
+ nv_icmd(priv, 0x000301, 0x3f800000);
+ nv_icmd(priv, 0x000304, 0x30201000);
+ nv_icmd(priv, 0x000305, 0x70605040);
+ nv_icmd(priv, 0x000306, 0xb8a89888);
+ nv_icmd(priv, 0x000307, 0xf8e8d8c8);
+ nv_icmd(priv, 0x00030a, 0x00ffff00);
+ nv_icmd(priv, 0x00030b, 0x0000001a);
+ nv_icmd(priv, 0x00030c, 0x00000001);
+ nv_icmd(priv, 0x000318, 0x00000001);
+ nv_icmd(priv, 0x000340, 0x00000000);
+ nv_icmd(priv, 0x000375, 0x00000001);
+ nv_icmd(priv, 0x00037d, 0x00000006);
+ nv_icmd(priv, 0x0003a0, 0x00000002);
+ nv_icmd(priv, 0x0003aa, 0x00000001);
+ nv_icmd(priv, 0x0003a9, 0x00000001);
+ nv_icmd(priv, 0x000380, 0x00000001);
+ nv_icmd(priv, 0x000383, 0x00000011);
+ nv_icmd(priv, 0x000360, 0x00000040);
+ nv_icmd(priv, 0x000366, 0x00000000);
+ nv_icmd(priv, 0x000367, 0x00000000);
+ nv_icmd(priv, 0x000368, 0x00000fff);
+ nv_icmd(priv, 0x000370, 0x00000000);
+ nv_icmd(priv, 0x000371, 0x00000000);
+ nv_icmd(priv, 0x000372, 0x000fffff);
+ nv_icmd(priv, 0x00037a, 0x00000012);
+ nv_icmd(priv, 0x000619, 0x00000003);
+ nv_icmd(priv, 0x000811, 0x00000003);
+ nv_icmd(priv, 0x000812, 0x00000004);
+ nv_icmd(priv, 0x000813, 0x00000006);
+ nv_icmd(priv, 0x000814, 0x00000008);
+ nv_icmd(priv, 0x000815, 0x0000000b);
+ nv_icmd(priv, 0x000800, 0x00000001);
+ nv_icmd(priv, 0x000801, 0x00000001);
+ nv_icmd(priv, 0x000802, 0x00000001);
+ nv_icmd(priv, 0x000803, 0x00000001);
+ nv_icmd(priv, 0x000804, 0x00000001);
+ nv_icmd(priv, 0x000805, 0x00000001);
+ nv_icmd(priv, 0x000632, 0x00000001);
+ nv_icmd(priv, 0x000633, 0x00000002);
+ nv_icmd(priv, 0x000634, 0x00000003);
+ nv_icmd(priv, 0x000635, 0x00000004);
+ nv_icmd(priv, 0x000654, 0x3f800000);
+ nv_icmd(priv, 0x000657, 0x3f800000);
+ nv_icmd(priv, 0x000655, 0x3f800000);
+ nv_icmd(priv, 0x000656, 0x3f800000);
+ nv_icmd(priv, 0x0006cd, 0x3f800000);
+ nv_icmd(priv, 0x0007f5, 0x3f800000);
+ nv_icmd(priv, 0x0007dc, 0x39291909);
+ nv_icmd(priv, 0x0007dd, 0x79695949);
+ nv_icmd(priv, 0x0007de, 0xb9a99989);
+ nv_icmd(priv, 0x0007df, 0xf9e9d9c9);
+ nv_icmd(priv, 0x0007e8, 0x00003210);
+ nv_icmd(priv, 0x0007e9, 0x00007654);
+ nv_icmd(priv, 0x0007ea, 0x00000098);
+ nv_icmd(priv, 0x0007ec, 0x39291909);
+ nv_icmd(priv, 0x0007ed, 0x79695949);
+ nv_icmd(priv, 0x0007ee, 0xb9a99989);
+ nv_icmd(priv, 0x0007ef, 0xf9e9d9c9);
+ nv_icmd(priv, 0x0007f0, 0x00003210);
+ nv_icmd(priv, 0x0007f1, 0x00007654);
+ nv_icmd(priv, 0x0007f2, 0x00000098);
+ nv_icmd(priv, 0x0005a5, 0x00000001);
+ nv_icmd(priv, 0x000980, 0x00000000);
+ nv_icmd(priv, 0x000981, 0x00000000);
+ nv_icmd(priv, 0x000982, 0x00000000);
+ nv_icmd(priv, 0x000983, 0x00000000);
+ nv_icmd(priv, 0x000984, 0x00000000);
+ nv_icmd(priv, 0x000985, 0x00000000);
+ nv_icmd(priv, 0x000986, 0x00000000);
+ nv_icmd(priv, 0x000987, 0x00000000);
+ nv_icmd(priv, 0x000988, 0x00000000);
+ nv_icmd(priv, 0x000989, 0x00000000);
+ nv_icmd(priv, 0x00098a, 0x00000000);
+ nv_icmd(priv, 0x00098b, 0x00000000);
+ nv_icmd(priv, 0x00098c, 0x00000000);
+ nv_icmd(priv, 0x00098d, 0x00000000);
+ nv_icmd(priv, 0x00098e, 0x00000000);
+ nv_icmd(priv, 0x00098f, 0x00000000);
+ nv_icmd(priv, 0x000990, 0x00000000);
+ nv_icmd(priv, 0x000991, 0x00000000);
+ nv_icmd(priv, 0x000992, 0x00000000);
+ nv_icmd(priv, 0x000993, 0x00000000);
+ nv_icmd(priv, 0x000994, 0x00000000);
+ nv_icmd(priv, 0x000995, 0x00000000);
+ nv_icmd(priv, 0x000996, 0x00000000);
+ nv_icmd(priv, 0x000997, 0x00000000);
+ nv_icmd(priv, 0x000998, 0x00000000);
+ nv_icmd(priv, 0x000999, 0x00000000);
+ nv_icmd(priv, 0x00099a, 0x00000000);
+ nv_icmd(priv, 0x00099b, 0x00000000);
+ nv_icmd(priv, 0x00099c, 0x00000000);
+ nv_icmd(priv, 0x00099d, 0x00000000);
+ nv_icmd(priv, 0x00099e, 0x00000000);
+ nv_icmd(priv, 0x00099f, 0x00000000);
+ nv_icmd(priv, 0x0009a0, 0x00000000);
+ nv_icmd(priv, 0x0009a1, 0x00000000);
+ nv_icmd(priv, 0x0009a2, 0x00000000);
+ nv_icmd(priv, 0x0009a3, 0x00000000);
+ nv_icmd(priv, 0x0009a4, 0x00000000);
+ nv_icmd(priv, 0x0009a5, 0x00000000);
+ nv_icmd(priv, 0x0009a6, 0x00000000);
+ nv_icmd(priv, 0x0009a7, 0x00000000);
+ nv_icmd(priv, 0x0009a8, 0x00000000);
+ nv_icmd(priv, 0x0009a9, 0x00000000);
+ nv_icmd(priv, 0x0009aa, 0x00000000);
+ nv_icmd(priv, 0x0009ab, 0x00000000);
+ nv_icmd(priv, 0x0009ac, 0x00000000);
+ nv_icmd(priv, 0x0009ad, 0x00000000);
+ nv_icmd(priv, 0x0009ae, 0x00000000);
+ nv_icmd(priv, 0x0009af, 0x00000000);
+ nv_icmd(priv, 0x0009b0, 0x00000000);
+ nv_icmd(priv, 0x0009b1, 0x00000000);
+ nv_icmd(priv, 0x0009b2, 0x00000000);
+ nv_icmd(priv, 0x0009b3, 0x00000000);
+ nv_icmd(priv, 0x0009b4, 0x00000000);
+ nv_icmd(priv, 0x0009b5, 0x00000000);
+ nv_icmd(priv, 0x0009b6, 0x00000000);
+ nv_icmd(priv, 0x0009b7, 0x00000000);
+ nv_icmd(priv, 0x0009b8, 0x00000000);
+ nv_icmd(priv, 0x0009b9, 0x00000000);
+ nv_icmd(priv, 0x0009ba, 0x00000000);
+ nv_icmd(priv, 0x0009bb, 0x00000000);
+ nv_icmd(priv, 0x0009bc, 0x00000000);
+ nv_icmd(priv, 0x0009bd, 0x00000000);
+ nv_icmd(priv, 0x0009be, 0x00000000);
+ nv_icmd(priv, 0x0009bf, 0x00000000);
+ nv_icmd(priv, 0x0009c0, 0x00000000);
+ nv_icmd(priv, 0x0009c1, 0x00000000);
+ nv_icmd(priv, 0x0009c2, 0x00000000);
+ nv_icmd(priv, 0x0009c3, 0x00000000);
+ nv_icmd(priv, 0x0009c4, 0x00000000);
+ nv_icmd(priv, 0x0009c5, 0x00000000);
+ nv_icmd(priv, 0x0009c6, 0x00000000);
+ nv_icmd(priv, 0x0009c7, 0x00000000);
+ nv_icmd(priv, 0x0009c8, 0x00000000);
+ nv_icmd(priv, 0x0009c9, 0x00000000);
+ nv_icmd(priv, 0x0009ca, 0x00000000);
+ nv_icmd(priv, 0x0009cb, 0x00000000);
+ nv_icmd(priv, 0x0009cc, 0x00000000);
+ nv_icmd(priv, 0x0009cd, 0x00000000);
+ nv_icmd(priv, 0x0009ce, 0x00000000);
+ nv_icmd(priv, 0x0009cf, 0x00000000);
+ nv_icmd(priv, 0x0009d0, 0x00000000);
+ nv_icmd(priv, 0x0009d1, 0x00000000);
+ nv_icmd(priv, 0x0009d2, 0x00000000);
+ nv_icmd(priv, 0x0009d3, 0x00000000);
+ nv_icmd(priv, 0x0009d4, 0x00000000);
+ nv_icmd(priv, 0x0009d5, 0x00000000);
+ nv_icmd(priv, 0x0009d6, 0x00000000);
+ nv_icmd(priv, 0x0009d7, 0x00000000);
+ nv_icmd(priv, 0x0009d8, 0x00000000);
+ nv_icmd(priv, 0x0009d9, 0x00000000);
+ nv_icmd(priv, 0x0009da, 0x00000000);
+ nv_icmd(priv, 0x0009db, 0x00000000);
+ nv_icmd(priv, 0x0009dc, 0x00000000);
+ nv_icmd(priv, 0x0009dd, 0x00000000);
+ nv_icmd(priv, 0x0009de, 0x00000000);
+ nv_icmd(priv, 0x0009df, 0x00000000);
+ nv_icmd(priv, 0x0009e0, 0x00000000);
+ nv_icmd(priv, 0x0009e1, 0x00000000);
+ nv_icmd(priv, 0x0009e2, 0x00000000);
+ nv_icmd(priv, 0x0009e3, 0x00000000);
+ nv_icmd(priv, 0x0009e4, 0x00000000);
+ nv_icmd(priv, 0x0009e5, 0x00000000);
+ nv_icmd(priv, 0x0009e6, 0x00000000);
+ nv_icmd(priv, 0x0009e7, 0x00000000);
+ nv_icmd(priv, 0x0009e8, 0x00000000);
+ nv_icmd(priv, 0x0009e9, 0x00000000);
+ nv_icmd(priv, 0x0009ea, 0x00000000);
+ nv_icmd(priv, 0x0009eb, 0x00000000);
+ nv_icmd(priv, 0x0009ec, 0x00000000);
+ nv_icmd(priv, 0x0009ed, 0x00000000);
+ nv_icmd(priv, 0x0009ee, 0x00000000);
+ nv_icmd(priv, 0x0009ef, 0x00000000);
+ nv_icmd(priv, 0x0009f0, 0x00000000);
+ nv_icmd(priv, 0x0009f1, 0x00000000);
+ nv_icmd(priv, 0x0009f2, 0x00000000);
+ nv_icmd(priv, 0x0009f3, 0x00000000);
+ nv_icmd(priv, 0x0009f4, 0x00000000);
+ nv_icmd(priv, 0x0009f5, 0x00000000);
+ nv_icmd(priv, 0x0009f6, 0x00000000);
+ nv_icmd(priv, 0x0009f7, 0x00000000);
+ nv_icmd(priv, 0x0009f8, 0x00000000);
+ nv_icmd(priv, 0x0009f9, 0x00000000);
+ nv_icmd(priv, 0x0009fa, 0x00000000);
+ nv_icmd(priv, 0x0009fb, 0x00000000);
+ nv_icmd(priv, 0x0009fc, 0x00000000);
+ nv_icmd(priv, 0x0009fd, 0x00000000);
+ nv_icmd(priv, 0x0009fe, 0x00000000);
+ nv_icmd(priv, 0x0009ff, 0x00000000);
+ nv_icmd(priv, 0x000468, 0x00000004);
+ nv_icmd(priv, 0x00046c, 0x00000001);
+ nv_icmd(priv, 0x000470, 0x00000000);
+ nv_icmd(priv, 0x000471, 0x00000000);
+ nv_icmd(priv, 0x000472, 0x00000000);
+ nv_icmd(priv, 0x000473, 0x00000000);
+ nv_icmd(priv, 0x000474, 0x00000000);
+ nv_icmd(priv, 0x000475, 0x00000000);
+ nv_icmd(priv, 0x000476, 0x00000000);
+ nv_icmd(priv, 0x000477, 0x00000000);
+ nv_icmd(priv, 0x000478, 0x00000000);
+ nv_icmd(priv, 0x000479, 0x00000000);
+ nv_icmd(priv, 0x00047a, 0x00000000);
+ nv_icmd(priv, 0x00047b, 0x00000000);
+ nv_icmd(priv, 0x00047c, 0x00000000);
+ nv_icmd(priv, 0x00047d, 0x00000000);
+ nv_icmd(priv, 0x00047e, 0x00000000);
+ nv_icmd(priv, 0x00047f, 0x00000000);
+ nv_icmd(priv, 0x000480, 0x00000000);
+ nv_icmd(priv, 0x000481, 0x00000000);
+ nv_icmd(priv, 0x000482, 0x00000000);
+ nv_icmd(priv, 0x000483, 0x00000000);
+ nv_icmd(priv, 0x000484, 0x00000000);
+ nv_icmd(priv, 0x000485, 0x00000000);
+ nv_icmd(priv, 0x000486, 0x00000000);
+ nv_icmd(priv, 0x000487, 0x00000000);
+ nv_icmd(priv, 0x000488, 0x00000000);
+ nv_icmd(priv, 0x000489, 0x00000000);
+ nv_icmd(priv, 0x00048a, 0x00000000);
+ nv_icmd(priv, 0x00048b, 0x00000000);
+ nv_icmd(priv, 0x00048c, 0x00000000);
+ nv_icmd(priv, 0x00048d, 0x00000000);
+ nv_icmd(priv, 0x00048e, 0x00000000);
+ nv_icmd(priv, 0x00048f, 0x00000000);
+ nv_icmd(priv, 0x000490, 0x00000000);
+ nv_icmd(priv, 0x000491, 0x00000000);
+ nv_icmd(priv, 0x000492, 0x00000000);
+ nv_icmd(priv, 0x000493, 0x00000000);
+ nv_icmd(priv, 0x000494, 0x00000000);
+ nv_icmd(priv, 0x000495, 0x00000000);
+ nv_icmd(priv, 0x000496, 0x00000000);
+ nv_icmd(priv, 0x000497, 0x00000000);
+ nv_icmd(priv, 0x000498, 0x00000000);
+ nv_icmd(priv, 0x000499, 0x00000000);
+ nv_icmd(priv, 0x00049a, 0x00000000);
+ nv_icmd(priv, 0x00049b, 0x00000000);
+ nv_icmd(priv, 0x00049c, 0x00000000);
+ nv_icmd(priv, 0x00049d, 0x00000000);
+ nv_icmd(priv, 0x00049e, 0x00000000);
+ nv_icmd(priv, 0x00049f, 0x00000000);
+ nv_icmd(priv, 0x0004a0, 0x00000000);
+ nv_icmd(priv, 0x0004a1, 0x00000000);
+ nv_icmd(priv, 0x0004a2, 0x00000000);
+ nv_icmd(priv, 0x0004a3, 0x00000000);
+ nv_icmd(priv, 0x0004a4, 0x00000000);
+ nv_icmd(priv, 0x0004a5, 0x00000000);
+ nv_icmd(priv, 0x0004a6, 0x00000000);
+ nv_icmd(priv, 0x0004a7, 0x00000000);
+ nv_icmd(priv, 0x0004a8, 0x00000000);
+ nv_icmd(priv, 0x0004a9, 0x00000000);
+ nv_icmd(priv, 0x0004aa, 0x00000000);
+ nv_icmd(priv, 0x0004ab, 0x00000000);
+ nv_icmd(priv, 0x0004ac, 0x00000000);
+ nv_icmd(priv, 0x0004ad, 0x00000000);
+ nv_icmd(priv, 0x0004ae, 0x00000000);
+ nv_icmd(priv, 0x0004af, 0x00000000);
+ nv_icmd(priv, 0x0004b0, 0x00000000);
+ nv_icmd(priv, 0x0004b1, 0x00000000);
+ nv_icmd(priv, 0x0004b2, 0x00000000);
+ nv_icmd(priv, 0x0004b3, 0x00000000);
+ nv_icmd(priv, 0x0004b4, 0x00000000);
+ nv_icmd(priv, 0x0004b5, 0x00000000);
+ nv_icmd(priv, 0x0004b6, 0x00000000);
+ nv_icmd(priv, 0x0004b7, 0x00000000);
+ nv_icmd(priv, 0x0004b8, 0x00000000);
+ nv_icmd(priv, 0x0004b9, 0x00000000);
+ nv_icmd(priv, 0x0004ba, 0x00000000);
+ nv_icmd(priv, 0x0004bb, 0x00000000);
+ nv_icmd(priv, 0x0004bc, 0x00000000);
+ nv_icmd(priv, 0x0004bd, 0x00000000);
+ nv_icmd(priv, 0x0004be, 0x00000000);
+ nv_icmd(priv, 0x0004bf, 0x00000000);
+ nv_icmd(priv, 0x0004c0, 0x00000000);
+ nv_icmd(priv, 0x0004c1, 0x00000000);
+ nv_icmd(priv, 0x0004c2, 0x00000000);
+ nv_icmd(priv, 0x0004c3, 0x00000000);
+ nv_icmd(priv, 0x0004c4, 0x00000000);
+ nv_icmd(priv, 0x0004c5, 0x00000000);
+ nv_icmd(priv, 0x0004c6, 0x00000000);
+ nv_icmd(priv, 0x0004c7, 0x00000000);
+ nv_icmd(priv, 0x0004c8, 0x00000000);
+ nv_icmd(priv, 0x0004c9, 0x00000000);
+ nv_icmd(priv, 0x0004ca, 0x00000000);
+ nv_icmd(priv, 0x0004cb, 0x00000000);
+ nv_icmd(priv, 0x0004cc, 0x00000000);
+ nv_icmd(priv, 0x0004cd, 0x00000000);
+ nv_icmd(priv, 0x0004ce, 0x00000000);
+ nv_icmd(priv, 0x0004cf, 0x00000000);
+ nv_icmd(priv, 0x000510, 0x3f800000);
+ nv_icmd(priv, 0x000511, 0x3f800000);
+ nv_icmd(priv, 0x000512, 0x3f800000);
+ nv_icmd(priv, 0x000513, 0x3f800000);
+ nv_icmd(priv, 0x000514, 0x3f800000);
+ nv_icmd(priv, 0x000515, 0x3f800000);
+ nv_icmd(priv, 0x000516, 0x3f800000);
+ nv_icmd(priv, 0x000517, 0x3f800000);
+ nv_icmd(priv, 0x000518, 0x3f800000);
+ nv_icmd(priv, 0x000519, 0x3f800000);
+ nv_icmd(priv, 0x00051a, 0x3f800000);
+ nv_icmd(priv, 0x00051b, 0x3f800000);
+ nv_icmd(priv, 0x00051c, 0x3f800000);
+ nv_icmd(priv, 0x00051d, 0x3f800000);
+ nv_icmd(priv, 0x00051e, 0x3f800000);
+ nv_icmd(priv, 0x00051f, 0x3f800000);
+ nv_icmd(priv, 0x000520, 0x000002b6);
+ nv_icmd(priv, 0x000529, 0x00000001);
+ nv_icmd(priv, 0x000530, 0xffff0000);
+ nv_icmd(priv, 0x000531, 0xffff0000);
+ nv_icmd(priv, 0x000532, 0xffff0000);
+ nv_icmd(priv, 0x000533, 0xffff0000);
+ nv_icmd(priv, 0x000534, 0xffff0000);
+ nv_icmd(priv, 0x000535, 0xffff0000);
+ nv_icmd(priv, 0x000536, 0xffff0000);
+ nv_icmd(priv, 0x000537, 0xffff0000);
+ nv_icmd(priv, 0x000538, 0xffff0000);
+ nv_icmd(priv, 0x000539, 0xffff0000);
+ nv_icmd(priv, 0x00053a, 0xffff0000);
+ nv_icmd(priv, 0x00053b, 0xffff0000);
+ nv_icmd(priv, 0x00053c, 0xffff0000);
+ nv_icmd(priv, 0x00053d, 0xffff0000);
+ nv_icmd(priv, 0x00053e, 0xffff0000);
+ nv_icmd(priv, 0x00053f, 0xffff0000);
+ nv_icmd(priv, 0x000585, 0x0000003f);
+ nv_icmd(priv, 0x000576, 0x00000003);
+ nv_icmd(priv, 0x00057b, 0x00000059);
+ nv_icmd(priv, 0x000586, 0x00000040);
+ nv_icmd(priv, 0x000582, 0x00000080);
+ nv_icmd(priv, 0x000583, 0x00000080);
+ nv_icmd(priv, 0x0005c2, 0x00000001);
+ nv_icmd(priv, 0x000638, 0x00000001);
+ nv_icmd(priv, 0x000639, 0x00000001);
+ nv_icmd(priv, 0x00063a, 0x00000002);
+ nv_icmd(priv, 0x00063b, 0x00000001);
+ nv_icmd(priv, 0x00063c, 0x00000001);
+ nv_icmd(priv, 0x00063d, 0x00000002);
+ nv_icmd(priv, 0x00063e, 0x00000001);
+ nv_icmd(priv, 0x0008b8, 0x00000001);
+ nv_icmd(priv, 0x0008b9, 0x00000001);
+ nv_icmd(priv, 0x0008ba, 0x00000001);
+ nv_icmd(priv, 0x0008bb, 0x00000001);
+ nv_icmd(priv, 0x0008bc, 0x00000001);
+ nv_icmd(priv, 0x0008bd, 0x00000001);
+ nv_icmd(priv, 0x0008be, 0x00000001);
+ nv_icmd(priv, 0x0008bf, 0x00000001);
+ nv_icmd(priv, 0x000900, 0x00000001);
+ nv_icmd(priv, 0x000901, 0x00000001);
+ nv_icmd(priv, 0x000902, 0x00000001);
+ nv_icmd(priv, 0x000903, 0x00000001);
+ nv_icmd(priv, 0x000904, 0x00000001);
+ nv_icmd(priv, 0x000905, 0x00000001);
+ nv_icmd(priv, 0x000906, 0x00000001);
+ nv_icmd(priv, 0x000907, 0x00000001);
+ nv_icmd(priv, 0x000908, 0x00000002);
+ nv_icmd(priv, 0x000909, 0x00000002);
+ nv_icmd(priv, 0x00090a, 0x00000002);
+ nv_icmd(priv, 0x00090b, 0x00000002);
+ nv_icmd(priv, 0x00090c, 0x00000002);
+ nv_icmd(priv, 0x00090d, 0x00000002);
+ nv_icmd(priv, 0x00090e, 0x00000002);
+ nv_icmd(priv, 0x00090f, 0x00000002);
+ nv_icmd(priv, 0x000910, 0x00000001);
+ nv_icmd(priv, 0x000911, 0x00000001);
+ nv_icmd(priv, 0x000912, 0x00000001);
+ nv_icmd(priv, 0x000913, 0x00000001);
+ nv_icmd(priv, 0x000914, 0x00000001);
+ nv_icmd(priv, 0x000915, 0x00000001);
+ nv_icmd(priv, 0x000916, 0x00000001);
+ nv_icmd(priv, 0x000917, 0x00000001);
+ nv_icmd(priv, 0x000918, 0x00000001);
+ nv_icmd(priv, 0x000919, 0x00000001);
+ nv_icmd(priv, 0x00091a, 0x00000001);
+ nv_icmd(priv, 0x00091b, 0x00000001);
+ nv_icmd(priv, 0x00091c, 0x00000001);
+ nv_icmd(priv, 0x00091d, 0x00000001);
+ nv_icmd(priv, 0x00091e, 0x00000001);
+ nv_icmd(priv, 0x00091f, 0x00000001);
+ nv_icmd(priv, 0x000920, 0x00000002);
+ nv_icmd(priv, 0x000921, 0x00000002);
+ nv_icmd(priv, 0x000922, 0x00000002);
+ nv_icmd(priv, 0x000923, 0x00000002);
+ nv_icmd(priv, 0x000924, 0x00000002);
+ nv_icmd(priv, 0x000925, 0x00000002);
+ nv_icmd(priv, 0x000926, 0x00000002);
+ nv_icmd(priv, 0x000927, 0x00000002);
+ nv_icmd(priv, 0x000928, 0x00000001);
+ nv_icmd(priv, 0x000929, 0x00000001);
+ nv_icmd(priv, 0x00092a, 0x00000001);
+ nv_icmd(priv, 0x00092b, 0x00000001);
+ nv_icmd(priv, 0x00092c, 0x00000001);
+ nv_icmd(priv, 0x00092d, 0x00000001);
+ nv_icmd(priv, 0x00092e, 0x00000001);
+ nv_icmd(priv, 0x00092f, 0x00000001);
+ nv_icmd(priv, 0x000648, 0x00000001);
+ nv_icmd(priv, 0x000649, 0x00000001);
+ nv_icmd(priv, 0x00064a, 0x00000001);
+ nv_icmd(priv, 0x00064b, 0x00000001);
+ nv_icmd(priv, 0x00064c, 0x00000001);
+ nv_icmd(priv, 0x00064d, 0x00000001);
+ nv_icmd(priv, 0x00064e, 0x00000001);
+ nv_icmd(priv, 0x00064f, 0x00000001);
+ nv_icmd(priv, 0x000650, 0x00000001);
+ nv_icmd(priv, 0x000658, 0x0000000f);
+ nv_icmd(priv, 0x0007ff, 0x0000000a);
+ nv_icmd(priv, 0x00066a, 0x40000000);
+ nv_icmd(priv, 0x00066b, 0x10000000);
+ nv_icmd(priv, 0x00066c, 0xffff0000);
+ nv_icmd(priv, 0x00066d, 0xffff0000);
+ nv_icmd(priv, 0x0007af, 0x00000008);
+ nv_icmd(priv, 0x0007b0, 0x00000008);
+ nv_icmd(priv, 0x0007f6, 0x00000001);
+ nv_icmd(priv, 0x0006b2, 0x00000055);
+ nv_icmd(priv, 0x0007ad, 0x00000003);
+ nv_icmd(priv, 0x000937, 0x00000001);
+ nv_icmd(priv, 0x000971, 0x00000008);
+ nv_icmd(priv, 0x000972, 0x00000040);
+ nv_icmd(priv, 0x000973, 0x0000012c);
+ nv_icmd(priv, 0x00097c, 0x00000040);
+ nv_icmd(priv, 0x000979, 0x00000003);
+ nv_icmd(priv, 0x000975, 0x00000020);
+ nv_icmd(priv, 0x000976, 0x00000001);
+ nv_icmd(priv, 0x000977, 0x00000020);
+ nv_icmd(priv, 0x000978, 0x00000001);
+ nv_icmd(priv, 0x000957, 0x00000003);
+ nv_icmd(priv, 0x00095e, 0x20164010);
+ nv_icmd(priv, 0x00095f, 0x00000020);
+ nv_icmd(priv, 0x00097d, 0x00000020);
+ nv_icmd(priv, 0x000683, 0x00000006);
+ nv_icmd(priv, 0x000685, 0x003fffff);
+ nv_icmd(priv, 0x000687, 0x003fffff);
+ nv_icmd(priv, 0x0006a0, 0x00000005);
+ nv_icmd(priv, 0x000840, 0x00400008);
+ nv_icmd(priv, 0x000841, 0x08000080);
+ nv_icmd(priv, 0x000842, 0x00400008);
+ nv_icmd(priv, 0x000843, 0x08000080);
+ nv_icmd(priv, 0x000818, 0x00000000);
+ nv_icmd(priv, 0x000819, 0x00000000);
+ nv_icmd(priv, 0x00081a, 0x00000000);
+ nv_icmd(priv, 0x00081b, 0x00000000);
+ nv_icmd(priv, 0x00081c, 0x00000000);
+ nv_icmd(priv, 0x00081d, 0x00000000);
+ nv_icmd(priv, 0x00081e, 0x00000000);
+ nv_icmd(priv, 0x00081f, 0x00000000);
+ nv_icmd(priv, 0x000848, 0x00000000);
+ nv_icmd(priv, 0x000849, 0x00000000);
+ nv_icmd(priv, 0x00084a, 0x00000000);
+ nv_icmd(priv, 0x00084b, 0x00000000);
+ nv_icmd(priv, 0x00084c, 0x00000000);
+ nv_icmd(priv, 0x00084d, 0x00000000);
+ nv_icmd(priv, 0x00084e, 0x00000000);
+ nv_icmd(priv, 0x00084f, 0x00000000);
+ nv_icmd(priv, 0x000850, 0x00000000);
+ nv_icmd(priv, 0x000851, 0x00000000);
+ nv_icmd(priv, 0x000852, 0x00000000);
+ nv_icmd(priv, 0x000853, 0x00000000);
+ nv_icmd(priv, 0x000854, 0x00000000);
+ nv_icmd(priv, 0x000855, 0x00000000);
+ nv_icmd(priv, 0x000856, 0x00000000);
+ nv_icmd(priv, 0x000857, 0x00000000);
+ nv_icmd(priv, 0x000738, 0x00000000);
+ nv_icmd(priv, 0x0006aa, 0x00000001);
+ nv_icmd(priv, 0x0006ab, 0x00000002);
+ nv_icmd(priv, 0x0006ac, 0x00000080);
+ nv_icmd(priv, 0x0006ad, 0x00000100);
+ nv_icmd(priv, 0x0006ae, 0x00000100);
+ nv_icmd(priv, 0x0006b1, 0x00000011);
+ nv_icmd(priv, 0x0006bb, 0x000000cf);
+ nv_icmd(priv, 0x0006ce, 0x2a712488);
+ nv_icmd(priv, 0x000739, 0x4085c000);
+ nv_icmd(priv, 0x00073a, 0x00000080);
+ nv_icmd(priv, 0x000786, 0x80000100);
+ nv_icmd(priv, 0x00073c, 0x00010100);
+ nv_icmd(priv, 0x00073d, 0x02800000);
+ nv_icmd(priv, 0x000787, 0x000000cf);
+ nv_icmd(priv, 0x00078c, 0x00000008);
+ nv_icmd(priv, 0x000792, 0x00000001);
+ nv_icmd(priv, 0x000794, 0x00000001);
+ nv_icmd(priv, 0x000795, 0x00000001);
+ nv_icmd(priv, 0x000796, 0x00000001);
+ nv_icmd(priv, 0x000797, 0x000000cf);
+ nv_icmd(priv, 0x000836, 0x00000001);
+ nv_icmd(priv, 0x00079a, 0x00000002);
+ nv_icmd(priv, 0x000833, 0x04444480);
+ nv_icmd(priv, 0x0007a1, 0x00000001);
+ nv_icmd(priv, 0x0007a3, 0x00000001);
+ nv_icmd(priv, 0x0007a4, 0x00000001);
+ nv_icmd(priv, 0x0007a5, 0x00000001);
+ nv_icmd(priv, 0x000831, 0x00000004);
+ nv_icmd(priv, 0x000b07, 0x00000002);
+ nv_icmd(priv, 0x000b08, 0x00000100);
+ nv_icmd(priv, 0x000b09, 0x00000100);
+ nv_icmd(priv, 0x000b0a, 0x00000001);
+ nv_icmd(priv, 0x000a04, 0x000000ff);
+ nv_icmd(priv, 0x000a0b, 0x00000040);
+ nv_icmd(priv, 0x00097f, 0x00000100);
+ nv_icmd(priv, 0x000a02, 0x00000001);
+ nv_icmd(priv, 0x000809, 0x00000007);
+ nv_icmd(priv, 0x00c221, 0x00000040);
+ nv_icmd(priv, 0x00c1b0, 0x0000000f);
+ nv_icmd(priv, 0x00c1b1, 0x0000000f);
+ nv_icmd(priv, 0x00c1b2, 0x0000000f);
+ nv_icmd(priv, 0x00c1b3, 0x0000000f);
+ nv_icmd(priv, 0x00c1b4, 0x0000000f);
+ nv_icmd(priv, 0x00c1b5, 0x0000000f);
+ nv_icmd(priv, 0x00c1b6, 0x0000000f);
+ nv_icmd(priv, 0x00c1b7, 0x0000000f);
+ nv_icmd(priv, 0x00c1b8, 0x0fac6881);
+ nv_icmd(priv, 0x00c1b9, 0x00fac688);
+ nv_icmd(priv, 0x00c401, 0x00000001);
+ nv_icmd(priv, 0x00c402, 0x00010001);
+ nv_icmd(priv, 0x00c403, 0x00000001);
+ nv_icmd(priv, 0x00c404, 0x00000001);
+ nv_icmd(priv, 0x00c40e, 0x00000020);
+ nv_icmd(priv, 0x00c500, 0x00000003);
+ nv_icmd(priv, 0x01e100, 0x00000001);
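+	/* The writes to icmd 0x1000 below (values 2, 8, then 1) appear to
+	 * select a bank, after which a subset of the registers above is
+	 * re-initialised for each one; this is an inference from the
+	 * repeated sequences, not documented behaviour.
+	 */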
+ nv_icmd(priv, 0x001000, 0x00000002);
+ nv_icmd(priv, 0x0006aa, 0x00000001);
+ nv_icmd(priv, 0x0006ad, 0x00000100);
+ nv_icmd(priv, 0x0006ae, 0x00000100);
+ nv_icmd(priv, 0x0006b1, 0x00000011);
+ nv_icmd(priv, 0x00078c, 0x00000008);
+ nv_icmd(priv, 0x000792, 0x00000001);
+ nv_icmd(priv, 0x000794, 0x00000001);
+ nv_icmd(priv, 0x000795, 0x00000001);
+ nv_icmd(priv, 0x000796, 0x00000001);
+ nv_icmd(priv, 0x000797, 0x000000cf);
+ nv_icmd(priv, 0x00079a, 0x00000002);
+ nv_icmd(priv, 0x000833, 0x04444480);
+ nv_icmd(priv, 0x0007a1, 0x00000001);
+ nv_icmd(priv, 0x0007a3, 0x00000001);
+ nv_icmd(priv, 0x0007a4, 0x00000001);
+ nv_icmd(priv, 0x0007a5, 0x00000001);
+ nv_icmd(priv, 0x000831, 0x00000004);
+ nv_icmd(priv, 0x01e100, 0x00000001);
+ nv_icmd(priv, 0x001000, 0x00000008);
+ nv_icmd(priv, 0x000039, 0x00000000);
+ nv_icmd(priv, 0x00003a, 0x00000000);
+ nv_icmd(priv, 0x00003b, 0x00000000);
+ nv_icmd(priv, 0x000380, 0x00000001);
+ nv_icmd(priv, 0x000366, 0x00000000);
+ nv_icmd(priv, 0x000367, 0x00000000);
+ nv_icmd(priv, 0x000368, 0x00000fff);
+ nv_icmd(priv, 0x000370, 0x00000000);
+ nv_icmd(priv, 0x000371, 0x00000000);
+ nv_icmd(priv, 0x000372, 0x000fffff);
+ nv_icmd(priv, 0x000813, 0x00000006);
+ nv_icmd(priv, 0x000814, 0x00000008);
+ nv_icmd(priv, 0x000957, 0x00000003);
+ nv_icmd(priv, 0x000818, 0x00000000);
+ nv_icmd(priv, 0x000819, 0x00000000);
+ nv_icmd(priv, 0x00081a, 0x00000000);
+ nv_icmd(priv, 0x00081b, 0x00000000);
+ nv_icmd(priv, 0x00081c, 0x00000000);
+ nv_icmd(priv, 0x00081d, 0x00000000);
+ nv_icmd(priv, 0x00081e, 0x00000000);
+ nv_icmd(priv, 0x00081f, 0x00000000);
+ nv_icmd(priv, 0x000848, 0x00000000);
+ nv_icmd(priv, 0x000849, 0x00000000);
+ nv_icmd(priv, 0x00084a, 0x00000000);
+ nv_icmd(priv, 0x00084b, 0x00000000);
+ nv_icmd(priv, 0x00084c, 0x00000000);
+ nv_icmd(priv, 0x00084d, 0x00000000);
+ nv_icmd(priv, 0x00084e, 0x00000000);
+ nv_icmd(priv, 0x00084f, 0x00000000);
+ nv_icmd(priv, 0x000850, 0x00000000);
+ nv_icmd(priv, 0x000851, 0x00000000);
+ nv_icmd(priv, 0x000852, 0x00000000);
+ nv_icmd(priv, 0x000853, 0x00000000);
+ nv_icmd(priv, 0x000854, 0x00000000);
+ nv_icmd(priv, 0x000855, 0x00000000);
+ nv_icmd(priv, 0x000856, 0x00000000);
+ nv_icmd(priv, 0x000857, 0x00000000);
+ nv_icmd(priv, 0x000738, 0x00000000);
+ nv_icmd(priv, 0x000b07, 0x00000002);
+ nv_icmd(priv, 0x000b08, 0x00000100);
+ nv_icmd(priv, 0x000b09, 0x00000100);
+ nv_icmd(priv, 0x000b0a, 0x00000001);
+ nv_icmd(priv, 0x000a04, 0x000000ff);
+ nv_icmd(priv, 0x00097f, 0x00000100);
+ nv_icmd(priv, 0x000a02, 0x00000001);
+ nv_icmd(priv, 0x000809, 0x00000007);
+ nv_icmd(priv, 0x00c221, 0x00000040);
+ nv_icmd(priv, 0x00c401, 0x00000001);
+ nv_icmd(priv, 0x00c402, 0x00010001);
+ nv_icmd(priv, 0x00c403, 0x00000001);
+ nv_icmd(priv, 0x00c404, 0x00000001);
+ nv_icmd(priv, 0x00c40e, 0x00000020);
+ nv_icmd(priv, 0x00c500, 0x00000003);
+ nv_icmd(priv, 0x01e100, 0x00000001);
+ nv_icmd(priv, 0x001000, 0x00000001);
+ nv_icmd(priv, 0x000b07, 0x00000002);
+ nv_icmd(priv, 0x000b08, 0x00000100);
+ nv_icmd(priv, 0x000b09, 0x00000100);
+ nv_icmd(priv, 0x000b0a, 0x00000001);
+ nv_icmd(priv, 0x01e100, 0x00000001);
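+	/* presumably closes the ICMD access window that was opened before
+	 * this table was uploaded */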
+ nv_wr32(priv, 0x400208, 0x00000000);
+}
+
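+/* Default method state for the 0xa097 (Kepler 3D) class. The values below
+ * appear to have been captured from a running binary driver; the meaning of
+ * most individual methods is not documented.
+ */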
+static void
+nve0_grctx_generate_a097(struct nvc0_graph_priv *priv)
+{
+ nv_mthd(priv, 0xa097, 0x0800, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0840, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0880, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x08c0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0900, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0940, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0980, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x09c0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0804, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0844, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0884, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x08c4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0904, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0944, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0984, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x09c4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0808, 0x00000400);
+ nv_mthd(priv, 0xa097, 0x0848, 0x00000400);
+ nv_mthd(priv, 0xa097, 0x0888, 0x00000400);
+ nv_mthd(priv, 0xa097, 0x08c8, 0x00000400);
+ nv_mthd(priv, 0xa097, 0x0908, 0x00000400);
+ nv_mthd(priv, 0xa097, 0x0948, 0x00000400);
+ nv_mthd(priv, 0xa097, 0x0988, 0x00000400);
+ nv_mthd(priv, 0xa097, 0x09c8, 0x00000400);
+ nv_mthd(priv, 0xa097, 0x080c, 0x00000300);
+ nv_mthd(priv, 0xa097, 0x084c, 0x00000300);
+ nv_mthd(priv, 0xa097, 0x088c, 0x00000300);
+ nv_mthd(priv, 0xa097, 0x08cc, 0x00000300);
+ nv_mthd(priv, 0xa097, 0x090c, 0x00000300);
+ nv_mthd(priv, 0xa097, 0x094c, 0x00000300);
+ nv_mthd(priv, 0xa097, 0x098c, 0x00000300);
+ nv_mthd(priv, 0xa097, 0x09cc, 0x00000300);
+ nv_mthd(priv, 0xa097, 0x0810, 0x000000cf);
+ nv_mthd(priv, 0xa097, 0x0850, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0890, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x08d0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0910, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0950, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0990, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x09d0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0814, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x0854, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x0894, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x08d4, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x0914, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x0954, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x0994, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x09d4, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x0818, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x0858, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x0898, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x08d8, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x0918, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x0958, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x0998, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x09d8, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x081c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x085c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x089c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x08dc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x091c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x095c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x099c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x09dc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0820, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0860, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x08a0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x08e0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0920, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0960, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x09a0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x09e0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c00, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c10, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c20, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c30, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c40, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c50, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c60, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c70, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c80, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c90, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ca0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cb0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cc0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cd0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ce0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cf0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c04, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c14, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c24, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c34, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c44, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c54, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c64, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c74, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c84, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c94, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ca4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cb4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cc4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cd4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ce4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cf4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c08, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c18, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c28, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c38, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c48, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c58, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c68, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c78, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c88, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c98, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ca8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cb8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cc8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cd8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ce8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cf8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c0c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c1c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c2c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c3c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c4c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c5c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c6c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c7c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c8c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1c9c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cbc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ccc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cdc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1cfc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d00, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d10, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d20, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d30, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d40, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d50, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d60, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d70, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d80, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d90, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1da0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1db0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1dc0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1dd0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1de0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1df0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d04, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d14, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d24, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d34, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d44, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d54, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d64, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d74, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d84, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d94, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1da4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1db4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1dc4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1dd4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1de4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1df4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d08, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d18, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d28, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d38, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d48, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d58, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d68, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d78, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d88, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d98, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1da8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1db8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1dc8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1dd8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1de8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1df8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d0c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d1c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d2c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d3c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d4c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d5c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d6c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d7c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d8c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1d9c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1dac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1dbc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1dcc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ddc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1dec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1dfc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f00, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f08, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f10, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f18, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f20, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f28, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f30, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f38, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f40, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f48, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f50, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f58, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f60, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f68, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f70, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f78, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f04, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f0c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f14, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f1c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f24, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f2c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f34, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f3c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f44, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f4c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f54, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f5c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f64, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f6c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f74, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f7c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f80, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f88, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f90, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f98, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fa0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fa8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fb0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fb8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fc0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fc8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fd0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fd8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fe0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fe8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ff0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ff8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f84, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f8c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f94, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1f9c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fa4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fb4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fbc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fc4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fcc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fd4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fdc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fe4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1fec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ff4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1ffc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2000, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2040, 0x00000011);
+ nv_mthd(priv, 0xa097, 0x2080, 0x00000020);
+ nv_mthd(priv, 0xa097, 0x20c0, 0x00000030);
+ nv_mthd(priv, 0xa097, 0x2100, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x2140, 0x00000051);
+ nv_mthd(priv, 0xa097, 0x200c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x204c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x208c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x20cc, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x210c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x214c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x2010, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2050, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2090, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x20d0, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x2110, 0x00000003);
+ nv_mthd(priv, 0xa097, 0x2150, 0x00000004);
+ nv_mthd(priv, 0xa097, 0x0380, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03a0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03c0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03e0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0384, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03a4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03c4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03e4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0388, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03a8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03c8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03e8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x038c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03ac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03cc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x03ec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0700, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0710, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0720, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0730, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0704, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0714, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0724, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0734, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0708, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0718, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0728, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0738, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2800, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2804, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2808, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x280c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2810, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2814, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2818, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x281c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2820, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2824, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2828, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x282c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2830, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2834, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2838, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x283c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2840, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2844, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2848, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x284c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2850, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2854, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2858, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x285c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2860, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2864, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2868, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x286c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2870, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2874, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2878, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x287c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2880, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2884, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2888, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x288c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2890, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2894, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2898, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x289c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28a0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28a4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28a8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28ac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28b0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28b4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28b8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28bc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28c0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28c4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28c8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28cc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28d0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28d4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28d8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28dc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28e0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28e4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28e8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28ec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28f0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28f4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28f8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x28fc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2900, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2904, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2908, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x290c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2910, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2914, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2918, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x291c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2920, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2924, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2928, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x292c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2930, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2934, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2938, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x293c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2940, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2944, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2948, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x294c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2950, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2954, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2958, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x295c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2960, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2964, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2968, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x296c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2970, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2974, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2978, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x297c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2980, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2984, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2988, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x298c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2990, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2994, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2998, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x299c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29a0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29a4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29a8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29ac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29b0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29b4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29b8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29bc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29c0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29c4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29c8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29cc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29d0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29d4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29d8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29dc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29e0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29e4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29e8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29ec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29f0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29f4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29f8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x29fc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a00, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a20, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a40, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a60, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a80, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0aa0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ac0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ae0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b00, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b20, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b40, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b60, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b80, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ba0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bc0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0be0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a04, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a24, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a44, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a64, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a84, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0aa4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ac4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ae4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b04, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b24, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b44, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b64, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b84, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ba4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bc4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0be4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a08, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a28, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a48, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a68, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a88, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0aa8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ac8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ae8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b08, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b28, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b48, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b68, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b88, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ba8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bc8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0be8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a0c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a2c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a4c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a6c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a8c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0aac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0acc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0aec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b0c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b2c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b4c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b6c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b8c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bcc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a10, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a30, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a50, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a70, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a90, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ab0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ad0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0af0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b10, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b30, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b50, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b70, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b90, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bb0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bd0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bf0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a14, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a34, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a54, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a74, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0a94, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ab4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ad4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0af4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b14, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b34, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b54, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b74, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0b94, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bb4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bd4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0bf4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c00, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c10, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c20, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c30, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c40, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c50, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c60, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c70, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c80, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c90, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ca0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cb0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cc0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cd0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ce0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cf0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c04, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c14, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c24, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c34, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c44, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c54, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c64, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c74, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c84, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c94, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ca4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cb4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cc4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cd4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ce4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cf4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c08, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c18, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c28, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c38, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c48, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c58, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c68, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c78, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c88, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0c98, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ca8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cb8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cc8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cd8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ce8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0cf8, 0x00000000);
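+	/* 0x3f800000 is IEEE-754 1.0f */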
+ nv_mthd(priv, 0xa097, 0x0c0c, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0c1c, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0c2c, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0c3c, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0c4c, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0c5c, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0c6c, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0c7c, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0c8c, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0c9c, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0cac, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0cbc, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0ccc, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0cdc, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0cec, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0cfc, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0d00, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d08, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d10, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d18, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d20, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d28, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d30, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d38, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d04, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d0c, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d14, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d1c, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d24, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d2c, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d34, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d3c, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e00, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0e10, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0e20, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0e30, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0e40, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0e50, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0e60, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0e70, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0e80, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0e90, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ea0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0eb0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ec0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ed0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ee0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ef0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0e04, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e14, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e24, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e34, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e44, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e54, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e64, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e74, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e84, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e94, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0ea4, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0eb4, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0ec4, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0ed4, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0ee4, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0ef4, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e08, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e18, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e28, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e38, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e48, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e58, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e68, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e78, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e88, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0e98, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0ea8, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0eb8, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0ec8, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0ed8, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0ee8, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0ef8, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d40, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d48, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d50, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d58, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d44, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d4c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d54, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d5c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1e00, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e20, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e40, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e60, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e80, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1ea0, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1ec0, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1ee0, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e04, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e24, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e44, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e64, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e84, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1ea4, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1ec4, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1ee4, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e08, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1e28, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1e48, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1e68, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1e88, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1ea8, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1ec8, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1ee8, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1e0c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e2c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e4c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e6c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e8c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1eac, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1ecc, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1eec, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e10, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e30, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e50, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e70, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e90, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1eb0, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1ed0, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1ef0, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e14, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1e34, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1e54, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1e74, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1e94, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1eb4, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1ed4, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1ef4, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1e18, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e38, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e58, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e78, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1e98, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1eb8, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1ed8, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1ef8, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x3400, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3404, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3408, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x340c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3410, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3414, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3418, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x341c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3420, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3424, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3428, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x342c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3430, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3434, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3438, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x343c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3440, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3444, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3448, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x344c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3450, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3454, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3458, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x345c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3460, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3464, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3468, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x346c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3470, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3474, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3478, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x347c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3480, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3484, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3488, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x348c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3490, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3494, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3498, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x349c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34a0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34a4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34a8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34ac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34b0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34b4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34b8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34bc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34c0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34c4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34c8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34cc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34d0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34d4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34d8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34dc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34e0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34e4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34e8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34ec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34f0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34f4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34f8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x34fc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3500, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3504, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3508, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x350c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3510, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3514, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3518, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x351c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3520, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3524, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3528, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x352c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3530, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3534, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3538, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x353c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3540, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3544, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3548, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x354c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3550, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3554, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3558, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x355c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3560, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3564, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3568, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x356c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3570, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3574, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3578, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x357c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3580, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3584, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3588, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x358c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3590, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3594, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x3598, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x359c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35a0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35a4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35a8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35ac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35b0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35b4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35b8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35bc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35c0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35c4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35c8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35cc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35d0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35d4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35d8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35dc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35e0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35e4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35e8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35ec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35f0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35f4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35f8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x35fc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x030c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1944, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1514, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d68, 0x0000ffff);
+ nv_mthd(priv, 0xa097, 0x121c, 0x0fac6881);
+ nv_mthd(priv, 0xa097, 0x0fac, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1538, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x0fe0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0fe4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0fe8, 0x00000014);
+ nv_mthd(priv, 0xa097, 0x0fec, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x0ff0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x179c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1228, 0x00000400);
+ nv_mthd(priv, 0xa097, 0x122c, 0x00000300);
+ nv_mthd(priv, 0xa097, 0x1230, 0x00010001);
+ nv_mthd(priv, 0xa097, 0x07f8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x15b4, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x15cc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1534, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0fb0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x15d0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x153c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x16b4, 0x00000003);
+ nv_mthd(priv, 0xa097, 0x0fbc, 0x0000ffff);
+ nv_mthd(priv, 0xa097, 0x0fc0, 0x0000ffff);
+ nv_mthd(priv, 0xa097, 0x0fc4, 0x0000ffff);
+ nv_mthd(priv, 0xa097, 0x0fc8, 0x0000ffff);
+ nv_mthd(priv, 0xa097, 0x0df8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0dfc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1948, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1970, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x161c, 0x000009f0);
+ nv_mthd(priv, 0xa097, 0x0dcc, 0x00000010);
+ nv_mthd(priv, 0xa097, 0x163c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x15e4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1160, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1164, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1168, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x116c, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1170, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1174, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1178, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x117c, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1180, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1184, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1188, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x118c, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1190, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1194, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1198, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x119c, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11a0, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11a4, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11a8, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11ac, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11b0, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11b4, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11b8, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11bc, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11c0, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11c4, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11c8, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11cc, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11d0, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11d4, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11d8, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x11dc, 0x25e00040);
+ nv_mthd(priv, 0xa097, 0x1880, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1884, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1888, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x188c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1890, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1894, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1898, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x189c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18a0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18a4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18a8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18ac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18b0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18b4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18b8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18bc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18c0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18c4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18c8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18cc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18d0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18d4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18d8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18dc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18e0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18e4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18e8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18ec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18f0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18f4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18f8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x18fc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0f84, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0f88, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x17c8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x17cc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x17d0, 0x000000ff);
+ nv_mthd(priv, 0xa097, 0x17d4, 0xffffffff);
+ nv_mthd(priv, 0xa097, 0x17d8, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x17dc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x15f4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x15f8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1434, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1438, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d74, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0dec, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x13a4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1318, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1644, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0748, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0de8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1648, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x12a4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1120, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1124, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1128, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x112c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1118, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x164c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1658, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1910, 0x00000290);
+ nv_mthd(priv, 0xa097, 0x1518, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x165c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1520, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1604, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1570, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x13b0, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x13b4, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x020c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1670, 0x30201000);
+ nv_mthd(priv, 0xa097, 0x1674, 0x70605040);
+ nv_mthd(priv, 0xa097, 0x1678, 0xb8a89888);
+ nv_mthd(priv, 0xa097, 0x167c, 0xf8e8d8c8);
+ nv_mthd(priv, 0xa097, 0x166c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1680, 0x00ffff00);
+ nv_mthd(priv, 0xa097, 0x12d0, 0x00000003);
+ nv_mthd(priv, 0xa097, 0x12d4, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1684, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1688, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0dac, 0x00001b02);
+ nv_mthd(priv, 0xa097, 0x0db0, 0x00001b02);
+ nv_mthd(priv, 0xa097, 0x0db4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x168c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x15bc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x156c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x187c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1110, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x0dc0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0dc4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0dc8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1234, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1690, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x12ac, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x0790, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0794, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0798, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x079c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x07a0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x077c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1000, 0x00000010);
+ nv_mthd(priv, 0xa097, 0x10fc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1290, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0218, 0x00000010);
+ nv_mthd(priv, 0xa097, 0x12d8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x12dc, 0x00000010);
+ nv_mthd(priv, 0xa097, 0x0d94, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x155c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1560, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1564, 0x00000fff);
+ nv_mthd(priv, 0xa097, 0x1574, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1578, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x157c, 0x000fffff);
+ nv_mthd(priv, 0xa097, 0x1354, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1610, 0x00000012);
+ nv_mthd(priv, 0xa097, 0x1608, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x160c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x260c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x07ac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x162c, 0x00000003);
+ nv_mthd(priv, 0xa097, 0x0210, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0320, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0324, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0328, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x032c, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0330, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0334, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0338, 0x3f800000);
+ nv_mthd(priv, 0xa097, 0x0750, 0x00000000);
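+	/* byte-ramp patterns (0x09, 0x19, 0x29, ...) follow; possibly default
+	 * lookup tables, though this is only a guess from the values */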
+ nv_mthd(priv, 0xa097, 0x0760, 0x39291909);
+ nv_mthd(priv, 0xa097, 0x0764, 0x79695949);
+ nv_mthd(priv, 0xa097, 0x0768, 0xb9a99989);
+ nv_mthd(priv, 0xa097, 0x076c, 0xf9e9d9c9);
+ nv_mthd(priv, 0xa097, 0x0770, 0x30201000);
+ nv_mthd(priv, 0xa097, 0x0774, 0x70605040);
+ nv_mthd(priv, 0xa097, 0x0778, 0x00009080);
+ nv_mthd(priv, 0xa097, 0x0780, 0x39291909);
+ nv_mthd(priv, 0xa097, 0x0784, 0x79695949);
+ nv_mthd(priv, 0xa097, 0x0788, 0xb9a99989);
+ nv_mthd(priv, 0xa097, 0x078c, 0xf9e9d9c9);
+ nv_mthd(priv, 0xa097, 0x07d0, 0x30201000);
+ nv_mthd(priv, 0xa097, 0x07d4, 0x70605040);
+ nv_mthd(priv, 0xa097, 0x07d8, 0x00009080);
+ nv_mthd(priv, 0xa097, 0x037c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x0740, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0744, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x2600, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1918, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x191c, 0x00000900);
+ nv_mthd(priv, 0xa097, 0x1920, 0x00000405);
+ nv_mthd(priv, 0xa097, 0x1308, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1924, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x13ac, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x192c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x193c, 0x00002c1c);
+ nv_mthd(priv, 0xa097, 0x0d7c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0f8c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x02c0, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1510, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1940, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ff4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0ff8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x194c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1950, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1968, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1590, 0x0000003f);
+ nv_mthd(priv, 0xa097, 0x07e8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x07ec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x07f0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x07f4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x196c, 0x00000011);
+ nv_mthd(priv, 0xa097, 0x02e4, 0x0000b001);
+ nv_mthd(priv, 0xa097, 0x036c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0370, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x197c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0fcc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0fd0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x02d8, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x1980, 0x00000080);
+ nv_mthd(priv, 0xa097, 0x1504, 0x00000080);
+ nv_mthd(priv, 0xa097, 0x1984, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0300, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x13a8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x12ec, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1310, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1314, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1380, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1384, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1388, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x138c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1390, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1394, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x139c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1398, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1594, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1598, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x159c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x15a0, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x15a4, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x0f54, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0f58, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0f5c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x19bc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0f9c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0fa0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x12cc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x12e8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x130c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1360, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1364, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1368, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x136c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1370, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1374, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1378, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x137c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x133c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1340, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1344, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1348, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x134c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1350, 0x00000002);
+ nv_mthd(priv, 0xa097, 0x1358, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x12e4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x131c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1320, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1324, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1328, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x19c0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1140, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x19c4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x19c8, 0x00001500);
+ nv_mthd(priv, 0xa097, 0x135c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0f90, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x19e0, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x19e4, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x19e8, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x19ec, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x19f0, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x19f4, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x19f8, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x19fc, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x19cc, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x15b8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1a00, 0x00001111);
+ nv_mthd(priv, 0xa097, 0x1a04, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1a08, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1a0c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1a10, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1a14, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1a18, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1a1c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d6c, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x0d70, 0xffff0000);
+ nv_mthd(priv, 0xa097, 0x10f8, 0x00001010);
+ nv_mthd(priv, 0xa097, 0x0d80, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d84, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d88, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d8c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0d90, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0da0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x07a4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x07a8, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1508, 0x80000000);
+ nv_mthd(priv, 0xa097, 0x150c, 0x40000000);
+ nv_mthd(priv, 0xa097, 0x1668, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0318, 0x00000008);
+ nv_mthd(priv, 0xa097, 0x031c, 0x00000008);
+ nv_mthd(priv, 0xa097, 0x0d9c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x0374, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0378, 0x00000020);
+ nv_mthd(priv, 0xa097, 0x07dc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x074c, 0x00000055);
+ nv_mthd(priv, 0xa097, 0x1420, 0x00000003);
+ nv_mthd(priv, 0xa097, 0x17bc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x17c0, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x17c4, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1008, 0x00000008);
+ nv_mthd(priv, 0xa097, 0x100c, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x1010, 0x0000012c);
+ nv_mthd(priv, 0xa097, 0x0d60, 0x00000040);
+ nv_mthd(priv, 0xa097, 0x075c, 0x00000003);
+ nv_mthd(priv, 0xa097, 0x1018, 0x00000020);
+ nv_mthd(priv, 0xa097, 0x101c, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1020, 0x00000020);
+ nv_mthd(priv, 0xa097, 0x1024, 0x00000001);
+ nv_mthd(priv, 0xa097, 0x1444, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x1448, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x144c, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0360, 0x20164010);
+ nv_mthd(priv, 0xa097, 0x0364, 0x00000020);
+ nv_mthd(priv, 0xa097, 0x0368, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0de4, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0204, 0x00000006);
+ nv_mthd(priv, 0xa097, 0x0208, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x02cc, 0x003fffff);
+ nv_mthd(priv, 0xa097, 0x02d0, 0x003fffff);
+ nv_mthd(priv, 0xa097, 0x1220, 0x00000005);
+ nv_mthd(priv, 0xa097, 0x0fdc, 0x00000000);
+ nv_mthd(priv, 0xa097, 0x0f98, 0x00400008);
+ nv_mthd(priv, 0xa097, 0x1284, 0x08000080);
+ nv_mthd(priv, 0xa097, 0x1450, 0x00400008);
+ nv_mthd(priv, 0xa097, 0x1454, 0x08000080);
+ nv_mthd(priv, 0xa097, 0x0214, 0x00000000);
+}
+
+static void
+nve0_grctx_generate_902d(struct nvc0_graph_priv *priv)
+{
+ nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
+ nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
+ nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
+ nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
+ nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
+ nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
+ nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
+ nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
+ nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
+ nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
+ nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
+ nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
+ nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
+ nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
+ nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
+ nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
+ nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
+ nv_mthd(priv, 0x902d, 0x3410, 0x00000000);
+}
+
+static void
+nve0_graph_generate_unk40xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x404010, 0x0);
+ nv_wr32(priv, 0x404014, 0x0);
+ nv_wr32(priv, 0x404018, 0x0);
+ nv_wr32(priv, 0x40401c, 0x0);
+ nv_wr32(priv, 0x404020, 0x0);
+ nv_wr32(priv, 0x404024, 0xe000);
+ nv_wr32(priv, 0x404028, 0x0);
+ nv_wr32(priv, 0x4040a8, 0x0);
+ nv_wr32(priv, 0x4040ac, 0x0);
+ nv_wr32(priv, 0x4040b0, 0x0);
+ nv_wr32(priv, 0x4040b4, 0x0);
+ nv_wr32(priv, 0x4040b8, 0x0);
+ nv_wr32(priv, 0x4040bc, 0x0);
+ nv_wr32(priv, 0x4040c0, 0x0);
+ nv_wr32(priv, 0x4040c4, 0x0);
+ nv_wr32(priv, 0x4040c8, 0xf800008f);
+ nv_wr32(priv, 0x4040d0, 0x0);
+ nv_wr32(priv, 0x4040d4, 0x0);
+ nv_wr32(priv, 0x4040d8, 0x0);
+ nv_wr32(priv, 0x4040dc, 0x0);
+ nv_wr32(priv, 0x4040e0, 0x0);
+ nv_wr32(priv, 0x4040e4, 0x0);
+ nv_wr32(priv, 0x4040e8, 0x1000);
+ nv_wr32(priv, 0x4040f8, 0x0);
+ nv_wr32(priv, 0x404130, 0x0);
+ nv_wr32(priv, 0x404134, 0x0);
+ nv_wr32(priv, 0x404138, 0x20000040);
+ nv_wr32(priv, 0x404150, 0x2e);
+ nv_wr32(priv, 0x404154, 0x400);
+ nv_wr32(priv, 0x404158, 0x200);
+ nv_wr32(priv, 0x404164, 0x55);
+ nv_wr32(priv, 0x4041a0, 0x0);
+ nv_wr32(priv, 0x4041a4, 0x0);
+ nv_wr32(priv, 0x4041a8, 0x0);
+ nv_wr32(priv, 0x4041ac, 0x0);
+ nv_wr32(priv, 0x404200, 0x0);
+ nv_wr32(priv, 0x404204, 0x0);
+ nv_wr32(priv, 0x404208, 0x0);
+ nv_wr32(priv, 0x40420c, 0x0);
+}
+
+static void
+nve0_graph_generate_unk44xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x404404, 0x0);
+ nv_wr32(priv, 0x404408, 0x0);
+ nv_wr32(priv, 0x40440c, 0x0);
+ nv_wr32(priv, 0x404410, 0x0);
+ nv_wr32(priv, 0x404414, 0x0);
+ nv_wr32(priv, 0x404418, 0x0);
+ nv_wr32(priv, 0x40441c, 0x0);
+ nv_wr32(priv, 0x404420, 0x0);
+ nv_wr32(priv, 0x404424, 0x0);
+ nv_wr32(priv, 0x404428, 0x0);
+ nv_wr32(priv, 0x40442c, 0x0);
+ nv_wr32(priv, 0x404430, 0x0);
+ nv_wr32(priv, 0x404434, 0x0);
+ nv_wr32(priv, 0x404438, 0x0);
+ nv_wr32(priv, 0x404460, 0x0);
+ nv_wr32(priv, 0x404464, 0x0);
+ nv_wr32(priv, 0x404468, 0xffffff);
+ nv_wr32(priv, 0x40446c, 0x0);
+ nv_wr32(priv, 0x404480, 0x1);
+ nv_wr32(priv, 0x404498, 0x1);
+}
+
+static void
+nve0_graph_generate_unk46xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x404604, 0x14);
+ nv_wr32(priv, 0x404608, 0x0);
+ nv_wr32(priv, 0x40460c, 0x3fff);
+ nv_wr32(priv, 0x404610, 0x100);
+ nv_wr32(priv, 0x404618, 0x0);
+ nv_wr32(priv, 0x40461c, 0x0);
+ nv_wr32(priv, 0x404620, 0x0);
+ nv_wr32(priv, 0x404624, 0x0);
+ nv_wr32(priv, 0x40462c, 0x0);
+ nv_wr32(priv, 0x404630, 0x0);
+ nv_wr32(priv, 0x404640, 0x0);
+ nv_wr32(priv, 0x404654, 0x0);
+ nv_wr32(priv, 0x404660, 0x0);
+ nv_wr32(priv, 0x404678, 0x0);
+ nv_wr32(priv, 0x40467c, 0x2);
+ nv_wr32(priv, 0x404680, 0x0);
+ nv_wr32(priv, 0x404684, 0x0);
+ nv_wr32(priv, 0x404688, 0x0);
+ nv_wr32(priv, 0x40468c, 0x0);
+ nv_wr32(priv, 0x404690, 0x0);
+ nv_wr32(priv, 0x404694, 0x0);
+ nv_wr32(priv, 0x404698, 0x0);
+ nv_wr32(priv, 0x40469c, 0x0);
+ nv_wr32(priv, 0x4046a0, 0x7f0080);
+ nv_wr32(priv, 0x4046a4, 0x0);
+ nv_wr32(priv, 0x4046a8, 0x0);
+ nv_wr32(priv, 0x4046ac, 0x0);
+ nv_wr32(priv, 0x4046b0, 0x0);
+ nv_wr32(priv, 0x4046b4, 0x0);
+ nv_wr32(priv, 0x4046b8, 0x0);
+ nv_wr32(priv, 0x4046bc, 0x0);
+ nv_wr32(priv, 0x4046c0, 0x0);
+ nv_wr32(priv, 0x4046c8, 0x0);
+ nv_wr32(priv, 0x4046cc, 0x0);
+ nv_wr32(priv, 0x4046d0, 0x0);
+}
+
+static void
+nve0_graph_generate_unk47xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x404700, 0x0);
+ nv_wr32(priv, 0x404704, 0x0);
+ nv_wr32(priv, 0x404708, 0x0);
+ nv_wr32(priv, 0x404718, 0x0);
+ nv_wr32(priv, 0x40471c, 0x0);
+ nv_wr32(priv, 0x404720, 0x0);
+ nv_wr32(priv, 0x404724, 0x0);
+ nv_wr32(priv, 0x404728, 0x0);
+ nv_wr32(priv, 0x40472c, 0x0);
+ nv_wr32(priv, 0x404730, 0x0);
+ nv_wr32(priv, 0x404734, 0x100);
+ nv_wr32(priv, 0x404738, 0x0);
+ nv_wr32(priv, 0x40473c, 0x0);
+ nv_wr32(priv, 0x404744, 0x0);
+ nv_wr32(priv, 0x404748, 0x0);
+ nv_wr32(priv, 0x404754, 0x0);
+}
+
+static void
+nve0_graph_generate_unk58xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x405800, 0xf8000bf);
+ nv_wr32(priv, 0x405830, 0x2180648);
+ nv_wr32(priv, 0x405834, 0x8000000);
+ nv_wr32(priv, 0x405838, 0x0);
+ nv_wr32(priv, 0x405854, 0x0);
+ nv_wr32(priv, 0x405870, 0x1);
+ nv_wr32(priv, 0x405874, 0x1);
+ nv_wr32(priv, 0x405878, 0x1);
+ nv_wr32(priv, 0x40587c, 0x1);
+ nv_wr32(priv, 0x405a00, 0x0);
+ nv_wr32(priv, 0x405a04, 0x0);
+ nv_wr32(priv, 0x405a18, 0x0);
+ nv_wr32(priv, 0x405b00, 0x0);
+ nv_wr32(priv, 0x405b10, 0x1000);
+}
+
+static void
+nve0_graph_generate_unk60xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x406020, 0x4103c1);
+ nv_wr32(priv, 0x406028, 0x1);
+ nv_wr32(priv, 0x40602c, 0x1);
+ nv_wr32(priv, 0x406030, 0x1);
+ nv_wr32(priv, 0x406034, 0x1);
+}
+
+static void
+nve0_graph_generate_unk64xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x4064a8, 0x0);
+ nv_wr32(priv, 0x4064ac, 0x3fff);
+ nv_wr32(priv, 0x4064b4, 0x0);
+ nv_wr32(priv, 0x4064b8, 0x0);
+ nv_wr32(priv, 0x4064c0, 0x801a00f0);
+ nv_wr32(priv, 0x4064c4, 0x192ffff);
+ nv_wr32(priv, 0x4064c8, 0x1800600);
+ nv_wr32(priv, 0x4064cc, 0x0);
+ nv_wr32(priv, 0x4064d0, 0x0);
+ nv_wr32(priv, 0x4064d4, 0x0);
+ nv_wr32(priv, 0x4064d8, 0x0);
+ nv_wr32(priv, 0x4064dc, 0x0);
+ nv_wr32(priv, 0x4064e0, 0x0);
+ nv_wr32(priv, 0x4064e4, 0x0);
+ nv_wr32(priv, 0x4064e8, 0x0);
+ nv_wr32(priv, 0x4064ec, 0x0);
+ nv_wr32(priv, 0x4064fc, 0x22a);
+}
+
+static void
+nve0_graph_generate_unk70xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x407040, 0x0);
+}
+
+static void
+nve0_graph_generate_unk78xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x407804, 0x23);
+ nv_wr32(priv, 0x40780c, 0xa418820);
+ nv_wr32(priv, 0x407810, 0x62080e6);
+ nv_wr32(priv, 0x407814, 0x20398a4);
+ nv_wr32(priv, 0x407818, 0xe629062);
+ nv_wr32(priv, 0x40781c, 0xa418820);
+ nv_wr32(priv, 0x407820, 0xe6);
+ nv_wr32(priv, 0x4078bc, 0x103);
+}
+
+static void
+nve0_graph_generate_unk80xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x408000, 0x0);
+ nv_wr32(priv, 0x408004, 0x0);
+ nv_wr32(priv, 0x408008, 0x30);
+ nv_wr32(priv, 0x40800c, 0x0);
+ nv_wr32(priv, 0x408010, 0x0);
+ nv_wr32(priv, 0x408014, 0x69);
+ nv_wr32(priv, 0x408018, 0xe100e100);
+ nv_wr32(priv, 0x408064, 0x0);
+}
+
+static void
+nve0_graph_generate_unk88xx(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x408800, 0x2802a3c);
+ nv_wr32(priv, 0x408804, 0x40);
+ nv_wr32(priv, 0x408808, 0x1043e005);
+ nv_wr32(priv, 0x408840, 0xb);
+ nv_wr32(priv, 0x408900, 0x3080b801);
+ nv_wr32(priv, 0x408904, 0x62000001);
+ nv_wr32(priv, 0x408908, 0xc8102f);
+ nv_wr32(priv, 0x408980, 0x11d);
+}
+
+static void
+nve0_graph_generate_gpc(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x418380, 0x16);
+ nv_wr32(priv, 0x418400, 0x38004e00);
+ nv_wr32(priv, 0x418404, 0x71e0ffff);
+ nv_wr32(priv, 0x41840c, 0x1008);
+ nv_wr32(priv, 0x418410, 0xfff0fff);
+ nv_wr32(priv, 0x418414, 0x2200fff);
+ nv_wr32(priv, 0x418450, 0x0);
+ nv_wr32(priv, 0x418454, 0x0);
+ nv_wr32(priv, 0x418458, 0x0);
+ nv_wr32(priv, 0x41845c, 0x0);
+ nv_wr32(priv, 0x418460, 0x0);
+ nv_wr32(priv, 0x418464, 0x0);
+ nv_wr32(priv, 0x418468, 0x1);
+ nv_wr32(priv, 0x41846c, 0x0);
+ nv_wr32(priv, 0x418470, 0x0);
+ nv_wr32(priv, 0x418600, 0x1f);
+ nv_wr32(priv, 0x418684, 0xf);
+ nv_wr32(priv, 0x418700, 0x2);
+ nv_wr32(priv, 0x418704, 0x80);
+ nv_wr32(priv, 0x418708, 0x0);
+ nv_wr32(priv, 0x41870c, 0x0);
+ nv_wr32(priv, 0x418710, 0x0);
+ nv_wr32(priv, 0x418800, 0x7006860a);
+ nv_wr32(priv, 0x418808, 0x0);
+ nv_wr32(priv, 0x41880c, 0x0);
+ nv_wr32(priv, 0x418810, 0x0);
+ nv_wr32(priv, 0x418828, 0x44);
+ nv_wr32(priv, 0x418830, 0x10000001);
+ nv_wr32(priv, 0x4188d8, 0x8);
+ nv_wr32(priv, 0x4188e0, 0x1000000);
+ nv_wr32(priv, 0x4188e8, 0x0);
+ nv_wr32(priv, 0x4188ec, 0x0);
+ nv_wr32(priv, 0x4188f0, 0x0);
+ nv_wr32(priv, 0x4188f4, 0x0);
+ nv_wr32(priv, 0x4188f8, 0x0);
+ nv_wr32(priv, 0x4188fc, 0x20100018);
+ nv_wr32(priv, 0x41891c, 0xff00ff);
+ nv_wr32(priv, 0x418924, 0x0);
+ nv_wr32(priv, 0x418928, 0xffff00);
+ nv_wr32(priv, 0x41892c, 0xff00);
+ nv_wr32(priv, 0x418a00, 0x0);
+ nv_wr32(priv, 0x418a04, 0x0);
+ nv_wr32(priv, 0x418a08, 0x0);
+ nv_wr32(priv, 0x418a0c, 0x10000);
+ nv_wr32(priv, 0x418a10, 0x0);
+ nv_wr32(priv, 0x418a14, 0x0);
+ nv_wr32(priv, 0x418a18, 0x0);
+ nv_wr32(priv, 0x418a20, 0x0);
+ nv_wr32(priv, 0x418a24, 0x0);
+ nv_wr32(priv, 0x418a28, 0x0);
+ nv_wr32(priv, 0x418a2c, 0x10000);
+ nv_wr32(priv, 0x418a30, 0x0);
+ nv_wr32(priv, 0x418a34, 0x0);
+ nv_wr32(priv, 0x418a38, 0x0);
+ nv_wr32(priv, 0x418a40, 0x0);
+ nv_wr32(priv, 0x418a44, 0x0);
+ nv_wr32(priv, 0x418a48, 0x0);
+ nv_wr32(priv, 0x418a4c, 0x10000);
+ nv_wr32(priv, 0x418a50, 0x0);
+ nv_wr32(priv, 0x418a54, 0x0);
+ nv_wr32(priv, 0x418a58, 0x0);
+ nv_wr32(priv, 0x418a60, 0x0);
+ nv_wr32(priv, 0x418a64, 0x0);
+ nv_wr32(priv, 0x418a68, 0x0);
+ nv_wr32(priv, 0x418a6c, 0x10000);
+ nv_wr32(priv, 0x418a70, 0x0);
+ nv_wr32(priv, 0x418a74, 0x0);
+ nv_wr32(priv, 0x418a78, 0x0);
+ nv_wr32(priv, 0x418a80, 0x0);
+ nv_wr32(priv, 0x418a84, 0x0);
+ nv_wr32(priv, 0x418a88, 0x0);
+ nv_wr32(priv, 0x418a8c, 0x10000);
+ nv_wr32(priv, 0x418a90, 0x0);
+ nv_wr32(priv, 0x418a94, 0x0);
+ nv_wr32(priv, 0x418a98, 0x0);
+ nv_wr32(priv, 0x418aa0, 0x0);
+ nv_wr32(priv, 0x418aa4, 0x0);
+ nv_wr32(priv, 0x418aa8, 0x0);
+ nv_wr32(priv, 0x418aac, 0x10000);
+ nv_wr32(priv, 0x418ab0, 0x0);
+ nv_wr32(priv, 0x418ab4, 0x0);
+ nv_wr32(priv, 0x418ab8, 0x0);
+ nv_wr32(priv, 0x418ac0, 0x0);
+ nv_wr32(priv, 0x418ac4, 0x0);
+ nv_wr32(priv, 0x418ac8, 0x0);
+ nv_wr32(priv, 0x418acc, 0x10000);
+ nv_wr32(priv, 0x418ad0, 0x0);
+ nv_wr32(priv, 0x418ad4, 0x0);
+ nv_wr32(priv, 0x418ad8, 0x0);
+ nv_wr32(priv, 0x418ae0, 0x0);
+ nv_wr32(priv, 0x418ae4, 0x0);
+ nv_wr32(priv, 0x418ae8, 0x0);
+ nv_wr32(priv, 0x418aec, 0x10000);
+ nv_wr32(priv, 0x418af0, 0x0);
+ nv_wr32(priv, 0x418af4, 0x0);
+ nv_wr32(priv, 0x418af8, 0x0);
+ nv_wr32(priv, 0x418b00, 0x6);
+ nv_wr32(priv, 0x418b08, 0xa418820);
+ nv_wr32(priv, 0x418b0c, 0x62080e6);
+ nv_wr32(priv, 0x418b10, 0x20398a4);
+ nv_wr32(priv, 0x418b14, 0xe629062);
+ nv_wr32(priv, 0x418b18, 0xa418820);
+ nv_wr32(priv, 0x418b1c, 0xe6);
+ nv_wr32(priv, 0x418bb8, 0x103);
+ nv_wr32(priv, 0x418c08, 0x1);
+ nv_wr32(priv, 0x418c10, 0x0);
+ nv_wr32(priv, 0x418c14, 0x0);
+ nv_wr32(priv, 0x418c18, 0x0);
+ nv_wr32(priv, 0x418c1c, 0x0);
+ nv_wr32(priv, 0x418c20, 0x0);
+ nv_wr32(priv, 0x418c24, 0x0);
+ nv_wr32(priv, 0x418c28, 0x0);
+ nv_wr32(priv, 0x418c2c, 0x0);
+ nv_wr32(priv, 0x418c40, 0xffffffff);
+ nv_wr32(priv, 0x418c6c, 0x1);
+ nv_wr32(priv, 0x418c80, 0x20200004);
+ nv_wr32(priv, 0x418c8c, 0x1);
+ nv_wr32(priv, 0x419000, 0x780);
+ nv_wr32(priv, 0x419004, 0x0);
+ nv_wr32(priv, 0x419008, 0x0);
+ nv_wr32(priv, 0x419014, 0x4);
+}
+
+static void
+nve0_graph_generate_tpc(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x419848, 0x0);
+ nv_wr32(priv, 0x419864, 0x129);
+ nv_wr32(priv, 0x419888, 0x0);
+ nv_wr32(priv, 0x419a00, 0xf0);
+ nv_wr32(priv, 0x419a04, 0x1);
+ nv_wr32(priv, 0x419a08, 0x21);
+ nv_wr32(priv, 0x419a0c, 0x20000);
+ nv_wr32(priv, 0x419a10, 0x0);
+ nv_wr32(priv, 0x419a14, 0x200);
+ nv_wr32(priv, 0x419a1c, 0xc000);
+ nv_wr32(priv, 0x419a20, 0x800);
+ nv_wr32(priv, 0x419a30, 0x1);
+ nv_wr32(priv, 0x419ac4, 0x37f440);
+ nv_wr32(priv, 0x419c00, 0xa);
+ nv_wr32(priv, 0x419c04, 0x80000006);
+ nv_wr32(priv, 0x419c08, 0x2);
+ nv_wr32(priv, 0x419c20, 0x0);
+ nv_wr32(priv, 0x419c24, 0x84210);
+ nv_wr32(priv, 0x419c28, 0x3efbefbe);
+ nv_wr32(priv, 0x419ce8, 0x0);
+ nv_wr32(priv, 0x419cf4, 0x3203);
+ nv_wr32(priv, 0x419e04, 0x0);
+ nv_wr32(priv, 0x419e08, 0x0);
+ nv_wr32(priv, 0x419e0c, 0x0);
+ nv_wr32(priv, 0x419e10, 0x402);
+ nv_wr32(priv, 0x419e44, 0x13eff2);
+ nv_wr32(priv, 0x419e48, 0x0);
+ nv_wr32(priv, 0x419e4c, 0x7f);
+ nv_wr32(priv, 0x419e50, 0x0);
+ nv_wr32(priv, 0x419e54, 0x0);
+ nv_wr32(priv, 0x419e58, 0x0);
+ nv_wr32(priv, 0x419e5c, 0x0);
+ nv_wr32(priv, 0x419e60, 0x0);
+ nv_wr32(priv, 0x419e64, 0x0);
+ nv_wr32(priv, 0x419e68, 0x0);
+ nv_wr32(priv, 0x419e6c, 0x0);
+ nv_wr32(priv, 0x419e70, 0x0);
+ nv_wr32(priv, 0x419e74, 0x0);
+ nv_wr32(priv, 0x419e78, 0x0);
+ nv_wr32(priv, 0x419e7c, 0x0);
+ nv_wr32(priv, 0x419e80, 0x0);
+ nv_wr32(priv, 0x419e84, 0x0);
+ nv_wr32(priv, 0x419e88, 0x0);
+ nv_wr32(priv, 0x419e8c, 0x0);
+ nv_wr32(priv, 0x419e90, 0x0);
+ nv_wr32(priv, 0x419e94, 0x0);
+ nv_wr32(priv, 0x419e98, 0x0);
+ nv_wr32(priv, 0x419eac, 0x1fcf);
+ nv_wr32(priv, 0x419eb0, 0xd3f);
+ nv_wr32(priv, 0x419ec8, 0x1304f);
+ nv_wr32(priv, 0x419f30, 0x0);
+ nv_wr32(priv, 0x419f34, 0x0);
+ nv_wr32(priv, 0x419f38, 0x0);
+ nv_wr32(priv, 0x419f3c, 0x0);
+ nv_wr32(priv, 0x419f40, 0x0);
+ nv_wr32(priv, 0x419f44, 0x0);
+ nv_wr32(priv, 0x419f48, 0x0);
+ nv_wr32(priv, 0x419f4c, 0x0);
+ nv_wr32(priv, 0x419f58, 0x0);
+ nv_wr32(priv, 0x419f78, 0xb);
+}
+
+static void
+nve0_graph_generate_tpcunk(struct nvc0_graph_priv *priv)
+{
+ nv_wr32(priv, 0x41be24, 0x6);
+ nv_wr32(priv, 0x41bec0, 0x12180000);
+ nv_wr32(priv, 0x41bec4, 0x37f7f);
+ nv_wr32(priv, 0x41bee4, 0x6480430);
+ nv_wr32(priv, 0x41bf00, 0xa418820);
+ nv_wr32(priv, 0x41bf04, 0x62080e6);
+ nv_wr32(priv, 0x41bf08, 0x20398a4);
+ nv_wr32(priv, 0x41bf0c, 0xe629062);
+ nv_wr32(priv, 0x41bf10, 0xa418820);
+ nv_wr32(priv, 0x41bf14, 0xe6);
+ nv_wr32(priv, 0x41bfd0, 0x900103);
+ nv_wr32(priv, 0x41bfe0, 0x400001);
+ nv_wr32(priv, 0x41bfe4, 0x0);
+}
+
+int
+nve0_grctx_generate(struct nvc0_graph_priv *priv)
+{
+ struct nvc0_grctx info;
+ int ret, i, gpc, tpc, id;
+ u32 data[6] = {}, data2[2] = {}, tmp;
+ u32 tpc_set = 0, tpc_mask = 0;
+ u32 magic[GPC_MAX][2], offset;
+ u8 tpcnr[GPC_MAX], a, b;
+ u8 shift, ntpcv;
+
+ ret = nvc0_grctx_init(priv, &info);
+ if (ret)
+ return ret;
+
+ nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x400204, 0x00000000);
+ nv_wr32(priv, 0x400208, 0x00000000);
+
+ nve0_graph_generate_unk40xx(priv);
+ nve0_graph_generate_unk44xx(priv);
+ nve0_graph_generate_unk46xx(priv);
+ nve0_graph_generate_unk47xx(priv);
+ nve0_graph_generate_unk58xx(priv);
+ nve0_graph_generate_unk60xx(priv);
+ nve0_graph_generate_unk64xx(priv);
+ nve0_graph_generate_unk70xx(priv);
+ nve0_graph_generate_unk78xx(priv);
+ nve0_graph_generate_unk80xx(priv);
+ nve0_graph_generate_unk88xx(priv);
+ nve0_graph_generate_gpc(priv);
+ nve0_graph_generate_tpc(priv);
+ nve0_graph_generate_tpcunk(priv);
+
+ nv_wr32(priv, 0x404154, 0x0);
+
+ mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+ mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+ mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+ mmio_list(0x40800c, 0x00000000, 8, 1);
+ mmio_list(0x408010, 0x80000000, 0, 0);
+ mmio_list(0x419004, 0x00000000, 8, 1);
+ mmio_list(0x419008, 0x00000000, 0, 0);
+ mmio_list(0x4064cc, 0x80000000, 0, 0);
+ mmio_list(0x408004, 0x00000000, 8, 0);
+ mmio_list(0x408008, 0x80000030, 0, 0);
+ mmio_list(0x418808, 0x00000000, 8, 0);
+ mmio_list(0x41880c, 0x80000030, 0, 0);
+ mmio_list(0x4064c8, 0x01800600, 0, 0);
+ mmio_list(0x418810, 0x80000000, 12, 2);
+ mmio_list(0x419848, 0x10000000, 12, 2);
+ mmio_list(0x405830, 0x02180648, 0, 0);
+ mmio_list(0x4064c4, 0x0192ffff, 0, 0);
+ for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
+ u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+ u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
+ magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
+ magic[gpc][1] = 0x00000000 | (magic1 << 16);
+ offset += 0x0324 * priv->tpc_nr[gpc];
+ }
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
+ mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
+ offset += 0x07ff * priv->tpc_nr[gpc];
+ }
+ mmio_list(0x17e91c, 0x06060609, 0, 0);
+ mmio_list(0x17e920, 0x00090a05, 0, 0);
+
+ nv_wr32(priv, 0x418c6c, 0x1);
+ nv_wr32(priv, 0x41980c, 0x10);
+ nv_wr32(priv, 0x41be08, 0x4);
+ nv_wr32(priv, 0x4064c0, 0x801a00f0);
+ nv_wr32(priv, 0x405800, 0xf8000bf);
+ nv_wr32(priv, 0x419c00, 0xa);
+
+ for (tpc = 0, id = 0; tpc < 4; tpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tpc < priv->tpc_nr[gpc]) {
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0698), id);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x04e8), id);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0088), id++);
+ }
+
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+ }
+ }
+
+ tmp = 0;
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tpc_nr[i] << (i * 4);
+ nv_wr32(priv, 0x406028, tmp);
+ nv_wr32(priv, 0x405870, tmp);
+
+ nv_wr32(priv, 0x40602c, 0x0);
+ nv_wr32(priv, 0x405874, 0x0);
+ nv_wr32(priv, 0x406030, 0x0);
+ nv_wr32(priv, 0x405878, 0x0);
+ nv_wr32(priv, 0x406034, 0x0);
+ nv_wr32(priv, 0x40587c, 0x0);
+
+	/* calculate first set of magics; a standalone sketch follows this function */
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
+ gpc = -1;
+ for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpcnr[gpc]--;
+
+ data[tpc / 6] |= gpc << ((tpc % 6) * 5);
+ }
+
+ for (; tpc < 32; tpc++)
+ data[tpc / 6] |= 7 << ((tpc % 6) * 5);
+
+ /* and the second... */
+ shift = 0;
+ ntpcv = priv->tpc_total;
+ while (!(ntpcv & (1 << 4))) {
+ ntpcv <<= 1;
+ shift++;
+ }
+
+ data2[0] = ntpcv << 16;
+ data2[0] |= shift << 21;
+ data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+ data2[0] |= priv->tpc_total << 8;
+ data2[0] |= priv->magic_not_rop_nr;
+ for (i = 1; i < 7; i++)
+ data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+	/* and write it all to the various parts of PGRAPH */
+ nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+
+ nv_wr32(priv, 0x41bfd0, data2[0]);
+ nv_wr32(priv, 0x41bfe4, data2[1]);
+ for (i = 0; i < 6; i++)
+ nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
+
+ nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+
+ for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+ a = (i * (priv->tpc_total - 1)) / 32;
+ if (a != b) {
+ b = a;
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ tpc_set |= 1 << ((gpc * 8) + tpc);
+ }
+
+ nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set);
+ nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
+ }
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+
+ nv_wr32(priv, 0x405b00, 0x201);
+ nv_wr32(priv, 0x408850, 0x2);
+ nv_wr32(priv, 0x408958, 0x2);
+ nv_wr32(priv, 0x419f78, 0xa);
+
+ nve0_grctx_generate_icmd(priv);
+ nve0_grctx_generate_a097(priv);
+ nve0_grctx_generate_902d(priv);
+
+ nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
+ nv_wr32(priv, 0x418800, 0x7026860a); //XXX
+ nv_wr32(priv, 0x41be10, 0x00bb8bc7); //XXX
+ return nvc0_grctx_fini(&info);
+}
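
For readers tracing the loops above: a minimal, self-contained sketch of the round-robin TPC-to-GPC distribution that fills data[] (the words later written to 0x418b08, 0x41bf00 and 0x40780c). The GPC count and per-GPC TPC counts below are hypothetical, chosen only for illustration; the real values come from priv->gpc_nr and priv->tpc_nr[].

/* Hypothetical sketch of the round-robin TPC distribution above.
 * Each of the first 32 TPC slots records its owning GPC in a 5-bit
 * field, six fields per 32-bit word; unused slots are marked with 7.
 * GPC_NR and the per-GPC TPC counts are made up for illustration.
 */
#include <stdio.h>
#include <string.h>

#define GPC_NR 2

int main(void)
{
	unsigned int data[6] = { 0 };
	unsigned char tpc_nr[GPC_NR] = { 4, 4 };	/* hypothetical */
	unsigned char tpcnr[GPC_NR];
	int gpc = -1, tpc, tpc_total = 4 + 4, i;

	memcpy(tpcnr, tpc_nr, sizeof(tpc_nr));
	for (tpc = 0; tpc < tpc_total; tpc++) {
		do {	/* advance to the next GPC that still has TPCs left */
			gpc = (gpc + 1) % GPC_NR;
		} while (!tpcnr[gpc]);
		tpcnr[gpc]--;
		data[tpc / 6] |= gpc << ((tpc % 6) * 5);
	}
	for (; tpc < 32; tpc++)	/* pad the remaining slots with 7 */
		data[tpc / 6] |= 7 << ((tpc % 6) * 5);

	for (i = 0; i < 6; i++)
		printf("data[%d] = 0x%08x\n", i, data[i]);
	return 0;
}

The "second set of magics" just below that loop normalises tpc_total so that bit 4 is set (tpc_total = 8 would give ntpcv = 16, shift = 1) before packing the residues (1 << (i + 5)) % ntpcv into data2[].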
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
similarity index 98%
rename from drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
rename to drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
index 15272be33b66431e4a2ee54ed2768bfc981b6ae9..b86cc60dcd56fffe0322ec3dd26d9ad0a1fcd294 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
@@ -24,7 +24,7 @@
*/
/* To build:
- * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h
+ * m4 gpcnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o gpcnvc0.fuc.h
*/
/* TODO
@@ -33,7 +33,7 @@
*/
.section #nvc0_grgpc_data
-include(`nvc0_graph.fuc')
+include(`nvc0.fuc')
gpc_id: .b32 0
gpc_mmio_list_head: .b32 0
gpc_mmio_list_tail: .b32 0
@@ -209,11 +209,11 @@ nvd9_tpc_mmio_tail:
.section #nvc0_grgpc_code
bra #init
define(`include_code')
-include(`nvc0_graph.fuc')
+include(`nvc0.fuc')
// reports an exception to the host
//
-// In: $r15 error code (see nvc0_graph.fuc)
+// In: $r15 error code (see nvc0.fuc)
//
error:
push $r14
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
similarity index 78%
rename from drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
rename to drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
index a988b8ad00acf3b1b138138823e5f59559770f11..96050ddb22ca628c4117eb94d986c2efc89c2e05 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
@@ -1,11 +1,19 @@
uint32_t nvc0_grgpc_data[] = {
+/* 0x0000: gpc_id */
0x00000000,
+/* 0x0004: gpc_mmio_list_head */
0x00000000,
+/* 0x0008: gpc_mmio_list_tail */
0x00000000,
+/* 0x000c: tpc_count */
0x00000000,
+/* 0x0010: tpc_mask */
0x00000000,
+/* 0x0014: tpc_mmio_list_head */
0x00000000,
+/* 0x0018: tpc_mmio_list_tail */
0x00000000,
+/* 0x001c: cmd_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -24,6 +32,7 @@ uint32_t nvc0_grgpc_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0064: chipsets */
0x000000c0,
0x012800c8,
0x01e40194,
@@ -49,6 +58,7 @@ uint32_t nvc0_grgpc_data[] = {
0x0194012c,
0x025401f8,
0x00000000,
+/* 0x00c8: nvc0_gpc_mmio_head */
0x00000380,
0x14000400,
0x20000450,
@@ -73,7 +83,10 @@ uint32_t nvc0_grgpc_data[] = {
0x00000c8c,
0x08001000,
0x00001014,
+/* 0x0128: nvc0_gpc_mmio_tail */
0x00000c6c,
+/* 0x012c: nvc1_gpc_mmio_tail */
+/* 0x012c: nvd9_gpc_mmio_head */
0x00000380,
0x04000400,
0x0800040c,
@@ -100,6 +113,8 @@ uint32_t nvc0_grgpc_data[] = {
0x00000c8c,
0x08001000,
0x00001014,
+/* 0x0194: nvd9_gpc_mmio_tail */
+/* 0x0194: nvc0_tpc_mmio_head */
0x00000018,
0x0000003c,
0x00000048,
@@ -120,11 +135,16 @@ uint32_t nvc0_grgpc_data[] = {
0x4c000644,
0x00000698,
0x04000750,
+/* 0x01e4: nvc0_tpc_mmio_tail */
0x00000758,
0x000002c4,
0x000006e0,
+/* 0x01f0: nvcf_tpc_mmio_tail */
0x000004bc,
+/* 0x01f4: nvc3_tpc_mmio_tail */
0x00000544,
+/* 0x01f8: nvc1_tpc_mmio_tail */
+/* 0x01f8: nvd9_tpc_mmio_head */
0x00000018,
0x0000003c,
0x00000048,
@@ -152,12 +172,14 @@ uint32_t nvc0_grgpc_data[] = {
uint32_t nvc0_grgpc_code[] = {
0x03060ef5,
+/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
0x00f802ec,
+/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
0x0880b600,
@@ -165,6 +187,7 @@ uint32_t nvc0_grgpc_code[] = {
0x90b6018f,
0x0f94f001,
0xf801d980,
+/* 0x0039: queue_get */
0x0131f400,
0x9800d898,
0x89b801d9,
@@ -176,37 +199,46 @@ uint32_t nvc0_grgpc_code[] = {
0x80b6019f,
0x0f84f001,
0xf400d880,
+/* 0x0066: queue_get_done */
0x00f80132,
+/* 0x0068: nv_rd32 */
0x0728b7f1,
0xb906b4b6,
0xc9f002ec,
0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
0xc800bccf,
0x1bf41fcc,
0x06a7f0fa,
0x010321f5,
0xf840bfcf,
+/* 0x008d: nv_wr32 */
0x28b7f100,
0x06b4b607,
0xb980bfd0,
0xc9f002ec,
0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
0xcf00bcd0,
0xccc800bc,
0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
0x87f100f8,
0x84b60430,
0x1ff9f006,
0xf8008fd0,
+/* 0x00bd: watchdog_clear */
0x3087f100,
0x0684b604,
0xf80080d0,
+/* 0x00c9: wait_donez */
0x3c87f100,
0x0684b608,
0x99f094bd,
0x0089d000,
0x081887f1,
0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
0x87f1008a,
0x84b60400,
0x0088cf06,
@@ -215,6 +247,7 @@ uint32_t nvc0_grgpc_code[] = {
0x84b6085c,
0xf094bd06,
0x89d00099,
+/* 0x0103: wait_doneo */
0xf100f800,
0xb6083c87,
0x94bd0684,
@@ -222,6 +255,7 @@ uint32_t nvc0_grgpc_code[] = {
0x87f10089,
0x84b60818,
0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
0x040087f1,
0xcf0684b6,
0x8aff0088,
@@ -230,6 +264,8 @@ uint32_t nvc0_grgpc_code[] = {
0xbd0684b6,
0x0099f094,
0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
0x9894bd00,
0x85b600e8,
0x0180b61a,
@@ -238,6 +274,7 @@ uint32_t nvc0_grgpc_code[] = {
0x04efb804,
0xb9eb1bf4,
0x00f8029f,
+/* 0x015c: mmctx_xfer */
0x083c87f1,
0xbd0684b6,
0x0199f094,
@@ -247,9 +284,11 @@ uint32_t nvc0_grgpc_code[] = {
0xf405bbfd,
0x8bd0090b,
0x0099f000,
+/* 0x0180: mmctx_base_disabled */
0xf405eefd,
0x8ed00c0b,
0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
0xb70199f0,
0xc8010080,
0xb4b600ab,
@@ -257,6 +296,8 @@ uint32_t nvc0_grgpc_code[] = {
0xb601aec8,
0xbefd11e4,
0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
0xf0008ecf,
0x0bf41fe4,
0x00ce98fa,
@@ -265,34 +306,42 @@ uint32_t nvc0_grgpc_code[] = {
0x04cdb804,
0xc8e81bf4,
0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
0x008bcf18,
0xb01fb4f0,
0x1bf410b4,
0x02a7f0f7,
0xf4c921f4,
+/* 0x01de: mmctx_stop */
0xabc81b0e,
0x10b4b600,
0xf00cb9f0,
0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
0x008bcf00,
0xf412bbc8,
+/* 0x01f6: mmctx_done */
0x87f1fa1b,
0x84b6085c,
0xf094bd06,
0x89d00199,
+/* 0x0207: strand_wait */
0xf900f800,
0x02a7f0a0,
0xfcc921f4,
+/* 0x0213: strand_pre */
0xf100f8a0,
0xf04afc87,
0x97f00283,
0x0089d00c,
0x020721f5,
+/* 0x0226: strand_post */
0x87f100f8,
0x83f04afc,
0x0d97f002,
0xf50089d0,
0xf8020721,
+/* 0x0239: strand_set */
0xfca7f100,
0x02a3f04f,
0x0500aba2,
@@ -303,6 +352,7 @@ uint32_t nvc0_grgpc_code[] = {
0xf000aed0,
0xbcd00ac7,
0x0721f500,
+/* 0x0263: strand_ctx_init */
0xf100f802,
0xb6083c87,
0x94bd0684,
@@ -325,6 +375,7 @@ uint32_t nvc0_grgpc_code[] = {
0x0684b608,
0xb70089cf,
0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -338,12 +389,14 @@ uint32_t nvc0_grgpc_code[] = {
0x94bd0684,
0xd00399f0,
0x00f80089,
+/* 0x02ec: error */
0xe7f1e0f9,
0xe3f09814,
0x8d21f440,
0x041ce0b7,
0xf401f7f0,
0xe0fc8d21,
+/* 0x0306: init */
0x04bd00f8,
0xf10004fe,
0xf0120017,
@@ -366,11 +419,13 @@ uint32_t nvc0_grgpc_code[] = {
0x27f10002,
0x24b60800,
0x0022cf06,
+/* 0x035f: init_find_chipset */
0xb65817f0,
0x13980c10,
0x0432b800,
0xb00b0bf4,
0x1bf40034,
+/* 0x0373: init_context */
0xf100f8f1,
0xb6080027,
0x22cf0624,
@@ -407,6 +462,7 @@ uint32_t nvc0_grgpc_code[] = {
0x0010b740,
0xf024bd08,
0x12d01f29,
+/* 0x0401: main */
0x0031f400,
0xf00028f4,
0x21f41cd7,
@@ -419,9 +475,11 @@ uint32_t nvc0_grgpc_code[] = {
0xfe051efd,
0x21f50018,
0x0ef404c3,
+/* 0x0431: main_not_ctx_xfer */
0x10ef94d3,
0xf501f5f0,
0xf402ec21,
+/* 0x043e: ih */
0x80f9c60e,
0xf90188fe,
0xf990f980,
@@ -436,30 +494,36 @@ uint32_t nvc0_grgpc_code[] = {
0xb0b70421,
0xe7f00400,
0x00bed001,
+/* 0x0474: ih_no_fifo */
0xfc400ad0,
0xfce0fcf0,
0xfcb0fcd0,
0xfc90fca0,
0x0088fe80,
0x32f480fc,
+/* 0x048f: hub_barrier_done */
0xf001f800,
0x0e9801f7,
0x04febb00,
0x9418e7f1,
0xf440e3f0,
0x00f88d21,
+/* 0x04a4: ctx_redswitch */
0x0614e7f1,
0xf006e4b6,
0xefd020f7,
0x08f7f000,
+/* 0x04b4: ctx_redswitch_delay */
0xf401f2b6,
0xf7f1fd1b,
0xefd00a20,
+/* 0x04c3: ctx_xfer */
0xf100f800,
0xb60a0417,
0x1fd00614,
0x0711f400,
0x04a421f5,
+/* 0x04d4: ctx_xfer_not_load */
0x4afc17f1,
0xf00213f0,
0x12d00c27,
@@ -489,11 +553,13 @@ uint32_t nvc0_grgpc_code[] = {
0x5c21f508,
0x0721f501,
0x0601f402,
+/* 0x054b: ctx_xfer_post */
0xf11412f4,
0xf04afc17,
0x27f00213,
0x0012d00d,
0x020721f5,
+/* 0x055c: ctx_xfer_done */
0x048f21f5,
0x000000f8,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
new file mode 100644
index 0000000000000000000000000000000000000000..7b715fda276356849647c50793da6db4ede26937
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
@@ -0,0 +1,451 @@
+/* fuc microcode for nve0 PGRAPH/GPC
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ * m4 gpcnve0.fuc | envyas -a -w -m fuc -V fuc3 -o gpcnve0.fuc.h
+ */
+
+/* TODO
+ * - bracket certain functions with scratch writes, useful for debugging
+ * - watchdog timer around ctx operations
+ */
+
+.section #nve0_grgpc_data
+include(`nve0.fuc')
+gpc_id: .b32 0
+gpc_mmio_list_head: .b32 0
+gpc_mmio_list_tail: .b32 0
+
+tpc_count: .b32 0
+tpc_mask: .b32 0
+tpc_mmio_list_head: .b32 0
+tpc_mmio_list_tail: .b32 0
+
+cmd_queue: queue_init
+
+// chipset descriptions
+chipsets:
+.b8 0xe4 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
+.b8 0xe7 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
+.b8 0 0 0 0
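+// Note: each entry above is 12 bytes: a 32-bit chipset id followed by
+// four 16-bit mmio-list pointers (gpc head/tail, then tpc head/tail);
+// a zero id terminates the table, and #init_find_chipset below walks
+// it in 12-byte strides.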
+
+// GPC mmio lists
+nve4_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 2)
+mmctx_data(0x00040c, 3)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c40, 1)
+mmctx_data(0x000c6c, 1)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+mmctx_data(0x003024, 1)
+mmctx_data(0x0030c0, 2)
+mmctx_data(0x0030e4, 1)
+mmctx_data(0x003100, 6)
+mmctx_data(0x0031d0, 1)
+mmctx_data(0x0031e0, 2)
+nve4_gpc_mmio_tail:
+
+// TPC mmio lists
+nve4_tpc_mmio_head:
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x000230, 1)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 3)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 22)
+mmctx_data(0x0006ac, 2)
+mmctx_data(0x0006c8, 1)
+mmctx_data(0x000730, 8)
+mmctx_data(0x000758, 1)
+mmctx_data(0x000778, 1)
+nve4_tpc_mmio_tail:
+
+.section #nve0_grgpc_code
+bra #init
+define(`include_code')
+include(`nve0.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nve0.fuc)
+//
+error:
+ push $r14
+ mov $r14 -0x67ec // 0x9814
+ sethi $r14 0x400000
+ call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
+ add b32 $r14 0x41c
+ mov $r15 1
+ call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
+ pop $r14
+ ret
+
+// GPC fuc initialisation, executed by triggering ucode start, will
+// fall through to the main loop after completion.
+//
+// Input:
+// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+// CC_SCRATCH[1]: context base
+//
+// Output:
+// CC_SCRATCH[0]:
+// 31:31: set to signal completion
+// CC_SCRATCH[1]:
+// 31:0: GPC context size
+//
+init:
+ clear b32 $r0
+ mov $sp $r0
+
+ // enable fifo access
+ mov $r1 0x1200
+ mov $r2 2
+ iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+
+ // setup i0 handler, and route all interrupts to it
+ mov $r1 #ih
+ mov $iv0 $r1
+ mov $r1 0x400
+ iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
+
+ // enable fifo interrupt
+ mov $r2 4
+ iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+
+ // enable interrupts
+ bset $flags ie0
+
+ // figure out which GPC we are, and how many TPCs we have
+ mov $r1 0x608
+ shl b32 $r1 6
+ iord $r2 I[$r1 + 0x000] // UNITS
+ mov $r3 1
+ and $r2 0x1f
+ shl b32 $r3 $r2
+ sub b32 $r3 1
+ st b32 D[$r0 + #tpc_count] $r2
+ st b32 D[$r0 + #tpc_mask] $r3
+ add b32 $r1 0x400
+ iord $r2 I[$r1 + 0x000] // MYINDEX
+ st b32 D[$r0 + #gpc_id] $r2
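+	// (decode: UNITS low 5 bits = TPC count, tpc_mask = (1 << count) - 1,
+	//  MYINDEX = this GPC's index)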
+
+ // find context data for this chipset
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
+ mov $r1 #chipsets - 12
+ init_find_chipset:
+ add b32 $r1 12
+ ld b32 $r3 D[$r1 + 0x00]
+ cmpu b32 $r3 $r2
+ bra e #init_context
+ cmpu b32 $r3 0
+ bra ne #init_find_chipset
+ // unknown chipset
+ ret
+
+ // initialise context base, and size tracking
+ init_context:
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base
+ clear b32 $r3 // track GPC context size here
+
+	// set mmctx base addresses now so we don't have to do it later;
+	// they don't currently ever change
+ mov $r4 0x700
+ shl b32 $r4 6
+ shr b32 $r5 $r2 8
+ iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
+ iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
+
+ // calculate GPC mmio context size, store the chipset-specific
+ // mmio list pointers somewhere we can get at them later without
+ // re-parsing the chipset list
+ clear b32 $r14
+ clear b32 $r15
+ ld b16 $r14 D[$r1 + 4]
+ ld b16 $r15 D[$r1 + 6]
+ st b16 D[$r0 + #gpc_mmio_list_head] $r14
+ st b16 D[$r0 + #gpc_mmio_list_tail] $r15
+ call #mmctx_size
+ add b32 $r2 $r15
+ add b32 $r3 $r15
+
+ // calculate per-TPC mmio context size, store the list pointers
+ ld b16 $r14 D[$r1 + 8]
+ ld b16 $r15 D[$r1 + 10]
+ st b16 D[$r0 + #tpc_mmio_list_head] $r14
+ st b16 D[$r0 + #tpc_mmio_list_tail] $r15
+ call #mmctx_size
+ ld b32 $r14 D[$r0 + #tpc_count]
+ mulu $r14 $r15
+ add b32 $r2 $r14
+ add b32 $r3 $r14
+
+ // round up base/size to 256 byte boundary (for strand SWBASE)
+ add b32 $r4 0x1300
+ shr b32 $r3 2
+ iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
+ shr b32 $r2 8
+ shr b32 $r3 6
+ add b32 $r2 1
+ add b32 $r3 1
+ shl b32 $r2 8
+ shl b32 $r3 8
+
+ // calculate size of strand context data
+ mov b32 $r15 $r2
+ call #strand_ctx_init
+ add b32 $r3 $r15
+
+ // save context size, and tell HUB we're done
+ mov $r1 0x800
+ shl b32 $r1 6
+ iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size
+ add b32 $r1 0x800
+ clear b32 $r2
+ bset $r2 31
+ iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop, very simple: sleeps until woken up by the interrupt
+// handler, then pulls a command from the queue and executes its handler.
+//
+main:
+ bset $flags $p0
+ sleep $p0
+ mov $r13 #cmd_queue
+ call #queue_get
+ bra $p1 #main
+
+ // 0x0000-0x0003 are all context transfers
+ cmpu b32 $r14 0x04
+ bra nc #main_not_ctx_xfer
+ // fetch $flags and mask off $p1/$p2
+ mov $r1 $flags
+ mov $r2 0x0006
+ not b32 $r2
+ and $r1 $r2
+ // set $p1/$p2 according to transfer type
+ shl b32 $r14 1
+ or $r1 $r14
+ mov $flags $r1
+ // transfer context data
+ call #ctx_xfer
+ bra #main
+
+ main_not_ctx_xfer:
+ shl b32 $r15 $r14 16
+ or $r15 E_BAD_COMMAND
+ call #error
+ bra #main
+
+// interrupt handler
+ih:
+ push $r8
+ mov $r8 $flags
+ push $r8
+ push $r9
+ push $r10
+ push $r11
+ push $r13
+ push $r14
+ push $r15
+
+ // incoming fifo command?
+ iord $r10 I[$r0 + 0x200] // INTR
+ and $r11 $r10 0x00000004
+ bra e #ih_no_fifo
+ // queue incoming fifo command for later processing
+ mov $r11 0x1900
+ mov $r13 #cmd_queue
+ iord $r14 I[$r11 + 0x100] // FIFO_CMD
+ iord $r15 I[$r11 + 0x000] // FIFO_DATA
+ call #queue_put
+ add b32 $r11 0x400
+ mov $r14 1
+ iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+
+ // ack, and wake up main()
+ ih_no_fifo:
+ iowr I[$r0 + 0x100] $r10 // INTR_ACK
+
+ pop $r15
+ pop $r14
+ pop $r13
+ pop $r11
+ pop $r10
+ pop $r9
+ pop $r8
+ mov $flags $r8
+ pop $r8
+ bclr $flags $p0
+ iret
+
+// Set this GPC's bit in HUB_BAR, used to signal completion of various
+// activities to the HUB fuc
+//
+hub_barrier_done:
+ mov $r15 1
+ ld b32 $r14 D[$r0 + #gpc_id]
+ shl b32 $r15 $r14
+ mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
+ sethi $r14 0x400000
+ call #nv_wr32
+ ret
+
+// Disables various things, waits a bit, and re-enables them.
+//
+// Not sure how exactly this helps; perhaps "ENABLE" is not such a
+// good description for the bits we turn off? Anyway, without this,
+// funny things happen.
+//
+ctx_redswitch:
+ mov $r14 0x614
+ shl b32 $r14 6
+ mov $r15 0x020
+ iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
+ mov $r15 8
+ ctx_redswitch_delay:
+ sub b32 $r15 1
+ bra ne #ctx_redswitch_delay
+ mov $r15 0xa20
+ iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
+ ret
+
+// Transfer GPC context data between GPU and storage area
+//
+// In: $r15 context base address
+// $p1 clear on save, set on load
+// $p2 set if opposite direction done/will be done, so:
+// on save it means: "a load will follow this save"
+//     on load it means: "a save preceded this load"
+//
+ctx_xfer:
+ // set context base address
+ mov $r1 0xa04
+ shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r15 // MEM_BASE
+ bra not $p1 #ctx_xfer_not_load
+ call #ctx_redswitch
+ ctx_xfer_not_load:
+
+ // strands
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xc
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
+ call #strand_wait
+ mov $r2 0x47fc
+ sethi $r2 0x20000
+ iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
+ xbit $r2 $flags $p1
+ add b32 $r2 3
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+ // mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 2 // first
+ mov $r11 0x0000
+ sethi $r11 0x500000
+ ld b32 $r12 D[$r0 + #gpc_id]
+ shl b32 $r12 15
+ add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
+ ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
+ mov $r14 0 // not multi
+ call #mmctx_xfer
+
+ // per-TPC mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 4 // last
+ mov $r11 0x4000
+ sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
+ ld b32 $r12 D[$r0 + #gpc_id]
+ shl b32 $r12 15
+ add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
+ ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
+ ld b32 $r15 D[$r0 + #tpc_mask]
+ mov $r14 0x800 // stride = 0x800
+ call #mmctx_xfer
+
+ // wait for strands to finish
+ call #strand_wait
+
+ // if load, or a save without a load following, do some
+ // unknown stuff that's done after finishing a block of
+ // strand commands
+ bra $p1 #ctx_xfer_post
+ bra not $p2 #ctx_xfer_done
+ ctx_xfer_post:
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xd
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
+ call #strand_wait
+
+ // mark completion in HUB's barrier
+ ctx_xfer_done:
+ call #hub_barrier_done
+ ret
+
+.align 256
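
A hedged reading aid for the generated header that follows: comparing the .fuc source above with its nve0_grgpc_data[] image suggests that each mmctx_data(addr, count) macro assembles to a single word, ((count - 1) << 26) | addr (for example, mmctx_data(0x000450, 9) appears as 0x20000450). The decoder below is a sketch based on that inference, not an API from the driver.

/* Decode mmctx_data() words from the generated mmio lists.
 * Encoding inferred from this patch: ((count - 1) << 26) | addr.
 */
#include <stdio.h>
#include <stdint.h>

static void mmctx_decode(uint32_t word)
{
	uint32_t addr  = word & 0x03ffffff;	/* low 26 bits: register address */
	uint32_t count = (word >> 26) + 1;	/* high bits: count - 1 */

	printf("mmctx_data(0x%06x, %u)\n", addr, count);
}

int main(void)
{
	mmctx_decode(0x20000450);	/* mmctx_data(0x000450, 9) above */
	mmctx_decode(0x54000644);	/* mmctx_data(0x000644, 22) above */
	return 0;
}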
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
new file mode 100644
index 0000000000000000000000000000000000000000..26c2165bad0fc75b6ab5667be24ce2bdb2bba9c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -0,0 +1,530 @@
+uint32_t nve0_grgpc_data[] = {
+/* 0x0000: gpc_id */
+ 0x00000000,
+/* 0x0004: gpc_mmio_list_head */
+ 0x00000000,
+/* 0x0008: gpc_mmio_list_tail */
+ 0x00000000,
+/* 0x000c: tpc_count */
+ 0x00000000,
+/* 0x0010: tpc_mask */
+ 0x00000000,
+/* 0x0014: tpc_mmio_list_head */
+ 0x00000000,
+/* 0x0018: tpc_mmio_list_tail */
+ 0x00000000,
+/* 0x001c: cmd_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0064: chipsets */
+ 0x000000e4,
+ 0x01040080,
+ 0x014c0104,
+ 0x000000e7,
+ 0x01040080,
+ 0x014c0104,
+ 0x00000000,
+/* 0x0080: nve4_gpc_mmio_head */
+ 0x00000380,
+ 0x04000400,
+ 0x0800040c,
+ 0x20000450,
+ 0x00000600,
+ 0x00000684,
+ 0x10000700,
+ 0x00000800,
+ 0x08000808,
+ 0x00000828,
+ 0x00000830,
+ 0x000008d8,
+ 0x000008e0,
+ 0x140008e8,
+ 0x0000091c,
+ 0x08000924,
+ 0x00000b00,
+ 0x14000b08,
+ 0x00000bb8,
+ 0x00000c08,
+ 0x1c000c10,
+ 0x00000c40,
+ 0x00000c6c,
+ 0x00000c80,
+ 0x00000c8c,
+ 0x08001000,
+ 0x00001014,
+ 0x00003024,
+ 0x040030c0,
+ 0x000030e4,
+ 0x14003100,
+ 0x000031d0,
+ 0x040031e0,
+/* 0x0104: nve4_gpc_mmio_tail */
+/* 0x0104: nve4_tpc_mmio_head */
+ 0x00000048,
+ 0x00000064,
+ 0x00000088,
+ 0x14000200,
+ 0x0400021c,
+ 0x00000230,
+ 0x000002c4,
+ 0x08000400,
+ 0x08000420,
+ 0x000004e8,
+ 0x000004f4,
+ 0x0c000604,
+ 0x54000644,
+ 0x040006ac,
+ 0x000006c8,
+ 0x1c000730,
+ 0x00000758,
+ 0x00000778,
+};
+
+uint32_t nve0_grgpc_code[] = {
+ 0x03060ef5,
+/* 0x0004: queue_put */
+ 0x9800d898,
+ 0x86f001d9,
+ 0x0489b808,
+ 0xf00c1bf4,
+ 0x21f502f7,
+ 0x00f802ec,
+/* 0x001c: queue_put_next */
+ 0xb60798c4,
+ 0x8dbb0384,
+ 0x0880b600,
+ 0x80008e80,
+ 0x90b6018f,
+ 0x0f94f001,
+ 0xf801d980,
+/* 0x0039: queue_get */
+ 0x0131f400,
+ 0x9800d898,
+ 0x89b801d9,
+ 0x210bf404,
+ 0xb60789c4,
+ 0x9dbb0394,
+ 0x0890b600,
+ 0x98009e98,
+ 0x80b6019f,
+ 0x0f84f001,
+ 0xf400d880,
+/* 0x0066: queue_get_done */
+ 0x00f80132,
+/* 0x0068: nv_rd32 */
+ 0x0728b7f1,
+ 0xb906b4b6,
+ 0xc9f002ec,
+ 0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
+ 0xc800bccf,
+ 0x1bf41fcc,
+ 0x06a7f0fa,
+ 0x010321f5,
+ 0xf840bfcf,
+/* 0x008d: nv_wr32 */
+ 0x28b7f100,
+ 0x06b4b607,
+ 0xb980bfd0,
+ 0xc9f002ec,
+ 0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
+ 0xcf00bcd0,
+ 0xccc800bc,
+ 0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
+ 0x87f100f8,
+ 0x84b60430,
+ 0x1ff9f006,
+ 0xf8008fd0,
+/* 0x00bd: watchdog_clear */
+ 0x3087f100,
+ 0x0684b604,
+ 0xf80080d0,
+/* 0x00c9: wait_donez */
+ 0x3c87f100,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d000,
+ 0x081887f1,
+ 0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
+ 0x87f1008a,
+ 0x84b60400,
+ 0x0088cf06,
+ 0xf4888aff,
+ 0x87f1f31b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00099,
+/* 0x0103: wait_doneo */
+ 0xf100f800,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00099f0,
+ 0x87f10089,
+ 0x84b60818,
+ 0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
+ 0x040087f1,
+ 0xcf0684b6,
+ 0x8aff0088,
+ 0xf30bf488,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0099f094,
+ 0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
+ 0x9894bd00,
+ 0x85b600e8,
+ 0x0180b61a,
+ 0xbb0284b6,
+ 0xe0b60098,
+ 0x04efb804,
+ 0xb9eb1bf4,
+ 0x00f8029f,
+/* 0x015c: mmctx_xfer */
+ 0x083c87f1,
+ 0xbd0684b6,
+ 0x0199f094,
+ 0xf10089d0,
+ 0xb6071087,
+ 0x94bd0684,
+ 0xf405bbfd,
+ 0x8bd0090b,
+ 0x0099f000,
+/* 0x0180: mmctx_base_disabled */
+ 0xf405eefd,
+ 0x8ed00c0b,
+ 0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
+ 0xb70199f0,
+ 0xc8010080,
+ 0xb4b600ab,
+ 0x0cb9f010,
+ 0xb601aec8,
+ 0xbefd11e4,
+ 0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
+ 0xf0008ecf,
+ 0x0bf41fe4,
+ 0x00ce98fa,
+ 0xd005e9fd,
+ 0xc0b6c08e,
+ 0x04cdb804,
+ 0xc8e81bf4,
+ 0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
+ 0x008bcf18,
+ 0xb01fb4f0,
+ 0x1bf410b4,
+ 0x02a7f0f7,
+ 0xf4c921f4,
+/* 0x01de: mmctx_stop */
+ 0xabc81b0e,
+ 0x10b4b600,
+ 0xf00cb9f0,
+ 0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
+ 0x008bcf00,
+ 0xf412bbc8,
+/* 0x01f6: mmctx_done */
+ 0x87f1fa1b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00199,
+/* 0x0207: strand_wait */
+ 0xf900f800,
+ 0x02a7f0a0,
+ 0xfcc921f4,
+/* 0x0213: strand_pre */
+ 0xf100f8a0,
+ 0xf04afc87,
+ 0x97f00283,
+ 0x0089d00c,
+ 0x020721f5,
+/* 0x0226: strand_post */
+ 0x87f100f8,
+ 0x83f04afc,
+ 0x0d97f002,
+ 0xf50089d0,
+ 0xf8020721,
+/* 0x0239: strand_set */
+ 0xfca7f100,
+ 0x02a3f04f,
+ 0x0500aba2,
+ 0xd00fc7f0,
+ 0xc7f000ac,
+ 0x00bcd00b,
+ 0x020721f5,
+ 0xf000aed0,
+ 0xbcd00ac7,
+ 0x0721f500,
+/* 0x0263: strand_ctx_init */
+ 0xf100f802,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x21f50089,
+ 0xe7f00213,
+ 0x3921f503,
+ 0xfca7f102,
+ 0x02a3f046,
+ 0x0400aba0,
+ 0xf040a0d0,
+ 0xbcd001c7,
+ 0x0721f500,
+ 0x010c9202,
+ 0xf000acd0,
+ 0xbcd002c7,
+ 0x0721f500,
+ 0x2621f502,
+ 0x8087f102,
+ 0x0684b608,
+ 0xb70089cf,
+ 0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
+ 0x8ed008fe,
+ 0x408ed000,
+ 0xb6808acf,
+ 0xa0b606a5,
+ 0x00eabb01,
+ 0xb60480b6,
+ 0x1bf40192,
+ 0x08e4b6e8,
+ 0xf1f2efbc,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x00f80089,
+/* 0x02ec: error */
+ 0xe7f1e0f9,
+ 0xe3f09814,
+ 0x8d21f440,
+ 0x041ce0b7,
+ 0xf401f7f0,
+ 0xe0fc8d21,
+/* 0x0306: init */
+ 0x04bd00f8,
+ 0xf10004fe,
+ 0xf0120017,
+ 0x12d00227,
+ 0x3e17f100,
+ 0x0010fe04,
+ 0x040017f1,
+ 0xf0c010d0,
+ 0x12d00427,
+ 0x1031f400,
+ 0x060817f1,
+ 0xcf0614b6,
+ 0x37f00012,
+ 0x1f24f001,
+ 0xb60432bb,
+ 0x02800132,
+ 0x04038003,
+ 0x040010b7,
+ 0x800012cf,
+ 0x27f10002,
+ 0x24b60800,
+ 0x0022cf06,
+/* 0x035f: init_find_chipset */
+ 0xb65817f0,
+ 0x13980c10,
+ 0x0432b800,
+ 0xb00b0bf4,
+ 0x1bf40034,
+/* 0x0373: init_context */
+ 0xf100f8f1,
+ 0xb6080027,
+ 0x22cf0624,
+ 0xf134bd40,
+ 0xb6070047,
+ 0x25950644,
+ 0x0045d008,
+ 0xbd4045d0,
+ 0x58f4bde4,
+ 0x1f58021e,
+ 0x020e4003,
+ 0xf5040f40,
+ 0xbb013d21,
+ 0x3fbb002f,
+ 0x041e5800,
+ 0x40051f58,
+ 0x0f400a0e,
+ 0x3d21f50c,
+ 0x030e9801,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x0040b700,
+ 0x0235b613,
+ 0xb60043d0,
+ 0x35b60825,
+ 0x0120b606,
+ 0xb60130b6,
+ 0x34b60824,
+ 0x022fb908,
+ 0x026321f5,
+ 0xf1003fbb,
+ 0xb6080017,
+ 0x13d00614,
+ 0x0010b740,
+ 0xf024bd08,
+ 0x12d01f29,
+/* 0x0401: main */
+ 0x0031f400,
+ 0xf00028f4,
+ 0x21f41cd7,
+ 0xf401f439,
+ 0xf404e4b0,
+ 0x81fe1e18,
+ 0x0627f001,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x21f50018,
+ 0x0ef404c3,
+/* 0x0431: main_not_ctx_xfer */
+ 0x10ef94d3,
+ 0xf501f5f0,
+ 0xf402ec21,
+/* 0x043e: ih */
+ 0x80f9c60e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0x800acff0,
+ 0xf404abc4,
+ 0xb7f11d0b,
+ 0xd7f01900,
+ 0x40becf1c,
+ 0xf400bfcf,
+ 0xb0b70421,
+ 0xe7f00400,
+ 0x00bed001,
+/* 0x0474: ih_no_fifo */
+ 0xfc400ad0,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x32f480fc,
+/* 0x048f: hub_barrier_done */
+ 0xf001f800,
+ 0x0e9801f7,
+ 0x04febb00,
+ 0x9418e7f1,
+ 0xf440e3f0,
+ 0x00f88d21,
+/* 0x04a4: ctx_redswitch */
+ 0x0614e7f1,
+ 0xf006e4b6,
+ 0xefd020f7,
+ 0x08f7f000,
+/* 0x04b4: ctx_redswitch_delay */
+ 0xf401f2b6,
+ 0xf7f1fd1b,
+ 0xefd00a20,
+/* 0x04c3: ctx_xfer */
+ 0xf100f800,
+ 0xb60a0417,
+ 0x1fd00614,
+ 0x0711f400,
+ 0x04a421f5,
+/* 0x04d4: ctx_xfer_not_load */
+ 0x4afc17f1,
+ 0xf00213f0,
+ 0x12d00c27,
+ 0x0721f500,
+ 0xfc27f102,
+ 0x0223f047,
+ 0xf00020d0,
+ 0x20b6012c,
+ 0x0012d003,
+ 0xf001acf0,
+ 0xb7f002a5,
+ 0x50b3f000,
+ 0xb6000c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0xf0020d98,
+ 0x21f500e7,
+ 0xacf0015c,
+ 0x04a5f001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6000c,
+ 0x00bcbb0f,
+ 0x98050c98,
+ 0x0f98060d,
+ 0x00e7f104,
+ 0x5c21f508,
+ 0x0721f501,
+ 0x0601f402,
+/* 0x054b: ctx_xfer_post */
+ 0xf11412f4,
+ 0xf04afc17,
+ 0x27f00213,
+ 0x0012d00d,
+ 0x020721f5,
+/* 0x055c: ctx_xfer_done */
+ 0x048f21f5,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
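
To close the loop on the chipsets table at offset 0x0064 in nve0_grgpc_data[] above: a small sketch that unpacks one 12-byte entry back into its .fuc form. The layout (id word, then head/tail pointer pairs with the tail in the high half-word) is inferred from the offset comments in this header; the struct and function names are invented for the example.

/* Unpack one 12-byte chipsets entry from nve0_grgpc_data[] above.
 * Layout inferred from this patch: word0 = chipset id, word1 =
 * (gpc_mmio_tail << 16) | gpc_mmio_head, word2 =
 * (tpc_mmio_tail << 16) | tpc_mmio_head; a zero id ends the table.
 */
#include <stdio.h>
#include <stdint.h>

struct chipset_entry {
	uint32_t id;
	uint16_t gpc_head, gpc_tail;
	uint16_t tpc_head, tpc_tail;
};

static int parse_entry(const uint32_t *w, struct chipset_entry *e)
{
	e->id = w[0];
	if (!e->id)
		return 0;		/* terminator */
	e->gpc_head = w[1] & 0xffff;
	e->gpc_tail = w[1] >> 16;
	e->tpc_head = w[2] & 0xffff;
	e->tpc_tail = w[2] >> 16;
	return 1;
}

int main(void)
{
	/* the nve4 entry from offset 0x0064 above */
	const uint32_t words[] = { 0x000000e4, 0x01040080, 0x014c0104 };
	struct chipset_entry e;

	if (parse_entry(words, &e))
		printf("id 0x%02x: gpc list 0x%04x..0x%04x, tpc list 0x%04x..0x%04x\n",
		       e.id, e.gpc_head, e.gpc_tail, e.tpc_head, e.tpc_tail);
	return 0;
}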
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
similarity index 99%
rename from drivers/gpu/drm/nouveau/nvc0_grhub.fuc
rename to drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index 98acddb2c5bb9103da4d5294447694f51f90eb01..acfc457654bdcffb0bc15ac22e36c1c4c4fa7e9c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -24,11 +24,11 @@
*/
/* To build:
- * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
+ * m4 hubnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o hubnvc0.fuc.h
*/
.section #nvc0_grhub_data
-include(`nvc0_graph.fuc')
+include(`nvc0.fuc')
gpc_count: .b32 0
rop_count: .b32 0
cmd_queue: queue_init
@@ -161,11 +161,11 @@ xfer_data: .b32 0
.section #nvc0_grhub_code
bra #init
define(`include_code')
-include(`nvc0_graph.fuc')
+include(`nvc0.fuc')
// reports an exception to the host
//
-// In: $r15 error code (see nvc0_graph.fuc)
+// In: $r15 error code (see nvc0.fuc)
//
error:
push $r14
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
similarity index 81%
rename from drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
rename to drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index c5ed307abeb9ca80ec9b366fc0f7da1ae323464c..85a8d556f484d3984ec9350ed7c1f1980c7d315c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -1,6 +1,9 @@
uint32_t nvc0_grhub_data[] = {
+/* 0x0000: gpc_count */
0x00000000,
+/* 0x0004: rop_count */
0x00000000,
+/* 0x0008: cmd_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -19,9 +22,13 @@ uint32_t nvc0_grhub_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0050: hub_mmio_list_head */
0x00000000,
+/* 0x0054: hub_mmio_list_tail */
0x00000000,
+/* 0x0058: ctx_current */
0x00000000,
+/* 0x005c: chipsets */
0x000000c0,
0x013c00a0,
0x000000c1,
@@ -39,6 +46,7 @@ uint32_t nvc0_grhub_data[] = {
0x000000d9,
0x01dc0140,
0x00000000,
+/* 0x00a0: nvc0_hub_mmio_head */
0x0417e91c,
0x04400204,
0x28404004,
@@ -78,7 +86,10 @@ uint32_t nvc0_grhub_data[] = {
0x08408800,
0x0c408900,
0x00408980,
+/* 0x013c: nvc0_hub_mmio_tail */
0x044064c0,
+/* 0x0140: nvc1_hub_mmio_tail */
+/* 0x0140: nvd9_hub_mmio_head */
0x0417e91c,
0x04400204,
0x24404004,
@@ -118,6 +129,7 @@ uint32_t nvc0_grhub_data[] = {
0x08408800,
0x0c408900,
0x00408980,
+/* 0x01dc: nvd9_hub_mmio_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -127,7 +139,10 @@ uint32_t nvc0_grhub_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0200: chan_data */
+/* 0x0200: chan_mmio_count */
0x00000000,
+/* 0x0204: chan_mmio_address */
0x00000000,
0x00000000,
0x00000000,
@@ -191,17 +206,20 @@ uint32_t nvc0_grhub_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0300: xfer_data */
0x00000000,
};
uint32_t nvc0_grhub_code[] = {
0x03090ef5,
+/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
0x00f802ec,
+/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
0x0880b600,
@@ -209,6 +227,7 @@ uint32_t nvc0_grhub_code[] = {
0x90b6018f,
0x0f94f001,
0xf801d980,
+/* 0x0039: queue_get */
0x0131f400,
0x9800d898,
0x89b801d9,
@@ -220,37 +239,46 @@ uint32_t nvc0_grhub_code[] = {
0x80b6019f,
0x0f84f001,
0xf400d880,
+/* 0x0066: queue_get_done */
0x00f80132,
+/* 0x0068: nv_rd32 */
0x0728b7f1,
0xb906b4b6,
0xc9f002ec,
0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
0xc800bccf,
0x1bf41fcc,
0x06a7f0fa,
0x010321f5,
0xf840bfcf,
+/* 0x008d: nv_wr32 */
0x28b7f100,
0x06b4b607,
0xb980bfd0,
0xc9f002ec,
0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
0xcf00bcd0,
0xccc800bc,
0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
0x87f100f8,
0x84b60430,
0x1ff9f006,
0xf8008fd0,
+/* 0x00bd: watchdog_clear */
0x3087f100,
0x0684b604,
0xf80080d0,
+/* 0x00c9: wait_donez */
0x3c87f100,
0x0684b608,
0x99f094bd,
0x0089d000,
0x081887f1,
0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
0x87f1008a,
0x84b60400,
0x0088cf06,
@@ -259,6 +287,7 @@ uint32_t nvc0_grhub_code[] = {
0x84b6085c,
0xf094bd06,
0x89d00099,
+/* 0x0103: wait_doneo */
0xf100f800,
0xb6083c87,
0x94bd0684,
@@ -266,6 +295,7 @@ uint32_t nvc0_grhub_code[] = {
0x87f10089,
0x84b60818,
0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
0x040087f1,
0xcf0684b6,
0x8aff0088,
@@ -274,6 +304,8 @@ uint32_t nvc0_grhub_code[] = {
0xbd0684b6,
0x0099f094,
0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
0x9894bd00,
0x85b600e8,
0x0180b61a,
@@ -282,6 +314,7 @@ uint32_t nvc0_grhub_code[] = {
0x04efb804,
0xb9eb1bf4,
0x00f8029f,
+/* 0x015c: mmctx_xfer */
0x083c87f1,
0xbd0684b6,
0x0199f094,
@@ -291,9 +324,11 @@ uint32_t nvc0_grhub_code[] = {
0xf405bbfd,
0x8bd0090b,
0x0099f000,
+/* 0x0180: mmctx_base_disabled */
0xf405eefd,
0x8ed00c0b,
0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
0xb70199f0,
0xc8010080,
0xb4b600ab,
@@ -301,6 +336,8 @@ uint32_t nvc0_grhub_code[] = {
0xb601aec8,
0xbefd11e4,
0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
0xf0008ecf,
0x0bf41fe4,
0x00ce98fa,
@@ -309,34 +346,42 @@ uint32_t nvc0_grhub_code[] = {
0x04cdb804,
0xc8e81bf4,
0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
0x008bcf18,
0xb01fb4f0,
0x1bf410b4,
0x02a7f0f7,
0xf4c921f4,
+/* 0x01de: mmctx_stop */
0xabc81b0e,
0x10b4b600,
0xf00cb9f0,
0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
0x008bcf00,
0xf412bbc8,
+/* 0x01f6: mmctx_done */
0x87f1fa1b,
0x84b6085c,
0xf094bd06,
0x89d00199,
+/* 0x0207: strand_wait */
0xf900f800,
0x02a7f0a0,
0xfcc921f4,
+/* 0x0213: strand_pre */
0xf100f8a0,
0xf04afc87,
0x97f00283,
0x0089d00c,
0x020721f5,
+/* 0x0226: strand_post */
0x87f100f8,
0x83f04afc,
0x0d97f002,
0xf50089d0,
0xf8020721,
+/* 0x0239: strand_set */
0xfca7f100,
0x02a3f04f,
0x0500aba2,
@@ -347,6 +392,7 @@ uint32_t nvc0_grhub_code[] = {
0xf000aed0,
0xbcd00ac7,
0x0721f500,
+/* 0x0263: strand_ctx_init */
0xf100f802,
0xb6083c87,
0x94bd0684,
@@ -369,6 +415,7 @@ uint32_t nvc0_grhub_code[] = {
0x0684b608,
0xb70089cf,
0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -382,6 +429,7 @@ uint32_t nvc0_grhub_code[] = {
0x94bd0684,
0xd00399f0,
0x00f80089,
+/* 0x02ec: error */
0xe7f1e0f9,
0xe4b60814,
0x00efd006,
@@ -389,6 +437,7 @@ uint32_t nvc0_grhub_code[] = {
0xf006e4b6,
0xefd001f7,
0xf8e0fc00,
+/* 0x0309: init */
0xfe04bd00,
0x07fe0004,
0x0017f100,
@@ -429,11 +478,13 @@ uint32_t nvc0_grhub_code[] = {
0x080027f1,
0xcf0624b6,
0xf7f00022,
+/* 0x03a9: init_find_chipset */
0x08f0b654,
0xb800f398,
0x0bf40432,
0x0034b00b,
0xf8f11bf4,
+/* 0x03bd: init_context */
0x0017f100,
0x02fe5801,
0xf003ff58,
@@ -454,6 +505,7 @@ uint32_t nvc0_grhub_code[] = {
0x001fbb02,
0xf1000398,
0xf0200047,
+/* 0x040e: init_gpc */
0x4ea05043,
0x1fb90804,
0x8d21f402,
@@ -467,6 +519,7 @@ uint32_t nvc0_grhub_code[] = {
0xf7f00100,
0x8d21f402,
0x08004ea0,
+/* 0x0440: init_gpc_wait */
0xc86821f4,
0x0bf41fff,
0x044ea0fa,
@@ -479,6 +532,7 @@ uint32_t nvc0_grhub_code[] = {
0xb74021d0,
0xbd080020,
0x1f19f014,
+/* 0x0473: main */
0xf40021d0,
0x28f40031,
0x08d7f000,
@@ -517,6 +571,7 @@ uint32_t nvc0_grhub_code[] = {
0x94bd0684,
0xd00699f0,
0x0ef40089,
+/* 0x0509: chsw_prev_no_next */
0xb920f931,
0x32f40212,
0x0232f401,
@@ -524,10 +579,12 @@ uint32_t nvc0_grhub_code[] = {
0x17f120fc,
0x14b60b00,
0x0012d006,
+/* 0x0527: chsw_no_prev */
0xc8130ef4,
0x0bf41f23,
0x0131f40d,
0xf50232f4,
+/* 0x0537: chsw_done */
0xf1082921,
0xb60b0c17,
0x27f00614,
@@ -536,10 +593,12 @@ uint32_t nvc0_grhub_code[] = {
0xbd0684b6,
0x0499f094,
0xf50089d0,
+/* 0x0557: main_not_ctx_switch */
0xb0ff200e,
0x1bf401e4,
0x02f2b90d,
0x07b521f5,
+/* 0x0567: main_not_ctx_chan */
0xb0420ef4,
0x1bf402e4,
0x3c87f12e,
@@ -553,14 +612,17 @@ uint32_t nvc0_grhub_code[] = {
0xf094bd06,
0x89d00799,
0x110ef400,
+/* 0x0598: main_not_ctx_save */
0xf010ef94,
0x21f501f5,
0x0ef502ec,
+/* 0x05a6: main_done */
0x17f1fed1,
0x14b60820,
0xf024bd06,
0x12d01f29,
0xbe0ef500,
+/* 0x05b9: ih */
0xfe80f9fe,
0x80f90188,
0xa0f990f9,
@@ -574,16 +636,19 @@ uint32_t nvc0_grhub_code[] = {
0x21f400bf,
0x00b0b704,
0x01e7f004,
+/* 0x05ef: ih_no_fifo */
0xe400bed0,
0xf40100ab,
0xd7f00d0b,
0x01e7f108,
0x0421f440,
+/* 0x0600: ih_no_ctxsw */
0x0104b7f1,
0xabffb0bd,
0x0d0bf4b4,
0x0c1ca7f1,
0xd006a4b6,
+/* 0x0616: ih_no_other */
0x0ad000ab,
0xfcf0fc40,
0xfcd0fce0,
@@ -591,32 +656,40 @@ uint32_t nvc0_grhub_code[] = {
0xfe80fc90,
0x80fc0088,
0xf80032f4,
+/* 0x0631: ctx_4160s */
0x60e7f101,
0x40e3f041,
0xf401f7f0,
+/* 0x063e: ctx_4160s_wait */
0x21f48d21,
0x04ffc868,
0xf8fa0bf4,
+/* 0x0649: ctx_4160c */
0x60e7f100,
0x40e3f041,
0x21f4f4bd,
+/* 0x0657: ctx_4170s */
0xf100f88d,
0xf04170e7,
0xf5f040e3,
0x8d21f410,
+/* 0x0666: ctx_4170w */
0xe7f100f8,
0xe3f04170,
0x6821f440,
0xf410f4f0,
0x00f8f31b,
+/* 0x0678: ctx_redswitch */
0x0614e7f1,
0xf106e4b6,
0xd00270f7,
0xf7f000ef,
+/* 0x0689: ctx_redswitch_delay */
0x01f2b608,
0xf1fd1bf4,
0xd00770f7,
0x00f800ef,
+/* 0x0698: ctx_86c */
0x086ce7f1,
0xd006e4b6,
0xe7f100ef,
@@ -625,6 +698,7 @@ uint32_t nvc0_grhub_code[] = {
0xa86ce7f1,
0xf441e3f0,
0x00f88d21,
+/* 0x06b8: ctx_load */
0x083c87f1,
0xbd0684b6,
0x0599f094,
@@ -639,6 +713,7 @@ uint32_t nvc0_grhub_code[] = {
0x0614b60a,
0xd00747f0,
0x14d00012,
+/* 0x06f1: ctx_chan_wait_0 */
0x4014cf40,
0xf41f44f0,
0x32d0fa1b,
@@ -688,6 +763,7 @@ uint32_t nvc0_grhub_code[] = {
0xbd0684b6,
0x0599f094,
0xf80089d0,
+/* 0x07b5: ctx_chan */
0x3121f500,
0xb821f506,
0x0ca7f006,
@@ -695,39 +771,48 @@ uint32_t nvc0_grhub_code[] = {
0xb60a1017,
0x27f00614,
0x0012d005,
+/* 0x07d0: ctx_chan_wait */
0xfd0012cf,
0x1bf40522,
0x4921f5fa,
+/* 0x07df: ctx_mmio_exec */
0x9800f806,
0x27f18103,
0x24b60a04,
0x0023d006,
+/* 0x07ee: ctx_mmio_loop */
0x34c434bd,
0x0f1bf4ff,
0x030057f1,
0xfa0653f0,
0x03f80535,
+/* 0x0800: ctx_mmio_pull */
0x98c04e98,
0x21f4c14f,
0x0830b68d,
0xf40112b6,
+/* 0x0812: ctx_mmio_done */
0x0398df1b,
0x0023d016,
0xf1800080,
0xf0020017,
0x01fa0613,
0xf803f806,
+/* 0x0829: ctx_xfer */
0x0611f400,
+/* 0x082f: ctx_xfer_pre */
0xf01102f4,
0x21f510f7,
0x21f50698,
0x11f40631,
+/* 0x083d: ctx_xfer_pre_load */
0x02f7f01c,
0x065721f5,
0x066621f5,
0x067821f5,
0x21f5f4bd,
0x21f50657,
+/* 0x0856: ctx_xfer_exec */
0x019806b8,
0x1427f116,
0x0624b604,
@@ -762,9 +847,11 @@ uint32_t nvc0_grhub_code[] = {
0x0a1017f1,
0xf00614b6,
0x12d00527,
+/* 0x08dd: ctx_xfer_post_save_wait */
0x0012cf00,
0xf40522fd,
0x02f4fa1b,
+/* 0x08e9: ctx_xfer_post */
0x02f7f032,
0x065721f5,
0x21f5f4bd,
@@ -776,7 +863,9 @@ uint32_t nvc0_grhub_code[] = {
0x11fd8001,
0x070bf405,
0x07df21f5,
+/* 0x0914: ctx_xfer_no_post_mmio */
0x064921f5,
+/* 0x0918: ctx_xfer_done */
0x000000f8,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
new file mode 100644
index 0000000000000000000000000000000000000000..138eeaa286650998797b11eba246d6d2e72ecc0c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
@@ -0,0 +1,780 @@
+/* fuc microcode for nve0 PGRAPH/HUB
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ * m4 nve0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nve0_grhub.fuc.h
+ */
+
+.section #nve0_grhub_data
+include(`nve0.fuc')
+gpc_count: .b32 0
+rop_count: .b32 0
+cmd_queue: queue_init
+hub_mmio_list_head: .b32 0
+hub_mmio_list_tail: .b32 0
+
+ctx_current: .b32 0
+
+chipsets:
+.b8 0xe4 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
+.b8 0xe7 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
+.b8 0 0 0 0
+
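For reference, each chipsets entry above occupies eight bytes: a chipset id padded out to a 32-bit word, followed by two 16-bit pointers to the head and tail of that chipset's hub mmio list. A minimal C sketch of how one entry packs into the words visible in the generated nve0_grhub_data[] later in this patch (the struct name is illustrative, and little-endian byte order is assumed):

#include <stdint.h>
#include <stdio.h>

/* illustrative layout of one 8-byte chipsets entry */
struct chipset_entry {
	uint32_t chipset;    /* .b8 id 0 0 0  -> e.g. 0x000000e4 */
	uint16_t mmio_head;  /* .b16 #nve4_hub_mmio_head         */
	uint16_t mmio_tail;  /* .b16 #nve4_hub_mmio_tail         */
};

int main(void)
{
	/* values from the generated header: head = 0x0070, tail = 0x013c */
	struct chipset_entry e = { 0xe4, 0x0070, 0x013c };

	/* prints "000000e4 013c0070", matching the data table */
	printf("%08x %08x\n", e.chipset,
	       (uint32_t)e.mmio_tail << 16 | e.mmio_head);
	return 0;
}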
+nve4_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404010, 7)
+mmctx_data(0x4040a8, 9)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 1)
+mmctx_data(0x4041a0, 4)
+mmctx_data(0x404200, 4)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 4)
+mmctx_data(0x40462c, 2)
+mmctx_data(0x404640, 1)
+mmctx_data(0x404654, 1)
+mmctx_data(0x404660, 1)
+mmctx_data(0x404678, 19)
+mmctx_data(0x4046c8, 3)
+mmctx_data(0x404700, 3)
+mmctx_data(0x404718, 10)
+mmctx_data(0x404744, 2)
+mmctx_data(0x404754, 1)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x405b00, 1)
+mmctx_data(0x405b10, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 2)
+mmctx_data(0x4064c0, 12)
+mmctx_data(0x4064fc, 1)
+mmctx_data(0x407040, 1)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408840, 1)
+mmctx_data(0x408900, 3)
+mmctx_data(0x408980, 1)
+nve4_hub_mmio_tail:
+
+.align 256
+chan_data:
+chan_mmio_count: .b32 0
+chan_mmio_address: .b32 0
+
+.align 256
+xfer_data: .b32 0
+
+.section #nve0_grhub_code
+bra #init
+define(`include_code')
+include(`nve0.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nve0.fuc)
+//
+error:
+ push $r14
+ mov $r14 0x814
+ shl b32 $r14 6
+ iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code
+ mov $r14 0xc1c
+ shl b32 $r14 6
+ mov $r15 1
+ iowr I[$r14 + 0x000] $r15 // INTR_UP_SET
+ pop $r14
+ ret
+
+// HUB fuc initialisation, executed by triggering ucode start; it will
+// fall through to the main loop after completion.
+//
+// Input:
+// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+//
+// Output:
+// CC_SCRATCH[0]:
+// 31:31: set to signal completion
+// CC_SCRATCH[1]:
+// 31:0: total PGRAPH context size
+//
+init:
+ clear b32 $r0
+ mov $sp $r0
+ mov $xdbase $r0
+
+ // enable fifo access
+ mov $r1 0x1200
+ mov $r2 2
+ iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+
+ // setup i0 handler, and route all interrupts to it
+ mov $r1 #ih
+ mov $iv0 $r1
+ mov $r1 0x400
+ iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
+
+ // route HUB_CHANNEL_SWITCH to fuc interrupt 8
+ mov $r3 0x404
+ shl b32 $r3 6
+ mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
+ iowr I[$r3 + 0x000] $r2
+
+	// not sure what these are; route them because NVIDIA does, and
+	// the IRQ handler will signal the host if we ever get one.  that
+	// may tell us if/why we need to handle them.
+ //
+ mov $r2 0x2004
+ iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
+ mov $r2 0x200b
+ iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
+ mov $r2 0x200c
+ iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
+
+ // enable all INTR_UP interrupts
+ mov $r2 0xc24
+ shl b32 $r2 6
+ not b32 $r3 $r0
+ iowr I[$r2] $r3
+
+ // enable fifo, ctxsw, 9, 10, 15 interrupts
+ mov $r2 -0x78fc // 0x8704
+ sethi $r2 0
+ iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+
+ // fifo level triggered, rest edge
+ sub b32 $r1 0x100
+ mov $r2 4
+ iowr I[$r1] $r2
+
+ // enable interrupts
+ bset $flags ie0
+
+ // fetch enabled GPC/ROP counts
+ mov $r14 -0x69fc // 0x409604
+ sethi $r14 0x400000
+ call #nv_rd32
+ extr $r1 $r15 16:20
+ st b32 D[$r0 + #rop_count] $r1
+ and $r15 0x1f
+ st b32 D[$r0 + #gpc_count] $r15
+
+ // set BAR_REQMASK to GPC mask
+ mov $r1 1
+ shl b32 $r1 $r15
+ sub b32 $r1 1
+ mov $r2 0x40c
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r1
+ iowr I[$r2 + 0x100] $r1
+
+ // find context data for this chipset
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
+ mov $r15 #chipsets - 8
+ init_find_chipset:
+ add b32 $r15 8
+ ld b32 $r3 D[$r15 + 0x00]
+ cmpu b32 $r3 $r2
+ bra e #init_context
+ cmpu b32 $r3 0
+ bra ne #init_find_chipset
+ // unknown chipset
+ ret
+
+ // context size calculation, reserve first 256 bytes for use by fuc
+ init_context:
+ mov $r1 256
+
+ // calculate size of mmio context data
+ ld b16 $r14 D[$r15 + 4]
+ ld b16 $r15 D[$r15 + 6]
+ sethi $r14 0
+ st b32 D[$r0 + #hub_mmio_list_head] $r14
+ st b32 D[$r0 + #hub_mmio_list_tail] $r15
+ call #mmctx_size
+
+	// set mmctx base addresses now so we don't have to do it later;
+	// they don't (currently) ever change
+ mov $r3 0x700
+ shl b32 $r3 6
+ shr b32 $r4 $r1 8
+ iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
+ iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
+ add b32 $r3 0x1300
+ add b32 $r1 $r15
+ shr b32 $r15 2
+	iowr I[$r3 + 0x000] $r15	// MMCTX_LOAD_COUNT, unclear what this is for
+
+ // strands, base offset needs to be aligned to 256 bytes
+ shr b32 $r1 8
+ add b32 $r1 1
+ shl b32 $r1 8
+ mov b32 $r15 $r1
+ call #strand_ctx_init
+ add b32 $r1 $r15
+
+ // initialise each GPC in sequence by passing in the offset of its
+ // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
+ // has previously been uploaded by the host) running.
+ //
+ // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
+ // when it has completed, and return the size of its context data
+ // in GPCn_CC_SCRATCH[1]
+ //
+ ld b32 $r3 D[$r0 + #gpc_count]
+ mov $r4 0x2000
+ sethi $r4 0x500000
+ init_gpc:
+ // setup, and start GPC ucode running
+ add b32 $r14 $r4 0x804
+ mov b32 $r15 $r1
+ call #nv_wr32 // CC_SCRATCH[1] = ctx offset
+ add b32 $r14 $r4 0x800
+ mov b32 $r15 $r2
+ call #nv_wr32 // CC_SCRATCH[0] = chipset
+ add b32 $r14 $r4 0x10c
+ clear b32 $r15
+ call #nv_wr32
+ add b32 $r14 $r4 0x104
+ call #nv_wr32 // ENTRY
+ add b32 $r14 $r4 0x100
+ mov $r15 2 // CTRL_START_TRIGGER
+ call #nv_wr32 // CTRL
+
+ // wait for it to complete, and adjust context size
+ add b32 $r14 $r4 0x800
+ init_gpc_wait:
+ call #nv_rd32
+ xbit $r15 $r15 31
+ bra e #init_gpc_wait
+ add b32 $r14 $r4 0x804
+ call #nv_rd32
+ add b32 $r1 $r15
+
+ // next!
+ add b32 $r4 0x8000
+ sub b32 $r3 1
+ bra ne #init_gpc
+
+ // save context size, and tell host we're ready
+ mov $r2 0x800
+ shl b32 $r2 6
+ iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size
+ add b32 $r2 0x800
+ clear b32 $r1
+ bset $r1 31
+ iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000
+
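The host side of this handshake only needs the contract documented above: wait for bit 31 of CC_SCRATCH[0] to become set, then read the total PGRAPH context size from CC_SCRATCH[1]. A hedged sketch of that poll, assuming the hub falcon's CC_SCRATCH registers are visible to the host at 0x409800/0x409804 (those offsets, and the nv_rd() MMIO helper, are assumptions for illustration):

#include <stdint.h>
#include <stdbool.h>

extern uint32_t nv_rd(uint32_t reg);	/* hypothetical MMIO read helper */

#define GR_CC_SCRATCH0 0x409800		/* assumed: hub CC_SCRATCH[0] */
#define GR_CC_SCRATCH1 0x409804		/* assumed: hub CC_SCRATCH[1] */

/* returns true and stores the context size once the ucode signals ready */
static bool grhub_wait_ready(uint32_t *ctx_size)
{
	for (int i = 0; i < 1000000; i++) {
		if (nv_rd(GR_CC_SCRATCH0) & 0x80000000) {
			*ctx_size = nv_rd(GR_CC_SCRATCH1);
			return true;
		}
	}
	return false;	/* timed out */
}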
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+ // sleep until we have something to do
+ bset $flags $p0
+ sleep $p0
+ mov $r13 #cmd_queue
+ call #queue_get
+ bra $p1 #main
+
+ // context switch, requested by GPU?
+ cmpu b32 $r14 0x4001
+ bra ne #main_not_ctx_switch
+ trace_set(T_AUTO)
+ mov $r1 0xb00
+ shl b32 $r1 6
+ iord $r2 I[$r1 + 0x100] // CHAN_NEXT
+ iord $r1 I[$r1 + 0x000] // CHAN_CUR
+
+ xbit $r3 $r1 31
+ bra e #chsw_no_prev
+ xbit $r3 $r2 31
+ bra e #chsw_prev_no_next
+ push $r2
+ mov b32 $r2 $r1
+ trace_set(T_SAVE)
+ bclr $flags $p1
+ bset $flags $p2
+ call #ctx_xfer
+ trace_clr(T_SAVE);
+ pop $r2
+ trace_set(T_LOAD);
+ bset $flags $p1
+ call #ctx_xfer
+ trace_clr(T_LOAD);
+ bra #chsw_done
+ chsw_prev_no_next:
+ push $r2
+ mov b32 $r2 $r1
+ bclr $flags $p1
+ bclr $flags $p2
+ call #ctx_xfer
+ pop $r2
+ mov $r1 0xb00
+ shl b32 $r1 6
+ iowr I[$r1] $r2
+ bra #chsw_done
+ chsw_no_prev:
+ xbit $r3 $r2 31
+ bra e #chsw_done
+ bset $flags $p1
+ bclr $flags $p2
+ call #ctx_xfer
+
+ // ack the context switch request
+ chsw_done:
+ mov $r1 0xb0c
+ shl b32 $r1 6
+ mov $r2 1
+ iowr I[$r1 + 0x000] $r2 // 0x409b0c
+ trace_clr(T_AUTO)
+ bra #main
+
+ // request to set current channel? (*not* a context switch)
+ main_not_ctx_switch:
+ cmpu b32 $r14 0x0001
+ bra ne #main_not_ctx_chan
+ mov b32 $r2 $r15
+ call #ctx_chan
+ bra #main_done
+
+ // request to store current channel context?
+ main_not_ctx_chan:
+ cmpu b32 $r14 0x0002
+ bra ne #main_not_ctx_save
+ trace_set(T_SAVE)
+ bclr $flags $p1
+ bclr $flags $p2
+ call #ctx_xfer
+ trace_clr(T_SAVE)
+ bra #main_done
+
+ main_not_ctx_save:
+ shl b32 $r15 $r14 16
+ or $r15 E_BAD_COMMAND
+ call #error
+ bra #main
+
+ main_done:
+ mov $r1 0x820
+ shl b32 $r1 6
+ clear b32 $r2
+ bset $r2 31
+ iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
+ bra #main
+
+// interrupt handler
+ih:
+ push $r8
+ mov $r8 $flags
+ push $r8
+ push $r9
+ push $r10
+ push $r11
+ push $r13
+ push $r14
+ push $r15
+
+ // incoming fifo command?
+ iord $r10 I[$r0 + 0x200] // INTR
+ and $r11 $r10 0x00000004
+ bra e #ih_no_fifo
+ // queue incoming fifo command for later processing
+ mov $r11 0x1900
+ mov $r13 #cmd_queue
+ iord $r14 I[$r11 + 0x100] // FIFO_CMD
+ iord $r15 I[$r11 + 0x000] // FIFO_DATA
+ call #queue_put
+ add b32 $r11 0x400
+ mov $r14 1
+ iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+
+ // context switch request?
+ ih_no_fifo:
+ and $r11 $r10 0x00000100
+ bra e #ih_no_ctxsw
+ // enqueue a context switch for later processing
+ mov $r13 #cmd_queue
+ mov $r14 0x4001
+ call #queue_put
+
+ // anything we didn't handle, bring it to the host's attention
+ ih_no_ctxsw:
+ mov $r11 0x104
+ not b32 $r11
+ and $r11 $r10 $r11
+ bra e #ih_no_other
+ mov $r10 0xc1c
+ shl b32 $r10 6
+ iowr I[$r10] $r11 // INTR_UP_SET
+
+ // ack, and wake up main()
+ ih_no_other:
+ iowr I[$r0 + 0x100] $r10 // INTR_ACK
+
+ pop $r15
+ pop $r14
+ pop $r13
+ pop $r11
+ pop $r10
+ pop $r9
+ pop $r8
+ mov $flags $r8
+ pop $r8
+ bclr $flags $p0
+ iret
+
+// Again, not really sure what this is for
+//
+// In: $r15 value to set 0x404170 to
+//
+ctx_4170s:
+ mov $r14 0x4170
+ sethi $r14 0x400000
+ or $r15 0x10
+ call #nv_wr32
+ ret
+
+// Waits for a ctx_4170s() call to complete
+//
+ctx_4170w:
+ mov $r14 0x4170
+ sethi $r14 0x400000
+ call #nv_rd32
+ and $r15 0x10
+ bra ne #ctx_4170w
+ ret
+
+// Disables various things, waits a bit, and re-enables them.
+//
+// Not sure how exactly this helps; perhaps "ENABLE" is not such a
+// good description for the bits we turn off?  Anyway, without this,
+// funny things happen.
+//
+ctx_redswitch:
+ mov $r14 0x614
+ shl b32 $r14 6
+ mov $r15 0x270
+ iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
+ mov $r15 8
+ ctx_redswitch_delay:
+ sub b32 $r15 1
+ bra ne #ctx_redswitch_delay
+ mov $r15 0x770
+ iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+ ret
+
+// Not a clue what this is for, except that unless the value is 0x10, the
+// strand context is saved (and presumably restored) incorrectly.
+//
+// In: $r15 value to set to (0x00/0x10 are used)
+//
+ctx_86c:
+ mov $r14 0x86c
+ shl b32 $r14 6
+ iowr I[$r14] $r15 // HUB(0x86c) = val
+ mov $r14 -0x75ec
+ sethi $r14 0x400000
+ call #nv_wr32 // ROP(0xa14) = val
+ mov $r14 -0x5794
+ sethi $r14 0x410000
+ call #nv_wr32 // GPC(0x86c) = val
+ ret
+
+// ctx_load - loads a channel's ctxctl data, and selects its vm
+//
+// In: $r2 channel address
+//
+ctx_load:
+ trace_set(T_CHAN)
+
+ // switch to channel, somewhat magic in parts..
+ mov $r10 12 // DONE_UNK12
+ call #wait_donez
+ mov $r1 0xa24
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r0 // 0x409a24
+ mov $r3 0xb00
+ shl b32 $r3 6
+ iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
+ mov $r1 0xa0c
+ shl b32 $r1 6
+ mov $r4 7
+ iowr I[$r1 + 0x000] $r2 // MEM_CHAN
+ iowr I[$r1 + 0x100] $r4 // MEM_CMD
+ ctx_chan_wait_0:
+ iord $r4 I[$r1 + 0x100]
+ and $r4 0x1f
+ bra ne #ctx_chan_wait_0
+ iowr I[$r3 + 0x000] $r2 // CHAN_CUR
+
+ // load channel header, fetch PGRAPH context pointer
+ mov $xtargets $r0
+ bclr $r2 31
+ shl b32 $r2 4
+ add b32 $r2 2
+
+ trace_set(T_LCHAN)
+ mov $r1 0xa04
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r2 // MEM_BASE
+ mov $r1 0xa20
+ shl b32 $r1 6
+ mov $r2 0x0002
+ sethi $r2 0x80000000
+ iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
+ mov $r1 0x10 // chan + 0x0210
+ mov $r2 #xfer_data
+ sethi $r2 0x00020000 // 16 bytes
+ xdld $r1 $r2
+ xdwait
+ trace_clr(T_LCHAN)
+
+ // update current context
+ ld b32 $r1 D[$r0 + #xfer_data + 4]
+ shl b32 $r1 24
+ ld b32 $r2 D[$r0 + #xfer_data + 0]
+ shr b32 $r2 8
+ or $r1 $r2
+ st b32 D[$r0 + #ctx_current] $r1
+
+ // set transfer base to start of context, and fetch context header
+ trace_set(T_LCTXH)
+ mov $r2 0xa04
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r1 // MEM_BASE
+ mov $r2 1
+ mov $r1 0xa20
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
+ mov $r1 #chan_data
+ sethi $r1 0x00060000 // 256 bytes
+ xdld $r0 $r1
+ xdwait
+ trace_clr(T_LCTXH)
+
+ trace_clr(T_CHAN)
+ ret
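The two xdld/xdst transfers in ctx_load encode their length in the sethi value: judging from the comments, a field n in the bits above 16 selects a transfer of 4 << n bytes (0x0002 giving 16 bytes, 0x0006 giving 256 bytes). A small check of that apparent encoding; this is inferred from the comments, not from hardware documentation, and the 3-bit field width is a guess:

#include <stdint.h>
#include <assert.h>

/* apparent xdld/xdst length encoding: bits 18:16 select 4 << n bytes */
static uint32_t xfer_size_bytes(uint32_t sethi_val)
{
	return 4u << ((sethi_val >> 16) & 0x7);
}

int main(void)
{
	assert(xfer_size_bytes(0x00020000) == 16);   /* chan + 0x0210 read */
	assert(xfer_size_bytes(0x00060000) == 256);  /* chan_data fetch    */
	return 0;
}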
+
+// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
+// the active channel for ctxctl, but not actually transfer
+// any context data.  Intended for use only during initial
+// context construction.
+//
+// In: $r2 channel address
+//
+ctx_chan:
+ call #ctx_load
+ mov $r10 12 // DONE_UNK12
+ call #wait_donez
+ mov $r1 0xa10
+ shl b32 $r1 6
+ mov $r2 5
+ iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
+ ctx_chan_wait:
+ iord $r2 I[$r1 + 0x000]
+ or $r2 $r2
+ bra ne #ctx_chan_wait
+ ret
+
+// Execute per-context state overrides list
+//
+// Only executed on the first load of a channel. Might want to look into
+// removing this and having the host directly modify the channel's context
+// to change this state...  The nouveau DRM already builds this list, as
+// it's definitely needed for NVIDIA's firmware, so we may as well use it
+// for now.
+//
+// Input: $r1 mmio list length
+//
+ctx_mmio_exec:
+ // set transfer base to be the mmio list
+ ld b32 $r3 D[$r0 + #chan_mmio_address]
+ mov $r2 0xa04
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r3 // MEM_BASE
+
+ clear b32 $r3
+ ctx_mmio_loop:
+ // fetch next 256 bytes of mmio list if necessary
+ and $r4 $r3 0xff
+ bra ne #ctx_mmio_pull
+ mov $r5 #xfer_data
+ sethi $r5 0x00060000 // 256 bytes
+ xdld $r3 $r5
+ xdwait
+
+ // execute a single list entry
+ ctx_mmio_pull:
+ ld b32 $r14 D[$r4 + #xfer_data + 0x00]
+ ld b32 $r15 D[$r4 + #xfer_data + 0x04]
+ call #nv_wr32
+
+ // next!
+ add b32 $r3 8
+ sub b32 $r1 1
+ bra ne #ctx_mmio_loop
+
+ // set transfer base back to the current context
+ ctx_mmio_done:
+ ld b32 $r3 D[$r0 + #ctx_current]
+ iowr I[$r2 + 0x000] $r3 // MEM_BASE
+
+ // disable the mmio list now, we don't need/want to execute it again
+ st b32 D[$r0 + #chan_mmio_count] $r0
+ mov $r1 #chan_data
+ sethi $r1 0x00060000 // 256 bytes
+ xdst $r0 $r1
+ xdwait
+ ret
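Each list entry processed above is an 8-byte (register, value) pair: ctx_mmio_pull reads the register into $r14 and the value into $r15 before calling #nv_wr32, and the loop advances by 8 bytes until chan_mmio_count entries are done. In C terms the channel's override list looks roughly like this (names are illustrative):

#include <stdint.h>

/* one per-channel mmio override, as consumed by ctx_mmio_exec */
struct chan_mmio_entry {
	uint32_t reg;	/* loaded into $r14, written via nv_wr32 */
	uint32_t val;	/* loaded into $r15                      */
};

extern void nv_wr32(uint32_t reg, uint32_t val);	/* the ucode helper */

/* rough C equivalent of the ctx_mmio_loop body */
static void ctx_mmio_exec_c(const struct chan_mmio_entry *list, uint32_t count)
{
	for (uint32_t i = 0; i < count; i++)
		nv_wr32(list[i].reg, list[i].val);
}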
+
+// Transfer HUB context data between GPU and storage area
+//
+// In: $r2 channel address
+// $p1 clear on save, set on load
+// $p2 set if opposite direction done/will be done, so:
+// on save it means: "a load will follow this save"
+// on load it means: "a save preceded this load"
+//
+ctx_xfer:
+ bra not $p1 #ctx_xfer_pre
+ bra $p2 #ctx_xfer_pre_load
+ ctx_xfer_pre:
+ mov $r15 0x10
+ call #ctx_86c
+ bra not $p1 #ctx_xfer_exec
+
+ ctx_xfer_pre_load:
+ mov $r15 2
+ call #ctx_4170s
+ call #ctx_4170w
+ call #ctx_redswitch
+ clear b32 $r15
+ call #ctx_4170s
+ call #ctx_load
+
+ // fetch context pointer, and initiate xfer on all GPCs
+ ctx_xfer_exec:
+ ld b32 $r1 D[$r0 + #ctx_current]
+ mov $r2 0x414
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
+ mov $r14 -0x5b00
+ sethi $r14 0x410000
+ mov b32 $r15 $r1
+ call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
+ add b32 $r14 4
+ xbit $r15 $flags $p1
+ xbit $r2 $flags $p2
+ shl b32 $r2 1
+ or $r15 $r2
+ call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+
+ // strands
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xc
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
+ call #strand_wait
+ mov $r2 0x47fc
+ sethi $r2 0x20000
+ iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
+ xbit $r2 $flags $p1
+ add b32 $r2 3
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+ // mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 6 // first, last
+ mov $r11 0 // base = 0
+ ld b32 $r12 D[$r0 + #hub_mmio_list_head]
+ ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
+ mov $r14 0 // not multi
+ call #mmctx_xfer
+
+ // wait for GPCs to all complete
+ mov $r10 8 // DONE_BAR
+ call #wait_doneo
+
+ // wait for strand xfer to complete
+ call #strand_wait
+
+ // post-op
+ bra $p1 #ctx_xfer_post
+ mov $r10 12 // DONE_UNK12
+ call #wait_donez
+ mov $r1 0xa10
+ shl b32 $r1 6
+ mov $r2 5
+ iowr I[$r1] $r2 // MEM_CMD
+ ctx_xfer_post_save_wait:
+ iord $r2 I[$r1]
+ or $r2 $r2
+ bra ne #ctx_xfer_post_save_wait
+
+ bra $p2 #ctx_xfer_done
+ ctx_xfer_post:
+ mov $r15 2
+ call #ctx_4170s
+ clear b32 $r15
+ call #ctx_86c
+ call #strand_post
+ call #ctx_4170w
+ clear b32 $r15
+ call #ctx_4170s
+
+ bra not $p1 #ctx_xfer_no_post_mmio
+ ld b32 $r1 D[$r0 + #chan_mmio_count]
+ or $r1 $r1
+ bra e #ctx_xfer_no_post_mmio
+ call #ctx_mmio_exec
+
+ ctx_xfer_no_post_mmio:
+
+ ctx_xfer_done:
+ ret
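The transfer type written to GPC_BCAST_WRCMD_CMD in ctx_xfer_exec is just the two flag bits packed together ($p1 in bit 0, $p2 in bit 1), and the strand command is 3 + $p1, giving the 0x03/0x04 SAVE/LOAD values noted in the comments. As a sketch:

#include <stdint.h>
#include <assert.h>

/* GPC_XFER(type): bit 0 = load ($p1), bit 1 = opposite-direction ($p2) */
static uint32_t gpc_xfer_type(int p1_load, int p2_other_dir)
{
	return (uint32_t)(p2_other_dir << 1 | p1_load);
}

/* STRAND_CMD(0x3f): 3 on save ($p1 clear), 4 on load ($p1 set) */
static uint32_t strand_cmd(int p1_load)
{
	return 3u + (uint32_t)p1_load;
}

int main(void)
{
	assert(gpc_xfer_type(1, 1) == 3);
	assert(strand_cmd(0) == 3 && strand_cmd(1) == 4);
	return 0;
}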
+
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
new file mode 100644
index 0000000000000000000000000000000000000000..decf0c60ca3bf079d5f0ad2e3a94a887737a026d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -0,0 +1,857 @@
+uint32_t nve0_grhub_data[] = {
+/* 0x0000: gpc_count */
+ 0x00000000,
+/* 0x0004: rop_count */
+ 0x00000000,
+/* 0x0008: cmd_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0050: hub_mmio_list_head */
+ 0x00000000,
+/* 0x0054: hub_mmio_list_tail */
+ 0x00000000,
+/* 0x0058: ctx_current */
+ 0x00000000,
+/* 0x005c: chipsets */
+ 0x000000e4,
+ 0x013c0070,
+ 0x000000e7,
+ 0x013c0070,
+ 0x00000000,
+/* 0x0070: nve4_hub_mmio_head */
+ 0x0417e91c,
+ 0x04400204,
+ 0x18404010,
+ 0x204040a8,
+ 0x184040d0,
+ 0x004040f8,
+ 0x08404130,
+ 0x08404150,
+ 0x00404164,
+ 0x0c4041a0,
+ 0x0c404200,
+ 0x34404404,
+ 0x0c404460,
+ 0x00404480,
+ 0x00404498,
+ 0x0c404604,
+ 0x0c404618,
+ 0x0440462c,
+ 0x00404640,
+ 0x00404654,
+ 0x00404660,
+ 0x48404678,
+ 0x084046c8,
+ 0x08404700,
+ 0x24404718,
+ 0x04404744,
+ 0x00404754,
+ 0x00405800,
+ 0x08405830,
+ 0x00405854,
+ 0x0c405870,
+ 0x04405a00,
+ 0x00405a18,
+ 0x00405b00,
+ 0x00405b10,
+ 0x00406020,
+ 0x0c406028,
+ 0x044064a8,
+ 0x044064b4,
+ 0x2c4064c0,
+ 0x004064fc,
+ 0x00407040,
+ 0x00407804,
+ 0x1440780c,
+ 0x004078bc,
+ 0x18408000,
+ 0x00408064,
+ 0x08408800,
+ 0x00408840,
+ 0x08408900,
+ 0x00408980,
+/* 0x013c: nve4_hub_mmio_tail */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0200: chan_data */
+/* 0x0200: chan_mmio_count */
+ 0x00000000,
+/* 0x0204: chan_mmio_address */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0300: xfer_data */
+ 0x00000000,
+};
+
+uint32_t nve0_grhub_code[] = {
+ 0x03090ef5,
+/* 0x0004: queue_put */
+ 0x9800d898,
+ 0x86f001d9,
+ 0x0489b808,
+ 0xf00c1bf4,
+ 0x21f502f7,
+ 0x00f802ec,
+/* 0x001c: queue_put_next */
+ 0xb60798c4,
+ 0x8dbb0384,
+ 0x0880b600,
+ 0x80008e80,
+ 0x90b6018f,
+ 0x0f94f001,
+ 0xf801d980,
+/* 0x0039: queue_get */
+ 0x0131f400,
+ 0x9800d898,
+ 0x89b801d9,
+ 0x210bf404,
+ 0xb60789c4,
+ 0x9dbb0394,
+ 0x0890b600,
+ 0x98009e98,
+ 0x80b6019f,
+ 0x0f84f001,
+ 0xf400d880,
+/* 0x0066: queue_get_done */
+ 0x00f80132,
+/* 0x0068: nv_rd32 */
+ 0x0728b7f1,
+ 0xb906b4b6,
+ 0xc9f002ec,
+ 0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
+ 0xc800bccf,
+ 0x1bf41fcc,
+ 0x06a7f0fa,
+ 0x010321f5,
+ 0xf840bfcf,
+/* 0x008d: nv_wr32 */
+ 0x28b7f100,
+ 0x06b4b607,
+ 0xb980bfd0,
+ 0xc9f002ec,
+ 0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
+ 0xcf00bcd0,
+ 0xccc800bc,
+ 0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
+ 0x87f100f8,
+ 0x84b60430,
+ 0x1ff9f006,
+ 0xf8008fd0,
+/* 0x00bd: watchdog_clear */
+ 0x3087f100,
+ 0x0684b604,
+ 0xf80080d0,
+/* 0x00c9: wait_donez */
+ 0x3c87f100,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d000,
+ 0x081887f1,
+ 0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
+ 0x87f1008a,
+ 0x84b60400,
+ 0x0088cf06,
+ 0xf4888aff,
+ 0x87f1f31b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00099,
+/* 0x0103: wait_doneo */
+ 0xf100f800,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00099f0,
+ 0x87f10089,
+ 0x84b60818,
+ 0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
+ 0x040087f1,
+ 0xcf0684b6,
+ 0x8aff0088,
+ 0xf30bf488,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0099f094,
+ 0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
+ 0x9894bd00,
+ 0x85b600e8,
+ 0x0180b61a,
+ 0xbb0284b6,
+ 0xe0b60098,
+ 0x04efb804,
+ 0xb9eb1bf4,
+ 0x00f8029f,
+/* 0x015c: mmctx_xfer */
+ 0x083c87f1,
+ 0xbd0684b6,
+ 0x0199f094,
+ 0xf10089d0,
+ 0xb6071087,
+ 0x94bd0684,
+ 0xf405bbfd,
+ 0x8bd0090b,
+ 0x0099f000,
+/* 0x0180: mmctx_base_disabled */
+ 0xf405eefd,
+ 0x8ed00c0b,
+ 0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
+ 0xb70199f0,
+ 0xc8010080,
+ 0xb4b600ab,
+ 0x0cb9f010,
+ 0xb601aec8,
+ 0xbefd11e4,
+ 0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
+ 0xf0008ecf,
+ 0x0bf41fe4,
+ 0x00ce98fa,
+ 0xd005e9fd,
+ 0xc0b6c08e,
+ 0x04cdb804,
+ 0xc8e81bf4,
+ 0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
+ 0x008bcf18,
+ 0xb01fb4f0,
+ 0x1bf410b4,
+ 0x02a7f0f7,
+ 0xf4c921f4,
+/* 0x01de: mmctx_stop */
+ 0xabc81b0e,
+ 0x10b4b600,
+ 0xf00cb9f0,
+ 0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
+ 0x008bcf00,
+ 0xf412bbc8,
+/* 0x01f6: mmctx_done */
+ 0x87f1fa1b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00199,
+/* 0x0207: strand_wait */
+ 0xf900f800,
+ 0x02a7f0a0,
+ 0xfcc921f4,
+/* 0x0213: strand_pre */
+ 0xf100f8a0,
+ 0xf04afc87,
+ 0x97f00283,
+ 0x0089d00c,
+ 0x020721f5,
+/* 0x0226: strand_post */
+ 0x87f100f8,
+ 0x83f04afc,
+ 0x0d97f002,
+ 0xf50089d0,
+ 0xf8020721,
+/* 0x0239: strand_set */
+ 0xfca7f100,
+ 0x02a3f04f,
+ 0x0500aba2,
+ 0xd00fc7f0,
+ 0xc7f000ac,
+ 0x00bcd00b,
+ 0x020721f5,
+ 0xf000aed0,
+ 0xbcd00ac7,
+ 0x0721f500,
+/* 0x0263: strand_ctx_init */
+ 0xf100f802,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x21f50089,
+ 0xe7f00213,
+ 0x3921f503,
+ 0xfca7f102,
+ 0x02a3f046,
+ 0x0400aba0,
+ 0xf040a0d0,
+ 0xbcd001c7,
+ 0x0721f500,
+ 0x010c9202,
+ 0xf000acd0,
+ 0xbcd002c7,
+ 0x0721f500,
+ 0x2621f502,
+ 0x8087f102,
+ 0x0684b608,
+ 0xb70089cf,
+ 0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
+ 0x8ed008fe,
+ 0x408ed000,
+ 0xb6808acf,
+ 0xa0b606a5,
+ 0x00eabb01,
+ 0xb60480b6,
+ 0x1bf40192,
+ 0x08e4b6e8,
+ 0xf1f2efbc,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x00f80089,
+/* 0x02ec: error */
+ 0xe7f1e0f9,
+ 0xe4b60814,
+ 0x00efd006,
+ 0x0c1ce7f1,
+ 0xf006e4b6,
+ 0xefd001f7,
+ 0xf8e0fc00,
+/* 0x0309: init */
+ 0xfe04bd00,
+ 0x07fe0004,
+ 0x0017f100,
+ 0x0227f012,
+ 0xf10012d0,
+ 0xfe05b917,
+ 0x17f10010,
+ 0x10d00400,
+ 0x0437f1c0,
+ 0x0634b604,
+ 0x200327f1,
+ 0xf10032d0,
+ 0xd0200427,
+ 0x27f10132,
+ 0x32d0200b,
+ 0x0c27f102,
+ 0x0732d020,
+ 0x0c2427f1,
+ 0xb90624b6,
+ 0x23d00003,
+ 0x0427f100,
+ 0x0023f087,
+ 0xb70012d0,
+ 0xf0010012,
+ 0x12d00427,
+ 0x1031f400,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xf1c76821,
+ 0x01018090,
+ 0x801ff4f0,
+ 0x17f0000f,
+ 0x041fbb01,
+ 0xf10112b6,
+ 0xb6040c27,
+ 0x21d00624,
+ 0x4021d000,
+ 0x080027f1,
+ 0xcf0624b6,
+ 0xf7f00022,
+/* 0x03a9: init_find_chipset */
+ 0x08f0b654,
+ 0xb800f398,
+ 0x0bf40432,
+ 0x0034b00b,
+ 0xf8f11bf4,
+/* 0x03bd: init_context */
+ 0x0017f100,
+ 0x02fe5801,
+ 0xf003ff58,
+ 0x0e8000e3,
+ 0x150f8014,
+ 0x013d21f5,
+ 0x070037f1,
+ 0x950634b6,
+ 0x34d00814,
+ 0x4034d000,
+ 0x130030b7,
+ 0xb6001fbb,
+ 0x3fd002f5,
+ 0x0815b600,
+ 0xb60110b6,
+ 0x1fb90814,
+ 0x6321f502,
+ 0x001fbb02,
+ 0xf1000398,
+ 0xf0200047,
+/* 0x040e: init_gpc */
+ 0x4ea05043,
+ 0x1fb90804,
+ 0x8d21f402,
+ 0x08004ea0,
+ 0xf4022fb9,
+ 0x4ea08d21,
+ 0xf4bd010c,
+ 0xa08d21f4,
+ 0xf401044e,
+ 0x4ea08d21,
+ 0xf7f00100,
+ 0x8d21f402,
+ 0x08004ea0,
+/* 0x0440: init_gpc_wait */
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x0027f1b4,
+ 0x0624b608,
+ 0xb74021d0,
+ 0xbd080020,
+ 0x1f19f014,
+/* 0x0473: main */
+ 0xf40021d0,
+ 0x28f40031,
+ 0x08d7f000,
+ 0xf43921f4,
+ 0xe4b1f401,
+ 0x1bf54001,
+ 0x87f100d1,
+ 0x84b6083c,
+ 0xf094bd06,
+ 0x89d00499,
+ 0x0017f100,
+ 0x0614b60b,
+ 0xcf4012cf,
+ 0x13c80011,
+ 0x7e0bf41f,
+ 0xf41f23c8,
+ 0x20f95a0b,
+ 0xf10212b9,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00799f0,
+ 0x32f40089,
+ 0x0231f401,
+ 0x07fb21f5,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0799f094,
+ 0xfc0089d0,
+ 0x3c87f120,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d006,
+ 0xf50131f4,
+ 0xf107fb21,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00699f0,
+ 0x0ef40089,
+/* 0x0509: chsw_prev_no_next */
+ 0xb920f931,
+ 0x32f40212,
+ 0x0232f401,
+ 0x07fb21f5,
+ 0x17f120fc,
+ 0x14b60b00,
+ 0x0012d006,
+/* 0x0527: chsw_no_prev */
+ 0xc8130ef4,
+ 0x0bf41f23,
+ 0x0131f40d,
+ 0xf50232f4,
+/* 0x0537: chsw_done */
+ 0xf107fb21,
+ 0xb60b0c17,
+ 0x27f00614,
+ 0x0012d001,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0499f094,
+ 0xf50089d0,
+/* 0x0557: main_not_ctx_switch */
+ 0xb0ff200e,
+ 0x1bf401e4,
+ 0x02f2b90d,
+ 0x078f21f5,
+/* 0x0567: main_not_ctx_chan */
+ 0xb0420ef4,
+ 0x1bf402e4,
+ 0x3c87f12e,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d007,
+ 0xf40132f4,
+ 0x21f50232,
+ 0x87f107fb,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00799,
+ 0x110ef400,
+/* 0x0598: main_not_ctx_save */
+ 0xf010ef94,
+ 0x21f501f5,
+ 0x0ef502ec,
+/* 0x05a6: main_done */
+ 0x17f1fed1,
+ 0x14b60820,
+ 0xf024bd06,
+ 0x12d01f29,
+ 0xbe0ef500,
+/* 0x05b9: ih */
+ 0xfe80f9fe,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0xc4800acf,
+ 0x0bf404ab,
+ 0x00b7f11d,
+ 0x08d7f019,
+ 0xcf40becf,
+ 0x21f400bf,
+ 0x00b0b704,
+ 0x01e7f004,
+/* 0x05ef: ih_no_fifo */
+ 0xe400bed0,
+ 0xf40100ab,
+ 0xd7f00d0b,
+ 0x01e7f108,
+ 0x0421f440,
+/* 0x0600: ih_no_ctxsw */
+ 0x0104b7f1,
+ 0xabffb0bd,
+ 0x0d0bf4b4,
+ 0x0c1ca7f1,
+ 0xd006a4b6,
+/* 0x0616: ih_no_other */
+ 0x0ad000ab,
+ 0xfcf0fc40,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+/* 0x0631: ctx_4170s */
+ 0x70e7f101,
+ 0x40e3f041,
+ 0xf410f5f0,
+ 0x00f88d21,
+/* 0x0640: ctx_4170w */
+ 0x4170e7f1,
+ 0xf440e3f0,
+ 0xf4f06821,
+ 0xf31bf410,
+/* 0x0652: ctx_redswitch */
+ 0xe7f100f8,
+ 0xe4b60614,
+ 0x70f7f106,
+ 0x00efd002,
+/* 0x0663: ctx_redswitch_delay */
+ 0xb608f7f0,
+ 0x1bf401f2,
+ 0x70f7f1fd,
+ 0x00efd007,
+/* 0x0672: ctx_86c */
+ 0xe7f100f8,
+ 0xe4b6086c,
+ 0x00efd006,
+ 0x8a14e7f1,
+ 0xf440e3f0,
+ 0xe7f18d21,
+ 0xe3f0a86c,
+ 0x8d21f441,
+/* 0x0692: ctx_load */
+ 0x87f100f8,
+ 0x84b6083c,
+ 0xf094bd06,
+ 0x89d00599,
+ 0x0ca7f000,
+ 0xf1c921f4,
+ 0xb60a2417,
+ 0x10d00614,
+ 0x0037f100,
+ 0x0634b60b,
+ 0xf14032d0,
+ 0xb60a0c17,
+ 0x47f00614,
+ 0x0012d007,
+/* 0x06cb: ctx_chan_wait_0 */
+ 0xcf4014d0,
+ 0x44f04014,
+ 0xfa1bf41f,
+ 0xfe0032d0,
+ 0x2af0000b,
+ 0x0424b61f,
+ 0xf10220b6,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00899f0,
+ 0x17f10089,
+ 0x14b60a04,
+ 0x0012d006,
+ 0x0a2017f1,
+ 0xf00614b6,
+ 0x23f10227,
+ 0x12d08000,
+ 0x1017f000,
+ 0x030027f1,
+ 0xfa0223f0,
+ 0x03f80512,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0899f094,
+ 0x980089d0,
+ 0x14b6c101,
+ 0xc0029818,
+ 0xfd0825b6,
+ 0x01800512,
+ 0x3c87f116,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d009,
+ 0x0a0427f1,
+ 0xd00624b6,
+ 0x27f00021,
+ 0x2017f101,
+ 0x0614b60a,
+ 0xf10012d0,
+ 0xf0020017,
+ 0x01fa0613,
+ 0xf103f805,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00999f0,
+ 0x87f10089,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00599,
+/* 0x078f: ctx_chan */
+ 0xf500f800,
+ 0xf0069221,
+ 0x21f40ca7,
+ 0x1017f1c9,
+ 0x0614b60a,
+ 0xd00527f0,
+/* 0x07a6: ctx_chan_wait */
+ 0x12cf0012,
+ 0x0522fd00,
+ 0xf8fa1bf4,
+/* 0x07b1: ctx_mmio_exec */
+ 0x81039800,
+ 0x0a0427f1,
+ 0xd00624b6,
+ 0x34bd0023,
+/* 0x07c0: ctx_mmio_loop */
+ 0xf4ff34c4,
+ 0x57f10f1b,
+ 0x53f00300,
+ 0x0535fa06,
+/* 0x07d2: ctx_mmio_pull */
+ 0x4e9803f8,
+ 0xc14f98c0,
+ 0xb68d21f4,
+ 0x12b60830,
+ 0xdf1bf401,
+/* 0x07e4: ctx_mmio_done */
+ 0xd0160398,
+ 0x00800023,
+ 0x0017f180,
+ 0x0613f002,
+ 0xf80601fa,
+/* 0x07fb: ctx_xfer */
+ 0xf400f803,
+ 0x02f40611,
+/* 0x0801: ctx_xfer_pre */
+ 0x10f7f00d,
+ 0x067221f5,
+/* 0x080b: ctx_xfer_pre_load */
+ 0xf01c11f4,
+ 0x21f502f7,
+ 0x21f50631,
+ 0x21f50640,
+ 0xf4bd0652,
+ 0x063121f5,
+ 0x069221f5,
+/* 0x0824: ctx_xfer_exec */
+ 0xf1160198,
+ 0xb6041427,
+ 0x20d00624,
+ 0x00e7f100,
+ 0x41e3f0a5,
+ 0xf4021fb9,
+ 0xe0b68d21,
+ 0x01fcf004,
+ 0xb6022cf0,
+ 0xf2fd0124,
+ 0x8d21f405,
+ 0x4afc17f1,
+ 0xf00213f0,
+ 0x12d00c27,
+ 0x0721f500,
+ 0xfc27f102,
+ 0x0223f047,
+ 0xf00020d0,
+ 0x20b6012c,
+ 0x0012d003,
+ 0xf001acf0,
+ 0xb7f006a5,
+ 0x140c9800,
+ 0xf0150d98,
+ 0x21f500e7,
+ 0xa7f0015c,
+ 0x0321f508,
+ 0x0721f501,
+ 0x2201f402,
+ 0xf40ca7f0,
+ 0x17f1c921,
+ 0x14b60a10,
+ 0x0527f006,
+/* 0x08ab: ctx_xfer_post_save_wait */
+ 0xcf0012d0,
+ 0x22fd0012,
+ 0xfa1bf405,
+/* 0x08b7: ctx_xfer_post */
+ 0xf02e02f4,
+ 0x21f502f7,
+ 0xf4bd0631,
+ 0x067221f5,
+ 0x022621f5,
+ 0x064021f5,
+ 0x21f5f4bd,
+ 0x11f40631,
+ 0x80019810,
+ 0xf40511fd,
+ 0x21f5070b,
+/* 0x08e2: ctx_xfer_no_post_mmio */
+/* 0x08e2: ctx_xfer_done */
+ 0x00f807b1,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc
similarity index 100%
rename from drivers/gpu/drm/nouveau/nvc0_graph.fuc
rename to drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc
new file mode 100644
index 0000000000000000000000000000000000000000..f16a5d53319debf9954a317015495980d5b8adda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc
@@ -0,0 +1,400 @@
+/* fuc microcode util functions for nve0 PGRAPH
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
+define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
+
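The mmctx_data macro above packs one mmio-list word as ((count - 1) << 26) | address, and queue_init reserves 72 bytes (GET/PUT words plus eight 8-byte entries); both can be checked against the generated .fuc.h tables earlier in this patch. A quick C verification of the encoding:

#include <stdint.h>
#include <assert.h>

/* C equivalent of: define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)') */
static uint32_t mmctx_data(uint32_t addr, uint32_t count)
{
	return (count - 1) << 26 | addr;
}

int main(void)
{
	/* values visible in the generated nve0_grhub_data[] table */
	assert(mmctx_data(0x17e91c, 2) == 0x0417e91c);
	assert(mmctx_data(0x404010, 7) == 0x18404010);

	/* queue_init: GET/PUT words plus 8 entries of 2 words each */
	assert((2 * 4) + ((8 * 4) * 2) == 72);
	return 0;
}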
+ifdef(`include_code', `
+// Error codes
+define(`E_BAD_COMMAND', 0x01)
+define(`E_CMD_OVERFLOW', 0x02)
+
+// Util macros to help with debugging ucode hangs etc
+define(`T_WAIT', 0)
+define(`T_MMCTX', 1)
+define(`T_STRWAIT', 2)
+define(`T_STRINIT', 3)
+define(`T_AUTO', 4)
+define(`T_CHAN', 5)
+define(`T_LOAD', 6)
+define(`T_SAVE', 7)
+define(`T_LCHAN', 8)
+define(`T_LCTXH', 9)
+
+define(`trace_set', `
+ mov $r8 0x83c
+ shl b32 $r8 6
+ clear b32 $r9
+ bset $r9 $1
+ iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
+')
+
+define(`trace_clr', `
+ mov $r8 0x85c
+ shl b32 $r8 6
+ clear b32 $r9
+ bset $r9 $1
+ iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
+')
+
+// queue_put - add request to queue
+//
+// In : $r13 queue pointer
+// $r14 command
+// $r15 data
+//
+queue_put:
+ // make sure we have space..
+ ld b32 $r8 D[$r13 + 0x0] // GET
+ ld b32 $r9 D[$r13 + 0x4] // PUT
+ xor $r8 8
+ cmpu b32 $r8 $r9
+ bra ne #queue_put_next
+ mov $r15 E_CMD_OVERFLOW
+ call #error
+ ret
+
+ // store cmd/data on queue
+ queue_put_next:
+ and $r8 $r9 7
+ shl b32 $r8 3
+ add b32 $r8 $r13
+ add b32 $r8 8
+ st b32 D[$r8 + 0x0] $r14
+ st b32 D[$r8 + 0x4] $r15
+
+ // update PUT
+ add b32 $r9 1
+ and $r9 0xf
+ st b32 D[$r13 + 0x4] $r9
+ ret
+
+// queue_get - fetch request from queue
+//
+// In : $r13 queue pointer
+//
+// Out: $p1 clear on success (data available)
+// $r14 command
+// $r15 data
+//
+queue_get:
+ bset $flags $p1
+ ld b32 $r8 D[$r13 + 0x0] // GET
+ ld b32 $r9 D[$r13 + 0x4] // PUT
+ cmpu b32 $r8 $r9
+ bra e #queue_get_done
+ // fetch first cmd/data pair
+ and $r9 $r8 7
+ shl b32 $r9 3
+ add b32 $r9 $r13
+ add b32 $r9 8
+ ld b32 $r14 D[$r9 + 0x0]
+ ld b32 $r15 D[$r9 + 0x4]
+
+ // update GET
+ add b32 $r8 1
+ and $r8 0xf
+ st b32 D[$r13 + 0x0] $r8
+ bclr $flags $p1
+queue_get_done:
+ ret
+
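queue_put and queue_get implement a classic 8-entry ring with free-running 4-bit indices: the queue is empty when GET == PUT, and full when PUT == GET ^ 8, which is why queue_put xors GET with 8 before the compare. A C rendering of the same logic (a sketch only; in the ucode, the full case raises E_CMD_OVERFLOW via #error, and the empty case leaves $p1 set):

#include <stdint.h>
#include <stdbool.h>

struct fuc_queue {
	uint32_t get, put;	/* free-running, masked to 0..15 */
	struct { uint32_t cmd, data; } ent[8];
};

static bool fuc_queue_put(struct fuc_queue *q, uint32_t cmd, uint32_t data)
{
	if ((q->get ^ 8) == q->put)
		return false;			/* full: E_CMD_OVERFLOW */
	q->ent[q->put & 7].cmd  = cmd;
	q->ent[q->put & 7].data = data;
	q->put = (q->put + 1) & 0xf;
	return true;
}

static bool fuc_queue_get(struct fuc_queue *q, uint32_t *cmd, uint32_t *data)
{
	if (q->get == q->put)
		return false;			/* empty: $p1 stays set */
	*cmd  = q->ent[q->get & 7].cmd;
	*data = q->ent[q->get & 7].data;
	q->get = (q->get + 1) & 0xf;
	return true;
}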
+// nv_rd32 - read 32-bit value from nv register
+//
+// In : $r14 register
+// Out: $r15 value
+//
+nv_rd32:
+ mov $r11 0x728
+ shl b32 $r11 6
+ mov b32 $r12 $r14
+ bset $r12 31 // MMIO_CTRL_PENDING
+ iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_rd32_wait:
+ iord $r12 I[$r11 + 0x000]
+ xbit $r12 $r12 31
+ bra ne #nv_rd32_wait
+ mov $r10 6 // DONE_MMIO_RD
+ call #wait_doneo
+ iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
+ ret
+
+// nv_wr32 - write 32-bit value to nv register
+//
+// In : $r14 register
+// $r15 value
+//
+nv_wr32:
+ mov $r11 0x728
+ shl b32 $r11 6
+ iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
+ mov b32 $r12 $r14
+ bset $r12 31 // MMIO_CTRL_PENDING
+ bset $r12 30 // MMIO_CTRL_WRITE
+ iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_wr32_wait:
+ iord $r12 I[$r11 + 0x000]
+ xbit $r12 $r12 31
+ bra ne #nv_wr32_wait
+ ret
+
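Both helpers drive the same indirect-MMIO interface: write the target register to MMIO_CTRL with bit 31 (PENDING) set, plus bit 30 (WRITE) and MMIO_WRVAL for writes, then spin until PENDING clears (reads additionally wait on DONE_MMIO_RD before fetching MMIO_RDVAL). A condensed C view of the write path; io_wr/io_rd are stand-ins for the falcon's iowr/iord, not a real API:

#include <stdint.h>

#define MMIO_CTRL_PENDING (1u << 31)
#define MMIO_CTRL_WRITE   (1u << 30)

extern void io_wr(uint32_t off, uint32_t val);	/* stand-in for iowr */
extern uint32_t io_rd(uint32_t off);		/* stand-in for iord  */

#define MMIO_CTRL  0x1ca00		/* 0x728 << 6 */
#define MMIO_WRVAL (MMIO_CTRL + 0x200)

static void nv_wr32_c(uint32_t reg, uint32_t val)
{
	io_wr(MMIO_WRVAL, val);
	io_wr(MMIO_CTRL, reg | MMIO_CTRL_PENDING | MMIO_CTRL_WRITE);
	while (io_rd(MMIO_CTRL) & MMIO_CTRL_PENDING)
		;	/* busy-wait, as nv_wr32_wait does */
}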
+// (re)set watchdog timer
+//
+// In : $r15 timeout
+//
+watchdog_reset:
+ mov $r8 0x430
+ shl b32 $r8 6
+ bset $r15 31
+ iowr I[$r8 + 0x000] $r15
+ ret
+
+// clear watchdog timer
+watchdog_clear:
+ mov $r8 0x430
+ shl b32 $r8 6
+ iowr I[$r8 + 0x000] $r0
+ ret
+
+// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
+//
+// In : $r10 bit to wait on
+//
+define(`wait_done', `
+$1:
+ trace_set(T_WAIT);
+ mov $r8 0x818
+ shl b32 $r8 6
+ iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit
+ wait_done_$1:
+ mov $r8 0x400
+ shl b32 $r8 6
+ iord $r8 I[$r8 + 0x000] // DONE
+ xbit $r8 $r8 $r10
+ bra $2 #wait_done_$1
+ trace_clr(T_WAIT)
+ ret
+')
+wait_done(wait_donez, ne)
+wait_done(wait_doneo, e)
+
+// mmctx_size - determine size of a mmio list transfer
+//
+// In : $r14 mmio list head
+// $r15 mmio list tail
+// Out: $r15 transfer size (in bytes)
+//
+mmctx_size:
+ clear b32 $r9
+ nv_mmctx_size_loop:
+ ld b32 $r8 D[$r14]
+ shr b32 $r8 26
+ add b32 $r8 1
+ shl b32 $r8 2
+ add b32 $r9 $r8
+ add b32 $r14 4
+ cmpu b32 $r14 $r15
+ bra ne #nv_mmctx_size_loop
+ mov b32 $r15 $r9
+ ret
+
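mmctx_size walks the list and, for every packed word, adds ((word >> 26) + 1) * 4 bytes; that is, each mmctx_data(addr, n) entry contributes n 32-bit registers to the transfer. The equivalent C:

#include <stdint.h>

/* byte size of an mmio-list transfer, matching mmctx_size */
static uint32_t mmctx_size_c(const uint32_t *head, const uint32_t *tail)
{
	uint32_t size = 0;

	for (const uint32_t *p = head; p != tail; p++)
		size += ((*p >> 26) + 1) * 4;	/* regs per entry * 4 */
	return size;
}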
+// mmctx_xfer - execute a list of mmio transfers
+//
+// In : $r10 flags
+// bit 0: direction (0 = save, 1 = load)
+// bit 1: set if first transfer
+// bit 2: set if last transfer
+// $r11 base
+// $r12 mmio list head
+// $r13 mmio list tail
+// $r14 multi_stride
+// $r15 multi_mask
+//
+mmctx_xfer:
+ trace_set(T_MMCTX)
+ mov $r8 0x710
+ shl b32 $r8 6
+ clear b32 $r9
+ or $r11 $r11
+ bra e #mmctx_base_disabled
+ iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
+ bset $r9 0 // BASE_EN
+ mmctx_base_disabled:
+ or $r14 $r14
+ bra e #mmctx_multi_disabled
+ iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
+ iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
+ bset $r9 1 // MULTI_EN
+ mmctx_multi_disabled:
+ add b32 $r8 0x100
+
+ xbit $r11 $r10 0
+ shl b32 $r11 16 // DIR
+ bset $r11 12 // QLIMIT = 0x10
+ xbit $r14 $r10 1
+ shl b32 $r14 17
+ or $r11 $r14 // START_TRIGGER
+ iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+
+ // loop over the mmio list, and send requests to the hw
+ mmctx_exec_loop:
+ // wait for space in mmctx queue
+ mmctx_wait_free:
+ iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+ and $r14 0x1f
+ bra e #mmctx_wait_free
+
+ // queue up an entry
+ ld b32 $r14 D[$r12]
+ or $r14 $r9
+ iowr I[$r8 + 0x300] $r14
+ add b32 $r12 4
+ cmpu b32 $r12 $r13
+ bra ne #mmctx_exec_loop
+
+ xbit $r11 $r10 2
+ bra ne #mmctx_stop
+ // wait for queue to empty
+ mmctx_fini_wait:
+ iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ and $r11 0x1f
+ cmpu b32 $r11 0x10
+ bra ne #mmctx_fini_wait
+ mov $r10 2 // DONE_MMCTX
+ call #wait_donez
+ bra #mmctx_done
+ mmctx_stop:
+ xbit $r11 $r10 0
+ shl b32 $r11 16 // DIR
+ bset $r11 12 // QLIMIT = 0x10
+ bset $r11 18 // STOP_TRIGGER
+ iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+ mmctx_stop_wait:
+ // wait for STOP_TRIGGER to clear
+ iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ xbit $r11 $r11 18
+ bra ne #mmctx_stop_wait
+ mmctx_done:
+ trace_clr(T_MMCTX)
+ ret
+
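The control word assembled at the top of mmctx_xfer uses bit 16 for direction, bit 12 for QLIMIT (0x10), bit 17 for START_TRIGGER and bit 18 for STOP_TRIGGER; the BASE_EN/MULTI_EN bits (0/1) are instead folded into each queued list entry via $r9. A sketch of the word construction, with names taken from the comments above:

#include <stdint.h>

#define MMCTX_QLIMIT	(1u << 12)	/* QLIMIT = 0x10 */
#define MMCTX_DIR_LOAD	(1u << 16)	/* clear = save  */
#define MMCTX_START	(1u << 17)
#define MMCTX_STOP	(1u << 18)

/* MMCTX_CTRL value as built before the exec loop */
static uint32_t mmctx_ctrl(int load, int first)
{
	uint32_t ctrl = MMCTX_QLIMIT;

	if (load)
		ctrl |= MMCTX_DIR_LOAD;
	if (first)
		ctrl |= MMCTX_START;	/* START_TRIGGER on first chunk */
	return ctrl;
}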
+// Wait for DONE_STRAND
+//
+strand_wait:
+ push $r10
+ mov $r10 2
+ call #wait_donez
+ pop $r10
+ ret
+
+// unknown - call before issuing strand commands
+//
+strand_pre:
+ mov $r8 0x4afc
+ sethi $r8 0x20000
+ mov $r9 0xc
+ iowr I[$r8] $r9
+ call #strand_wait
+ ret
+
+// unknown - call after issuing strand commands
+//
+strand_post:
+ mov $r8 0x4afc
+ sethi $r8 0x20000
+ mov $r9 0xd
+ iowr I[$r8] $r9
+ call #strand_wait
+ ret
+
+// Selects strand set?!
+//
+// In: $r14 id
+//
+strand_set:
+ mov $r10 0x4ffc
+ sethi $r10 0x20000
+ sub b32 $r11 $r10 0x500
+ mov $r12 0xf
+ iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
+ mov $r12 0xb
+ iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
+ call #strand_wait
+ iowr I[$r10 + 0x000] $r14 // 0x93c =
+ mov $r12 0xa
+ iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
+ call #strand_wait
+ ret
+
+// Initialise strand context data
+//
+// In : $r15 context base
+// Out: $r15 context size (in bytes)
+//
+// Strand set(?) 3 is currently hardcoded.
+//
+strand_ctx_init:
+ trace_set(T_STRINIT)
+ call #strand_pre
+ mov $r14 3
+ call #strand_set
+ mov $r10 0x46fc
+ sethi $r10 0x20000
+ add b32 $r11 $r10 0x400
+ iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
+ mov $r12 1
+ iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
+ call #strand_wait
+ sub b32 $r12 $r0 1
+ iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
+ mov $r12 2
+ iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
+ call #strand_wait
+ call #strand_post
+
+	// read the size of each strand, and poke the context offset of
+	// each into STRAND_{SAVE,LOAD}_SWBASE now so we don't need to
+	// worry about it later.
+ mov $r8 0x880
+ shl b32 $r8 6
+ iord $r9 I[$r8 + 0x000] // STRANDS
+ add b32 $r8 0x2200
+ shr b32 $r14 $r15 8
+ ctx_init_strand_loop:
+ iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
+ iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
+ iord $r10 I[$r8 + 0x200] // STRAND_SIZE
+ shr b32 $r10 6
+ add b32 $r10 1
+ add b32 $r14 $r10
+ add b32 $r8 4
+ sub b32 $r9 1
+ bra ne #ctx_init_strand_loop
+
+ shl b32 $r14 8
+ sub b32 $r15 $r14 $r15
+ trace_clr(T_STRINIT)
+ ret
+')
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
new file mode 100644
index 0000000000000000000000000000000000000000..6185282484578658ceae6a6e5430f68ed6689f11
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -0,0 +1,1387 @@
+/*
+ * Copyright 2007 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include